#!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.

#In this example I will use the GridWorld class to generate a 3x4 world
#in which the cleaning robot will move. Using the SARSA algorithm I
#will estimate the state-action matrix.
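#SARSA update rule (on-policy TD control):
#Q(s,a) <- Q(s,a) + alpha * [reward + gamma * Q(s',a') - Q(s,a)]
#where a' is the action selected in s' by the same behaviour policy.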
import numpy as np
from gridworld import GridWorld
def update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
action, new_action, reward, alpha, gamma):
    '''Return the updated state-action matrix
    @param state_action_matrix the matrix before the update
    @param visit_counter_matrix the visit counts for each state-action pair
    @param observation the state observed at t
    @param new_observation the state observed at t+1
    @param action the action at t
    @param new_action the action at t+1
    @param reward the reward observed after the action
    @param alpha the step size (learning rate)
    @param gamma the discount factor
    @return the updated state-action matrix
    '''
#Getting the values of Q at t and at t+1
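    #The (row, col) state is flattened row-major into one of the 12 columns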
col = observation[1] + (observation[0]*4)
q = state_action_matrix[action, col]
col_t1 = new_observation[1] + (new_observation[0]*4)
    q_t1 = state_action_matrix[int(new_action), col_t1]
    #Calculate a decaying alpha based on how many times this
    #state-action pair has been visited
    alpha_counted = 1.0 / (1.0 + visit_counter_matrix[action, col])
#Applying the update rule
#Here you can change "alpha" with "alpha_counted" if you want
#to take into account how many times that particular state-action
#pair has been visited until now.
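    #Q(s,a) <- Q(s,a) + alpha * (reward + gamma * Q(s',a') - Q(s,a))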
    state_action_matrix[action, col] = q + alpha * (reward + gamma * q_t1 - q)
return state_action_matrix
def update_visit_counter(visit_counter_matrix, observation, action):
'''Update the visit counter
Counting how many times a state-action pair has been
visited. This information can be used during the update.
@param visit_counter_matrix a matrix initialised with zeros
@param observation the state observed
    @param action the action taken
    @return the updated visit counter matrix
    '''
col = observation[1] + (observation[0]*4)
    visit_counter_matrix[action, col] += 1.0
return visit_counter_matrix
def update_policy(policy_matrix, state_action_matrix, observation):
    '''Return the updated policy matrix
    @param policy_matrix the matrix before the update
    @param state_action_matrix the state-action matrix
    @param observation the state observed at t
    @return the updated policy matrix
    '''
col = observation[1] + (observation[0]*4)
#Getting the index of the action with the highest utility
best_action = np.argmax(state_action_matrix[:, col])
#Updating the policy
policy_matrix[observation[0], observation[1]] = best_action
return policy_matrix
def return_epsilon_greedy_action(policy_matrix, observation, epsilon=0.1):
    '''Return an action choosing it with epsilon-greedy
    @param policy_matrix the matrix before the update
    @param observation the state observed at t
    @param epsilon the value used for computing the probabilities
    @return the action sampled from the epsilon-greedy distribution
    '''
tot_actions = int(np.nanmax(policy_matrix) + 1)
action = int(policy_matrix[observation[0], observation[1]])
non_greedy_prob = epsilon / tot_actions
greedy_prob = 1 - epsilon + non_greedy_prob
    weight_array = np.full(tot_actions, non_greedy_prob)
weight_array[action] = greedy_prob
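    #Example: with epsilon=0.1 and 4 actions the greedy action is drawn
    #with probability 1 - 0.1 + 0.1/4 = 0.925 and each of the other
    #actions with probability 0.1/4 = 0.025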
    #Return a plain int instead of a one-element array
    return int(np.random.choice(tot_actions, p=weight_array))
def print_policy(policy_matrix):
    '''Print the policy using specific symbols.
* terminal state
^ > v < up, right, down, left
# obstacle
'''
shape = policy_matrix.shape
policy_string = ""
for row in range(shape[0]):
for col in range(shape[1]):
if(policy_matrix[row,col] == -1): policy_string += " * "
elif(policy_matrix[row,col] == 0): policy_string += " ^ "
elif(policy_matrix[row,col] == 1): policy_string += " > "
elif(policy_matrix[row,col] == 2): policy_string += " v "
elif(policy_matrix[row,col] == 3): policy_string += " < "
elif(np.isnan(policy_matrix[row,col])): policy_string += " # "
policy_string += '\n'
print(policy_string)
def return_decayed_value(starting_value, global_step, decay_step):
"""Returns the decayed value.
decayed_value = starting_value * decay_rate ^ (global_step / decay_steps)
@param starting_value the value before decaying
@param global_step the global step to use for decay (positive integer)
@param decay_step the step at which the value is decayed
"""
decayed_value = starting_value * np.power(0.1, (global_step/decay_step))
return decayed_value
def main():
env = GridWorld(3, 4)
#Define the state matrix
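    #(1 marks a terminal state, -1 marks the obstacle)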
state_matrix = np.zeros((3,4))
state_matrix[0, 3] = 1
state_matrix[1, 3] = 1
state_matrix[1, 1] = -1
print("State Matrix:")
print(state_matrix)
#Define the reward matrix
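    #(-0.04 living penalty on every step, +1 and -1 for the two terminal states)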
reward_matrix = np.full((3,4), -0.04)
reward_matrix[0, 3] = 1
reward_matrix[1, 3] = -1
print("Reward Matrix:")
print(reward_matrix)
#Define the transition matrix
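    #Each row is an action (up, right, down, left); each entry gives the
    #probability of actually moving in that direction: 80% the intended
    #direction, 10% to each perpendicular side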
transition_matrix = np.array([[0.8, 0.1, 0.0, 0.1],
[0.1, 0.8, 0.1, 0.0],
[0.0, 0.1, 0.8, 0.1],
[0.1, 0.0, 0.1, 0.8]])
#Random policy
policy_matrix = np.random.randint(low=0, high=4, size=(3, 4)).astype(np.float32)
    policy_matrix[1,1] = np.nan #NaN for the obstacle at (1,1)
policy_matrix[0,3] = policy_matrix[1,3] = -1 #No action for the terminal states
print("Policy Matrix:")
print(policy_matrix)
env.setStateMatrix(state_matrix)
env.setRewardMatrix(reward_matrix)
env.setTransitionMatrix(transition_matrix)
state_action_matrix = np.zeros((4,12))
visit_counter_matrix = np.zeros((4,12))
gamma = 0.999
alpha = 0.001 #constant step size
tot_epoch = 5000000
print_epoch = 1000
for epoch in range(tot_epoch):
epsilon = return_decayed_value(0.1, epoch, decay_step=100000)
#Reset and return the first observation
observation = env.reset(exploring_starts=True)
is_starting = True
for step in range(1000):
            #Greedy alternative: take the action directly from the policy matrix
            #action = policy_matrix[observation[0], observation[1]]
            #Take the action using epsilon-greedy with the decayed epsilon
            action = return_epsilon_greedy_action(policy_matrix, observation, epsilon=epsilon)
if(is_starting):
action = np.random.randint(0, 4)
is_starting = False
#Move one step in the environment and get obs and reward
new_observation, reward, done = env.step(action)
            #SARSA is on-policy: the action used in the TD target is drawn
            #from the same epsilon-greedy behaviour policy, not taken
            #greedily from the policy matrix
            new_action = return_epsilon_greedy_action(policy_matrix, new_observation, epsilon=epsilon)
#Updating the state-action matrix
state_action_matrix = update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
action, new_action, reward, alpha, gamma)
#Updating the policy
policy_matrix = update_policy(policy_matrix, state_action_matrix, observation)
#Increment the visit counter
visit_counter_matrix = update_visit_counter(visit_counter_matrix, observation, action)
observation = new_observation
if done: break
if(epoch % print_epoch == 0):
print("")
print("Epsilon: " + str(epsilon))
print("State-Action matrix after " + str(epoch+1) + " iterations:")
print(state_action_matrix)
print("Policy matrix after " + str(epoch+1) + " iterations:")
print_policy(policy_matrix)
#Time to check the utility matrix obtained
print("State-Action matrix after " + str(tot_epoch) + " iterations:")
print(state_action_matrix)
print("Policy matrix after " + str(tot_epoch) + " iterations:")
print_policy(policy_matrix)
if __name__ == "__main__":
main()