VTRBoyan_OLS.py
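"""
Linear Dyna on the Boyan chain.

The value-function weights theta are learned with TD(0) from real experience,
while a linear model of the environment is estimated online: F maps a feature
vector to a predicted next feature vector and f maps it to a predicted reward.
Dyna-style planning steps then update theta with experience simulated from
(F, f). The reported loss is the RMSE between the analytical state values and
the values predicted by the linear approximation.
"""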
import numpy as np
from tqdm import tqdm
from envs import Boyan
import matplotlib.pyplot as plt


def policy(state):
    """Uniform random policy for the Boyan chain environment."""
    if state <= 2:
        action = 0
    else:
        action = np.random.choice([0, 1], p=[0.5, 0.5])
    return action

class LinearDyna(object):
    def __init__(self, env, K, steps, alpha_l, alpha_p, tau, gamma, feature_size, B):
        self.env = env
        self.K = K
        self.steps = steps
        self.alpha_l = alpha_l
        self.alpha_p = alpha_p
        self.tau = tau
        self.gamma = gamma
        self.B = B
        self.eps = 0.0
        self.num_actions = 2
        self.feature_size = feature_size
        self.theta = np.zeros(self.feature_size)
        self.F = np.zeros((self.feature_size, self.feature_size))
        self.f = np.zeros(self.feature_size)
        self.Dinv = 100 * np.identity(self.feature_size)
        self.I = np.identity(self.feature_size)
        self.Phi = np.zeros((self.feature_size, self.feature_size))
        self.PhiPhi_ = np.zeros((self.feature_size, self.feature_size))
        self.buffer = []
        self.model_loss = []
        #self.P_dyna = np.load('dyna_model.npy')

    def get_phi(self, state):
        '''
        Computes the Boyan chain feature representation of a state;
        the returned vector has length feature_size.
        '''
        # Gets the coded features
        feature_encoder = Boyan.BoyanRep()
        phi = feature_encoder.encode(state)
        return phi

    def Sherman_Morrison(self, vec, Mat):
        '''
        Sherman-Morrison rank-one update: given the current inverse Mat,
        returns the inverse of (Mat^-1 + vec vec^T) without an explicit
        matrix inversion.
        '''
        Matvec = np.dot(Mat, vec)
        vec_vec_out = np.outer(Matvec, vec)
        top = np.matmul(vec_vec_out, Mat)
        vec_mat = np.dot(vec, Mat)
        bottom = 1 + np.inner(vec_mat, vec)
        return Mat - top / bottom

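    # The two updates below apply rank-one corrections to the learned linear model:
    # update_F nudges F so that F @ phi moves toward the observed next features phi_,
    # and update_f nudges f so that phi . f moves toward the observed reward r.
    # Both scale the correction by x = Dinv @ phi, the feature vector weighted by the
    # running inverse maintained via Sherman_Morrison. (Note: update_F is currently not
    # called from update(), which instead recomputes F from the accumulated
    # least-squares statistics.)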
    def update_F(self):
        top = self.phi_ - np.dot(self.F, self.phi)
        bottom = 1 + np.inner(self.x, self.phi)
        frac = top / bottom
        mat_update = np.outer(frac, self.x)
        self.F = self.F + mat_update

    def update_f(self, r):
        top = r - np.inner(self.phi, self.f)
        bottom = 1 + np.inner(self.phi, self.x)
        frac = top / bottom
        vec_update = frac * self.x
        self.f = self.f + vec_update

    def act(self, s):
        a = policy(s)
        return a

    # The least-squares block inside update() below computes the exact least-squares
    # update of F. Currently does not work.
    def update(self, s, a, r, s_, done):
        '''
        Updates theta, the estimated transition model F, and the estimated reward model f.
        '''
        # Computes the feature vector for the current state s
        self.phi = self.get_phi(s)
        # Computes the feature vector for the next state s_
        self.phi_ = self.get_phi(s_)
        # Updates theta with a semi-gradient TD(0) step
        self.theta = self.theta + self.alpha_l * (r + self.gamma * np.inner(self.phi_, self.theta)
                                                  - np.inner(self.phi, self.theta)) * self.phi
        # Rank-one Sherman-Morrison update of the running inverse Dinv
        self.Dinv = self.Sherman_Morrison(self.phi, self.Dinv)
        self.x = np.dot(self.Dinv, self.phi)
        # Accumulates the sufficient statistics for the least-squares model of F
        self.Phi = self.Phi + np.outer(self.phi, self.phi)
        self.PhiPhi_ = self.PhiPhi_ + np.outer(self.phi, self.phi_)
        if np.linalg.det(self.Phi) != 0.0:
            theta_outer = np.outer(self.theta, self.theta)
            I = np.identity(self.feature_size)
            theta_inv = np.linalg.inv(theta_outer + 0.001 * I)
            phi_inv = np.linalg.inv(self.Phi)
            first = np.matmul(phi_inv, self.PhiPhi_)
            second = np.matmul(first, theta_outer)
            final = np.matmul(second, theta_inv)
            self.F = final
            #self.model_loss.append(np.linalg.norm(self.P_dyna - self.F))
        self.update_f(r)
        # Runs the planning step.
        self.plan()

    def plan(self):
        '''
        Dyna-style planning: updates the theta estimate with simulated experience
        drawn from the learned model (F, f).
        '''
        # Initializes the planning weights to a copy of the current theta estimate
        theta_tilde = self.theta.copy()
        # Performs the planning update tau times
        for p in range(self.tau):
            # Below are different ways to sample a state s for planning.
            # Sample s uniformly from the space of all states:
            #position = np.random.uniform(-1.2, 0.6)
            #velocity = np.random.uniform(-0.07, 0.07)
            #active_tiles_tilde = self.tc.get_tiles(position, velocity)
            #phi_tilde = self.get_phi(active_tiles_tilde)
            # Sample s from a buffer that stores all observed states:
            row = np.random.randint(len(self.buffer))
            sample_state = self.buffer[row]
            phi_tilde = self.get_phi(sample_state)
            # Sample s from the support, i.e. sample a unit vector as the state:
            # row = np.random.randint(self.feature_size)
            # phi_tilde = self.I[row]
            # Predicted next-state features under the learned model
            phi_tilde_ = np.dot(self.F, phi_tilde)
            # Predicted reward under the learned model
            r_tilde = np.inner(phi_tilde, self.f)
            # Updates theta_tilde using the simulated experience
            theta_tilde += self.alpha_p * (r_tilde + self.gamma * np.inner(theta_tilde, phi_tilde_)
                                           - np.inner(theta_tilde, phi_tilde)) * phi_tilde
        # Updates the current estimate of theta to the post-planning estimate
        self.theta = theta_tilde

    def update_state_buffer(self, s):
        '''
        Appends the current state s to the buffer.
        '''
        self.buffer.append(s)

    def get_val(self):
        """
        Computes the true analytical value of each state by solving the
        Bellman equation directly.
        """
        P, R = Boyan.getPR()
        I = np.identity(98)
        value_states = np.linalg.inv(I - self.gamma * P) @ R
        return value_states

    def run(self):
        '''
        Runs the RL algorithm and returns the per-episode loss.
        '''
        print("Linear-Dyna")
        true_value_states = self.get_val()
        feature_encoder = Boyan.BoyanRep()
        feature_map = feature_encoder.getmap()
        loss = []
        for k in tqdm(range(1, self.K + 1)):
            s = self.env.reset()
            done = None
            step = 0
            while not done:
                step += 1
                if s == 0:
                    s = self.env.reset()
                self.update_state_buffer(s)
                a = self.act(s)
                r, s_, done = self.env.step(a)
                self.update(s, a, r, s_, done)
                s = s_
            # Error between the analytical values and the current linear value estimate
            L = np.linalg.norm(true_value_states - np.dot(feature_map, self.theta)) / 10
            loss.append(L)
            print(L)
        #np.save('vtr_model', self.F)
        #x = np.array(self.model_loss)
        #np.save('model_loss', x)
        return loss


# Number of episodes
K = 500
# Number of runs
runs = 1
# The environment
env = Boyan.Boyan()
feature_size = 25
# Max number of interactions with the environment before a reset, chosen according to Hengshuai's work
steps = 98
# Learning rate for theta
alpha_l = 0.25
# Learning rate for theta_tilde; should scale in some way with tau, the number of planning steps
alpha_p = 0.001
# Number of planning steps
tau = 5
# The discount factor, chosen according to Hengshuai's work
gamma = 1.0
# The learning rate for updating the learned models F and f, chosen according to Hengshuai's work
B = 0.01
# A matrix that stores the loss for each episode within each run
loss = np.zeros((runs, K))
for i in tqdm(range(runs)):
    agent = LinearDyna(env, K, steps, alpha_l, alpha_p, tau, gamma, feature_size, B)
    loss[i, :] = agent.run()
# Averages the per-episode loss across runs
results = np.mean(loss, axis=0)
np.save('results_VTR_500', results)
plt.plot(results)
plt.xlabel("Number of Episodes")
plt.ylabel("RMSE (between analytical and predicted values)")
plt.show()
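
# Running this script trains the agent for K episodes in each of `runs` runs,
# saves the run-averaged learning curve to results_VTR_500.npy, and plots the
# RMSE against the episode number. It assumes the local `envs` package providing
# the Boyan module (Boyan, BoyanRep, getPR) is importable.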