# Taxi-v2_LQR
[TOC]
 
 

 
 
```python=
env.P[0]
```
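In the stock gym Taxi environment, `env.P[state][action]` is a list of `(probability, next_state, reward, done)` tuples; Taxi is deterministic, so each list holds exactly one tuple. A minimal sketch of the setup assumed above (using the `Taxi-v2` registration):
```python=
import gym

env = gym.make("Taxi-v2").unwrapped   # .unwrapped exposes the transition table env.P
print(env.P[0][0])   # e.g. [(1.0, 100, -1, False)]: action 0 (south) from state 0 leads to state 100
```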
 
 

 
 
```python=
state = env.encode(4, 1, 2, 0)  # (taxi row, taxi column, passenger index, destination index)
```
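`encode` packs the 4-tuple into a single index with mixed radix (5, 5, 5, 4), matching the `encode()` method in taxi.py below; for `(4, 1, 2, 0)` this is $((4 \cdot 5 + 1) \cdot 5 + 2) \cdot 4 + 0 = 428$. A quick check (a sketch, reusing the `env` above):
```python=
state = env.encode(4, 1, 2, 0)
print(state)                      # 428
print(list(env.decode(state)))    # [4, 1, 2, 0] -- decode() inverts encode()
```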
 
The aim is to fit a linear model of the transition dynamics,

$x(k+1) = A\,x(k) + B\,u(k)$

where the state $x$ is the 4-vector (taxi row, taxi column, passenger index, destination index) and the input $u \in \{0, 1, 2, 3, 4, 5\}$ is the action index.
 
 
For example, from state $(0,0,0,0)$ (taxi at R, passenger waiting at R, destination R), the linear model should reproduce the true next state under each of the six actions:

$\begin{bmatrix} 1\\ 0\\ 0\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[0]$ (south)

$\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[1]$ (north: already at the top row, so the state is unchanged)

$\begin{bmatrix} 0\\ 1\\ 0\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[2]$ (east)

$\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[3]$ (west: blocked at the left edge, so the state is unchanged)

$\begin{bmatrix} 0\\ 0\\ 4\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[4]$ (pickup: the passenger index becomes 4, "in the taxi")

$\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} = A\begin{bmatrix} 0\\ 0\\ 0\\ 0 \end{bmatrix} + B\,u[5]$ (dropoff: illegal here, so the state is unchanged)
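These transitions can be reproduced from the unmodified environment, where `env.P` still has the stock `(prob, next_state, reward, done)` format (a sketch):
```python=
import numpy as np

x0 = np.array(list(env.decode(0)))            # state 0 is (0, 0, 0, 0)
for a in range(6):
    _, next_state, _, _ = env.P[0][a][0]
    print(a, x0, "->", np.array(list(env.decode(next_state))))
# action 0 (south)  -> [1 0 0 0]
# action 2 (east)   -> [0 1 0 0]
# action 4 (pickup) -> [0 0 4 0]
# actions 1, 3, 5   -> [0 0 0 0]
```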
 
 
States 0~100, 600 data records in total; the resulting least-squares fit:
 
A = $\begin{bmatrix}
0.88890033 & 0.05751688 & -0.00554618 & 0.04867368 \\
0.00159491 & 1.00618194 & -0.00132557 & -0.01190085 \\
-0.01216476 & -0.00427677 & 0.99196948 & 0.00688978 \\
0.00751767 & -0.00964972 & -0.00964972 & 0.9721107 \end{bmatrix}$
B = $\begin{bmatrix}
-0.01279649 \\
0.00763112 \\
0.01480316 \\
0.01995349
\end{bmatrix}$
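A quick check of this fit against the first example above, the true transition $(0,0,0,0) \rightarrow (1,0,0,0)$ under the south action (a sketch using the printed matrices):
```python=
import numpy as np

A = np.array([[ 0.88890033,  0.05751688, -0.00554618,  0.04867368],
              [ 0.00159491,  1.00618194, -0.00132557, -0.01190085],
              [-0.01216476, -0.00427677,  0.99196948,  0.00688978],
              [ 0.00751767, -0.00964972, -0.00964972,  0.9721107 ]])
B = np.array([-0.01279649, 0.00763112, 0.01480316, 0.01995349])

x = np.zeros(4)           # state (0, 0, 0, 0)
u = 0                     # action 0: south
print(A.dot(x) + B * u)   # predicts [0 0 0 0]; the true next state is [1 0 0 0]
```
A single linear map struggles with these discrete, action-dependent jumps, which is what the per-sample error plots at the end of this note measure.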
 
 
 
 
 
 
 
 
## taxi.py
```python=
import sys
from contextlib import closing
from six import StringIO
from gym import utils
from gym.envs.toy_text import discrete
import numpy as np

MAP = [
    "+---------+",
    "|R: | : :G|",
    "| : : : : |",
    "| : : : : |",
    "| | : | : |",
    "|Y| : |B: |",
    "+---------+",
]


class TaxiEnv(discrete.DiscreteEnv):
    """
    The Taxi Problem
    from "Hierarchical Reinforcement Learning with the MAXQ Value Function Decomposition"
    by Tom Dietterich

    Description:
    There are four designated locations in the grid world indicated by R(ed), B(lue), G(reen), and Y(ellow). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drives to the passenger's location, picks up the passenger, drives to the passenger's destination (another one of the four specified locations), and then drops off the passenger. Once the passenger is dropped off, the episode ends.

    Observations:
    There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is in the taxi), and 4 destination locations.

    Actions:
    There are 6 discrete deterministic actions:
    - 0: move south
    - 1: move north
    - 2: move east
    - 3: move west
    - 4: pickup passenger
    - 5: dropoff passenger

    Rewards:
    There is a reward of -1 for each action and an additional reward of +20 for delivering the passenger. There is a reward of -10 for executing the "pickup" and "dropoff" actions illegally.

    Rendering:
    - blue: passenger
    - magenta: destination
    - yellow: empty taxi
    - green: full taxi
    - other letters (R, G, B and Y): locations for passengers and destinations

    actions:
    - 0: south
    - 1: north
    - 2: east
    - 3: west
    - 4: pickup
    - 5: dropoff

    state space is represented by:
        (taxi_row, taxi_col, passenger_location, destination)
    """
    metadata = {'render.modes': ['human', 'ansi']}

    def __init__(self):
        self.desc = np.asarray(MAP, dtype='c')
        self.locs = locs = [(0, 0), (0, 4), (4, 0), (4, 3)]

        num_states = 500
        num_rows = 5
        num_columns = 5
        max_row = num_rows - 1
        max_col = num_columns - 1
        initial_state_distrib = np.zeros(num_states)
        num_actions = 6
        P = {state: {action: []
                     for action in range(num_actions)} for state in range(num_states)}
        for row in range(num_rows):
            for col in range(num_columns):
                for pass_idx in range(len(locs) + 1):  # +1 for being inside taxi
                    for dest_idx in range(len(locs)):
                        state = self.encode(row, col, pass_idx, dest_idx)
                        if pass_idx < 4 and pass_idx != dest_idx:
                            initial_state_distrib[state] += 1
                        for action in range(num_actions):
                            # defaults
                            new_row, new_col, new_pass_idx = row, col, pass_idx
                            reward = -1  # default reward when there is no pickup/dropoff
                            done = False
                            taxi_loc = (row, col)

                            if action == 0:
                                new_row = min(row + 1, max_row)
                            elif action == 1:
                                new_row = max(row - 1, 0)
                            if action == 2 and self.desc[1 + row, 2 * col + 2] == b":":
                                new_col = min(col + 1, max_col)
                            elif action == 3 and self.desc[1 + row, 2 * col] == b":":
                                new_col = max(col - 1, 0)
                            elif action == 4:  # pickup
                                if (pass_idx < 4 and taxi_loc == locs[pass_idx]):
                                    new_pass_idx = 4
                                else:  # passenger not at location
                                    reward = -10
                            elif action == 5:  # dropoff
                                if (taxi_loc == locs[dest_idx]) and pass_idx == 4:
                                    new_pass_idx = dest_idx
                                    done = True
                                    reward = 20
                                elif (taxi_loc in locs) and pass_idx == 4:
                                    new_pass_idx = locs.index(taxi_loc)
                                else:  # dropoff at wrong location
                                    reward = -10
                            new_state = self.encode(
                                new_row, new_col, new_pass_idx, dest_idx)
                            # modified from the stock gym code, which appends
                            # (1.0, new_state, reward, done); here only the
                            # successor state is stored
                            P[state][action].append(new_state)
        initial_state_distrib /= initial_state_distrib.sum()
        discrete.DiscreteEnv.__init__(
            self, num_states, num_actions, P, initial_state_distrib)

    def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):
        # (5) 5, 5, 4
        i = taxi_row
        i *= 5
        i += taxi_col
        i *= 5
        i += pass_loc
        i *= 4
        i += dest_idx
        return i

    def decode(self, i):
        out = []
        out.append(i % 4)
        i = i // 4
        out.append(i % 5)
        i = i // 5
        out.append(i % 5)
        i = i // 5
        out.append(i)
        assert 0 <= i < 5
        return reversed(out)

    def render(self, mode='human'):
        outfile = StringIO() if mode == 'ansi' else sys.stdout

        out = self.desc.copy().tolist()
        out = [[c.decode('utf-8') for c in line] for line in out]
        taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)

        def ul(x): return "_" if x == " " else x
        if pass_idx < 4:
            out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
                out[1 + taxi_row][2 * taxi_col + 1], 'yellow', highlight=True)
            pi, pj = self.locs[pass_idx]
            out[1 + pi][2 * pj + 1] = utils.colorize(out[1 + pi][2 * pj + 1], 'blue', bold=True)
        else:  # passenger in taxi
            out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
                ul(out[1 + taxi_row][2 * taxi_col + 1]), 'green', highlight=True)

        di, dj = self.locs[dest_idx]
        out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], 'magenta')
        outfile.write("\n".join(["".join(row) for row in out]) + "\n")
        if self.lastaction is not None:
            outfile.write("  ({})\n".format(["South", "North", "East", "West", "Pickup", "Dropoff"][self.lastaction]))
        else:
            outfile.write("\n")

        # No need to return anything for human
        if mode != 'human':
            with closing(outfile):
                return outfile.getvalue()
```
 
 
```python=
# Data-collection variant used inside TaxiEnv.__init__: collect every successor
# state as a (row, col, passenger, destination) tuple in a flat list, so that
# env.P ends up with 500 states x 6 actions = 3000 records.
P = []
P.append((new_row, new_col, new_pass_idx, dest_idx))   # inside the action loop
env.P
```
 

 
```python=
Y = np.array(env.P)   # the 3000 collected successor states
```
 

 
```python=
Y = Y.reshape(3000, 4, 1)   # 500 states x 6 actions, each successor a 4-dim column vector
```
 

 
 
 
 
## Taxi-v2_LQR.py
 
```python=
import numpy as np
import pickle

# load the data collected from the modified environment:
# X holds the current states, Y the successor states
with open('Taxi-v2_LQR_Y', 'rb') as file:
    Y = pickle.load(file)
with open('Taxi-v2_LQR_X', 'rb') as file:
    X = pickle.load(file)
print(X)

# the action sequence repeats 0..5 once for each of the 500 states
U = np.array([0, 1, 2, 3, 4, 5] * 500)

# least-squares fit of x(k+1) = A x(k) + B u(k) over all 3000 samples
tmp = np.linalg.lstsq(np.vstack((X.reshape(4, 3000),
                                 U.reshape(1, 3000))).T,
                      Y.reshape(4, 3000).T)[0]
A_nom = tmp[0:4, :].T
B_nom = tmp[4:5, :].T
print(A_nom, B_nom)
```
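The pickle files `Taxi-v2_LQR_X` and `Taxi-v2_LQR_Y` are not generated in this note. One plausible way to build the same 3000-sample data set directly from the unmodified environment (an assumption about how the data was produced, not the exact script used here):
```python=
import numpy as np
import gym

env = gym.make("Taxi-v2").unwrapped
X, U, Y = [], [], []
for s in range(500):
    x = np.array(list(env.decode(s)))         # (row, col, passenger, destination)
    for a in range(6):
        _, s_next, _, _ = env.P[s][a][0]      # stock (prob, next_state, reward, done) tuple
        X.append(x)
        U.append(a)
        Y.append(np.array(list(env.decode(s_next))))
X, U, Y = np.array(X), np.array(U), np.array(Y)   # shapes (3000, 4), (3000,), (3000, 4)
```
Running the least-squares fit above on such data gives the matrices below.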
 
A = $\begin{bmatrix}
0.96450997 & 0.03484202 & 0.00596637 & 0.02996772 \\
0.02045199 & 0.99110394 & 0.01817487 & 0.00749897 \\
0.01574805 & 0.01123163 & 1.00040326 & 0.0183105 \\
0.03236694 & 0.0028723 & 0.00242112 & 0.97953158
\end{bmatrix}$
B = $\begin{bmatrix}
-0.02600378\\
-0.02880875\\
-0.03655201\\
-0.0188511
\end{bmatrix}$
 
 
Compute the prediction error for every sample and take its two-norm:
```python=
# prediction error for every (state, action) sample
error = []
for i in range(3000):
    each_error = Y[i] - (A_nom.dot(X[i]) + B_nom.dot(U[i]))
    error.append(each_error)
print(error)

# two-norm of each error vector
norm_error = []
for i in range(3000):
    each_norm_error = np.linalg.norm(error[i])
    norm_error.append(each_norm_error)
print(norm_error)
```
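The same errors can be computed without the explicit loops (a sketch; assumes `X` and `Y` reshape cleanly to `(3000, 4)`):
```python=
Xs = X.reshape(3000, 4)
Ys = Y.reshape(3000, 4)
pred = Xs.dot(A_nom.T) + np.outer(U, B_nom.flatten())   # A x(k) + B u(k) for every sample
norm_error_vec = np.linalg.norm(Ys - pred, axis=1)       # should match norm_error above
```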
 
 
Plot the per-sample error (first as a scatter plot, then as a line plot):
```python=
import matplotlib.pyplot as plt
import numpy as np
state = np.arange(0, 3000)
plt.scatter(state, norm_error, s=0.1, alpha=0.6, marker='o')
plt.xlabel('state')
plt.ylabel('error')
plt.draw()
```
 

 
```python=
state = np.arange(0, 3000)
plt.plot(state, norm_error, ls='--', lw=0.05, alpha=0.8)
plt.xlabel('state')
plt.ylabel('error')
plt.draw()
```
 

 
