@@ -1,158 +0,0 @@
-import env
-import plotting
-import motion_model
-
-import numpy as np
-import sys
-import copy
-
-
-class Policy_iteration:
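-    # Policy iteration for a grid-world MDP: alternate policy evaluation and
-    # greedy policy improvement until the policy no longer changes.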
-    def __init__(self, x_start, x_goal):
-        self.xI, self.xG = x_start, x_goal
-        self.e = 0.001  # threshold for convergence
-        self.gamma = 0.9  # discount factor
-
-        self.env = env.Env(self.xI, self.xG)
-        self.motion = motion_model.Motion_model(self.xI, self.xG)
-        self.plotting = plotting.Plotting(self.xI, self.xG)
-
-        self.u_set = self.env.motions  # feasible input set
-        self.stateSpace = self.env.stateSpace  # state space
-        self.obs = self.env.obs_map()  # position of obstacles
-        self.lose = self.env.lose_map()  # position of lose states
-
-        self.name1 = "policy_iteration, gamma=" + str(self.gamma)
-
-        [self.value, self.policy] = self.iteration()
-        self.path = self.extract_path(self.xI, self.xG, self.policy)
-        self.plotting.animation(self.path, self.name1)
-
-    def policy_evaluation(self, policy, value):
-        """
-        Evaluate current policy.
-
-        :param policy: current policy
-        :param value: value table
-        :return: new value table generated by current policy
-        """
-
-        delta = sys.maxsize
-
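-        # Repeatedly sweep all non-goal states, backing up V(x) under the
-        # fixed policy, until the largest change in one sweep drops below e.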
-        while delta > self.e:  # convergence condition
-            x_value = 0
-            for x in self.stateSpace:
-                if x not in self.xG:
-                    [x_next, p_next] = self.motion.move_next(x, policy[x])
-                    v_Q = self.cal_Q_value(x_next, p_next, value)
-                    v_diff = abs(value[x] - v_Q)
-                    value[x] = v_Q
-                    if v_diff > 0:
-                        x_value = max(x_value, v_diff)
-            delta = x_value
-
-        return value
-
-    def policy_improvement(self, policy, value):
-        """
-        Improve policy using current value table.
-
-        :param policy: policy table
-        :param value: current value table
-        :return: improved policy table
-        """
-
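-        # Greedy improvement: in every non-goal state, pick the action that
-        # maximizes the one-step lookahead Q-value.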
-        for x in self.stateSpace:
-            if x not in self.xG:
-                value_list = []
-                for u in self.u_set:
-                    [x_next, p_next] = self.motion.move_next(x, u)
-                    value_list.append(self.cal_Q_value(x_next, p_next, value))
-                policy[x] = self.u_set[int(np.argmax(value_list))]
-
-        return policy
-
-    def iteration(self):
-        """
-        Policy iteration: repeat the evaluation and improvement process until convergence.
-        :return: value table and converged policy.
-        """
-
-        value_table = {}
-        policy = {}
-        count = 0
-
-        for x in self.stateSpace:
-            value_table[x] = 0  # initialize value table
-            policy[x] = self.u_set[0]  # initialize policy table
-
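-        # Alternate evaluation and improvement; convergence is reached when an
-        # improvement step leaves the policy unchanged.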
-        while True:
-            count += 1
-            policy_back = copy.deepcopy(policy)
-            value_table = self.policy_evaluation(policy, value_table)  # evaluation process
-            policy = self.policy_improvement(policy, value_table)  # policy improvement process
-            if policy_back == policy: break  # convergence condition
-
-        self.message(count)
-
-        return value_table, policy
-
-    def cal_Q_value(self, x, p, table):
-        """
-        Calculate the Q-value.
-
-        :param x: next state vector
-        :param p: probability of each state
-        :param table: value table
-        :return: Q-value
-        """
-
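-        # Q-value: sum_i p[i] * (reward[i] + gamma * V(x[i])), i.e. the
-        # expected immediate reward plus the discounted value of next states.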
-        value = 0
-        reward = self.env.get_reward(x)  # get reward of next state
-        for i in range(len(x)):
-            value += p[i] * (reward[i] + self.gamma * table[x[i]])  # calculate Q-value
-
-        return value
-
-    def extract_path(self, xI, xG, policy):
-        """
-        Extract the path from the converged policy.
-
-        :param xI: starting state
-        :param xG: goal states
-        :param policy: converged policy
-        :return: path
-        """
-
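-        # Follow the greedy policy from the start until a goal is reached,
-        # aborting if a step would land on an obstacle.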
-        x, path = xI, [xI]
-        while x not in xG:
-            u = policy[x]
-            x_next = (x[0] + u[0], x[1] + u[1])
-            if x_next in self.obs:
-                print("Collision! Please run again!")
-                break
-            else:
-                path.append(x_next)
-                x = x_next
-        return path
-
-    def message(self, count):
-        """
-        Print a summary of the run.
-
-        :param count: number of iterations until convergence
-        :return: None
-        """
-
-        print("starting state: ", self.xI)
-        print("goal states: ", self.xG)
-        print("condition for convergence: ", self.e)
-        print("discount factor: ", self.gamma)
-        print("number of iterations: ", count)
-
-
-if __name__ == '__main__':
-    x_Start = (5, 5)
-    x_Goal = [(49, 5), (49, 25)]
-
-    PI = Policy_iteration(x_Start, x_Goal)