Q-policy_iteration.py

import env
import plotting
import motion_model
import numpy as np
import copy
import sys
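
# Q-policy iteration: policy iteration carried out directly on an
# action-value (Q) table rather than a state-value table. Each round
# alternates two steps until the policy stops changing:
#   1. policy evaluation  -- sweep the Q-table under the current policy
#                            until the largest update is below a threshold;
#   2. policy improvement -- make the policy greedy w.r.t. the Q-table.
# env, plotting and motion_model are local modules of this project.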

class Q_policy_iteration:
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.e = 0.001          # threshold for convergence
        self.gamma = 0.9        # discount factor
        self.env = env.Env(self.xI, self.xG)                        # class Env
        self.motion = motion_model.Motion_model(self.xI, self.xG)   # class Motion_model
        self.plotting = plotting.Plotting(self.xI, self.xG)         # class Plotting
        self.u_set = self.env.motions           # feasible input set
        self.stateSpace = self.env.stateSpace   # state space
        self.obs = self.env.obs_map()           # positions of obstacles
        self.lose = self.env.lose_map()         # positions of lose states
        self.name1 = "Q-policy_iteration, gamma=" + str(self.gamma)
        self.value, self.policy = self.iteration()
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)

    def policy_evaluation(self, policy, value):
        """
        Evaluate the current policy: sweep the Q-table until it converges.

        :param policy: current policy
        :param value: Q-value table
        :return: converged Q-value table
        """
        delta = sys.maxsize
        while delta > self.e:       # convergence condition
            max_diff = 0
            for x in value:
                if x not in self.xG:
                    for k in range(len(self.u_set)):
                        x_next, p_next = self.motion.move_next(x, self.u_set[k])
                        v_Q = self.cal_Q_value(x_next, p_next, policy, value)
                        max_diff = max(max_diff, abs(value[x][k] - v_Q))
                        value[x][k] = v_Q
            delta = max_diff
        return value
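
    # Note: the sweep above updates value[x][k] in place, so later backups in
    # the same pass already see the new estimates (a Gauss-Seidel-style
    # update), which typically speeds up convergence.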

    def policy_improvement(self, policy, value):
        """
        Improve the policy greedily with respect to the current Q-table.

        :param policy: policy table
        :param value: current Q-value table
        :return: improved policy
        """
        for x in self.stateSpace:
            if x not in self.xG:
                policy[x] = int(np.argmax(value[x]))
        return policy
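
    # The improvement step is the greedy update pi(x) = argmax_u Q(x, u);
    # goal states keep their initial action, since no input is applied there.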

    def iteration(self):
        """
        Q-policy iteration: alternate evaluation and improvement
        until the policy no longer changes.

        :return: converged Q-value table and policy.
        """
        Q_table = {}
        policy = {}
        count = 0
        for x in self.stateSpace:
            Q_table[x] = [0] * len(self.u_set)  # initialize Q-value table, one entry per input
            policy[x] = 0                       # initialize policy table
        while True:
            count += 1
            policy_back = copy.deepcopy(policy)
            Q_table = self.policy_evaluation(policy, Q_table)   # evaluation step
            policy = self.policy_improvement(policy, Q_table)   # improvement step
            if policy_back == policy:           # convergence condition
                break
        self.message(count)
        return Q_table, policy
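
    # Termination check: once an improvement step leaves the policy unchanged,
    # the policy is greedy with respect to its own Q-table, which is the
    # standard convergence criterion for policy iteration.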

    def cal_Q_value(self, x, p, policy, table):
        """
        Calculate the Q-value of one (state, input) pair.

        :param x: possible next states
        :param p: probability of each next state
        :param policy: current policy
        :param table: Q-value table
        :return: Q-value
        """
        value = 0
        reward = self.env.get_reward(x)     # reward of each next state
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]][policy[x[i]]])
        return value
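
    # The backup above is the expected one-step return under the current
    # policy:
    #     Q(x, u) = sum_i p[i] * ( reward[i] + gamma * Q(x'[i], policy(x'[i])) )
    # where the pairs (x'[i], p[i]) enumerate the possible successor states of
    # applying input u in state x, as returned by motion.move_next().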

    def extract_path(self, xI, xG, policy):
        """
        Extract the path from the converged policy.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :return: path from xI to a goal state
        """
        x, path = xI, [xI]
        while x not in xG:
            u = self.u_set[policy[x]]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision! Please run again!")
                break
            path.append(x_next)
            x = x_next
        return path
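
    # The rollout above uses the nominal (deterministic) transition
    # x_next = x + u rather than sampling from the motion model; if the
    # greedy step ever lands on an obstacle cell, extraction aborts with
    # a warning instead of looping forever.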

    def message(self, count):
        """
        Print a summary of the run.

        :param count: number of policy-iteration rounds
        """
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("convergence threshold: ", self.e)
        print("discount factor: ", self.gamma)
        print("iteration rounds: ", count)

if __name__ == '__main__':
    x_Start = (5, 5)
    x_Goal = [(49, 5), (49, 25)]
    QPI = Q_policy_iteration(x_Start, x_Goal)