policy_iteration.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: huiming zhou
"""

import env
import plotting
import motion_model
import numpy as np
import sys
import copy
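

# Policy iteration on the grid world defined by the env and motion_model
# modules: alternate policy evaluation (estimate the value of the current
# policy) and policy improvement (make the policy greedy with respect to
# that value function) until the policy stops changing.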
class Policy_iteration:
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.e = 0.001  # threshold for convergence
        self.gamma = 0.9  # discount factor

        self.env = env.Env(self.xI, self.xG)
        self.motion = motion_model.Motion_model(self.xI, self.xG)
        self.plotting = plotting.Plotting(self.xI, self.xG)

        self.u_set = self.env.motions  # feasible input set
        self.stateSpace = self.env.stateSpace  # state space
        self.obs = self.env.obs_map()  # position of obstacles
        self.lose = self.env.lose_map()  # position of lose states

        self.name1 = "policy_iteration, gamma=" + str(self.gamma)

        [self.value, self.policy] = self.iteration()
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)

    def policy_evaluation(self, policy, value):
        """
        Evaluate the current policy.

        :param policy: current policy
        :param value: value table
        :return: new value table generated by the current policy
        """
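        # Sweep the whole state space repeatedly, replacing V(x) with the
        # one-step backup Q(x, policy[x]); stop once the largest change in
        # a sweep (delta) falls below the threshold e.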
        delta = sys.maxsize

        while delta > self.e:  # convergence condition
            x_value = 0
            for x in self.stateSpace:
                if x not in self.xG:
                    [x_next, p_next] = self.motion.move_next(x, policy[x])
                    v_Q = self.cal_Q_value(x_next, p_next, value)
                    v_diff = abs(value[x] - v_Q)
                    value[x] = v_Q
                    if v_diff > 0:
                        x_value = max(x_value, v_diff)
            delta = x_value

        return value

    def policy_improvement(self, policy, value):
        """
        Improve the policy using the current value table.

        :param policy: policy table
        :param value: current value table
        :return: improved policy table
        """
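        # Greedy improvement: for every non-goal state, compute the Q-value
        # of each feasible input and redirect the policy to the argmax.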
        for x in self.stateSpace:
            if x not in self.xG:
                value_list = []
                for u in self.u_set:
                    [x_next, p_next] = self.motion.move_next(x, u)
                    value_list.append(self.cal_Q_value(x_next, p_next, value))
                policy[x] = self.u_set[int(np.argmax(value_list))]

        return policy

    def iteration(self):
        """
        Policy iteration: alternate policy evaluation and policy improvement
        until the policy converges.

        :return: value table and converged policy.
        """
        value_table = {}
        policy = {}
        count = 0

        for x in self.stateSpace:
            value_table[x] = 0  # initialize value table
            policy[x] = self.u_set[0]  # initialize policy table
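        # Alternate evaluation and improvement. A policy left unchanged by
        # a greedy improvement step is already greedy with respect to its
        # own value function, so the loop can stop there.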
        while True:
            count += 1
            policy_back = copy.deepcopy(policy)
            value_table = self.policy_evaluation(policy, value_table)  # evaluation process
            policy = self.policy_improvement(policy, value_table)  # improvement process
            if policy_back == policy:  # convergence condition
                break

        self.message(count)

        return value_table, policy

    def cal_Q_value(self, x, p, table):
        """
        Calculate the Q-value of a (state, input) pair.

        :param x: list of possible next states
        :param p: probability of each next state
        :param table: value table
        :return: Q-value
        """
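        # Expected one-step return over the stochastic motion model:
        #     Q = sum_i p[i] * (reward[i] + gamma * V(x[i]))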
        value = 0
        reward = self.env.get_reward(x)  # reward of each possible next state
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])

        return value

    def extract_path(self, xI, xG, policy):
        """
        Extract the path from the converged policy.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :return: path from xI to a goal state
        """
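        # Follow the converged policy deterministically from the start
        # state; warn and stop early if a commanded move enters an obstacle.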
        x, path = xI, [xI]

        while x not in xG:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision! Please run again!")
                break
            else:
                path.append(x_next)
                x = x_next

        return path

    def message(self, count):
        """
        Print a summary of the run parameters and iteration count.

        :param count: number of iterations until convergence
        """
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("condition for convergence: ", self.e)
        print("discount factor: ", self.gamma)
        print("iteration times: ", count)


if __name__ == '__main__':
    x_Start = (5, 5)
    x_Goal = [(49, 5), (49, 25)]

    PI = Policy_iteration(x_Start, x_Goal)
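
# A minimal inspection sketch (an assumption for illustration, not part of
# the original demo): after construction, PI.value maps each state to its
# converged value and PI.policy maps each state to the selected input, e.g.
#     print(PI.value[x_Start], PI.policy[x_Start])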