#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: huiming zhou
"""

import env
import tools
import motion_model
import matplotlib.pyplot as plt
import numpy as np
import copy
import sys


class Policy_iteration:
    def __init__(self, x_start, x_goal):
        self.u_set = motion_model.motions               # feasible input set
        self.xI, self.xG = x_start, x_goal              # start state and goal states
        self.e = 0.001                                  # convergence threshold
        self.gamma = 0.9                                # discount factor
        self.obs = env.obs_map()                        # positions of obstacles
        self.lose = env.lose_map()                      # positions of lose states
        self.name1 = "policy_iteration, e=" + str(self.e) \
                     + ", gamma=" + str(self.gamma)
        self.name2 = "convergence of error, e=" + str(self.e)

    def policy_evaluation(self, policy, value):
        """
        Evaluate the current policy.

        :param policy: current policy
        :param value: value table
        :return: value table updated under the current policy
        """
        delta = sys.maxsize
        while delta > self.e:                           # convergence condition
            x_value = 0
            for x in value:
                if x not in self.xG:
                    [x_next, p_next] = motion_model.move_prob(x, policy[x], self.obs)
                    v_Q = self.cal_Q_value(x_next, p_next, value)
                    v_diff = abs(value[x] - v_Q)
                    value[x] = v_Q
                    if v_diff > 0:
                        x_value = max(x_value, v_diff)
            delta = x_value                             # largest update in this sweep
        return value

    def policy_improvement(self, policy, value):
        """
        Improve the policy using the current value table.

        :param policy: policy table
        :param value: current value table
        :return: improved policy table
        """
        for x in value:
            if x not in self.xG:
                value_list = []
                for u in self.u_set:
                    [x_next, p_next] = motion_model.move_prob(x, u, self.obs)
                    value_list.append(self.cal_Q_value(x_next, p_next, value))
                policy[x] = self.u_set[int(np.argmax(value_list))]
        return policy

    def iteration(self):
        """
        Policy iteration: alternate the evaluation and improvement steps
        until the policy no longer changes.

        :return: value table and converged policy
        """
        value_table = {}
        policy = {}
        count = 0
        for i in range(env.x_range):
            for j in range(env.y_range):
                if (i, j) not in self.obs:
                    value_table[(i, j)] = 0             # initialize value table
                    policy[(i, j)] = self.u_set[0]      # initialize policy table
        while True:
            count += 1
            policy_back = copy.deepcopy(policy)
            value_table = self.policy_evaluation(policy, value_table)  # evaluation step
            policy = self.policy_improvement(policy, value_table)      # improvement step
            if policy_back == policy:                   # policy stable: converged
                break
        self.message(count)
        return value_table, policy

    def cal_Q_value(self, x, p, table):
        """
        Calculate the Q-value from the next-state distribution.

        :param x: vector of possible next states
        :param p: probability of each next state
        :param table: value table
        :return: Q-value
        """
        value = 0
        reward = env.get_reward(x, self.xG, self.lose)  # reward of each next state
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])
        return value

    def simulation(self, xI, xG, policy):
        """
        Simulate a path by following the converged policy.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :return: simulated path
        """
        plt.figure(1)                                   # path animation
        tools.show_map(xI, xG, self.obs, self.lose, self.name1)  # show background
        x, path = xI, []
        while True:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:                      # collision: simulation failed
                print("Collision!")
                break                                   # stop instead of looping forever
            x = x_next
            if x_next in xG:
                break
            tools.plot_dots(x)                          # plot each state on the path
            path.append(x)
        plt.show()
        return path

    def message(self, count):
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("convergence threshold: ", self.e)
        print("discount factor: ", self.gamma)
        print("number of iterations: ", count)


if __name__ == '__main__':
    x_Start = (5, 5)                                    # starting state
    x_Goal = [(49, 5), (49, 25)]                        # goal states

    PI = Policy_iteration(x_Start, x_Goal)
    value_PI, policy_PI = PI.iteration()
    path_PI = PI.simulation(x_Start, x_Goal, policy_PI)
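
    # Optional: visualize the converged value table as a heat map. A minimal
    # sketch using matplotlib only; it assumes the grid is bounded by
    # env.x_range and env.y_range, as in Policy_iteration.iteration above.
    value_grid = np.full((env.y_range, env.x_range), np.nan)  # NaN marks obstacles
    for (i, j), v in value_PI.items():
        value_grid[j, i] = v
    plt.figure(2)
    plt.imshow(value_grid, origin="lower")
    plt.colorbar(label="V(x)")
    plt.title("converged value table")
    plt.show()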