policy_iteration.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Policy iteration for grid-world path planning.

@author: huiming zhou
"""

import copy
import sys

import matplotlib.pyplot as plt
import numpy as np

import env
import tools
import motion_model
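

# Policy iteration alternates two steps until the policy is stable:
#   1. policy evaluation:  sweep the value table with the current policy
#      until the largest per-sweep change drops below the threshold e;
#   2. policy improvement: make the policy greedy w.r.t. the updated values.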
class Policy_iteration:
    def __init__(self, x_start, x_goal):
        self.u_set = motion_model.motions   # feasible input set
        self.xI, self.xG = x_start, x_goal  # start state, set of goal states
        self.e = 0.001                      # convergence threshold
        self.gamma = 0.9                    # discount factor
        self.obs = env.obs_map()            # position of obstacles
        self.lose = env.lose_map()          # position of lose states (negative reward)
        self.name1 = f"policy_iteration, e={self.e}, gamma={self.gamma}"
        self.name2 = "convergence of error"
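
    # Policy evaluation: repeatedly apply the Bellman expectation backup
    #   V(s) <- sum_i p_i * (r_i + gamma * V(s_i))
    # for the action chosen by the current policy, where (s_i, p_i) are the
    # possible successor states and their transition probabilities.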
    def policy_evaluation(self, policy, value):
        delta = sys.maxsize

        while delta > self.e:
            x_value = 0
            for x in value:
                if x in self.xG:
                    continue
                [x_next, p_next] = motion_model.move_prob(x, policy[x], self.obs)
                v_Q = self.cal_Q_value(x_next, p_next, value)
                x_value = max(x_value, abs(value[x] - v_Q))
                value[x] = v_Q
            delta = x_value

        return value
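
    # Policy improvement: for every non-goal state, compute the one-step
    # lookahead Q-value of each feasible input and switch to the argmax.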
    def policy_improvement(self, policy, value):
        for x in value:
            if x in self.xG:
                continue
            value_list = []
            for u in self.u_set:
                [x_next, p_next] = motion_model.move_prob(x, u, self.obs)
                value_list.append(self.cal_Q_value(x_next, p_next, value))
            policy[x] = self.u_set[int(np.argmax(value_list))]

        return policy
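
    # Main loop: start from V = 0 and an arbitrary policy on all free cells,
    # then alternate evaluation and improvement until the policy no longer
    # changes between iterations.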
    def iteration(self):
        value_table = {}
        policy = {}

        for i in range(env.x_range):
            for j in range(env.y_range):
                if (i, j) not in self.obs:
                    value_table[(i, j)] = 0         # initial value: 0
                    policy[(i, j)] = self.u_set[0]  # initial policy: first input

        while True:
            policy_back = copy.deepcopy(policy)
            value_table = self.policy_evaluation(policy, value_table)
            policy = self.policy_improvement(policy, value_table)
            if policy_back == policy:
                break

        return value_table, policy
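
    # Roll out the converged policy with nominal (noise-free) motion to
    # extract a path from start to goal for plotting.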
    def simulation(self, xI, xG, policy):
        path = []
        x = xI
        # follow the policy greedily; note that this loops forever if the
        # policy ever points into an obstacle or fails to reach a goal
        while x not in xG:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next not in self.obs:
                x = x_next
                path.append(x)
        path.pop()  # exclude the goal state itself from the plotted path

        return path
    def animation(self, path):
        plt.figure(1)
        tools.show_map(self.xI, self.xG, self.obs, self.lose, self.name1)
        for x in path:
            tools.plot_dots(x)
        plt.show()
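
    # Expected return of a transition set:
    #   Q = sum_i p_i * (r_i + gamma * V(s_i))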
    def cal_Q_value(self, x, p, table):
        value = 0
        reward = self.get_reward(x)
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])

        return value
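
    # Reward model: +10 for reaching a goal state, -10 for a lose state,
    # 0 everywhere else.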
    def get_reward(self, x_next):
        reward = []
        for x in x_next:
            if x in self.xG:
                reward.append(10)
            elif x in self.lose:
                reward.append(-10)
            else:
                reward.append(0)

        return reward


if __name__ == '__main__':
    x_Start = (5, 5)
    x_Goal = [(49, 5), (49, 25)]

    PI = Policy_iteration(x_Start, x_Goal)
    value_PI, policy_PI = PI.iteration()
    path_PI = PI.simulation(x_Start, x_Goal, policy_PI)
    PI.animation(path_PI)
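
# Note: the interface assumed above for motion_model.move_prob (a sketch
# inferred from its call sites, not verified against the module) is that
# it returns two parallel lists -- the reachable successor states of x
# under input u and their transition probabilities -- and that every
# returned successor is a free cell present in the value table.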