#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: huiming zhou
"""

import sys

import matplotlib.pyplot as plt
import numpy as np

import env
import motion_model
import tools
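
# Expected interfaces of the local modules (inferred from how they are used
# below; these signatures are assumptions, not documentation of the modules):
#   env.x_range, env.y_range            -- grid dimensions
#   env.obs_map(), env.lose_map()       -- obstacle states and "lose" states
#   env.get_reward(x, xG, lose)         -- rewards for a list of next states
#   motion_model.motions                -- feasible input set
#   motion_model.move_prob(x, u, obs)   -- next states and their probabilities
#   tools.show_map(), tools.plot_dots() -- plotting helpers for the animation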


class Value_iteration:
    def __init__(self, x_start, x_goal):
        self.u_set = motion_model.motions           # feasible input set
        self.xI, self.xG = x_start, x_goal          # starting state, goal states
        self.e = 0.001                              # threshold for convergence
        self.gamma = 0.9                            # discount factor
        self.obs = env.obs_map()                    # position of obstacles
        self.lose = env.lose_map()                  # position of lose states
        self.name1 = "value_iteration, gamma=" + str(self.gamma)
        self.name2 = "converge process, e=" + str(self.e)

    def iteration(self):
        """
        Run value iteration until convergence.

        :return: converged value table, optimal policy, and the sequence of
                 maximum differences between successive iterations
        """
        value_table = {}                            # value table
        policy = {}                                 # policy
        diff = []                                   # max difference between successive iterations
        delta = sys.maxsize                         # initialize maximum difference
        count = 0                                   # iteration count

        for i in range(env.x_range):
            for j in range(env.y_range):
                if (i, j) not in self.obs:
                    value_table[(i, j)] = 0         # initialize value table for feasible states

        while delta > self.e:                       # convergence condition
            count += 1
            x_value = 0
            for x in value_table:
                if x not in self.xG:
                    value_list = []
                    for u in self.u_set:
                        x_next, p_next = motion_model.move_prob(x, u, self.obs)  # apply motion model
                        value_list.append(self.cal_Q_value(x_next, p_next, value_table))  # Q-value of (x, u)
                    policy[x] = self.u_set[int(np.argmax(value_list))]  # update policy greedily
                    v_diff = abs(value_table[x] - max(value_list))      # per-state change
                    value_table[x] = max(value_list)                    # update value table
                    x_value = max(x_value, v_diff)
            delta = x_value                         # update delta
            diff.append(delta)

        self.message(count)                         # print key parameters

        return value_table, policy, diff

    def cal_Q_value(self, x, p, table):
        """
        Calculate the Q-value of a (state, action) pair.

        :param x: list of possible next states
        :param p: probability of each next state
        :param table: value table
        :return: Q-value
        """
        value = 0
        reward = env.get_reward(x, self.xG, self.lose)  # rewards of next states
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])
        return value

    def simulation(self, xI, xG, policy, diff):
        """
        Simulate a path using the converged policy and plot the convergence curve.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :param diff: maximum differences between successive iterations
        :return: simulated path
        """
        plt.figure(1)                               # path animation
        tools.show_map(xI, xG, self.obs, self.lose, self.name1)  # show background

        x, path = xI, []
        while True:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision!")                 # collision: simulation failed
                break
            x = x_next
            if x_next in xG:
                break
            tools.plot_dots(x)                      # plot each state on the optimal path
            path.append(x)
            plt.pause(1)

        plt.figure(2)                               # difference between successive iterations
        plt.xlim(-5, 60)
        plt.ylim(-1, 9)
        plt.title(self.name2)
        plt.xlabel('iterations')
        plt.ylabel('difference of successive iterations')
        plt.grid(True)
        plt.gcf().canvas.mpl_connect('key_release_event',
                                     lambda event: [exit(0) if event.key == 'escape' else None])

        for count, d in enumerate(diff):
            plt.plot(count, d, color='#808080', marker='o')  # plot one dot per iteration
            plt.pause(0.07)

        plt.plot(diff, color='#808080')             # connect the dots
        plt.pause(0.01)
        plt.show()

        return path

    def message(self, count):
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("condition for convergence: ", self.e)
        print("discount factor: ", self.gamma)
        print("iteration times: ", count)


if __name__ == '__main__':
    x_Start = (5, 5)                                # starting state
    x_Goal = [(49, 5), (49, 25)]                    # goal states

    VI = Value_iteration(x_Start, x_Goal)
    value_VI, policy_VI, diff_VI = VI.iteration()
    path_VI = VI.simulation(x_Start, x_Goal, policy_VI, diff_VI)