Q-value_iteration.py

import sys

import numpy as np

import env
import motion_model
import plotting


class Q_value_iteration:
    """Q-value iteration on a grid world with stochastic motion."""
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.e = 0.001      # threshold for convergence
        self.gamma = 0.9    # discount factor

        self.env = env.Env(self.xI, self.xG)                        # class Env
        self.motion = motion_model.Motion_model(self.xI, self.xG)  # class Motion_model
        self.plotting = plotting.Plotting(self.xI, self.xG)        # class Plotting

        self.u_set = self.env.motions           # feasible input set
        self.stateSpace = self.env.stateSpace   # state space
        self.obs = self.env.obs_map()           # position of obstacles
        self.lose = self.env.lose_map()         # position of lose states

        self.name1 = "Q-value_iteration, gamma=" + str(self.gamma)
        self.name2 = "converge process, e=" + str(self.e)

        [self.value, self.policy, self.diff] = self.iteration(self.xI, self.xG)
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)
        self.plotting.plot_diff(self.diff, self.name2)
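
    # NOTE: env, motion_model and plotting are project-local modules that are
    # not shown in this file. The code below only assumes that:
    #   - env.Env exposes motions (action list), stateSpace, obs_map(),
    #     lose_map() and get_reward(states),
    #   - motion_model.Motion_model.move_next(x, u) returns the successor
    #     states of (x, u) and their transition probabilities,
    #   - plotting.Plotting provides animation() and plot_diff().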
    def iteration(self, xI, xG):
        """
        Q-value iteration: sweep the state space and apply the Bellman
        backup to every (state, action) pair until the updates converge.

        :param xI: starting state
        :param xG: goal states
        :return: converged Q-table, greedy policy, and per-sweep max update
        """
        Q_table = {}
        policy = {}
        diff = []
        delta = sys.maxsize
        count = 0

        for x in self.stateSpace:
            Q_table[x] = [0.0] * len(self.u_set)    # initialize Q-table: one entry per action

        while delta > self.e:                       # convergence condition
            count += 1
            x_value = 0
            for x in self.stateSpace:
                if x not in xG:
                    for k in range(len(self.u_set)):
                        [x_next, p_next] = self.motion.move_next(x, self.u_set[k])
                        Q_value = self.cal_Q_value(x_next, p_next, Q_table)
                        x_value = max(x_value, abs(Q_table[x][k] - Q_value))
                        Q_table[x][k] = Q_value
            diff.append(x_value)
            delta = x_value

        for x in self.stateSpace:
            if x not in xG:
                policy[x] = np.argmax(Q_table[x])   # greedy action per state

        self.message(count)

        return Q_table, policy, diff
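
    # The loop above performs the Bellman optimality backup for Q-values,
    #     Q(x, u) <- sum_i p_i * ( r(x'_i) + gamma * max_u' Q(x'_i, u') ),
    # sweeping every state until the largest single update falls below e.
    # Because this backup is a gamma-contraction, the sweeps are guaranteed
    # to converge, after which the greedy policy argmax_u Q(x, u) is read off.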
    def cal_Q_value(self, x, p, table):
        """
        Calculate the Q-value of one (state, action) pair as the expected
        one-step reward plus the discounted value of the successor states.

        :param x: list of possible next states
        :param p: transition probability of each next state
        :param table: current Q-table
        :return: Q-value
        """
        value = 0
        reward = self.env.get_reward(x)     # rewards of the next states
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * max(table[x[i]]))
        return value
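
    # Example (hypothetical numbers): with successors x = [a, b],
    # p = [0.8, 0.2], rewards [-1, -1] and max Q-values [10, 2], this
    # returns 0.8 * (-1 + 0.9 * 10) + 0.2 * (-1 + 0.9 * 2) = 6.56.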
    def extract_path(self, xI, xG, policy):
        """
        Extract a path by following the converged greedy policy from the
        starting state to a goal state.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :return: path
        """
        x, path = xI, [xI]
        while x not in xG:
            u = self.u_set[policy[x]]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision! Please run again!")
                break
            path.append(x_next)
            x = x_next
        return path
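
    # Example (hypothetical): if policy[(5, 5)] == 2 and u_set[2] == (0, 1),
    # the rollout steps from (5, 5) to (5, 6) and repeats until a goal state
    # in xG is reached (or an obstacle aborts the walk).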
    def message(self, count):
        """
        Print a summary of the run.

        :param count: number of sweeps until convergence
        """
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("condition for convergence: ", self.e)
        print("discount factor: ", self.gamma)
        print("iteration times: ", count)
if __name__ == '__main__':
    x_Start = (5, 5)                # starting state
    x_Goal = [(49, 5), (49, 25)]    # goal states

    QVI = Q_value_iteration(x_Start, x_Goal)