value_iteration.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: huiming zhou
"""

import sys

import numpy as np

import env
import plotting
import motion_model


class Value_iteration:
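    """
    Value iteration planner on a grid world: repeatedly apply the Bellman
    optimality backup until the value table converges, then extract a path
    by greedily following the resulting policy.
    """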

    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.e = 0.001      # threshold for convergence
        self.gamma = 0.9    # discount factor

        self.env = env.Env(self.xI, self.xG)                        # class Env
        self.motion = motion_model.Motion_model(self.xI, self.xG)  # class Motion_model
        self.plotting = plotting.Plotting(self.xI, self.xG)        # class Plotting

        self.u_set = self.env.motions           # feasible input set
        self.stateSpace = self.env.stateSpace   # state space
        self.obs = self.env.obs_map()           # positions of obstacles
        self.lose = self.env.lose_map()         # positions of lose states

        self.name1 = "value_iteration, gamma=" + str(self.gamma)
        self.name2 = "converge process, e=" + str(self.e)

        [self.value, self.policy, self.diff] = self.iteration(self.xI, self.xG)
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)
        self.plotting.plot_diff(self.diff, self.name2)

    def iteration(self, xI, xG):
        """
        value iteration.

        :return: converged value table, optimal policy, and the maximum
            difference recorded after each sweep
        """
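        # Bellman optimality backup applied below: for every non-goal state x,
        #     V(x) <- max_u sum_i p_i * (r(x_i') + gamma * V(x_i'))
        # value_table is updated in place during each sweep, so states visited
        # later in the same sweep already see the fresh values
        # (Gauss-Seidel-style value iteration).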
        value_table = {}        # value table
        policy = {}             # policy
        diff = []               # maximum difference between two successive iterations
        delta = sys.maxsize     # initialize maximum difference
        count = 0               # iteration count

        for x in self.stateSpace:       # initialize value table for feasible states
            value_table[x] = 0
        while delta > self.e:           # convergence condition
            count += 1
            x_value = 0
            for x in self.stateSpace:
                if x not in xG:
                    value_list = []
                    for u in self.u_set:
                        [x_next, p_next] = self.motion.move_next(x, u)      # query motion model
                        value_list.append(self.cal_Q_value(x_next, p_next, value_table))
                    policy[x] = self.u_set[int(np.argmax(value_list))]      # update policy
                    v_diff = abs(value_table[x] - max(value_list))          # value change at x
                    value_table[x] = max(value_list)                        # update value table
                    x_value = max(x_value, v_diff)
            delta = x_value             # largest change in this sweep
            diff.append(delta)

        self.message(count)             # print summary

        return value_table, policy, diff

    def cal_Q_value(self, x, p, table):
        """
        calculate the Q-value of one (state, input) pair.

        :param x: possible next states
        :param p: probability of reaching each next state
        :param table: value table
        :return: Q-value
        """
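        # Expected one-step return over the stochastic successors:
        #     Q = sum_i p[i] * (reward[i] + gamma * V(x[i]))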
        value = 0
        reward = self.env.get_reward(x)     # rewards of the next states
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])

        return value

    def extract_path(self, xI, xG, policy):
        """
        extract the path from the converged policy.

        :param xI: starting state
        :param xG: goal states
        :param policy: converged policy
        :return: path from xI towards a goal state
        """
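        # The rollout applies the nominal (deterministic) effect of each
        # greedy input; since the motion model itself is stochastic, the
        # nominal step can in principle land on an obstacle, which is
        # presumably why the collision guard below aborts the extraction.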
        x, path = xI, [xI]
        while x not in xG:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision! Please run again!")
                break
            else:
                path.append(x_next)
                x = x_next

        return path

    def message(self, count):
        """
        print a summary of the run.

        :param count: number of sweeps until convergence
        """
        print("starting state: ", self.xI)
        print("goal states: ", self.xG)
        print("condition for convergence: ", self.e)
        print("discount factor: ", self.gamma)
        print("iteration times: ", count)


if __name__ == '__main__':
    x_Start = (5, 5)                    # starting state
    x_Goal = [(49, 5), (49, 25)]        # goal states

    VI = Value_iteration(x_Start, x_Goal)