value_iteration.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: huiming zhou
"""

import sys

import matplotlib.pyplot as plt
import numpy as np

import env
import motion_model
import tools
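

# The grid world: states are the free cells of an env.x_range-by-env.y_range
# map. env.obs_map() marks blocked cells and env.lose_map() marks penalty
# cells; reaching a goal cell yields reward +10, stepping onto a lose cell
# yields -10, and every other transition yields 0 (see get_reward below),
# with future rewards discounted by gamma.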
class Value_iteration:
    def __init__(self, x_start, x_goal):
        self.u_set = motion_model.motions       # feasible input set
        self.xI, self.xG = x_start, x_goal      # start state and set of goal states
        self.e = 0.001                          # convergence threshold
        self.gamma = 0.9                        # discount factor
        self.obs = env.obs_map()                # position of obstacles
        self.lose = env.lose_map()              # position of lose (penalty) states
        self.name1 = "value_iteration, e=" + str(self.e) + ", gamma=" + str(self.gamma)
        self.name2 = "convergence of error"
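
    # iteration() below performs the synchronous Bellman optimality backup
    #     V(x) <- max_u sum_x' p(x'|x,u) * (R(x') + gamma * V(x'))
    # over all free states, repeating full sweeps until the largest
    # single-state change (delta) drops below the threshold e. The greedy
    # action at each state is recorded as the policy.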
    def iteration(self):
        value_table = {}
        policy = {}
        diff = []
        delta = sys.maxsize

        # initialize the value of every free (non-obstacle) state to 0
        for i in range(env.x_range):
            for j in range(env.y_range):
                if (i, j) not in self.obs:
                    value_table[(i, j)] = 0

        # sweep all states until the largest value change drops below e
        while delta > self.e:
            x_value = 0
            for x in value_table:
                if x in self.xG:
                    continue
                value_list = []
                for u in self.u_set:
                    [x_next, p_next] = motion_model.move_prob(x, u, self.obs)
                    value_list.append(self.cal_Q_value(x_next, p_next, value_table))
                policy[x] = self.u_set[int(np.argmax(value_list))]
                v_diff = abs(value_table[x] - max(value_list))
                value_table[x] = max(value_list)
                if v_diff > 0:
                    x_value = max(x_value, v_diff)
            delta = x_value
            diff.append(delta)

        return value_table, policy, diff
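
    # simulation() rolls the greedy policy forward from the start state until
    # it reaches a goal state in xG; an action that would step into an
    # obstacle leaves the state unchanged. The final pop() removes the goal
    # cell itself from the returned path.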
    def simulation(self, xI, xG, policy):
        path = []
        x = xI
        while x not in xG:
            u = policy[x]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next not in self.obs:
                x = x_next
                path.append(x)
        path.pop()
        return path
    def animation(self, path, diff):
        plt.figure(1)
        tools.show_map(self.xI, self.xG, self.obs, self.lose, self.name1)
        for x in path:
            tools.plot_dots(x)
        plt.show()

        plt.figure(2)
        plt.plot(diff, color='#808080', marker='o')
        plt.title(self.name2)
        plt.xlabel('iterations')
        plt.grid(True)
        plt.show()
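
    # cal_Q_value() computes the expected one-step return of an action:
    #     Q = sum_i p[i] * (reward[i] + gamma * V(x[i]))
    # where x lists the possible successor states returned by
    # motion_model.move_prob and p holds the matching transition
    # probabilities (interface assumed from the call site in iteration()).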
    def cal_Q_value(self, x, p, table):
        value = 0
        reward = self.get_reward(x)
        for i in range(len(x)):
            value += p[i] * (reward[i] + self.gamma * table[x[i]])
        return value
    def get_reward(self, x_next):
        reward = []
        for x in x_next:
            if x in self.xG:
                reward.append(10)
            elif x in self.lose:
                reward.append(-10)
            else:
                reward.append(0)
        return reward


if __name__ == '__main__':
    x_Start = (5, 5)
    x_Goal = [(49, 5), (49, 25)]

    VI = Value_iteration(x_Start, x_Goal)
    value_VI, policy_VI, diff_VI = VI.iteration()
    path_VI = VI.simulation(x_Start, x_Goal, policy_VI)
    VI.animation(path_VI, diff_VI)
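
# Note: this script relies on the sibling modules env, tools and motion_model
# from the same repository. As used above, env must expose x_range, y_range,
# obs_map() and lose_map(); tools must expose show_map() and plot_dots();
# motion_model must expose motions and move_prob().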