#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q-learning
@author: huiming zhou
"""

import env
import tools
import motion_model
import matplotlib.pyplot as plt
import numpy as np
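
# Interfaces this script relies on from the local helper modules
# (inferred from the calls below, not re-specified here):
#   env:          x_range, y_range, obs_map(), lose_map(), get_reward(x, lose)
#   tools:        show_map(...), plot_dots(x)
#   motion_model: motions, the list of feasible moves (four grid directions)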


class QLEARNING:
    def __init__(self, x_start, x_goal):
        self.u_set = motion_model.motions               # feasible input set
        self.xI, self.xG = x_start, x_goal
        self.M = 500                                    # number of training episodes
        self.gamma = 0.9                                # discount factor
        self.alpha = 0.5                                # learning rate
        self.epsilon = 0.1                              # exploration rate (epsilon)
        self.obs = env.obs_map()                        # positions of obstacles
        self.lose = env.lose_map()                      # positions of lose states
        self.name1 = "Qlearning, M=" + str(self.M)

    def Monte_Carlo(self):
        """
        Run M episodes of Q-learning with random (exploring) starts.
        :return: Q_table, policy
        """
        Q_table = self.table_init()                     # Q_table initialization
        policy = {}                                     # policy table

        for k in range(self.M):                         # training episodes
            x = self.state_init()                       # random initial state
            while x != self.xG:                         # episode ends at the goal
                u = self.epsilon_greedy(int(np.argmax(Q_table[x])), self.epsilon)
                x_next = self.move_next(x, self.u_set[u])   # next state
                reward = env.get_reward(x_next, self.lose)  # reward observed
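                # Q-learning temporal-difference update:
                #   Q(x, u) <- (1 - alpha) * Q(x, u)
                #              + alpha * (reward + gamma * max_u' Q(x_next, u'))
                # the max over Q_table[x_next] is a greedy one-step lookahead,
                # which is what makes the update off-policy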
                Q_table[x][u] = (1 - self.alpha) * Q_table[x][u] + \
                    self.alpha * (reward + self.gamma * max(Q_table[x_next]))
                x = x_next

        for x in Q_table:
            policy[x] = int(np.argmax(Q_table[x]))      # extract greedy policy
        return Q_table, policy

    def table_init(self):
        """
        Initialize Q_table: Q(s, a)
        :return: Q_table
        """
        Q_table = {}
        for i in range(env.x_range):
            for j in range(env.y_range):
                if (i, j) not in self.obs:              # obstacles get no entry
                    u = []
                    for k in range(len(self.u_set)):
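                        # the goal is absorbing: its action values are pinned
                        # at 0 so that max(Q_table[goal]) stays 0; all other
                        # entries get small random values to break argmax ties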
                        if (i, j) == self.xG:
                            u.append(0)
                        else:
                            u.append(np.random.random_sample())
                    Q_table[(i, j)] = u
        return Q_table

    def state_init(self):
        """
        Initialize a random starting state.
        :return: starting state
        """
        while True:
            i = np.random.randint(0, env.x_range)      # randint's upper bound is exclusive
            j = np.random.randint(0, env.y_range)
            if (i, j) not in self.obs:
                return (i, j)

    def epsilon_greedy(self, u, error):
        """
        Select an input using the epsilon-greedy rule.
        :param u: greedy (argmax) input index
        :param error: epsilon value
        :return: selected input index
        """
        if np.random.random_sample() < 3 / 4 * error:
            u_e = u
            while u_e == u:                             # rejection-sample a non-greedy input
                p = np.random.random_sample()
                if p < 0.25:
                    u_e = 0
                elif p < 0.5:
                    u_e = 1
                elif p < 0.75:
                    u_e = 2
                else:
                    u_e = 3
            return u_e
        return u

    def move_next(self, x, u):
        """
        Get next state; stay in place if the move would hit an obstacle.
        :param x: current state
        :param u: input
        :return: next state
        """
        x_next = (x[0] + u[0], x[1] + u[1])
        if x_next in self.obs:
            return x
        return x_next

    def simulation(self, xI, xG, policy):
        """
        Simulate a path using the converged policy.
        :param xI: starting state
        :param xG: goal state
        :param policy: converged policy
        :return: simulation path
        """
        plt.figure(1)                                               # path animation
        tools.show_map(xI, xG, self.obs, self.lose, self.name1)    # show background
        x, path = xI, []
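        # roll out the greedy policy from xI until the goal is reached;
        # a collision means the learned policy is unsafe, so the rollout
        # is aborted instead of looping forever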
        while True:
            u = self.u_set[policy[x]]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision!")                     # collision: simulation failed
                break
            x = x_next
            if x_next == xG:
                break
            tools.plot_dots(x)                          # plot each state in the path
            path.append(x)
        plt.show()
        self.message()
        return path

    def message(self):
        print("starting state: ", self.xI)
        print("goal state: ", self.xG)
        print("iteration numbers: ", self.M)
        print("discount factor: ", self.gamma)
        print("epsilon error: ", self.epsilon)
        print("alpha: ", self.alpha)


if __name__ == '__main__':
    x_Start = (1, 1)
    x_Goal = (12, 1)

    Q_CALL = QLEARNING(x_Start, x_Goal)
    value_Q, policy_Q = Q_CALL.Monte_Carlo()
    path_Q = Q_CALL.simulation(x_Start, x_Goal, policy_Q)