#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tabular Q-learning on a 2-D grid world with obstacles and lose states.

@author: huiming zhou
"""

import numpy as np

import env
import plotting
import motion_model
class QLEARNING:
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.M = 500                            # number of training episodes
        self.gamma = 0.9                        # discount factor
        self.alpha = 0.5                        # learning rate
        self.epsilon = 0.1                      # exploration rate

        self.env = env.Env(self.xI, self.xG)
        self.motion = motion_model.Motion_model(self.xI, self.xG)
        self.plotting = plotting.Plotting(self.xI, self.xG)

        self.u_set = self.env.motions           # feasible input set
        self.stateSpace = self.env.stateSpace   # state space
        self.obs = self.env.obs_map()           # position of obstacles
        self.lose = self.env.lose_map()         # position of lose states

        self.name1 = "Q-learning, M=" + str(self.M)
        self.value, self.policy = self.q_learning(self.xI, self.xG)
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)
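    # Construction runs the whole pipeline: learn the Q-table, extract the
    # greedy policy, roll out a path from xI to xG, and animate the result.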
    def q_learning(self, xI, xG):
        """
        Q-learning training loop.
        :return: Q_table, policy
        """
        Q_table = self.table_init()             # Q_table initialization
        policy = {}                             # policy table

        for k in range(self.M):                 # training episodes
            x = self.state_init()               # random initial state
            while x != xG:                      # episode ends at the goal
                u = self.epsilon_greedy(int(np.argmax(Q_table[x])), self.epsilon)
                x_next = self.move_next(x, self.u_set[u])     # next state
                reward = self.env.get_reward(x_next)          # reward observed
                Q_table[x][u] = (1 - self.alpha) * Q_table[x][u] + \
                    self.alpha * (reward + self.gamma * max(Q_table[x_next]))
                x = x_next

        for x in Q_table:
            policy[x] = int(np.argmax(Q_table[x]))            # extract greedy policy

        return Q_table, policy
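    # The weighted-average update in q_learning is the standard Q-learning
    # TD update, equivalently written as
    #     Q[x][u] += alpha * (reward + gamma * max(Q[x_next]) - Q[x][u]),
    # i.e. a step of size alpha toward the one-step bootstrapped target.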
    def table_init(self):
        """
        Initialize Q_table: Q(s, a)
        :return: Q_table
        """
        Q_table = {}
        for x in self.stateSpace:
            u = []
            if x not in self.obs:
                for k in range(len(self.u_set)):
                    if x == self.xG:
                        u.append(0)
                    else:
                        u.append(np.random.random_sample())
                Q_table[x] = u
        return Q_table
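    # Illustrative entry: with the four motions in self.u_set (as assumed by
    # epsilon_greedy below), a free state maps to one value per action, e.g.
    # Q_table[(3, 4)] == [0.42, 0.11, 0.87, 0.65] (random draws), while
    # Q_table[self.xG] == [0, 0, 0, 0]; obstacle states are not in the table.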
    def state_init(self):
        """
        initialize a random, collision-free starting state
        :return: starting state
        """
        while True:
            # np.random.randint excludes the upper bound, so this samples the full grid
            i = np.random.randint(0, self.env.x_range)
            j = np.random.randint(0, self.env.y_range)
            if (i, j) not in self.obs:
                return (i, j)
    def epsilon_greedy(self, u, error):
        """
        epsilon-greedy action selection
        :param u: greedy (argmax) action index
        :param error: epsilon value
        :return: selected action index
        """
        # With probability 3/4 * epsilon, swap the greedy action for one of the
        # other three (the 3/4 factor assumes four actions in self.u_set).
        if np.random.random_sample() < 3 / 4 * error:
            u_e = u
            while u_e == u:
                u_e = np.random.randint(0, len(self.u_set))
            return u_e
        return u
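    # Numeric check: with epsilon = 0.1, the greedy action is returned with
    # probability 1 - 3/4 * 0.1 = 0.925 and each other action with probability
    # 0.025, exactly matching a standard epsilon-greedy policy that samples
    # uniformly over the four actions with probability epsilon = 0.1.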
    def move_next(self, x, u):
        """
        get next state.
        :param x: current state
        :param u: input
        :return: next state
        """
        x_next = (x[0] + u[0], x[1] + u[1])
        if x_next in self.obs:
            return x
        return x_next
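    # Illustrative example: with x = (2, 3) and u = (1, 0), move_next returns
    # (3, 3); if (3, 3) is an obstacle, the agent stays at (2, 3) instead.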
    def extract_path(self, xI, xG, policy):
        """
        extract path from converged policy.
        :param xI: starting state
        :param xG: goal state
        :param policy: converged policy
        :return: path
        """
        x, path = xI, [xI]
        while x != xG:
            u = self.u_set[policy[x]]
            x_next = (x[0] + u[0], x[1] + u[1])
            if x_next in self.obs:
                print("Collision! Please run again!")
                break
            path.append(x_next)
            x = x_next
        return path
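    # Illustrative rollout: if policy[(1, 1)] == 2 and self.u_set[2] == (1, 0),
    # the first step appends (2, 1); the walk repeats until xG is reached (or a
    # collision aborts it, which indicates the policy has not converged).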
    def message(self):
        """
        print the problem setup and hyperparameters.
        """
        print("starting state: ", self.xI)
        print("goal state: ", self.xG)
        print("iteration numbers: ", self.M)
        print("discount factor: ", self.gamma)
        print("epsilon error: ", self.epsilon)
        print("alpha: ", self.alpha)
if __name__ == '__main__':
    x_Start = (1, 1)
    x_Goal = (12, 1)

    Q_CALL = QLEARNING(x_Start, x_Goal)
    Q_CALL.message()
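# A minimal reuse sketch (attribute names as defined above; output values are
# illustrative): after construction, Q_CALL.policy maps each free state to its
# greedy action index and Q_CALL.path holds the extracted path, e.g.
#     print(Q_CALL.path[:3])                      # [(1, 1), (2, 1), (3, 1)], say
#     print(Q_CALL.u_set[Q_CALL.policy[x_Start]]) # first greedy motion from the start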