# LRT_Astar3D.py
# this is the three dimensional LRTA* algo
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: yue qi
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
# Make the sibling "Search-based Planning" package importable relative to this file.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../Search-based Planning/")
from Search_3D.env3D import env
from Search_3D.utils3D import getAABB, getDist, getRay, StateSpace, Heuristic, getNearest, isCollide, hash3D, dehash, cost
from Search_3D.plot_util3D import visualization
import queue
  16. class LRT_A_star(object):
  17. def __init__(self,resolution=0.5):
  18. self.Alldirec = np.array([[1 ,0,0],[0,1 ,0],[0,0, 1],[1 ,1 ,0],[1 ,0,1 ],[0, 1, 1],[ 1, 1, 1],\
  19. [-1,0,0],[0,-1,0],[0,0,-1],[-1,-1,0],[-1,0,-1],[0,-1,-1],[-1,-1,-1],\
  20. [1,-1,0],[-1,1,0],[1,0,-1],[-1,0, 1],[0,1, -1],[0, -1,1],\
  21. [1,-1,-1],[-1,1,-1],[-1,-1,1],[1,1,-1],[1,-1,1],[-1,1,1]])
  22. self.env = env(resolution = resolution)
  23. self.Space = StateSpace(self)
  24. self.start, self.goal = getNearest(self.Space,self.env.start), getNearest(self.Space,self.env.goal)
  25. self.AABB = getAABB(self.env.blocks)
  26. self.Space[hash3D(getNearest(self.Space,self.start))] = 0
  27. self.OPEN = queue.QueuePrior()
  28. self.h = Heuristic(self.Space,self.goal) # 1. initialize heuristic h = h0
  29. self.Child = {}
  30. self.CLOSED = set()
  31. self.V = []
  32. self.done = False
  33. self.Path = []
  34. def children(self,x):
  35. allchild = []
  36. for j in self.Alldirec:
  37. collide,child = isCollide(self,x,j)
  38. if not collide:
  39. allchild.append(child)
  40. return allchild
  41. def step(self, xi, strxi):
  42. childs = self.children(xi) # 4. generate depth 1 neighborhood S(s,1) = {s' in S | norm(s,s') = 1}
  43. fvals = [cost(xi,i) + self.h[hash3D(i)] for i in childs]
  44. xj , fmin = childs[np.argmin(fvals)], min(fvals) # 5. compute h'(s) = min(dist(s,s') + h(s'))
  45. strxj = hash3D(xj)
  46. # add the child of xi
  47. self.Child[strxi] = xj
  48. if fmin >= self.h[strxi]: # 6. if h'(s) > h(s) then update h(s) = h'(s)
  49. self.h[strxi] = fmin
  50. # TODO: action to move to xj
  51. self.OPEN.put(strxj, self.h[strxj]) # 7. update current state s = argmin (dist(s,s') + h(s'))
  52. def run(self):
  53. x0 = hash3D(self.start)
  54. xt = hash3D(self.goal)
  55. self.OPEN.put(x0, self.Space[x0] + self.h[x0]) # 2. reset the current state
  56. self.ind = 0
  57. while xt not in self.CLOSED and self.OPEN: # 3. while s not in Sg do
  58. strxi = self.OPEN.get()
  59. xi = dehash(strxi)
  60. self.CLOSED.add(strxi)
  61. self.V.append(xi)
  62. visualization(self)
  63. self.step(xi , strxi)
  64. if self.ind % 100 == 0: print('iteration number = '+ str(self.ind))
  65. self.ind += 1
  66. self.done = True
  67. self.Path = self.path()
  68. visualization(self)
  69. plt.show()
  70. def path(self):
  71. # this is a suboptimal path.
  72. path = []
  73. strgoal = hash3D(self.goal)
  74. strx = hash3D(self.start)
  75. ind = 0
  76. while strx != strgoal:
  77. path.append([dehash(strx),self.Child[strx]])
  78. strx = hash3D(self.Child[strx])
  79. ind += 1
  80. if ind == 1000:
  81. return np.flip(path,axis=0)
  82. path = np.flip(path,axis=0)
  83. return path
  84. if __name__ == '__main__':
  85. Astar = LRT_A_star(0.5)
  86. Astar.run()