text stringlengths 9 39.2M | dir stringlengths 26 295 | lang stringclasses 185
values | created_date timestamp[us] | updated_date timestamp[us] | repo_name stringlengths 1 97 | repo_full_name stringlengths 7 106 | star int64 1k 183k | len_tokens int64 1 13.8M |
|---|---|---|---|---|---|---|---|---|
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.PotentialFieldPlanning import potential_field_planning


def test1():
    """Smoke test: the potential field planning demo runs headlessly."""
    potential_field_planning.show_animation = False
    potential_field_planning.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_potential_field_planning.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 63 |
```python
import conftest  # side effect: adds the repository root to sys.path
from SLAM.FastSLAM2 import fast_slam2


def test1():
    """Run a shortened, animation-free FastSLAM 2.0 simulation."""
    fast_slam2.show_animation = False
    fast_slam2.SIM_TIME = 3.0  # shorten the simulation to keep the test fast
    fast_slam2.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_fast_slam2.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 66 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Localization.extended_kalman_filter import extended_kalman_filter as ekf


def test_1():
    """Smoke test: the EKF localization demo runs without plotting."""
    ekf.show_animation = False
    ekf.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_extended_kalman_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 57 |
```python
import conftest  # side effect: adds the repository root to sys.path
import random

from ArmNavigation.n_joint_arm_to_point_control \
    import n_joint_arm_to_point_control as arm_control

random.seed(12345)  # make the randomized goal positions reproducible


def test1():
    """Smoke test: the n-joint arm point-control demo runs headlessly."""
    arm_control.show_animation = False
    arm_control.animation()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_n_joint_arm_to_point_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 77 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.FlowField import flowfield


def test():
    """Smoke test: the flow field planning demo runs without animation."""
    flowfield.show_animation = False
    flowfield.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_flow_field.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 51 |
```python
import conftest  # side effect: adds the repository root to sys.path
import random

from Mapping.normal_vector_estimation import normal_vector_estimation as nve

random.seed(12345)  # deterministic random sampling across both tests


def test_1():
    """Normal vector estimation demo 1 runs headlessly."""
    nve.show_animation = False
    nve.main1()


def test_2():
    """Normal vector estimation demo 2 runs headlessly."""
    nve.show_animation = False
    nve.main2()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_normal_vector_estimation.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 89 |
```python
import conftest  # side effect: adds the repository root to sys.path
# NOTE(review): this file is named after the visibility road map planner
# but imports the Voronoi road map module -- confirm which is intended.
from PathPlanning.VoronoiRoadMap import voronoi_road_map


def test1():
    """Smoke test: the Voronoi road map planning demo runs headlessly."""
    voronoi_road_map.show_animation = False
    voronoi_road_map.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_visibility_road_map_planner.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 65 |
```yaml
theme: jekyll-theme-slate
show_downloads: true
``` | /content/code_sandbox/_config.yml | yaml | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 15 |
```python
import conftest  # side effect: adds the repository root to sys.path
import numpy as np

from PathPlanning.ProbabilisticRoadMap import probabilistic_road_map


def test1():
    """PRM planning demo runs headlessly with a fixed RNG seed."""
    probabilistic_road_map.show_animation = False
    probabilistic_road_map.main(rng=np.random.default_rng(1233))


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_probabilistic_road_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 84 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.GreedyBestFirstSearch import greedy_best_first_search


def test_1():
    """Smoke test: the greedy best-first search demo runs headlessly."""
    greedy_best_first_search.show_animation = False
    greedy_best_first_search.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_greedy_best_first_search.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 57 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.StateLatticePlanner import state_lattice_planner


def test1():
    """Smoke test: the state lattice planning demo runs headlessly."""
    state_lattice_planner.show_animation = False
    state_lattice_planner.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_state_lattice_planner.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 64 |
```python
import conftest  # side effect: adds the repository root to sys.path
import random

from PathPlanning.ClosedLoopRRTStar import closed_loop_rrt_star_car


def test_1():
    """Closed-loop RRT* car planning runs headlessly with few iterations."""
    random.seed(12345)  # deterministic sampling
    closed_loop_rrt_star_car.show_animation = False
    closed_loop_rrt_star_car.main(gx=1.0, gy=0.0, gyaw=0.0, max_iter=5)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_closed_loop_rrt_star_car.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 92 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.RRTStarReedsShepp import rrt_star_reeds_shepp as rrt_rs


def test1():
    """The module demo runs headlessly with a small iteration budget."""
    rrt_rs.show_animation = False
    rrt_rs.main(max_iter=5)


obstacleList = [
    (5, 5, 1),
    (4, 6, 1),
    (4, 8, 1),
    (4, 10, 1),
    (6, 5, 1),
    (7, 5, 1),
    (8, 6, 1),
    (8, 8, 1),
    (8, 10, 1),
]  # [x, y, size(radius)]

start = [0.0, 0.0, rrt_rs.np.deg2rad(0.0)]
goal = [6.0, 7.0, rrt_rs.np.deg2rad(90.0)]


def _plan(step_size, seed=8, max_iter=100):
    """Plan on the shared scenario above and return the resulting path."""
    planner = rrt_rs.RRTStarReedsShepp(
        start, goal, obstacleList, [-2.0, 15.0],
        max_iter=max_iter, step_size=step_size)
    planner.set_random_seed(seed=seed)
    return planner.planning(animation=False)


def test2():
    """Consecutive path points stay within one step size of each other."""
    step_size = 0.2
    # tiny slack for acceptable numerical error from the planning process
    tolerance = 1e-14
    path = _plan(step_size)
    for p, q in zip(path, path[1:]):
        assert rrt_rs.math.dist(p[0:2], q[0:2]) < step_size + tolerance


def test_too_big_step_size():
    """Planning fails (returns None) when the step size is far too large."""
    path = _plan(20)
    assert path is None


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rrt_star_reeds_shepp.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 481 |
```python
import conftest  # side effect: adds the repository root to sys.path
from SLAM.EKFSLAM import ekf_slam


def test_1():
    """Smoke test: the EKF-SLAM demo runs without plotting."""
    ekf_slam.show_animation = False
    ekf_slam.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_ekf_slam.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 55 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.Eta3SplinePath import eta3_spline_path


def test_1():
    """Smoke test: the eta^3 spline path demo runs headlessly."""
    eta3_spline_path.show_animation = False
    eta3_spline_path.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_eta3_spline_path.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 58 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.Dijkstra import dijkstra


def test_1():
    """Smoke test: the Dijkstra grid planning demo runs headlessly."""
    dijkstra.show_animation = False
    dijkstra.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_dijkstra.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 52 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.rectangle_fitting import rectangle_fitting


def test1():
    """Smoke test: the rectangle fitting demo runs headlessly."""
    rectangle_fitting.show_animation = False
    rectangle_fitting.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rectangle_fitting.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 59 |
```python
import conftest  # side effect: adds the repository root to sys.path
import numpy as np
from numpy.testing import suppress_warnings

from AerialNavigation.rocket_powered_landing import rocket_powered_landing


def test1():
    """The rocket landing optimization runs headlessly with a fixed seed."""
    rocket_powered_landing.show_animation = False
    with suppress_warnings() as sup:
        # These solver warnings are expected for this problem setup.
        sup.filter(UserWarning,
                   "You are solving a parameterized problem that is not DPP")
        sup.filter(UserWarning,
                   "Solution may be inaccurate")
        rocket_powered_landing.main(rng=np.random.default_rng(1234))


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rocket_powered_landing.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 130 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.DStar import dstar


def test_1():
    """Smoke test: the D* planning demo runs headlessly."""
    dstar.show_animation = False
    dstar.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_dstar.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 51 |
```python
import conftest  # side effect: adds the repository root to sys.path
import numpy as np
from numpy.testing import assert_allclose

from utils import angle


def test_rot_mat_2d():
    """A zero angle yields the 2x2 identity rotation matrix."""
    assert_allclose(angle.rot_mat_2d(0.0), np.eye(2))


def test_angle_mod():
    """angle_mod wraps angles and preserves scalar/array input kinds."""
    assert_allclose(angle.angle_mod(-4.0), 2.28318531)
    assert isinstance(angle.angle_mod(-4.0), float)

    assert_allclose(angle.angle_mod([-4.0]), [2.28318531])
    assert isinstance(angle.angle_mod([-4.0]), np.ndarray)

    assert_allclose(angle.angle_mod([-150.0, 190.0, 350], degree=True),
                    [-150.0, -170.0, -10.0])
    assert_allclose(angle.angle_mod(-60.0, zero_2_2pi=True, degree=True),
                    [300.0])


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_utils.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 224 |
```python
"""Path hack to make tests work."""
import sys
import os
import pytest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(TEST_DIR) # to import this file from test code.
ROOT_DIR = os.path.dirname(TEST_DIR)
sys.path.append(ROOT_DIR)
def run_this_test(file):
pytest.main([os.path.abspath(file)])
``` | /content/code_sandbox/tests/conftest.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 76 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathTracking.lqr_steer_control import lqr_steer_control


def test1():
    """Smoke test: the LQR steering control demo runs headlessly."""
    lqr_steer_control.show_animation = False
    lqr_steer_control.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_lqr_steer_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 64 |
```python
import conftest  # side effect: adds the repository root to sys.path
import os

import matplotlib.pyplot as plt

from PathPlanning.SpiralSpanningTreeCPP \
    import spiral_spanning_tree_coverage_path_planner

# NOTE(review): animation is enabled here, unlike most other tests where
# it is disabled -- confirm this is intentional.
spiral_spanning_tree_coverage_path_planner.do_animation = True


def spiral_stc_cpp(img, start):
    """Plan coverage on map ``img`` from ``start`` and check full coverage."""
    # Sum of map pixel values; presumably free cells contribute 1 each
    # -- TODO confirm against the map images.
    num_free = 0
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            num_free += img[i][j]

    stc_planner = spiral_spanning_tree_coverage_path_planner.\
        SpiralSpanningTreeCoveragePlanner(img)

    edge, route, path = stc_planner.plan(start)

    covered_nodes = set()
    for p, q in edge:
        covered_nodes.update((p, q))

    # assert complete coverage (each node spans a 2x2 block of map cells)
    assert len(covered_nodes) == num_free / 4


def _run_case(map_name, start):
    """Load a bundled map image by file name and run the coverage check."""
    img_dir = os.path.dirname(
        os.path.abspath(__file__)) + \
        "/../PathPlanning/SpiralSpanningTreeCPP"
    img = plt.imread(os.path.join(img_dir, 'map', map_name))
    spiral_stc_cpp(img, start)


def test_spiral_stc_cpp_1():
    _run_case('test.png', (0, 0))


def test_spiral_stc_cpp_2():
    _run_case('test_2.png', (10, 0))


def test_spiral_stc_cpp_3():
    _run_case('test_3.png', (0, 0))


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_spiral_spanning_tree_coverage_path_planner.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 443 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.HybridAStar import hybrid_a_star


def test1():
    """Smoke test: the hybrid A* planning demo runs headlessly."""
    hybrid_a_star.show_animation = False
    hybrid_a_star.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_hybrid_a_star.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 53 |
```python
import conftest  # side effect: adds the repository root to sys.path
import random

from PathPlanning.LQRRRTStar import lqr_rrt_star

random.seed(12345)  # deterministic sampling


def test1():
    """LQR-RRT* planning runs headlessly with a small iteration budget."""
    lqr_rrt_star.show_animation = False
    lqr_rrt_star.main(maxIter=5)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_lqr_rrt_star.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 77 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.kmeans_clustering import kmeans_clustering


def test_1():
    """Smoke test: the k-means clustering demo runs headlessly."""
    kmeans_clustering.show_animation = False
    kmeans_clustering.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_kmeans_clustering.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 54 |
```python
import conftest  # side effect: adds the repository root to sys.path
from SLAM.iterative_closest_point import iterative_closest_point as icp


def test_1():
    """ICP matching demo (2D) runs headlessly."""
    icp.show_animation = False
    icp.main()


def test_2():
    """ICP matching demo (3D points) runs headlessly."""
    icp.show_animation = False
    icp.main_3d_points()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_iterative_closest_point.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 76 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.InformedRRTStar import informed_rrt_star


def test1():
    """Smoke test: the informed RRT* demo runs headlessly."""
    informed_rrt_star.show_animation = False
    informed_rrt_star.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_informed_rrt_star.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 55 |
```python
import conftest  # side effect: adds the repository root to sys.path
import numpy as np
import pytest

from PathPlanning.BSplinePath import bspline_path

N_COURSE_POINT = 50  # number of sampled points along each generated path


def _check_lengths(way_point_x, way_point_y, **kwargs):
    """Run both B-spline variants; x/y/heading/curvature lengths must match."""
    ax, ay, a_heading, a_curvature = bspline_path.approximate_b_spline_path(
        way_point_x, way_point_y, N_COURSE_POINT, s=0.5, **kwargs)
    assert len(ax) == len(ay) == len(a_heading) == len(a_curvature)

    ix, iy, i_heading, i_curvature = bspline_path.interpolate_b_spline_path(
        way_point_x, way_point_y, N_COURSE_POINT, **kwargs)
    assert len(ix) == len(iy) == len(i_heading) == len(i_curvature)


def test_list_input():
    """Plain Python lists are accepted as way points."""
    _check_lengths([-1.0, 3.0, 4.0, 2.0, 1.0],
                   [0.0, -3.0, 1.0, 1.0, 3.0])


def test_array_input():
    """NumPy arrays are accepted as way points."""
    _check_lengths(np.array([-1.0, 3.0, 4.0, 2.0, 1.0]),
                   np.array([0.0, -3.0, 1.0, 1.0, 3.0]))


def test_degree_change():
    """Degrees 4 and 2 succeed; degree 1 raises ValueError."""
    way_point_x = np.array([-1.0, 3.0, 4.0, 2.0, 1.0])
    way_point_y = np.array([0.0, -3.0, 1.0, 1.0, 3.0])

    _check_lengths(way_point_x, way_point_y, degree=4)
    _check_lengths(way_point_x, way_point_y, degree=2)

    with pytest.raises(ValueError):
        bspline_path.approximate_b_spline_path(
            way_point_x, way_point_y, N_COURSE_POINT, s=0.5, degree=1)
    with pytest.raises(ValueError):
        bspline_path.interpolate_b_spline_path(
            way_point_x, way_point_y, N_COURSE_POINT, degree=1)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_bspline_path.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 833 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.raycasting_grid_map import raycasting_grid_map


def test1():
    """Smoke test: the ray casting grid map demo runs headlessly."""
    raycasting_grid_map.show_animation = False
    raycasting_grid_map.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_raycasting_grid_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 62 |
```python
import conftest  # side effect: adds the repository root to sys.path
from SLAM.FastSLAM1 import fast_slam1


def test1():
    """Run a shortened, animation-free FastSLAM 1.0 simulation."""
    fast_slam1.show_animation = False
    fast_slam1.SIM_TIME = 3.0  # shorten the simulation to keep the test fast
    fast_slam1.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_fast_slam1.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 66 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.RRTStarDubins import rrt_star_dubins


def test1():
    """Smoke test: the RRT* Dubins planning demo runs headlessly."""
    rrt_star_dubins.show_animation = False
    rrt_star_dubins.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rrt_star_dubins.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 65 |
```python
import conftest  # side effect: adds the repository root to sys.path
import random

from PathPlanning.RRT import rrt
from PathPlanning.RRT import rrt_with_pathsmoothing as rrt_smoothing

random.seed(12345)  # deterministic sampling across both tests


def test1():
    """Basic RRT planning runs headlessly."""
    rrt.show_animation = False
    rrt.main(gx=1.0, gy=1.0)


def test2():
    """RRT with path smoothing runs headlessly."""
    rrt_smoothing.show_animation = False
    rrt_smoothing.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rrt.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 104 |
```python
import PathPlanning.AStar.a_star_variants as a_star
import conftest  # side effect: adds the repository root to sys.path

# Module-level switches that select which A* variant runs.
_VARIANT_FLAGS = [
    "use_beam_search",        # A* with beam search
    "use_iterative_deepening",  # A* with iterative deepening
    "use_dynamic_weighting",  # A* with dynamic weighting
    "use_theta_star",         # theta*
    "use_jump_point",         # A* with jump point
]


def reset_all():
    """Disable animation and every variant switch on the module."""
    a_star.show_animation = False
    for flag in _VARIANT_FLAGS:
        setattr(a_star, flag, False)


def test_1():
    """Each A* variant runs to completion, enabled one at a time."""
    reset_all()
    for flag in _VARIANT_FLAGS:
        setattr(a_star, flag, True)
        a_star.main()
        reset_all()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_a_star_variants.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 240 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.gaussian_grid_map import gaussian_grid_map


def test1():
    """Smoke test: the Gaussian grid map demo runs headlessly."""
    gaussian_grid_map.show_animation = False
    gaussian_grid_map.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_gaussian_grid_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 52 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.point_cloud_sampling import point_cloud_sampling as pcs


def test_1(capsys):
    """Each sampling method prints the expected output array shape."""
    pcs.do_plot = False
    pcs.main()
    captured = capsys.readouterr()
    assert "voxel_sampling_points.shape=(27, 3)" in captured.out
    assert "farthest_sampling_points.shape=(20, 3)" in captured.out
    assert "poisson_disk_points.shape=(20, 3)" in captured.out


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_point_cloud_sampling.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 126 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.DepthFirstSearch import depth_first_search


def test_1():
    """Smoke test: the depth-first search planning demo runs headlessly."""
    depth_first_search.show_animation = False
    depth_first_search.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_depth_first_search.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 53 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Localization.histogram_filter import histogram_filter


def test1():
    """Run a shortened, animation-free histogram filter simulation."""
    histogram_filter.show_animation = False
    histogram_filter.SIM_TIME = 1.0  # shorten the simulation for speed
    histogram_filter.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_histogram_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 60 |
```python
import conftest  # side effect: adds the repository root to sys.path
from SLAM.GraphBasedSLAM import graph_based_slam


def test_1():
    """Run a shortened, animation-free graph-based SLAM simulation."""
    graph_based_slam.show_animation = False
    graph_based_slam.SIM_TIME = 20.0  # bound the simulation time
    graph_based_slam.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_graph_based_slam.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 66 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Control.move_to_pose import move_to_pose


def test_1():
    """Smoke test: the move-to-pose control demo runs headlessly."""
    move_to_pose.show_animation = False
    move_to_pose.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_move_to_pose_robot.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 77 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathTracking.model_predictive_speed_and_steer_control \
    import model_predictive_speed_and_steer_control as mpc


def test_1():
    """MPC speed/steer control demo 1 runs headlessly."""
    mpc.show_animation = False
    mpc.main()


def test_2():
    """MPC speed/steer control demo 2 runs headlessly."""
    mpc.show_animation = False
    mpc.main2()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_model_predictive_speed_and_steer_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 90 |
```python
import conftest  # side effect: adds the repository root to sys.path
import numpy as np

from PathPlanning.DynamicMovementPrimitives import \
    dynamic_movement_primitives


def _sine_training_data(T, noise_amplitude=0.0, rng=None):
    """Return (t, value) training samples of sin(t) over [0, T) at dt=0.01.

    noise_amplitude adds uniform noise in [0, noise_amplitude) to the
    values; rng supplies the noise source so tests stay deterministic.
    """
    t = np.arange(0, T, 0.01)
    values = np.sin(t)
    if noise_amplitude:
        values = values + noise_amplitude * rng.random(len(t))
    return np.array([t, values]).T


def test_1():
    """A trajectory can be learned and recreated from user-passed data."""
    T = 5
    train_data = _sine_training_data(T)
    DMP_controller = dynamic_movement_primitives.DMP(train_data, T)
    DMP_controller.recreate_trajectory(train_data[0], train_data[-1], 4)


def test_2():
    """The recreated trajectory has exactly `timesteps` samples."""
    T = 5
    train_data = _sine_training_data(T)
    DMP_controller = dynamic_movement_primitives.DMP(train_data, T)
    t, path = DMP_controller.recreate_trajectory(train_data[0],
                                                 train_data[-1], 4)
    assert path.shape[0] == DMP_controller.timesteps


def test_3():
    """The learned trajectory stays close to the noisy training data.

    The noise source is seeded: the original test drew from an unseeded
    np.random.rand, which made this tolerance check flaky.
    """
    T = 3 * np.pi / 2
    A_noise = 0.02
    rng = np.random.default_rng(1234)  # fixed seed -> deterministic test
    train_data = _sine_training_data(T, noise_amplitude=A_noise, rng=rng)
    noisy_sin_t = train_data[:, 1]
    DMP_controller = dynamic_movement_primitives.DMP(train_data, T)
    t, pos = DMP_controller.recreate_trajectory(train_data[0],
                                                train_data[-1], T)
    diff = abs(pos[:, 1] - noisy_sin_t)
    assert max(diff) < 5 * A_noise


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_dynamic_movement_primitives.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 409 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Control.inverted_pendulum import inverted_pendulum_mpc_control


def test1():
    """Smoke test: the inverted pendulum MPC demo runs headlessly."""
    inverted_pendulum_mpc_control.show_animation = False
    inverted_pendulum_mpc_control.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_inverted_pendulum_mpc_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 57 |
```python
import numpy as np
import conftest # Add root path to sys.path
from PathPlanning.ReedsSheppPath import reeds_shepp_path_planning as m
def check_edge_condition(px, py, pyaw, start_x, start_y, start_yaw, end_x,
                         end_y, end_yaw):
    """Assert the path's first/last poses match the requested endpoints."""
    tol = 0.01  # acceptable endpoint error
    assert abs(px[0] - start_x) <= tol
    assert abs(py[0] - start_y) <= tol
    assert abs(pyaw[0] - start_yaw) <= tol
    assert abs(px[-1] - end_x) <= tol
    assert abs(py[-1] - end_y) <= tol
    assert abs(pyaw[-1] - end_yaw) <= tol
def check_path_length(px, py, lengths):
    """Assert the interpolated path is not longer than the planned lengths.

    NOTE(review): the check is one-sided (planned - actual <= 0.01); a
    much shorter actual path would also pass -- confirm that is intended.
    """
    planned_len = sum(abs(segment) for segment in lengths)
    actual_len = sum(np.hypot(dx, dy)
                     for dx, dy in zip(np.diff(px), np.diff(py)))
    assert planned_len - actual_len <= 0.01
def test1():
    """Smoke test: the Reeds-Shepp module demo runs without animation."""
    m.show_animation = False
    m.main()
def test2():
    """Random start/goal pairs yield paths with valid endpoints and length."""
    n_cases = 10
    np.random.seed(1234)  # reproducible random poses
    for _ in range(n_cases):
        # NOTE: the draw order below is fixed by the seed -- keep it.
        start_x = (np.random.rand() - 0.5) * 10.0  # [m]
        start_y = (np.random.rand() - 0.5) * 10.0  # [m]
        start_yaw = np.deg2rad((np.random.rand() - 0.5) * 180.0)  # [rad]

        end_x = (np.random.rand() - 0.5) * 10.0  # [m]
        end_y = (np.random.rand() - 0.5) * 10.0  # [m]
        end_yaw = np.deg2rad((np.random.rand() - 0.5) * 180.0)  # [rad]

        curvature = 1.0 / (np.random.rand() * 5.0)
        px, py, pyaw, mode, lengths = m.reeds_shepp_path_planning(
            start_x, start_y, start_yaw,
            end_x, end_y, end_yaw, curvature)

        check_edge_condition(px, py, pyaw, start_x, start_y, start_yaw,
                             end_x, end_y, end_yaw)
        check_path_length(px, py, lengths)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_reeds_shepp_path_planning.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 591 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.AStar import a_star_searching_from_two_side as bidir_a_star


def test1():
    """Bidirectional A* finds a path on a sparsely blocked map."""
    bidir_a_star.show_animation = False
    bidir_a_star.main(800)


def test2():
    """Bidirectional A* handles a map with enough obstacles to block paths."""
    bidir_a_star.show_animation = False
    bidir_a_star.main(5000)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_a_star_searching_two_side.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 84 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathTracking.lqr_speed_steer_control import lqr_speed_steer_control


def test_1():
    """Smoke test: the LQR speed/steer control demo runs headlessly."""
    lqr_speed_steer_control.show_animation = False
    lqr_speed_steer_control.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_lqr_speed_steer_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 67 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathTracking.rear_wheel_feedback import rear_wheel_feedback


def test1():
    """Smoke test: the rear wheel feedback control demo runs headlessly."""
    rear_wheel_feedback.show_animation = False
    rear_wheel_feedback.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rear_wheel_feedback.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 61 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.ClothoidPath import clothoid_path_planner


def test_1():
    """Smoke test: the clothoid path planning demo runs headlessly."""
    clothoid_path_planner.show_animation = False
    clothoid_path_planner.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_clothoidal_paths.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 56 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Mapping.circle_fitting import circle_fitting


def test_1():
    """Smoke test: the circle fitting demo runs headlessly."""
    circle_fitting.show_animation = False
    circle_fitting.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_circle_fitting.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 52 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.RRTDubins import rrt_dubins


def test1():
    """Smoke test: the RRT Dubins planning demo runs headlessly."""
    rrt_dubins.show_animation = False
    rrt_dubins.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_rrt_dubins.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 63 |
```python
import conftest  # side effect: adds the repository root to sys.path
from Bipedal.bipedal_planner import bipedal_planner


def test_1():
    """The bipedal planner walks a short footstep sequence without plotting."""
    planner = bipedal_planner.BipedalPlanner()

    # Reference footsteps handed to the planner (3 values per step).
    footsteps = [
        [0.0, 0.2, 0.0],
        [0.3, 0.2, 0.0],
        [0.3, 0.2, 0.2],
        [0.3, 0.2, 0.2],
        [0.0, 0.2, 0.2],
    ]
    planner.set_ref_footsteps(footsteps)
    planner.walk(plot=False)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_bipedal_planner.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 167 |
```python
import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.AStar import a_star


def test_1():
    """Smoke test: the A* grid planning demo runs headlessly."""
    a_star.show_animation = False
    a_star.main()


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_a_star.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 51 |
```python
import random

import conftest  # side effect: adds the repository root to sys.path
from PathPlanning.BatchInformedRRTStar import batch_informed_rrtstar as bit_star


def test_1():
    """BIT* planning runs headlessly with a small iteration budget."""
    bit_star.show_animation = False
    random.seed(12345)  # deterministic sampling
    bit_star.main(maxIter=10)


if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_batch_informed_rrt_star.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 73 |
```python
"""
Diff code style checker with ruff
This code comes from:
path_to_url
Scipy's licence: path_to_url
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import conftest
import os
import subprocess
# Repository root (the parent of this tests/ directory). Computed once and
# reused for CONFIG instead of duplicating the path expression.
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Ruff configuration shared by the whole repository.
CONFIG = os.path.join(ROOT_DIR, 'ruff.toml')
def run_ruff(files, fix):
    """Run ruff over ``files`` and return (returncode, stdout).

    An empty file list short-circuits to (0, "") without invoking ruff.
    ``fix`` adds --fix so ruff rewrites offending files in place.
    """
    if not files:
        return 0, ""
    cmd = ['ruff', 'check', f'--config={CONFIG}']
    if fix:
        cmd.append('--fix')
    cmd.extend(files)
    res = subprocess.run(cmd, stdout=subprocess.PIPE, encoding='utf-8')
    return res.returncode, res.stdout
def rev_list(branch, num_commits):
    """Return up to `num_commits` first-parent commit hashes of `branch`.

    Commits are listed in reverse chronological order (newest first).
    """
    command = [
        'git',
        'rev-list',
        '--max-count',
        f'{num_commits}',
        '--first-parent',
        branch
    ]
    proc = subprocess.run(command, stdout=subprocess.PIPE, encoding='utf-8')
    proc.check_returncode()
    return proc.stdout.rstrip('\n').split('\n')
def find_branch_point(branch):
    """Return the most recent commit shared by HEAD and `branch`.

    Based on: path_to_url#4991675
    """
    head_commits = rev_list('HEAD', 1000)
    ancestors = set(rev_list(branch, 1000))
    for commit in head_commits:
        if commit in ancestors:
            return commit
    # A branch that split off more than 1000 commits ago has no visible
    # common ancestor in the window we scanned.
    raise RuntimeError(
        'Failed to find a common ancestor in the last 1000 commits')
def find_diff(sha):
    """Return the zero-context diff of tracked *.py files since `sha`."""
    proc = subprocess.run(
        ['git', 'diff', '--unified=0', sha, '--', '*.py'],
        stdout=subprocess.PIPE,
        encoding='utf-8'
    )
    proc.check_returncode()
    return proc.stdout
def diff_files(sha):
    """Return absolute paths of Python-ish files added/changed since `sha`."""
    proc = subprocess.run(
        ['git', 'diff', '--name-only', '--diff-filter=ACMR', '-z', sha, '--',
         '*.py', '*.pyx', '*.pxd', '*.pxi'],
        stdout=subprocess.PIPE,
        encoding='utf-8'
    )
    proc.check_returncode()
    names = (name for name in proc.stdout.split('\0') if name)
    return [os.path.join(ROOT_DIR, name) for name in names]
def test():
    """Lint every file changed since branching from origin/master.

    Runs ruff with --fix on the changed files and fails if any lint
    error remains.
    """
    branch_commit = find_branch_point("origin/master")
    files = diff_files(branch_commit)
    print(files)
    rc, errors = run_ruff(files, fix=True)
    if errors:
        print(errors)
    else:
        print("No lint errors found.")
    assert rc == 0
# Allow running this test directly through the project's conftest helper.
if __name__ == '__main__':
    conftest.run_this_test(__file__)
``` | /content/code_sandbox/tests/test_codestyle.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 952 |
```python
"""
Extended kalman filter (EKF) localization sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.plot import plot_covariance_ellipse
# Covariance for EKF simulation
Q = np.diag([
    0.1,  # variance of location on x-axis
    0.1,  # variance of location on y-axis
    np.deg2rad(1.0),  # variance of yaw angle
    1.0  # variance of velocity
]) ** 2  # predict state covariance
R = np.diag([1.0, 1.0]) ** 2  # Observation x,y position covariance
# Simulation parameter
INPUT_NOISE = np.diag([1.0, np.deg2rad(30.0)]) ** 2  # noise injected into commanded input
GPS_NOISE = np.diag([0.5, 0.5]) ** 2  # noise injected into simulated GPS fix
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
show_animation = True  # set False for headless runs (tests)
def calc_input():
    """Return the constant control vector [v; yaw_rate] as a (2, 1) array."""
    velocity = 1.0  # [m/s]
    turn_rate = 0.1  # [rad/s]
    return np.array([[velocity], [turn_rate]])
def observation(xTrue, xd, u):
    """Advance ground truth, emit a noisy GPS fix, and dead-reckon on noisy input.

    Returns (new true state, observation z, new dead-reckoning state,
    noisy input).
    """
    xTrue = motion_model(xTrue, u)
    gps_noise = GPS_NOISE @ np.random.randn(2, 1)
    z = observation_model(xTrue) + gps_noise
    noisy_u = u + INPUT_NOISE @ np.random.randn(2, 1)
    xd = motion_model(xd, noisy_u)
    return xTrue, z, xd, noisy_u
def motion_model(x, u):
    """Propagate state [x y yaw v]^T one DT step under u = [v, yaw_rate]^T."""
    yaw = x[2, 0]
    v_cmd, omega = u[0, 0], u[1, 0]
    # velocity is taken directly from the input, not integrated
    return np.array([
        [x[0, 0] + DT * math.cos(yaw) * v_cmd],
        [x[1, 0] + DT * math.sin(yaw) * v_cmd],
        [yaw + DT * omega],
        [v_cmd],
    ])
def observation_model(x):
    """Map state [x y yaw v]^T to the GPS observation [x y]^T."""
    observation_matrix = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ])
    return observation_matrix @ x
def jacob_f(x, u):
    """
    Jacobian of the motion model evaluated at state x and input u.

    Model:
        x_{t+1}   = x_t + v*dt*cos(yaw)
        y_{t+1}   = y_t + v*dt*sin(yaw)
        yaw_{t+1} = yaw_t + omega*dt
        v_{t+1}   = v_t
    The only non-trivial partials are those of x and y with respect to
    yaw and v.
    """
    yaw, v = x[2, 0], u[0, 0]
    sin_yaw, cos_yaw = math.sin(yaw), math.cos(yaw)
    return np.array([
        [1.0, 0.0, -DT * v * sin_yaw, DT * cos_yaw],
        [0.0, 1.0, DT * v * cos_yaw, DT * sin_yaw],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]])
def jacob_h():
    """Jacobian of the observation model (constant for a linear model)."""
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0]])
def ekf_estimation(xEst, PEst, z, u):
    """One EKF cycle: predict with the motion model, correct with GPS z.

    xEst/PEst: posterior (4x1) state and covariance from the previous step.
    z: (2x1) position observation.  u: (2x1) control input.
    Returns the updated state estimate and covariance.
    """
    # Predict
    xPred = motion_model(xEst, u)
    jF = jacob_f(xEst, u)
    PPred = jF @ PEst @ jF.T + Q
    # Update
    jH = jacob_h()
    zPred = observation_model(xPred)
    y = z - zPred  # innovation
    S = jH @ PPred @ jH.T + R  # innovation covariance
    K = PPred @ jH.T @ np.linalg.inv(S)  # Kalman gain
    xEst = xPred + K @ y
    PEst = (np.eye(len(xEst)) - K @ jH) @ PPred
    return xEst, PEst
def main():
    """Run the EKF localization loop and animate estimate vs truth vs DR."""
    print(__file__ + " start!!")
    time = 0.0
    # State Vector [x y yaw v]'
    xEst = np.zeros((4, 1))
    xTrue = np.zeros((4, 1))
    PEst = np.eye(4)
    xDR = np.zeros((4, 1))  # Dead reckoning
    # history
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue
    hz = np.zeros((2, 1))
    while SIM_TIME >= time:
        time += DT
        u = calc_input()
        xTrue, z, xDR, ud = observation(xTrue, xDR, u)
        xEst, PEst = ekf_estimation(xEst, PEst, z, ud)
        # store data history
        hxEst = np.hstack((hxEst, xEst))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))
        hz = np.hstack((hz, z))
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
            plt.plot(hz[0, :], hz[1, :], ".g")
            plt.plot(hxTrue[0, :].flatten(),
                     hxTrue[1, :].flatten(), "-b")
            plt.plot(hxDR[0, :].flatten(),
                     hxDR[1, :].flatten(), "-k")
            plt.plot(hxEst[0, :].flatten(),
                     hxEst[1, :].flatten(), "-r")
            plot_covariance_ellipse(xEst[0, 0], xEst[1, 0], PEst)
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/extended_kalman_filter/extended_kalman_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,502 |
```python
"""
Ensemble Kalman Filter(EnKF) localization sample
author: Ryohei Sasaki(rsasaki0109)
Ref:
Ensemble Kalman filtering
(path_to_url
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.angle import angle_mod
from utils.angle import rot_mat_2d
# Simulation parameter
Q_sim = np.diag([0.2, np.deg2rad(1.0)]) ** 2  # sensor noise (range, bearing)
R_sim = np.diag([1.0, np.deg2rad(30.0)]) ** 2  # input noise (speed, yaw rate)
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
MAX_RANGE = 20.0  # maximum observation range
# Ensemble Kalman filter parameter
NP = 20  # Number of Particle
show_animation = True  # set False for headless runs (tests)
def calc_input():
    """Constant control: 1.0 m/s forward, 0.1 rad/s yaw rate, shape (2, 1)."""
    return np.array([[1.0], [0.1]])
def observation(xTrue, xd, u, RFID):
    """Simulate one step: move the truth, observe in-range RFID landmarks,
    and dead-reckon with a noisy copy of the input.

    Returns (new true state, observations stacked row-wise as
    [range, bearing, landmark_x, landmark_y], new dead-reckoning state,
    noisy input).
    """
    xTrue = motion_model(xTrue, u)
    z = np.zeros((0, 4))
    for i in range(len(RFID[:, 0])):
        dx = RFID[i, 0] - xTrue[0, 0]
        dy = RFID[i, 1] - xTrue[1, 0]
        d = math.hypot(dx, dy)
        angle = pi_2_pi(math.atan2(dy, dx) - xTrue[2, 0])
        if d <= MAX_RANGE:
            dn = d + np.random.randn() * Q_sim[0, 0] ** 0.5  # add noise
            angle_with_noise = angle + np.random.randn() * Q_sim[1, 1] ** 0.5
            zi = np.array([dn, angle_with_noise, RFID[i, 0], RFID[i, 1]])
            z = np.vstack((z, zi))
    # add noise to input
    ud = np.array([[
        u[0, 0] + np.random.randn() * R_sim[0, 0] ** 0.5,
        u[1, 0] + np.random.randn() * R_sim[1, 1] ** 0.5]]).T
    xd = motion_model(xd, ud)
    return xTrue, z, xd, ud
def motion_model(x, u):
    """Advance state [x y yaw v]^T by one DT step with input [v, yaw_rate]^T."""
    yaw = x[2, 0]
    transition = np.array([[1.0, 0, 0, 0],
                           [0, 1.0, 0, 0],
                           [0, 0, 1.0, 0],
                           [0, 0, 0, 0]])
    control = np.array([[DT * math.cos(yaw), 0],
                        [DT * math.sin(yaw), 0],
                        [0.0, DT],
                        [1.0, 0.0]])
    return transition @ x + control @ u
def observe_landmark_position(x, landmarks):
    """Predict landmark x-y positions implied by state x and the
    (range, bearing) observations, with sensor noise folded in.

    landmarks rows are [range, bearing, ...]; returns a
    (2 * n_landmarks, 1) column of interleaved x, y values.
    """
    landmarks_pos = np.zeros((2 * landmarks.shape[0], 1))
    for (i, lm) in enumerate(landmarks):
        index = 2 * i
        q = Q_sim[0, 0] ** 0.5  # range noise standard deviation
        # split the range noise between the x and y components
        landmarks_pos[index] = x[0, 0] + lm[0] * math.cos(
            x[2, 0] + lm[1]) + np.random.randn() * q / np.sqrt(2)
        landmarks_pos[index + 1] = x[1, 0] + lm[0] * math.sin(
            x[2, 0] + lm[1]) + np.random.randn() * q / np.sqrt(2)
    return landmarks_pos
def calc_covariance(xEst, px):
    """Sample covariance (x, y, yaw block) of the particle set about xEst.

    Bug fix: the original computed (px[:, i] - xEst), which broadcasts a
    (4,) row against the (4, 1) column xEst into a meaningless 4x4 matrix;
    the deviation must be taken as a column vector.
    """
    cov = np.zeros((3, 3))
    for i in range(px.shape[1]):
        dx = (px[:, i:i + 1] - xEst)[0:3]  # (3, 1) deviation
        cov += dx @ dx.T  # outer product
    cov /= NP
    return cov
def enkf_localization(px, z, u):
    """
    Localization with Ensemble Kalman filter

    px: (4, NP) ensemble of state particles (updated in place for the
    prediction step).  z: observation rows [range, bearing, lm_x, lm_y].
    u: (2, 1) input.  Returns (state estimate, covariance, updated ensemble).
    """
    pz = np.zeros((z.shape[0] * 2, NP))  # Particle store of z
    for ip in range(NP):
        x = np.array([px[:, ip]]).T
        # Predict with random input sampling
        ud1 = u[0, 0] + np.random.randn() * R_sim[0, 0] ** 0.5
        ud2 = u[1, 0] + np.random.randn() * R_sim[1, 1] ** 0.5
        ud = np.array([[ud1, ud2]]).T
        x = motion_model(x, ud)
        px[:, ip] = x[:, 0]
        z_pos = observe_landmark_position(x, z)
        pz[:, ip] = z_pos[:, 0]
    x_ave = np.mean(px, axis=1)
    x_dif = px - np.tile(x_ave, (NP, 1)).T
    z_ave = np.mean(pz, axis=1)
    z_dif = pz - np.tile(z_ave, (NP, 1)).T
    U = 1 / (NP - 1) * x_dif @ z_dif.T  # state-observation cross covariance
    V = 1 / (NP - 1) * z_dif @ z_dif.T  # observation covariance
    K = U @ np.linalg.inv(V)  # Kalman Gain
    z_lm_pos = z[:, [2, 3]].reshape(-1, )  # known landmark positions, flattened
    px_hat = px + K @ (np.tile(z_lm_pos, (NP, 1)).T - pz)
    xEst = np.average(px_hat, axis=1).reshape(4, 1)
    PEst = calc_covariance(xEst, px_hat)
    return xEst, PEst, px_hat
def plot_covariance_ellipse(xEst, PEst):  # pragma: no cover
    """Plot the 1-sigma x-y covariance ellipse centred on the estimate."""
    Pxy = PEst[0:2, 0:2]
    eig_val, eig_vec = np.linalg.eig(Pxy)
    if eig_val[0] >= eig_val[1]:
        big_ind = 0
        small_ind = 1
    else:
        big_ind = 1
        small_ind = 0
    t = np.arange(0, 2 * math.pi + 0.1, 0.1)
    # eig_val[big_ind] or eiq_val[small_ind] were occasionally negative
    # numbers extremely close to 0 (~10^-20), catch these cases and set
    # the respective variable to 0
    try:
        a = math.sqrt(eig_val[big_ind])
    except ValueError:
        a = 0
    try:
        b = math.sqrt(eig_val[small_ind])
    except ValueError:
        b = 0
    x = [a * math.cos(it) for it in t]
    y = [b * math.sin(it) for it in t]
    angle = math.atan2(eig_vec[1, big_ind], eig_vec[0, big_ind])
    # rotate the axis-aligned ellipse into the eigenvector frame
    fx = np.stack([x, y]).T @ rot_mat_2d(angle)
    px = np.array(fx[:, 0] + xEst[0, 0]).flatten()
    py = np.array(fx[:, 1] + xEst[1, 0]).flatten()
    plt.plot(px, py, "--r")
def pi_2_pi(angle):
    """Wrap angle via utils.angle.angle_mod (presumably into [-pi, pi] — see utils)."""
    return angle_mod(angle)
def main():
    """Run the EnKF localization simulation and animate the particle cloud."""
    print(__file__ + " start!!")
    time = 0.0
    # RF_ID positions [x, y]
    RF_ID = np.array([[10.0, 0.0],
                      [10.0, 10.0],
                      [0.0, 15.0],
                      [-5.0, 20.0]])
    # State Vector [x y yaw v]'
    xEst = np.zeros((4, 1))
    xTrue = np.zeros((4, 1))
    px = np.zeros((4, NP))  # Particle store of x
    xDR = np.zeros((4, 1))  # Dead reckoning
    # history
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue
    while SIM_TIME >= time:
        time += DT
        u = calc_input()
        xTrue, z, xDR, ud = observation(xTrue, xDR, u, RF_ID)
        xEst, PEst, px = enkf_localization(px, z, ud)
        # store data history
        hxEst = np.hstack((hxEst, xEst))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            # draw a line from the true pose to each observed landmark
            for i in range(len(z[:, 0])):
                plt.plot([xTrue[0, 0], z[i, 2]], [xTrue[1, 0], z[i, 3]], "-k")
            plt.plot(RF_ID[:, 0], RF_ID[:, 1], "*k")
            plt.plot(px[0, :], px[1, :], ".r")
            plt.plot(np.array(hxTrue[0, :]).flatten(),
                     np.array(hxTrue[1, :]).flatten(), "-b")
            plt.plot(np.array(hxDR[0, :]).flatten(),
                     np.array(hxDR[1, :]).flatten(), "-k")
            plt.plot(np.array(hxEst[0, :]).flatten(),
                     np.array(hxEst[1, :]).flatten(), "-r")
            plot_covariance_ellipse(xEst, PEst)
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/ensemble_kalman_filter/ensemble_kalman_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,251 |
```python
"""
Extended kalman filter (EKF) localization with velocity correction sample
author: Atsushi Sakai (@Atsushi_twi)
modified by: Ryohei Sasaki (@rsasaki0109)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.plot import plot_covariance_ellipse
# Covariance for EKF simulation
Q = np.diag([
    0.1,  # variance of location on x-axis
    0.1,  # variance of location on y-axis
    np.deg2rad(1.0),  # variance of yaw angle
    0.4,  # variance of velocity
    0.1  # variance of scale factor
]) ** 2  # predict state covariance
R = np.diag([0.1, 0.1]) ** 2  # Observation x,y position covariance
# Simulation parameter
INPUT_NOISE = np.diag([0.1, np.deg2rad(5.0)]) ** 2  # noise on the commanded input
GPS_NOISE = np.diag([0.05, 0.05]) ** 2  # noise on the simulated GPS fix
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
show_animation = True  # set False for headless runs (tests)
def calc_input():
    """Return the fixed control vector [v; yaw_rate] = [1.0; 0.1] (2x1)."""
    forward_speed = 1.0  # [m/s]
    turn_rate = 0.1  # [rad/s]
    return np.array([[forward_speed], [turn_rate]])
def observation(xTrue, xd, u):
    """Simulate one step: true motion, noisy GPS fix, and dead reckoning.

    Returns (new true state, observation z, new dead-reckoning state,
    input with noise added).
    """
    xTrue = motion_model(xTrue, u)
    # add noise to gps x-y
    z = observation_model(xTrue) + GPS_NOISE @ np.random.randn(2, 1)
    # add noise to input
    ud = u + INPUT_NOISE @ np.random.randn(2, 1)
    xd = motion_model(xd, ud)
    return xTrue, z, xd, ud
def motion_model(x, u):
    """Propagate [x y yaw v s]^T one DT step; commanded v is scaled by s."""
    yaw, scale = x[2, 0], x[4, 0]
    v_cmd, omega = u[0, 0], u[1, 0]
    return np.array([
        [x[0, 0] + DT * math.cos(yaw) * scale * v_cmd],
        [x[1, 0] + DT * math.sin(yaw) * scale * v_cmd],
        [yaw + DT * omega],
        [v_cmd],  # velocity comes straight from the input
        [scale],  # scale factor is constant under the model
    ])
def observation_model(x):
    """Map state [x y yaw v s]^T to the GPS observation [x y]^T."""
    measurement_matrix = np.array([
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
    ])
    return measurement_matrix @ x
def jacob_f(x, u):
    """
    Jacobian of Motion Model
    motion model
    x_{t+1} = x_t+v*s*dt*cos(yaw)
    y_{t+1} = y_t+v*s*dt*sin(yaw)
    yaw_{t+1} = yaw_t+omega*dt
    v_{t+1} = v{t}
    s_{t+1} = s{t}
    so
    dx/dyaw = -v*s*dt*sin(yaw)
    dx/dv = dt*s*cos(yaw)
    dx/ds = dt*v*cos(yaw)
    dy/dyaw = v*s*dt*cos(yaw)
    dy/dv = dt*s*sin(yaw)
    dy/ds = dt*v*sin(yaw)
    """
    yaw = x[2, 0]
    v = u[0, 0]
    s = x[4, 0]
    # rows/cols ordered as the state vector [x, y, yaw, v, s]
    jF = np.array([
        [1.0, 0.0, -DT * v * s * math.sin(yaw), DT * s * math.cos(yaw), DT * v * math.cos(yaw)],
        [0.0, 1.0, DT * v * s * math.cos(yaw), DT * s * math.sin(yaw), DT * v * math.sin(yaw)],
        [0.0, 0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 1.0]])
    return jF
def jacob_h():
    """Jacobian of the (linear) observation model — constant."""
    return np.array([[1, 0, 0, 0, 0],
                     [0, 1, 0, 0, 0]])
def ekf_estimation(xEst, PEst, z, u):
    """One EKF cycle for the 5-state model [x y yaw v s].

    Predicts with the scaled-velocity motion model, then corrects with
    the (2x1) GPS observation z.  Returns the updated state and covariance.
    """
    # Predict
    xPred = motion_model(xEst, u)
    jF = jacob_f(xEst, u)
    PPred = jF @ PEst @ jF.T + Q
    # Update
    jH = jacob_h()
    zPred = observation_model(xPred)
    y = z - zPred  # innovation
    S = jH @ PPred @ jH.T + R  # innovation covariance
    K = PPred @ jH.T @ np.linalg.inv(S)  # Kalman gain
    xEst = xPred + K @ y
    PEst = (np.eye(len(xEst)) - K @ jH) @ PPred
    return xEst, PEst
def main():
    """Run the EKF-with-velocity-scale-factor simulation and animate it."""
    print(__file__ + " start!!")
    time = 0.0
    # State Vector [x y yaw v s]'
    xEst = np.zeros((5, 1))
    xEst[4, 0] = 1.0  # Initial scale factor
    xTrue = np.zeros((5, 1))
    true_scale_factor = 0.9  # True scale factor
    xTrue[4, 0] = true_scale_factor
    PEst = np.eye(5)
    xDR = np.zeros((5, 1))  # Dead reckoning
    # history
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue
    hz = np.zeros((2, 1))
    while SIM_TIME >= time:
        time += DT
        u = calc_input()
        xTrue, z, xDR, ud = observation(xTrue, xDR, u)
        xEst, PEst = ekf_estimation(xEst, PEst, z, ud)
        # store data history
        hxEst = np.hstack((hxEst, xEst))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))
        hz = np.hstack((hz, z))
        estimated_scale_factor = hxEst[4, -1]  # latest scale estimate
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
            plt.plot(hz[0, :], hz[1, :], ".g")
            plt.plot(hxTrue[0, :].flatten(),
                     hxTrue[1, :].flatten(), "-b")
            plt.plot(hxDR[0, :].flatten(),
                     hxDR[1, :].flatten(), "-k")
            plt.plot(hxEst[0, :].flatten(),
                     hxEst[1, :].flatten(), "-r")
            plt.text(0.45, 0.85, f"True Velocity Scale Factor: {true_scale_factor:.2f}", ha='left', va='top', transform=plt.gca().transAxes)
            plt.text(0.45, 0.95, f"Estimated Velocity Scale Factor: {estimated_scale_factor:.2f}", ha='left', va='top', transform=plt.gca().transAxes)
            plot_covariance_ellipse(xEst[0, 0], xEst[1, 0], PEst)
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/extended_kalman_filter/ekf_with_velocity_correction.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,852 |
```python
"""
Unscented kalman filter (UKF) localization sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg
from utils.angle import rot_mat_2d
# Covariance for UKF simulation
Q = np.diag([
    0.1,  # variance of location on x-axis
    0.1,  # variance of location on y-axis
    np.deg2rad(1.0),  # variance of yaw angle
    1.0  # variance of velocity
]) ** 2  # predict state covariance
R = np.diag([1.0, 1.0]) ** 2  # Observation x,y position covariance
# Simulation parameter
INPUT_NOISE = np.diag([1.0, np.deg2rad(30.0)]) ** 2  # noise on the commanded input
GPS_NOISE = np.diag([0.5, 0.5]) ** 2  # noise on the simulated GPS fix
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
# UKF Parameter (sigma-point scaling, consumed by setup_ukf)
ALPHA = 0.001
BETA = 2
KAPPA = 0
show_animation = True  # set False for headless runs (tests)
def calc_input():
    """Return the fixed control vector [v; yaw_rate] = [1.0; 0.1] (2x1)."""
    speed = 1.0  # [m/s]
    yaw_rate = 0.1  # [rad/s]
    return np.array([[speed], [yaw_rate]])
def observation(xTrue, xd, u):
    """Step ground truth, produce a noisy GPS fix, and dead-reckon on noisy input."""
    xTrue = motion_model(xTrue, u)
    z = observation_model(xTrue) + GPS_NOISE @ np.random.randn(2, 1)  # noisy GPS
    noisy_u = u + INPUT_NOISE @ np.random.randn(2, 1)
    xd = motion_model(xd, noisy_u)
    return xTrue, z, xd, noisy_u
def motion_model(x, u):
    """Constant-velocity unicycle step: [x y yaw v]^T advanced by DT."""
    heading = x[2, 0]
    c, s = math.cos(heading), math.sin(heading)
    A = np.array([[1.0, 0, 0, 0],
                  [0, 1.0, 0, 0],
                  [0, 0, 1.0, 0],
                  [0, 0, 0, 0]])
    B = np.array([[DT * c, 0],
                  [DT * s, 0],
                  [0.0, DT],
                  [1.0, 0.0]])
    return A @ x + B @ u
def observation_model(x):
    """Return the observed part of the state: its x-y position."""
    obs_matrix = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ])
    return obs_matrix @ x
def generate_sigma_points(xEst, PEst, gamma):
    """Return the 2n+1 sigma points [mean, mean + gamma*sqrt(P) cols,
    mean - gamma*sqrt(P) cols] stacked as columns."""
    n = xEst.shape[0]
    sqrt_P = scipy.linalg.sqrtm(PEst)
    points = [xEst]
    for sign in (1.0, -1.0):
        for i in range(n):
            points.append(xEst + sign * gamma * sqrt_P[:, i:i + 1])
    return np.hstack(points)
def predict_sigma_motion(sigma, u):
    """Propagate every sigma point (column) through the motion model, in place."""
    for col in range(sigma.shape[1]):
        sigma[:, col:col + 1] = motion_model(sigma[:, col:col + 1], u)
    return sigma
def predict_sigma_observation(sigma):
    """
    Sigma Points prediction with observation model

    NOTE(review): writes the predicted observations into rows 0:2 of the
    caller's array in place before slicing, so the argument is mutated.
    The caller in this file keeps using the full array afterwards
    (ukf_estimation passes `sigma` to calc_pxz) — confirm that overwriting
    its first two rows is intended.
    """
    for i in range(sigma.shape[1]):
        sigma[0:2, i] = observation_model(sigma[:, i])
    sigma = sigma[0:2, :]
    return sigma
def calc_sigma_covariance(x, sigma, wc, Pi):
    """Weighted covariance of sigma-point columns about x, seeded with Pi."""
    deviations = sigma - x[0:sigma.shape[0]]
    P = Pi
    for i in range(sigma.shape[1]):
        col = deviations[:, i:i + 1]
        P = P + wc[0, i] * col @ col.T
    return P
def calc_pxz(sigma, x, z_sigma, zb, wc):
    """Weighted cross-covariance between state and observation sigma points."""
    dx = sigma - x
    dz = z_sigma - zb[0:2]
    P = np.zeros((dx.shape[0], dz.shape[0]))
    for i in range(sigma.shape[1]):
        P += wc[0, i] * dx[:, i:i + 1] @ dz[:, i:i + 1].T
    return P
def ukf_estimation(xEst, PEst, z, u, wm, wc, gamma):
    """One UKF cycle: unscented prediction, then correction with GPS z.

    wm/wc are the mean/covariance sigma-point weights and gamma the spread
    factor, all produced by setup_ukf().
    """
    # Predict
    sigma = generate_sigma_points(xEst, PEst, gamma)
    sigma = predict_sigma_motion(sigma, u)
    xPred = (wm @ sigma.T).T  # weighted mean of propagated sigma points
    PPred = calc_sigma_covariance(xPred, sigma, wc, Q)
    # Update
    zPred = observation_model(xPred)
    y = z - zPred  # innovation
    sigma = generate_sigma_points(xPred, PPred, gamma)  # redraw around prediction
    zb = (wm @ sigma.T).T
    z_sigma = predict_sigma_observation(sigma)
    st = calc_sigma_covariance(zb, z_sigma, wc, R)  # innovation covariance
    Pxz = calc_pxz(sigma, xPred, z_sigma, zb, wc)
    K = Pxz @ np.linalg.inv(st)  # Kalman gain
    xEst = xPred + K @ y
    PEst = PPred - K @ st @ K.T
    return xEst, PEst
def plot_covariance_ellipse(xEst, PEst):  # pragma: no cover
    """Plot the 1-sigma x-y covariance ellipse centred on the estimate."""
    Pxy = PEst[0:2, 0:2]
    eigval, eigvec = np.linalg.eig(Pxy)
    if eigval[0] >= eigval[1]:
        bigind = 0
        smallind = 1
    else:
        bigind = 1
        smallind = 0
    t = np.arange(0, 2 * math.pi + 0.1, 0.1)
    a = math.sqrt(eigval[bigind])  # semi-major axis
    b = math.sqrt(eigval[smallind])  # semi-minor axis
    x = [a * math.cos(it) for it in t]
    y = [b * math.sin(it) for it in t]
    angle = math.atan2(eigvec[1, bigind], eigvec[0, bigind])
    # rotate the axis-aligned ellipse into the principal-axis frame
    fx = rot_mat_2d(angle) @ np.array([x, y])
    px = np.array(fx[0, :] + xEst[0, 0]).flatten()
    py = np.array(fx[1, :] + xEst[1, 0]).flatten()
    plt.plot(px, py, "--r")
def setup_ukf(nx):
    """Compute UKF weights (wm, wc) and sigma-point spread gamma for nx states."""
    lamb = ALPHA ** 2 * (nx + KAPPA) - nx
    # leading weights for the centre sigma point
    wm = [lamb / (lamb + nx)]
    wc = [wm[0] + (1 - ALPHA ** 2 + BETA)]
    # all remaining 2*nx points share one weight
    rest = [1.0 / (2 * (nx + lamb))] * (2 * nx)
    gamma = math.sqrt(nx + lamb)
    wm = np.array([wm + rest])
    wc = np.array([wc + rest])
    return wm, wc, gamma
def main():
    """Run the UKF localization simulation loop and animate the estimates."""
    print(__file__ + " start!!")
    nx = 4  # State Vector [x y yaw v]'
    xEst = np.zeros((nx, 1))
    xTrue = np.zeros((nx, 1))
    PEst = np.eye(nx)
    xDR = np.zeros((nx, 1))  # Dead reckoning
    wm, wc, gamma = setup_ukf(nx)
    # history
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue
    hz = np.zeros((2, 1))
    time = 0.0
    while SIM_TIME >= time:
        time += DT
        u = calc_input()
        xTrue, z, xDR, ud = observation(xTrue, xDR, u)
        xEst, PEst = ukf_estimation(xEst, PEst, z, ud, wm, wc, gamma)
        # store data history
        hxEst = np.hstack((hxEst, xEst))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))
        hz = np.hstack((hz, z))
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
            plt.plot(hz[0, :], hz[1, :], ".g")
            plt.plot(np.array(hxTrue[0, :]).flatten(),
                     np.array(hxTrue[1, :]).flatten(), "-b")
            plt.plot(np.array(hxDR[0, :]).flatten(),
                     np.array(hxDR[1, :]).flatten(), "-k")
            plt.plot(np.array(hxEst[0, :]).flatten(),
                     np.array(hxEst[1, :]).flatten(), "-r")
            plot_covariance_ellipse(xEst, PEst)
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
if __name__ == '__main__':
    main()
```python
"""
Cubature Kalman filter using Constant Turn Rate and Velocity (CTRV) model
Fuse sensor data from IMU and GPS to obtain accurate position
path_to_url
Author: Raghuram Shankar
state matrix: 2D x-y position, yaw, velocity and yaw rate
measurement matrix: 2D x-y position, velocity and yaw rate
dt: Duration of time step
N: Number of time steps
show_final: Flag for showing final result
show_animation: Flag for showing each animation frame
show_ellipse: Flag for showing covariance ellipse
z_noise: Measurement noise
x_0: Prior state estimate matrix
P_0: Prior state estimate covariance matrix
q: Process noise covariance
hx: Measurement model matrix
r: Sensor noise covariance
SP: Sigma Points
W: Weights
x_est: State estimate
P_est: State estimate covariance
x_true: Ground truth value of state
x_true_cat: Concatenate all ground truth states
x_est_cat: Concatenate all state estimates
z_cat: Concatenate all measurements
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import sqrtm
dt = 0.1  # time step [s]
N = 100  # number of simulation steps
show_final = 1  # flag: show the final summary plot
show_animation = 0  # flag: show per-frame animation
show_ellipse = 0  # flag: show the covariance ellipse
z_noise = np.array([[0.1, 0.0, 0.0, 0.0],  # x position [m]
                    [0.0, 0.1, 0.0, 0.0],  # y position [m]
                    [0.0, 0.0, 0.1, 0.0],  # velocity [m/s]
                    [0.0, 0.0, 0.0, 0.1]])  # yaw rate [rad/s]
x_0 = np.array([[0.0],  # x position [m]
                [0.0],  # y position [m]
                [0.0],  # yaw [rad]
                [1.0],  # velocity [m/s]
                [0.1]])  # yaw rate [rad/s]
# prior state covariance
p_0 = np.array([[1e-3, 0.0, 0.0, 0.0, 0.0],
                [0.0, 1e-3, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0]])
# process noise covariance
q = np.array([[1e-11, 0.0, 0.0, 0.0, 0.0],
              [0.0, 1e-11, 0.0, 0.0, 0.0],
              [0.0, 0.0, np.deg2rad(1e-4), 0.0, 0.0],
              [0.0, 0.0, 0.0, 1e-4, 0.0],
              [0.0, 0.0, 0.0, 0.0, np.deg2rad(1e-4)]])
# measurement matrix: observes x, y, v and yaw rate (not yaw)
hx = np.array([[1.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 1.0, 0.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 1.0, 0.0],
               [0.0, 0.0, 0.0, 0.0, 1.0]])
# sensor noise covariance
r = np.array([[0.015, 0.0, 0.0, 0.0],
              [0.0, 0.010, 0.0, 0.0],
              [0.0, 0.0, 0.1, 0.0],
              [0.0, 0.0, 0.0, 0.01]])**2
def cubature_kalman_filter(x_est, p_est, z):
    """One full CKF cycle: prediction followed by the measurement update."""
    x_pred, p_pred = cubature_prediction(x_est, p_est)
    return cubature_update(x_pred, p_pred, z)
def f(x):
    """
    CTRV motion model: advance state [x y yaw v yaw_rate] by one dt step.

    References:
    path_to_url
    path_to_url

    Bug fix: the original assigned into `x` and therefore mutated the
    caller's array in place (corrupting the shared x_0/x_est prior in
    main() and the sigma-point array in cubature_prediction()).  A copy
    is taken first, so the input is left untouched.

    NOTE(review): the CTRV equations divide by the yaw rate x[4]; the
    model is undefined for x[4] == 0 (straight-line motion).
    """
    x = np.array(x, dtype=float, copy=True)
    yaw = x[2]
    ratio = x[3] / x[4]  # v / omega, the turn radius
    x[0] = x[0] + ratio * (np.sin(x[4] * dt + yaw) - np.sin(yaw))
    x[1] = x[1] + ratio * (-np.cos(x[4] * dt + yaw) + np.cos(yaw))
    x[2] = yaw + x[4] * dt
    # velocity x[3] and yaw rate x[4] are constant under CTRV
    return x
def h(x):
    """Measurement model: project the state onto the observed components."""
    return hx @ x
def sigma(x, p):
    """
    Generate 2n cubature sigma points and their weights.

    Cubature rule (a special case of the unscented transform): points at
    x +/- sqrt(n) * column_i(sqrt(p)), all with equal weight 1/(2n); the
    centre point has zero weight and is omitted.

    Improvement: sqrtm(p) is loop-invariant, so it is computed once
    instead of on every iteration.
    """
    n = np.shape(x)[0]
    SP = np.zeros((n, 2 * n))
    W = np.full((1, 2 * n), 1.0 / (2 * n))
    SD = sqrtm(p)  # hoisted out of the loop
    scale = math.sqrt(n)
    for i in range(n):
        offset = (scale * SD[:, i]).reshape((n, 1))
        SP[:, i] = (x + offset).flatten()
        SP[:, i + n] = (x - offset).flatten()
    return SP, W
def cubature_prediction(x_pred, p_pred):
    """
    Cubature prediction step: propagate the sigma points through the
    motion model and form the predicted mean and covariance (seeded with
    the process noise q).

    Bug fix: f() mutates its argument in place, so the original second
    loop re-applied the motion model to already-propagated points (the
    covariance was computed around doubly-propagated sigma points).  Each
    point is now propagated exactly once and the result reused.
    """
    n = np.shape(x_pred)[0]
    SP, W = sigma(x_pred, p_pred)
    # propagate every sigma point once, on a copy so SP stays intact
    propagated = [f(SP[:, i].copy()).reshape((n, 1)) for i in range(2 * n)]
    x_pred = np.zeros((n, 1))
    for i in range(2 * n):
        x_pred = x_pred + propagated[i] * W[0, i]
    p_pred = q
    for i in range(2 * n):
        d = propagated[i] - x_pred
        p_pred = p_pred + (d @ d.T * W[0, i])
    return x_pred, p_pred
def cubature_update(x_pred, p_pred, z):
    """Cubature measurement update: fold observation z into the prediction.

    Builds the predicted measurement mean y_k, innovation covariance s and
    state-measurement cross covariance P_xy from sigma points, then applies
    the Kalman correction.  Returns the corrected state and covariance.
    """
    n = np.shape(x_pred)[0]
    m = np.shape(z)[0]
    [SP, W] = sigma(x_pred, p_pred)
    y_k = np.zeros((m, 1))
    P_xy = np.zeros((n, m))
    s = r  # seed with the sensor noise covariance
    for i in range(2*n):
        y_k = y_k + (h(SP[:, i]).reshape((m, 1)) * W[0, i])
    for i in range(2*n):
        p_step = (h(SP[:, i]).reshape((m, 1)) - y_k)
        P_xy = P_xy + ((SP[:, i]).reshape((n, 1)) -
                       x_pred) @ p_step.T * W[0, i]
        s = s + p_step @ p_step.T * W[0, i]
    # pinv guards against a numerically singular innovation covariance
    x_pred = x_pred + P_xy @ np.linalg.pinv(s) @ (z - y_k)
    p_pred = p_pred - P_xy @ np.linalg.pinv(s) @ P_xy.T
    return x_pred, p_pred
def generate_measurement(x_true):
    """Observe the true state and corrupt the result with sensor noise."""
    clean = hx @ x_true
    return clean + z_noise @ np.random.randn(4, 1)
def plot_animation(i, x_true_cat, x_est_cat, z):
    """Draw one animation frame: truth (red), estimate (blue), measurement (+green)."""
    if i == 0:
        # first frame: the history arrays are still 1-D [x, y] rows
        plt.plot(x_true_cat[0], x_true_cat[1], '.r')
        plt.plot(x_est_cat[0], x_est_cat[1], '.b')
    else:
        plt.plot(x_true_cat[0:, 0], x_true_cat[0:, 1], 'r')
        plt.plot(x_est_cat[0:, 0], x_est_cat[0:, 1], 'b')
    plt.plot(z[0], z[1], '+g')
    plt.grid(True)
    plt.pause(0.001)
def plot_ellipse(x_est, p_est):
    """Draw the 3-sigma covariance ellipse of the x-y position estimate."""
    phi = np.linspace(0, 2 * math.pi, 100)
    p_ellipse = np.array(
        [[p_est[0, 0], p_est[0, 1]], [p_est[1, 0], p_est[1, 1]]])
    x0 = 3 * sqrtm(p_ellipse)  # 3-sigma scaling of the ellipse axes
    xy_1 = np.array([])
    xy_2 = np.array([])
    for i in range(100):
        arr = np.array([[math.sin(phi[i])], [math.cos(phi[i])]])
        arr = x0 @ arr  # map the unit circle through the covariance root
        xy_1 = np.hstack([xy_1, arr[0]])
        xy_2 = np.hstack([xy_2, arr[1]])
    plt.plot(xy_1 + x_est[0], xy_2 + x_est[1], 'r')
    plt.pause(0.00001)
def plot_final(x_true_cat, x_est_cat, z_cat):
    """Show the whole run: true vs estimated trajectory plus raw measurements."""
    fig = plt.figure()
    subplot = fig.add_subplot(111)
    subplot.plot(x_true_cat[0:, 0], x_true_cat[0:, 1],
                 'r', label='True Position')
    subplot.plot(x_est_cat[0:, 0], x_est_cat[0:, 1],
                 'b', label='Estimated Position')
    subplot.plot(z_cat[0:, 0], z_cat[0:, 1], '+g', label='Noisy Measurements')
    subplot.set_xlabel('x [m]')
    subplot.set_ylabel('y [m]')
    subplot.set_title('Cubature Kalman Filter - CTRV Model')
    subplot.legend(loc='upper left', shadow=True, fontsize='large')
    plt.grid(True)
    plt.show()
def main():
    """Run the CKF/CTRV simulation for N steps; plotting follows the show_* flags."""
    print(__file__ + " start!!")
    x_est = x_0
    p_est = p_0
    x_true = x_0
    # NOTE(review): x_est, x_true and x_0 all refer to the same array object
    # here — confirm the prior is meant to share storage with ground truth.
    x_true_cat = np.array([x_0[0, 0], x_0[1, 0]])
    x_est_cat = np.array([x_0[0, 0], x_0[1, 0]])
    z_cat = np.array([x_0[0, 0], x_0[1, 0]])
    for i in range(N):
        x_true = f(x_true)
        z = generate_measurement(x_true)
        if i == (N - 1) and show_final == 1:
            show_final_flag = 1
        else:
            show_final_flag = 0
        if show_animation == 1:
            plot_animation(i, x_true_cat, x_est_cat, z)
        if show_ellipse == 1:
            plot_ellipse(x_est[0:2], p_est)
        if show_final_flag == 1:
            plot_final(x_true_cat, x_est_cat, z_cat)
        x_est, p_est = cubature_kalman_filter(x_est, p_est, z)
        # accumulate x-y histories row-wise for plotting
        x_true_cat = np.vstack((x_true_cat, x_true[0:2].T))
        x_est_cat = np.vstack((x_est_cat, x_est[0:2].T))
        z_cat = np.vstack((z_cat, z[0:2].T))
    print('CKF Over')
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/cubature_kalman_filter/cubature_kalman_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,708 |
```python
"""
Histogram Filter 2D localization example
In this simulation, x,y are unknown, yaw is known.
Initial position is not needed.
author: Atsushi Sakai (@Atsushi_twi)
"""
import copy
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from scipy.ndimage import gaussian_filter
from scipy.stats import norm
# Parameters
EXTEND_AREA = 10.0  # [m] grid map extended length
SIM_TIME = 50.0  # simulation time [s]
DT = 0.1  # time tick [s]
MAX_RANGE = 10.0  # maximum observation range
MOTION_STD = 1.0  # standard deviation for motion gaussian distribution
RANGE_STD = 3.0  # standard deviation for observation gaussian distribution
# grid map param
XY_RESOLUTION = 0.5  # xy grid resolution
MIN_X = -15.0  # [m] map bounds
MIN_Y = -5.0
MAX_X = 15.0
MAX_Y = 25.0
# simulation parameters
NOISE_RANGE = 2.0  # [m] 1 range noise parameter
NOISE_SPEED = 0.5  # [m/s] 1 speed noise parameter
show_animation = True  # set False for headless runs (tests)
class GridMap:
    """Probability grid over x-y; data[ix][iy] holds the belief mass of a cell."""

    def __init__(self):
        self.data = None  # 2-D list of cell probabilities, x_w rows by y_w cols
        self.xy_resolution = None  # [m] width of one grid cell
        self.min_x = None  # [m] map bounds
        self.min_y = None
        self.max_x = None
        self.max_y = None
        self.x_w = None  # number of cells along x
        self.y_w = None  # number of cells along y
        self.dx = 0.0  # movement distance
        self.dy = 0.0  # movement distance
def histogram_filter_localization(grid_map, u, z, yaw):
    """One histogram-filter cycle: motion (predict) then observation (correct)."""
    predicted = motion_update(grid_map, u, yaw)
    return observation_update(predicted, z, RANGE_STD)
def calc_gaussian_observation_pdf(grid_map, z, iz, ix, iy, std):
    """Likelihood of range observation z[iz] if the robot sat in cell (ix, iy)."""
    # cell centre in world coordinates
    cell_x = ix * grid_map.xy_resolution + grid_map.min_x
    cell_y = iy * grid_map.xy_resolution + grid_map.min_y
    # range from the cell to the observed landmark position
    expected_range = math.hypot(cell_x - z[iz, 1], cell_y - z[iz, 2])
    # Gaussian likelihood of the range residual
    return norm.pdf(expected_range - z[iz, 0], 0.0, std)
def observation_update(grid_map, z, std):
    """Weight every cell by the likelihood of each observation, then renormalize."""
    for iz in range(z.shape[0]):
        for ix in range(grid_map.x_w):
            for iy in range(grid_map.y_w):
                likelihood = calc_gaussian_observation_pdf(
                    grid_map, z, iz, ix, iy, std)
                grid_map.data[ix][iy] *= likelihood
    return normalize_probability(grid_map)
def calc_control_input():
    """Constant command: 1 m/s forward, 0.1 rad/s turn, as a (2, 1) array."""
    return np.array([[1.0], [0.1]])
def motion_model(x, u):
    """Advance state [x y yaw v]^T one DT step with input [v, yaw_rate]^T."""
    c, s = math.cos(x[2, 0]), math.sin(x[2, 0])
    transition = np.array([[1.0, 0, 0, 0],
                           [0, 1.0, 0, 0],
                           [0, 0, 1.0, 0],
                           [0, 0, 0, 0]])
    control = np.array([[DT * c, 0],
                        [DT * s, 0],
                        [0.0, DT],
                        [1.0, 0.0]])
    return transition @ x + control @ u
def draw_heat_map(data, mx, my):
    """Render the belief grid as a pcolor heat map scaled to its peak value."""
    max_value = max([max(i_data) for i_data in data])
    plt.grid(False)
    plt.pcolor(mx, my, data, vmax=max_value, cmap=mpl.colormaps["Blues"])
    plt.axis("equal")
def observation(xTrue, u, RFID):
    """Advance the true state and simulate range observations to RF-IDs.

    Returns (new true state, observation rows [range, landmark_x,
    landmark_y] for each landmark within MAX_RANGE, noisy copy of u).

    Bug fix: the original took `ud = u[:, :]`, which is a numpy *view*,
    so `ud[0] += noise` silently modified the caller's commanded input u
    (which main() then fed to the filter).  A real copy is taken instead.
    """
    xTrue = motion_model(xTrue, u)
    z = np.zeros((0, 3))
    for i in range(len(RFID[:, 0])):
        dx = xTrue[0, 0] - RFID[i, 0]
        dy = xTrue[1, 0] - RFID[i, 1]
        d = math.hypot(dx, dy)
        if d <= MAX_RANGE:
            # add noise to range observation
            dn = d + np.random.randn() * NOISE_RANGE
            zi = np.array([dn, RFID[i, 0], RFID[i, 1]])
            z = np.vstack((z, zi))
    # add noise to speed on a copy, leaving the caller's u untouched
    ud = u.copy()
    ud[0] += np.random.randn() * NOISE_SPEED
    return xTrue, z, ud
def normalize_probability(grid_map):
    """Rescale all cell values so the whole grid sums to one."""
    total = sum(sum(row) for row in grid_map.data)
    for ix in range(grid_map.x_w):
        for iy in range(grid_map.y_w):
            grid_map.data[ix][iy] /= total
    return grid_map
def init_grid_map(xy_resolution, min_x, min_y, max_x, max_y):
    """Build a uniform (normalized) belief grid covering the given bounds."""
    grid_map = GridMap()
    grid_map.xy_resolution = xy_resolution
    grid_map.min_x = min_x
    grid_map.min_y = min_y
    grid_map.max_x = max_x
    grid_map.max_y = max_y
    grid_map.x_w = int(round((max_x - min_x) / xy_resolution))
    grid_map.y_w = int(round((max_y - min_y) / xy_resolution))
    # uniform prior: every cell starts equally likely
    grid_map.data = [[1.0] * grid_map.y_w for _ in range(grid_map.x_w)]
    return normalize_probability(grid_map)
def map_shift(grid_map, x_shift, y_shift):
    """Translate the belief grid by (x_shift, y_shift) whole cells.

    Cells whose source falls outside the grid keep their previous values
    (the grid is not zero-filled), matching the original behavior.
    """
    snapshot = copy.deepcopy(grid_map.data)
    for src_x in range(grid_map.x_w):
        for src_y in range(grid_map.y_w):
            dst_x = src_x + x_shift
            dst_y = src_y + y_shift
            if 0 <= dst_x < grid_map.x_w and 0 <= dst_y < grid_map.y_w:
                grid_map.data[dst_x][dst_y] = snapshot[src_x][src_y]
    return grid_map
def motion_update(grid_map, u, yaw):
    """Shift the belief grid by the commanded motion and blur it with a
    Gaussian (the prediction step).

    u[0] is the speed; yaw is assumed known.  Sub-cell motion accumulates
    in grid_map.dx/dy until it amounts to at least one whole cell.
    """
    grid_map.dx += DT * math.cos(yaw) * u[0]
    grid_map.dy += DT * math.sin(yaw) * u[0]
    # u[0] is a 1-element array, so x_shift/y_shift are 1-element arrays too
    x_shift = grid_map.dx // grid_map.xy_resolution
    y_shift = grid_map.dy // grid_map.xy_resolution
    if abs(x_shift) >= 1.0 or abs(y_shift) >= 1.0:  # map should be shifted
        grid_map = map_shift(grid_map, int(x_shift[0]), int(y_shift[0]))
        grid_map.dx -= x_shift * grid_map.xy_resolution
        grid_map.dy -= y_shift * grid_map.xy_resolution
    # Add motion noise (gaussian_filter also returns data as an ndarray)
    grid_map.data = gaussian_filter(grid_map.data, sigma=MOTION_STD)
    return grid_map
def calc_grid_index(grid_map):
    """Return cell-corner coordinate meshes (mx, my) for heat-map plotting."""
    half = grid_map.xy_resolution / 2.0
    mx, my = np.mgrid[
        slice(grid_map.min_x - half, grid_map.max_x + half,
              grid_map.xy_resolution),
        slice(grid_map.min_y - half, grid_map.max_y + half,
              grid_map.xy_resolution)]
    return mx, my
def main():
    """Run the histogram filter localization demo with range-only landmarks."""
    print(__file__ + " start!!")
    # RF_ID positions [x, y]
    RF_ID = np.array([[10.0, 0.0],
                      [10.0, 10.0],
                      [0.0, 15.0],
                      [-5.0, 20.0]])
    time = 0.0
    xTrue = np.zeros((4, 1))  # true state [x, y, yaw, v]^T
    grid_map = init_grid_map(XY_RESOLUTION, MIN_X, MIN_Y, MAX_X, MAX_Y)
    mx, my = calc_grid_index(grid_map)  # for grid map visualization
    while SIM_TIME >= time:
        time += DT
        print(f"{time=:.1f}")
        u = calc_control_input()
        yaw = xTrue[2, 0]  # Orientation is known
        xTrue, z, ud = observation(xTrue, u, RF_ID)
        grid_map = histogram_filter_localization(grid_map, u, z, yaw)
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            draw_heat_map(grid_map.data, mx, my)
            plt.plot(xTrue[0, :], xTrue[1, :], "xr")
            plt.plot(RF_ID[:, 0], RF_ID[:, 1], ".k")
            # draw a line to every landmark currently observed
            for i in range(z.shape[0]):
                plt.plot([xTrue[0, 0], z[i, 1]],
                         [xTrue[1, 0], z[i, 2]],
                         "-k")
            plt.title("Time[s]:" + str(time)[0: 4])
            plt.pause(0.1)
    print("Done")
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/histogram_filter/histogram_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,064 |
```python
"""
Particle Filter localization sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.angle import rot_mat_2d
# Estimation parameter of PF (noise assumed by the filter)
Q = np.diag([0.2]) ** 2  # range error
R = np.diag([2.0, np.deg2rad(40.0)]) ** 2  # input error
# Simulation parameter (noise actually injected by the simulator)
Q_sim = np.diag([0.2]) ** 2  # simulated range measurement noise
R_sim = np.diag([1.0, np.deg2rad(30.0)]) ** 2  # simulated input noise
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
MAX_RANGE = 20.0  # maximum observation range
# Particle filter parameter
NP = 100  # Number of Particle
NTh = NP / 2.0  # Number of particle for re-sampling
show_animation = True  # set False to run without plotting
def calc_input():
    """Return the constant control input [v, yaw_rate]^T as a (2, 1) array."""
    velocity = 1.0  # [m/s]
    yaw_rate = 0.1  # [rad/s]
    return np.array([[velocity], [yaw_rate]])
def observation(x_true, xd, u, rf_id):
    """Simulate one time step: propagate the true state, generate noisy range
    observations to landmarks within MAX_RANGE, and dead-reckon with a
    noise-corrupted input.

    :return: (true state, observations with rows [range, lm_x, lm_y],
              dead-reckoning state, noisy input)
    """
    x_true = motion_model(x_true, u)
    z = np.zeros((0, 3))
    for lm_x, lm_y in rf_id:
        d = math.hypot(x_true[0, 0] - lm_x, x_true[1, 0] - lm_y)
        if d <= MAX_RANGE:
            # noisy range measurement to a visible landmark
            noisy_d = d + np.random.randn() * Q_sim[0, 0] ** 0.5
            z = np.vstack((z, np.array([[noisy_d, lm_x, lm_y]])))
    # corrupt the commanded input with simulated actuator noise
    ud = np.array([[u[0, 0] + np.random.randn() * R_sim[0, 0] ** 0.5],
                   [u[1, 0] + np.random.randn() * R_sim[1, 1] ** 0.5]])
    xd = motion_model(xd, ud)
    return x_true, z, xd, ud
def motion_model(x, u):
    """Constant-velocity unicycle motion model.

    State x = [x, y, yaw, v]^T, input u = [v, yaw_rate]^T; the speed state is
    replaced by the commanded speed each step (F zeroes it, B injects u[0]).
    """
    yaw = x[2, 0]
    F = np.diag([1.0, 1.0, 1.0, 0.0])
    B = np.array([[DT * math.cos(yaw), 0.0],
                  [DT * math.sin(yaw), 0.0],
                  [0.0, DT],
                  [1.0, 0.0]])
    return F @ x + B @ u
def gauss_likelihood(x, sigma):
    """Evaluate the zero-mean 1D Gaussian pdf N(0, sigma^2) at x."""
    coeff = 1.0 / math.sqrt(2.0 * math.pi * sigma ** 2)
    return coeff * math.exp(-x ** 2 / (2 * sigma ** 2))
def calc_covariance(x_est, px, pw):
    """Weighted sample covariance of the particle set around x_est.

    Applies the unbiased weighted normalization 1 / (1 - sum(w_i^2)).

    :param x_est: (4, 1) state estimate
    :param px: (4, NP) particle states
    :param pw: (1, NP) particle weights
    :return: (4, 4) covariance matrix
    """
    cov = np.zeros((4, 4))
    for i in range(px.shape[1]):
        deviation = px[:, i:i + 1] - x_est
        cov += pw[0, i] * (deviation @ deviation.T)
    cov *= 1.0 / (1.0 - pw @ pw.T)
    return cov
def pf_localization(px, pw, z, u):
    """
    Localization with Particle filter

    Predicts every particle with an input sampled around u, re-weights it by
    the likelihood of the range observations, then normalizes the weights and
    re-samples when the effective particle number drops below NTh.

    :param px: (4, NP) particle states, updated in place
    :param pw: (1, NP) particle weights, updated in place
    :param z: observations, one row [range, landmark_x, landmark_y]
    :param u: control input [v, yaw_rate]^T
    :return: (x_est, p_est, px, pw) -- weighted-mean state, covariance,
        and the (possibly re-sampled) particle set
    """
    for ip in range(NP):
        x = np.array([px[:, ip]]).T
        w = pw[0, ip]
        # Predict with random input sampling
        ud1 = u[0, 0] + np.random.randn() * R[0, 0] ** 0.5
        ud2 = u[1, 0] + np.random.randn() * R[1, 1] ** 0.5
        ud = np.array([[ud1, ud2]]).T
        x = motion_model(x, ud)
        # Calc Importance Weight: product of per-landmark range likelihoods
        for i in range(len(z[:, 0])):
            dx = x[0, 0] - z[i, 1]
            dy = x[1, 0] - z[i, 2]
            pre_z = math.hypot(dx, dy)
            dz = pre_z - z[i, 0]
            w = w * gauss_likelihood(dz, math.sqrt(Q[0, 0]))
        px[:, ip] = x[:, 0]
        pw[0, ip] = w
    pw = pw / pw.sum()  # normalize
    x_est = px.dot(pw.T)
    p_est = calc_covariance(x_est, px, pw)
    N_eff = 1.0 / (pw.dot(pw.T))[0, 0]  # Effective particle number
    if N_eff < NTh:
        px, pw = re_sampling(px, pw)
    return x_est, p_est, px, pw
def re_sampling(px, pw):
    """Low-variance (systematic) re-sampling.

    Draws NP particles with probability proportional to their weights using
    NP evenly spaced pointers jittered by one shared random offset, then
    resets all weights to 1/NP.
    """
    cum_weight = np.cumsum(pw)
    pointers = np.arange(0.0, 1.0, 1 / NP) + np.random.uniform(0, 1 / NP)
    indexes = []
    ind = 0
    for ip in range(NP):
        while pointers[ip] > cum_weight[ind]:
            ind += 1
        indexes.append(ind)
    return px[:, indexes], np.full((1, NP), 1.0 / NP)
def plot_covariance_ellipse(x_est, p_est):  # pragma: no cover
    """Draw the covariance ellipse of the x-y position estimate."""
    p_xy = p_est[0:2, 0:2]
    eig_val, eig_vec = np.linalg.eig(p_xy)
    # put the larger eigenvalue (major axis) first
    if eig_val[0] >= eig_val[1]:
        big_ind = 0
        small_ind = 1
    else:
        big_ind = 1
        small_ind = 0
    t = np.arange(0, 2 * math.pi + 0.1, 0.1)
    # eig_val[big_ind] or eiq_val[small_ind] were occasionally negative
    # numbers extremely close to 0 (~10^-20), catch these cases and set the
    # respective variable to 0
    try:
        a = math.sqrt(eig_val[big_ind])
    except ValueError:
        a = 0
    try:
        b = math.sqrt(eig_val[small_ind])
    except ValueError:
        b = 0
    # axis-aligned ellipse sampled along t
    x = [a * math.cos(it) for it in t]
    y = [b * math.sin(it) for it in t]
    angle = math.atan2(eig_vec[1, big_ind], eig_vec[0, big_ind])
    # rotate to the eigenvector orientation, then translate to the estimate
    fx = rot_mat_2d(angle) @ np.array([[x, y]])
    px = np.array(fx[:, 0] + x_est[0, 0]).flatten()
    py = np.array(fx[:, 1] + x_est[1, 0]).flatten()
    plt.plot(px, py, "--r")
def main():
    """Run the particle filter localization demo with range-only landmarks."""
    print(__file__ + " start!!")
    time = 0.0
    # RF_ID positions [x, y]
    rf_id = np.array([[10.0, 0.0],
                      [10.0, 10.0],
                      [0.0, 15.0],
                      [-5.0, 20.0]])
    # State Vector [x y yaw v]'
    x_est = np.zeros((4, 1))
    x_true = np.zeros((4, 1))
    px = np.zeros((4, NP))  # Particle store
    pw = np.zeros((1, NP)) + 1.0 / NP  # Particle weight
    x_dr = np.zeros((4, 1))  # Dead reckoning
    # history (each is grown column-wise as the simulation runs)
    h_x_est = x_est
    h_x_true = x_true
    h_x_dr = x_true
    while SIM_TIME >= time:
        time += DT
        u = calc_input()
        x_true, z, x_dr, ud = observation(x_true, x_dr, u, rf_id)
        x_est, PEst, px, pw = pf_localization(px, pw, z, ud)
        # store data history
        h_x_est = np.hstack((h_x_est, x_est))
        h_x_dr = np.hstack((h_x_dr, x_dr))
        h_x_true = np.hstack((h_x_true, x_true))
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            # observation rays, landmarks, particles, then trajectories
            for i in range(len(z[:, 0])):
                plt.plot([x_true[0, 0], z[i, 1]], [x_true[1, 0], z[i, 2]], "-k")
            plt.plot(rf_id[:, 0], rf_id[:, 1], "*k")
            plt.plot(px[0, :], px[1, :], ".r")
            plt.plot(np.array(h_x_true[0, :]).flatten(),
                     np.array(h_x_true[1, :]).flatten(), "-b")
            plt.plot(np.array(h_x_dr[0, :]).flatten(),
                     np.array(h_x_dr[1, :]).flatten(), "-k")
            plt.plot(np.array(h_x_est[0, :]).flatten(),
                     np.array(h_x_est[1, :]).flatten(), "-r")
            plot_covariance_ellipse(x_est, PEst)
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Localization/particle_filter/particle_filter.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,286 |
```python
"""
Simulator
author: Atsushi Sakai
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from utils.angle import rot_mat_2d
class VehicleSimulator:
    """Kinematic box-shaped vehicle used to generate lidar targets.

    :param i_x, i_y, i_yaw: initial pose
    :param i_v: initial speed [m/s]
    :param max_v: speed limit [m/s]
    :param w: vehicle width [m]
    :param L: vehicle length [m]
    """

    def __init__(self, i_x, i_y, i_yaw, i_v, max_v, w, L):
        self.x = i_x
        self.y = i_y
        self.yaw = i_yaw
        self.v = i_v
        self.max_v = max_v
        self.W = w
        self.L = L
        self._calc_vehicle_contour()

    def update(self, dt, a, omega):
        """Advance the pose by dt under acceleration a and yaw rate omega."""
        # position/heading integrate with the speed from *before* this step
        self.x += self.v * np.cos(self.yaw) * dt
        self.y += self.v * np.sin(self.yaw) * dt
        self.yaw += omega * dt
        self.v = min(self.v + a * dt, self.max_v)

    def plot(self):
        """Draw the vehicle center and its contour in the global frame."""
        plt.plot(self.x, self.y, ".b")
        gx, gy = self.calc_global_contour()
        plt.plot(gx, gy, "--b")

    def calc_global_contour(self):
        """Return the contour points transformed into the global frame."""
        local = np.stack([self.vc_x, self.vc_y]).T
        rotated = local @ rot_mat_2d(self.yaw)
        return rotated[:, 0] + self.x, rotated[:, 1] + self.y

    def _calc_vehicle_contour(self):
        """Build the densely sampled rectangular contour in the body frame."""
        half_l, half_w = self.L / 2.0, self.W / 2.0
        # closed rectangle: 4 corners plus a repeat of the first one
        corners_x = [half_l, half_l, -half_l, -half_l, half_l]
        corners_y = [half_w, -half_w, -half_w, half_w, half_w]
        self.vc_x, self.vc_y = self._interpolate(corners_x, corners_y)

    @staticmethod
    def _interpolate(x, y):
        """Linearly up-sample the polyline (x, y) with 20 points per edge."""
        rx, ry = [], []
        d_theta = 0.05
        for i in range(len(x) - 1):
            rx.extend([(1.0 - t) * x[i] + t * x[i + 1]
                       for t in np.arange(0.0, 1.0, d_theta)])
            ry.extend([(1.0 - t) * y[i] + t * y[i + 1]
                       for t in np.arange(0.0, 1.0, d_theta)])
        # NOTE: this closes from the last point to index 1; since the input
        # contour is already closed it re-traces the first edge -- kept
        # unchanged for compatibility with the original point count.
        rx.extend([(1.0 - t) * x[-1] + t * x[1]
                   for t in np.arange(0.0, 1.0, d_theta)])
        ry.extend([(1.0 - t) * y[-1] + t * y[1]
                   for t in np.arange(0.0, 1.0, d_theta)])
        return rx, ry
class LidarSimulator:
    """Simulated 2D lidar placed at the origin of the global frame."""

    def __init__(self):
        self.range_noise = 0.01  # multiplicative range noise ratio

    def get_observation_points(self, v_list, angle_resolution):
        """Scan all vehicles and return the visible points after ray casting.

        :param v_list: list of VehicleSimulator objects
        :param angle_resolution: angular bin size [rad]
        :return: (x list, y list) of observed contour points
        """
        angles, ranges = [], []
        for vehicle in v_list:
            gx, gy = vehicle.calc_global_contour()
            for px, py in zip(gx, gy):
                noise = random.uniform(1.0 - self.range_noise,
                                       1.0 + self.range_noise)
                angles.append(math.atan2(py, px))
                ranges.append(np.hypot(px, py) * noise)
        return self.ray_casting_filter(angles, ranges, angle_resolution)

    @staticmethod
    def ray_casting_filter(theta_l, range_l, angle_resolution):
        """Keep only the nearest return per angular bin (occlusion model)."""
        n_bins = int(np.floor((np.pi * 2.0) / angle_resolution)) + 1
        range_db = [float("inf")] * n_bins
        for theta, rng in zip(theta_l, range_l):
            angle_id = int(round(theta / angle_resolution))
            range_db[angle_id] = min(range_db[angle_id], rng)
        rx, ry = [], []
        for i, rng in enumerate(range_db):
            if rng != float("inf"):
                t = i * angle_resolution
                rx.append(rng * np.cos(t))
                ry.append(rng * np.sin(t))
        return rx, ry
``` | /content/code_sandbox/Mapping/rectangle_fitting/simulator.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,106 |
```python
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent))
``` | /content/code_sandbox/Mapping/rectangle_fitting/__init_.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 19 |
```python
"""
Object shape recognition with L-shape fitting
author: Atsushi Sakai (@Atsushi_twi)
Ref:
- Efficient L-Shape Fitting for Vehicle Detection Using Laser Scanners -
The Robotics Institute Carnegie Mellon University
(paper available from The Robotics Institute, Carnegie Mellon University
publications page)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import matplotlib.pyplot as plt
import numpy as np
import itertools
from enum import Enum
from utils.angle import rot_mat_2d
from Mapping.rectangle_fitting.simulator \
import VehicleSimulator, LidarSimulator
show_animation = True
class LShapeFitting:
    """
    L-shape (rectangle) fitting for 2D range points.

    Instantiate, optionally tune the public parameters, then call
    :meth:`fitting` with the scan points.
    """

    class Criteria(Enum):
        AREA = 1
        CLOSENESS = 2
        VARIANCE = 3

    def __init__(self):
        """Set up default parameters."""
        #: Fitting criteria parameter
        self.criteria = self.Criteria.VARIANCE
        #: Minimum distance for closeness criteria parameter [m]
        self.min_dist_of_closeness_criteria = 0.01
        #: Angle difference parameter [deg]
        self.d_theta_deg_for_search = 1.0
        #: Range segmentation parameter [m]
        self.R0 = 3.0
        #: Range segmentation parameter [m]
        self.Rd = 0.001

    def fitting(self, ox, oy):
        """Fit an L-shape rectangle model to each cluster of range points.

        Parameters
        ----------
        ox : x positions of range points from an object
        oy : y positions of range points from an object

        Returns
        -------
        rects: Fitting rectangles
        id_sets: id sets of each cluster
        """
        # step1: Adaptive Range Segmentation
        id_sets = self._adoptive_range_segmentation(ox, oy)
        # step2: Rectangle search on every cluster
        rects = [self._rectangle_search(
                     [ox[i] for i in range(len(ox)) if i in ids],
                     [oy[i] for i in range(len(oy)) if i in ids])
                 for ids in id_sets]
        return rects, id_sets

    @staticmethod
    def _calc_area_criterion(c1, c2):
        """Negative bounding-box area (larger is better)."""
        c1_max, c1_min, c2_max, c2_min = LShapeFitting._find_min_max(c1, c2)
        return -(c1_max - c1_min) * (c2_max - c2_min)

    def _calc_closeness_criterion(self, c1, c2):
        """Sum of inverse point-to-nearest-edge distances (larger is better)."""
        c1_max, c1_min, c2_max, c2_min = self._find_min_max(c1, c2)
        d1 = np.minimum(c1_max - c1, c1 - c1_min)
        d2 = np.minimum(c2_max - c2, c2 - c2_min)
        # clamp distances so a point on an edge cannot dominate the sum
        d = np.maximum(np.minimum(d1, d2), self.min_dist_of_closeness_criteria)
        return (1.0 / d).sum()

    @staticmethod
    def _calc_variance_criterion(c1, c2):
        """Negative summed variance of edge distances (larger is better)."""
        c1_max, c1_min, c2_max, c2_min = LShapeFitting._find_min_max(c1, c2)
        d1 = np.minimum(c1_max - c1, c1 - c1_min)
        d2 = np.minimum(c2_max - c2, c2 - c2_min)
        # split points by whichever projected edge they are closest to
        e1 = d1[d1 < d2]
        e2 = d2[d1 >= d2]
        v1 = -np.var(e1) if len(e1) > 0 else 0.
        v2 = -np.var(e2) if len(e2) > 0 else 0.
        return v1 + v2

    @staticmethod
    def _find_min_max(c1, c2):
        """Return (max(c1), min(c1), max(c2), min(c2))."""
        return max(c1), min(c1), max(c2), min(c2)

    def _rectangle_search(self, x, y):
        """Scan candidate rotations and fit the best-scoring rectangle."""
        xy = np.array([x, y]).T
        d_theta = np.deg2rad(self.d_theta_deg_for_search)
        criterion_funcs = {
            self.Criteria.AREA: self._calc_area_criterion,
            self.Criteria.CLOSENESS: self._calc_closeness_criterion,
            self.Criteria.VARIANCE: self._calc_variance_criterion,
        }
        calc_cost = criterion_funcs[self.criteria]
        best_cost, best_theta = -float('inf'), None
        for theta in np.arange(0.0, np.pi / 2.0 - d_theta, d_theta):
            rotated = xy @ rot_mat_2d(theta)
            cost = calc_cost(rotated[:, 0], rotated[:, 1])
            if best_cost < cost:
                best_cost, best_theta = cost, theta
        # build the rectangle's four edge lines a*x + b*y = c from the best
        # rotation: edges 0/2 bound the first axis, edges 1/3 the second
        sin_s = np.sin(best_theta)
        cos_s = np.cos(best_theta)
        c1_s = xy @ np.array([cos_s, sin_s]).T
        c2_s = xy @ np.array([-sin_s, cos_s]).T
        rect = RectangleData()
        rect.a = [cos_s, -sin_s, cos_s, -sin_s]
        rect.b = [sin_s, cos_s, sin_s, cos_s]
        rect.c = [min(c1_s), min(c2_s), max(c1_s), max(c2_s)]
        return rect

    def _adoptive_range_segmentation(self, ox, oy):
        """Cluster points using a range-dependent neighborhood radius."""
        # initial clusters: every point collects its neighbors within radius
        segment_list = []
        for i, _ in enumerate(ox):
            r = self.R0 + self.Rd * np.linalg.norm([ox[i], oy[i]])
            neighbor_ids = {j for j, _ in enumerate(ox)
                            if np.hypot(ox[i] - ox[j], oy[i] - oy[j]) <= r}
            segment_list.append(neighbor_ids)
        # merge clusters that share at least one point, until stable
        while True:
            no_change = True
            for (c1, c2) in list(itertools.permutations(range(len(segment_list)), 2)):
                if segment_list[c1] & segment_list[c2]:
                    segment_list[c1] = (segment_list[c1] | segment_list.pop(c2))
                    no_change = False
                    break
            if no_change:
                break
        return segment_list
class RectangleData:
    """Rectangle represented by four edge lines a[i]*x + b[i]*y = c[i]."""

    def __init__(self):
        self.a = [None] * 4
        self.b = [None] * 4
        self.c = [None] * 4
        # contour corners; 5 points because the first one is repeated to
        # close the polygon for plotting
        self.rect_c_x = [None] * 5
        self.rect_c_y = [None] * 5

    def plot(self):
        """Compute the contour and draw it as a red rectangle."""
        self.calc_rect_contour()
        plt.plot(self.rect_c_x, self.rect_c_y, "-r")

    def calc_rect_contour(self):
        """Fill rect_c_x / rect_c_y with the corner points of the rectangle."""
        for k in range(4):
            nxt = (k + 1) % 4  # adjacent edges k and k+1 meet at corner k
            self.rect_c_x[k], self.rect_c_y[k] = self.calc_cross_point(
                [self.a[k], self.a[nxt]], [self.b[k], self.b[nxt]],
                [self.c[k], self.c[nxt]])
        # repeat the first corner to close the polygon
        self.rect_c_x[4], self.rect_c_y[4] = self.rect_c_x[0], self.rect_c_y[0]

    @staticmethod
    def calc_cross_point(a, b, c):
        """Intersection of the two lines a[i]*x + b[i]*y = c[i] (i = 0, 1)."""
        det = a[0] * b[1] - a[1] * b[0]
        x = (b[0] * -c[1] - b[1] * -c[0]) / det
        y = (a[1] * -c[0] - a[0] * -c[1]) / det
        return x, y
def main():
    """Simulate two moving vehicles, scan them with a virtual lidar, and fit
    L-shaped rectangles to the clustered scan points at every time step."""
    # simulation parameters
    sim_time = 30.0  # simulation time
    dt = 0.2  # time tick
    angle_resolution = np.deg2rad(3.0)  # sensor angle resolution
    v1 = VehicleSimulator(-10.0, 0.0, np.deg2rad(90.0),
                          0.0, 50.0 / 3.6, 3.0, 5.0)
    v2 = VehicleSimulator(20.0, 10.0, np.deg2rad(180.0),
                          0.0, 50.0 / 3.6, 4.0, 10.0)
    l_shape_fitting = LShapeFitting()
    lidar_sim = LidarSimulator()
    time = 0.0
    while time <= sim_time:
        time += dt
        v1.update(dt, 0.1, 0.0)
        v2.update(dt, 0.1, -0.05)
        ox, oy = lidar_sim.get_observation_points([v1, v2], angle_resolution)
        rects, id_sets = l_shape_fitting.fitting(ox, oy)
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.axis("equal")
            plt.plot(0.0, 0.0, "*r")
            v1.plot()
            v2.plot()
            # Plot range observation: rays from the sensor plus the points
            for ids in id_sets:
                x = [ox[i] for i in range(len(ox)) if i in ids]
                y = [oy[i] for i in range(len(ox)) if i in ids]
                for (ix, iy) in zip(x, y):
                    plt.plot([0.0, ix], [0.0, iy], "-og")
                plt.plot([ox[i] for i in range(len(ox)) if i in ids],
                         [oy[i] for i in range(len(ox)) if i in ids],
                         "o")
            for rect in rects:
                rect.plot()
            plt.pause(0.1)
    print("Done")
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Mapping/rectangle_fitting/rectangle_fitting.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,527 |
```python
"""
2D gaussian grid map sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
EXTEND_AREA = 10.0  # [m] margin added around the observed points when sizing the grid map
show_animation = True  # set False to run without plotting
def generate_gaussian_grid_map(ox, oy, xyreso, std):
    """Build a grid map where each cell holds the probability that an obstacle
    is nearby: 1 - CDF of the distance to the closest observed point under a
    zero-mean Gaussian with the given standard deviation.

    :param ox: x positions of observed points
    :param oy: y positions of observed points
    :param xyreso: grid resolution [m]
    :param std: standard deviation of the Gaussian distance model [m]
    :return: (gmap, minx, maxx, miny, maxy)
    """
    minx, miny, maxx, maxy, xw, yw = calc_grid_map_config(ox, oy, xyreso)
    gmap = [[0.0 for _ in range(yw)] for _ in range(xw)]
    for ix in range(xw):
        for iy in range(yw):
            x = ix * xyreso + minx
            y = iy * xyreso + miny
            # distance to the closest observed point; inf (probability 0)
            # when there are no observations, as in the original loop
            mindis = min((math.hypot(iox - x, ioy - y)
                          for (iox, ioy) in zip(ox, oy)),
                         default=float("inf"))
            gmap[ix][iy] = 1.0 - norm.cdf(mindis, 0.0, std)
    return gmap, minx, maxx, miny, maxy
def calc_grid_map_config(ox, oy, xyreso):
    """Determine grid map extent and cell counts from the observed points,
    padded by EXTEND_AREA / 2 on every side."""
    half_extend = EXTEND_AREA / 2.0
    minx = round(min(ox) - half_extend)
    miny = round(min(oy) - half_extend)
    maxx = round(max(ox) + half_extend)
    maxy = round(max(oy) + half_extend)
    xw = int(round((maxx - minx) / xyreso))
    yw = int(round((maxy - miny) / xyreso))
    return minx, miny, maxx, maxy, xw, yw
def draw_heatmap(data, minx, maxx, miny, maxy, xyreso):
    """Plot the grid map as a pcolor heat map in world coordinates."""
    half = xyreso / 2.0
    x, y = np.mgrid[slice(minx - half, maxx + half, xyreso),
                    slice(miny - half, maxy + half, xyreso)]
    plt.pcolor(x, y, data, vmax=1.0, cmap=plt.cm.Blues)
    plt.axis("equal")
def main():
    """Demo: Gaussian grid maps for a few random obstacle layouts."""
    print(__file__ + " start!!")
    xyreso = 0.5  # xy grid resolution
    STD = 5.0  # standard deviation for the gaussian distribution
    for i in range(5):
        # four random obstacle positions in [-5, 5) x [-5, 5)
        ox = (np.random.rand(4) - 0.5) * 10.0
        oy = (np.random.rand(4) - 0.5) * 10.0
        gmap, minx, maxx, miny, maxy = generate_gaussian_grid_map(
            ox, oy, xyreso, STD)
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                                         lambda event: [exit(0) if event.key == 'escape' else None])
            draw_heatmap(gmap, minx, maxx, miny, maxy, xyreso)
            plt.plot(ox, oy, "xr")
            plt.plot(0.0, 0.0, "ob")
            plt.pause(1.0)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Mapping/gaussian_grid_map/gaussian_grid_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 784 |
```python
import numpy as np
from matplotlib import pyplot as plt
from utils.plot import plot_3d_vector_arrow, plot_triangle, set_equal_3d_axis
show_animation = True
def calc_normal_vector(p1, p2, p3):
    """Unit normal vector of the triangle (p1, p2, p3).

    Parameters
    ----------
    p1, p2, p3 : np.array
        3D points.

    Returns
    -------
    normal_vector : np.array
        Unit normal vector, shape (3,).
    """
    # two edge vectors spanning the triangle
    edge_a = p2 - p1
    edge_b = p3 - p1
    normal = np.cross(edge_a, edge_b)
    return normal / np.linalg.norm(normal)
def sample_3d_points_from_a_plane(num_samples, normal):
    """Draw random points lying on a plane with the given normal.

    Samples 2D Gaussian points in the z=0 plane, estimates the plane offset
    as the mean projection onto the normal, then projects every embedded
    point onto the target plane.
    """
    points_2d = np.random.normal(size=(num_samples, 2))
    embedded = np.hstack([points_2d, np.zeros((num_samples, 1))])
    # mean offset of the embedded samples along the normal direction
    d = sum(normal @ point for point in embedded) / num_samples
    points_3d = np.zeros((num_samples, 3))
    for i, point in enumerate(embedded):
        projection_length = (d - normal @ point) / np.linalg.norm(normal)
        points_3d[i] = point + projection_length * normal
    return points_3d
def distance_to_plane(point, normal, origin):
    """Distance from point to the plane through origin with the given normal."""
    offset = np.dot(normal, point) - np.dot(normal, origin)
    # treat numerically-on-plane points as exactly zero distance
    if np.isclose(offset, 0):
        return 0.0
    return abs(offset) / np.linalg.norm(normal)
def ransac_normal_vector_estimation(points_3d, inlier_radio_th=0.7,
                                    inlier_dist=0.1, p=0.99):
    """
    RANSAC based normal vector estimation

    Parameters
    ----------
    points_3d : np.array
        3D points (N, 3)
    inlier_radio_th : float
        Inlier ratio threshold. If inlier ratio is larger than this value,
        the iteration is stopped. Default is 0.7.
    inlier_dist : float
        Inlier distance threshold. If distance between points and estimated
        plane is smaller than this value, the point is inlier. Default is 0.1.
    p : float
        Probability that at least one of the sets of random samples does not
        include an outlier. If this probability is near 1, the iteration
        number is large. Default is 0.99.

    Returns
    -------
    center_vector : np.array
        Center of estimated plane. (3,)
    normal_vector : np.array or None
        Normal vector of estimated plane (3,), or None when no candidate
        reached the inlier ratio threshold.
    """
    center = np.mean(points_3d, axis=0)
    # standard RANSAC iteration count for a minimal sample of 3 points
    max_iter = int(np.floor(np.log(1.0 - p)
                            / np.log(1.0 - (1.0 - inlier_radio_th) ** 3)))
    for ite in range(max_iter):
        # random minimal sample of 3 distinct points
        sampled_ids = np.random.choice(points_3d.shape[0], size=3,
                                       replace=False)
        p1, p2, p3 = points_3d[sampled_ids, :]
        normal_vector = calc_normal_vector(p1, p2, p3)
        # count inliers. NOTE: the previous version reused the parameter
        # name `p` for the loop point, shadowing the probability argument;
        # the loop variable is named `point` to avoid that.
        n_inlier = 0
        for i in range(points_3d.shape[0]):
            point = points_3d[i, :]
            if distance_to_plane(point, normal_vector, center) <= inlier_dist:
                n_inlier += 1
        inlier_ratio = n_inlier / points_3d.shape[0]
        print(f"Iter:{ite}, {inlier_ratio=}")
        if inlier_ratio > inlier_radio_th:
            return center, normal_vector
    return center, None
def main1():
    """Demo: normal vector of a single triangle, with a 3D plot."""
    p1 = np.array([0.0, 0.0, 1.0])
    p2 = np.array([1.0, 1.0, 0.0])
    p3 = np.array([0.0, 1.0, 0.0])
    center = np.mean([p1, p2, p3], axis=0)
    normal_vector = calc_normal_vector(p1, p2, p3)
    print(f"{center=}")
    print(f"{normal_vector=}")
    if show_animation:
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        set_equal_3d_axis(ax, [0.0, 2.5], [0.0, 2.5], [0.0, 3.0])
        plot_triangle(p1, p2, p3, ax)
        ax.plot(center[0], center[1], center[2], "ro")
        # arrow from the triangle's centroid along the unit normal
        plot_3d_vector_arrow(ax, center, center + normal_vector)
        plt.show()
def main2(rng=None):
    """Demo: RANSAC plane-normal estimation from noisy sampled points.

    :param rng: unused here; presumably intended for threading a random
        generator through -- TODO confirm before relying on it
    """
    true_normal = np.array([0, 1, 1])
    true_normal = true_normal / np.linalg.norm(true_normal)
    num_samples = 100
    noise_scale = 0.1
    points_3d = sample_3d_points_from_a_plane(num_samples, true_normal)
    # add random noise
    points_3d += np.random.normal(size=points_3d.shape, scale=noise_scale)
    print(f"{points_3d.shape=}")
    center, estimated_normal = ransac_normal_vector_estimation(
        points_3d, inlier_dist=noise_scale)
    if estimated_normal is None:
        print("Failed to estimate normal vector")
        return
    print(f"{true_normal=}")
    print(f"{estimated_normal=}")
    if show_animation:
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.plot(points_3d[:, 0], points_3d[:, 1], points_3d[:, 2], ".r")
        plot_3d_vector_arrow(ax, center, center + true_normal)
        plot_3d_vector_arrow(ax, center, center + estimated_normal)
        set_equal_3d_axis(ax, [-3.0, 3.0], [-3.0, 3.0], [-3.0, 3.0])
        plt.title("RANSAC based Normal vector estimation")
        plt.show()
if __name__ == '__main__':
    # main1()
    main2()
``` | /content/code_sandbox/Mapping/normal_vector_estimation/normal_vector_estimation.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,503 |
```python
"""
Ray casting 2D grid map example
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import numpy as np
import matplotlib.pyplot as plt
EXTEND_AREA = 10.0  # [m] margin added around observed points when sizing the map
show_animation = True  # set False to run without plotting
def calc_grid_map_config(ox, oy, xyreso):
    """Compute grid extent (padded by EXTEND_AREA / 2 per side) and cell
    counts from the observed points."""
    pad = EXTEND_AREA / 2.0
    minx, miny = round(min(ox) - pad), round(min(oy) - pad)
    maxx, maxy = round(max(ox) + pad), round(max(oy) + pad)
    xw = int(round((maxx - minx) / xyreso))
    yw = int(round((maxy - miny) / xyreso))
    return minx, miny, maxx, maxy, xw, yw
class precastDB:
    """Pre-cast database entry: one grid cell as seen from the origin."""

    def __init__(self):
        self.px = 0.0  # cell x position [m]
        self.py = 0.0  # cell y position [m]
        self.d = 0.0  # distance from the origin [m]
        self.angle = 0.0  # bearing from the origin [rad]
        self.ix = 0  # grid x index
        self.iy = 0  # grid y index

    def __str__(self):
        return f"{self.px},{self.py},{self.d},{self.angle}"
def atan_zero_to_twopi(y, x):
    """atan2 remapped from (-pi, pi] to the range [0, 2*pi)."""
    angle = math.atan2(y, x)
    return angle + math.pi * 2.0 if angle < 0.0 else angle
def precasting(minx, miny, xw, yw, xyreso, yawreso):
    """Pre-compute, for every angular bin, the list of grid cells whose
    center falls into that bin (with position, distance and bearing)."""
    n_bins = int(round((math.pi * 2.0) / yawreso)) + 1
    precast = [[] for _ in range(n_bins)]
    for ix in range(xw):
        for iy in range(yw):
            pc = precastDB()
            pc.px = ix * xyreso + minx
            pc.py = iy * xyreso + miny
            pc.d = math.hypot(pc.px, pc.py)
            pc.angle = atan_zero_to_twopi(pc.py, pc.px)
            pc.ix = ix
            pc.iy = iy
            angleid = int(math.floor(pc.angle / yawreso))
            precast[angleid].append(pc)
    return precast
def generate_ray_casting_grid_map(ox, oy, xyreso, yawreso):
    """Build an occupancy grid via ray casting: 1.0 = obstacle cell,
    0.5 = shadowed (behind an obstacle along the same ray), 0.0 = visible."""
    minx, miny, maxx, maxy, xw, yw = calc_grid_map_config(ox, oy, xyreso)
    pmap = [[0.0 for _ in range(yw)] for _ in range(xw)]
    precast = precasting(minx, miny, xw, yw, xyreso, yawreso)
    for (x, y) in zip(ox, oy):
        d = math.hypot(x, y)
        angleid = int(math.floor(atan_zero_to_twopi(y, x) / yawreso))
        # every pre-cast cell farther along this ray is occluded
        for grid in precast[angleid]:
            if grid.d > d:
                pmap[grid.ix][grid.iy] = 0.5
        ix = int(round((x - minx) / xyreso))
        iy = int(round((y - miny) / xyreso))
        pmap[ix][iy] = 1.0
    return pmap, minx, maxx, miny, maxy, xyreso
def draw_heatmap(data, minx, maxx, miny, maxy, xyreso):
    """Render the grid map with pcolor; mesh edges are offset by half a cell."""
    xs, ys = np.mgrid[slice(minx - xyreso / 2.0, maxx + xyreso / 2.0, xyreso),
                      slice(miny - xyreso / 2.0, maxy + xyreso / 2.0, xyreso)]
    plt.pcolor(xs, ys, data, vmax=1.0, cmap=plt.cm.Blues)
    plt.axis("equal")
def main():
    """Demo: ray-casting grid maps for a few random obstacle layouts."""
    print(__file__ + " start!!")
    xyreso = 0.25  # x-y grid resolution [m]
    yawreso = np.deg2rad(10.0)  # yaw angle resolution [rad]
    for i in range(5):
        # four random obstacle positions in [-5, 5) x [-5, 5)
        ox = (np.random.rand(4) - 0.5) * 10.0
        oy = (np.random.rand(4) - 0.5) * 10.0
        pmap, minx, maxx, miny, maxy, xyreso = generate_ray_casting_grid_map(
            ox, oy, xyreso, yawreso)
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                                         lambda event: [exit(0) if event.key == 'escape' else None])
            draw_heatmap(pmap, minx, maxx, miny, maxy, xyreso)
            plt.plot(ox, oy, "xr")
            plt.plot(0.0, 0.0, "ob")
            plt.pause(1.0)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Mapping/raycasting_grid_map/raycasting_grid_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,138 |
```python
"""
Object clustering with k-means algorithm
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import random
# k means parameters
MAX_LOOP = 10  # maximum number of assignment/centroid iterations
DCOST_TH = 0.1  # convergence threshold on the cost improvement
show_animation = True  # set False to run without plotting
def kmeans_clustering(rx, ry, nc):
    """Run k-means on the 2D points (rx, ry) with nc clusters.

    Alternates assignment and centroid updates until the cost improvement
    drops below DCOST_TH or MAX_LOOP iterations are reached.

    :return: the converged Clusters object
    """
    clusters = Clusters(rx, ry, nc)
    clusters.calc_centroid()
    prev_cost = float("inf")
    for loop in range(MAX_LOOP):
        print("loop:", loop)
        cost = clusters.update_clusters()
        clusters.calc_centroid()
        if abs(cost - prev_cost) < DCOST_TH:
            break
        prev_cost = cost
    return clusters
class Clusters:
    """A 2D point set with k-means labels and per-cluster centroids."""

    def __init__(self, x, y, n_label):
        self.x = x
        self.y = y
        self.n_data = len(self.x)
        self.n_label = n_label
        # start from a random label assignment
        self.labels = [random.randint(0, n_label - 1)
                       for _ in range(self.n_data)]
        self.center_x = [0.0 for _ in range(n_label)]
        self.center_y = [0.0 for _ in range(n_label)]

    def plot_cluster(self):
        """Scatter-plot each cluster's points in its own color."""
        for label in set(self.labels):
            xs, ys = self._get_labeled_x_y(label)
            plt.plot(xs, ys, ".")

    def calc_centroid(self):
        """Recompute each non-empty cluster center as the mean of its points."""
        for label in set(self.labels):
            xs, ys = self._get_labeled_x_y(label)
            self.center_x[label] = sum(xs) / len(xs)
            self.center_y[label] = sum(ys) / len(ys)

    def update_clusters(self):
        """Reassign every point to its nearest center; return the total cost
        (sum of point-to-center distances)."""
        cost = 0.0
        for ip in range(self.n_data):
            dists = [math.hypot(cx - self.x[ip], cy - self.y[ip])
                     for cx, cy in zip(self.center_x, self.center_y)]
            nearest = min(dists)
            self.labels[ip] = dists.index(nearest)
            cost += nearest
        return cost

    def _get_labeled_x_y(self, target_label):
        """Return the x and y coordinates of points carrying target_label."""
        xs = [px for px, label in zip(self.x, self.labels)
              if label == target_label]
        ys = [py for py, label in zip(self.y, self.labels)
              if label == target_label]
        return xs, ys
def calc_raw_data(cx, cy, n_points, rand_d):
    """Scatter n_points random points (uniform within +/- rand_d / 2) around
    every object center (cx[i], cy[i])."""
    def jitter(center):
        return center + rand_d * (random.random() - 0.5)

    rx, ry = [], []
    for icx, icy in zip(cx, cy):
        for _ in range(n_points):
            rx.append(jitter(icx))
            ry.append(jitter(icy))
    return rx, ry
def update_positions(cx, cy):
    """Move the two simulated objects one step along fixed velocities.

    Mutates cx and cy in place and also returns them.
    """
    # per-object step [m]: (dx, dy) for object 0 and object 1
    steps = [(0.4, 0.5), (-0.3, -0.5)]
    for i, (dx, dy) in enumerate(steps):
        cx[i] += dx
        cy[i] += dy
    return cx, cy
def main():
    """Demo: track two moving point clusters with repeated k-means."""
    print(__file__ + " start!!")
    cx = [0.0, 8.0]  # object center x positions
    cy = [0.0, 8.0]  # object center y positions
    n_points = 10  # points scattered around each center
    rand_d = 3.0  # scatter width [m]
    n_cluster = 2
    sim_time = 15.0
    dt = 1.0
    time = 0.0
    while time <= sim_time:
        print("Time:", time)
        time += dt
        # objects moving simulation
        cx, cy = update_positions(cx, cy)
        raw_x, raw_y = calc_raw_data(cx, cy, n_points, rand_d)
        clusters = kmeans_clustering(raw_x, raw_y, n_cluster)
        # for animation
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                                         lambda event: [exit(0) if event.key == 'escape' else None])
            clusters.plot_cluster()
            plt.plot(cx, cy, "or")
            plt.xlim(-2.0, 10.0)
            plt.ylim(-2.0, 10.0)
            plt.pause(dt)
    print("Done")
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Mapping/kmeans_clustering/kmeans_clustering.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,023 |
```python
"""
Grid map library in python
author: Atsushi Sakai
"""
from functools import total_ordering
import matplotlib.pyplot as plt
import numpy as np
@total_ordering
class FloatGrid:
def __init__(self, init_val=0.0):
self.data = init_val
def get_float_data(self):
return self.data
def __eq__(self, other):
if not isinstance(other, FloatGrid):
return NotImplemented
return self.get_float_data() == other.get_float_data()
def __lt__(self, other):
if not isinstance(other, FloatGrid):
return NotImplemented
return self.get_float_data() < other.get_float_data()
class GridMap:
    """
    GridMap class

    A 2D grid map whose cells hold grid objects (e.g. FloatGrid).
    Cells are stored row major in a flat list:
    grid_index = y_ind * width + x_ind.
    """

    def __init__(self, width, height, resolution,
                 center_x, center_y, init_val=FloatGrid(0.0)):
        """__init__

        :param width: number of grid for width
        :param height: number of grid for height
        :param resolution: grid resolution [m]
        :param center_x: center x position [m]
        :param center_y: center y position [m]
        :param init_val: initial value for all grid
        """
        self.width = width
        self.height = height
        self.resolution = resolution
        self.center_x = center_x
        self.center_y = center_y

        self.left_lower_x = self.center_x - self.width / 2.0 * self.resolution
        self.left_lower_y = self.center_y - self.height / 2.0 * self.resolution

        self.n_data = self.width * self.height
        # NOTE: all cells initially reference the same init_val object;
        # the setters replace the reference instead of mutating it.
        self.data = [init_val] * self.n_data
        self.data_type = type(init_val)

    def get_value_from_xy_index(self, x_ind, y_ind):
        """get_value_from_xy_index

        when the index is out of grid map area, return None

        :param x_ind: x index
        :param y_ind: y index
        """
        if x_ind is None or y_ind is None:
            return None
        # Check each axis separately: a flat-index check alone lets an
        # out-of-range x_ind wrap into the neighboring row.
        if 0 <= x_ind < self.width and 0 <= y_ind < self.height:
            return self.data[self.calc_grid_index_from_xy_index(x_ind, y_ind)]
        return None

    def get_xy_index_from_xy_pos(self, x_pos, y_pos):
        """get_xy_index_from_xy_pos

        :param x_pos: x position [m]
        :param y_pos: y position [m]
        :return: (x_ind, y_ind); an element is None when out of range
        """
        x_ind = self.calc_xy_index_from_position(
            x_pos, self.left_lower_x, self.width)
        y_ind = self.calc_xy_index_from_position(
            y_pos, self.left_lower_y, self.height)

        return x_ind, y_ind

    def set_value_from_xy_pos(self, x_pos, y_pos, val):
        """set_value_from_xy_pos

        return bool flag, which means setting value is succeeded or not

        :param x_pos: x position [m]
        :param y_pos: y position [m]
        :param val: grid value
        """
        x_ind, y_ind = self.get_xy_index_from_xy_pos(x_pos, y_pos)

        # BUGFIX: compare with None explicitly; `not x_ind` also rejected
        # the valid index 0 (first row/column could never be set by pos).
        if (x_ind is None) or (y_ind is None):
            return False  # NG

        return self.set_value_from_xy_index(x_ind, y_ind, val)

    def set_value_from_xy_index(self, x_ind, y_ind, val):
        """set_value_from_xy_index

        return bool flag, which means setting value is succeeded or not

        :param x_ind: x index
        :param y_ind: y index
        :param val: grid value
        """
        if (x_ind is None) or (y_ind is None):
            # BUGFIX: used to `return False, False` -- a tuple, which is
            # truthy and therefore read as success by callers.
            return False  # NG

        # Reject out-of-range indices per axis so e.g. x_ind == -1 cannot
        # wrap into a neighboring row through the flat index.
        if not (0 <= x_ind < self.width and 0 <= y_ind < self.height):
            return False  # NG

        grid_ind = int(y_ind * self.width + x_ind)

        if isinstance(val, self.data_type):
            self.data[grid_ind] = val
            return True  # OK
        return False  # NG

    def set_value_from_polygon(self, pol_x, pol_y, val, inside=True):
        """set_value_from_polygon

        Setting value inside or outside polygon

        :param pol_x: x position list for a polygon
        :param pol_y: y position list for a polygon
        :param val: grid value
        :param inside: setting data inside or outside
        """
        # making ring polygon
        if (pol_x[0] != pol_x[-1]) or (pol_y[0] != pol_y[-1]):
            # BUGFIX: np.append returns a new array and the result was
            # discarded before, so the polygon ring was never closed.
            pol_x = np.append(pol_x, pol_x[0])
            pol_y = np.append(pol_y, pol_y[0])

        # setting value for all grid
        for x_ind in range(self.width):
            for y_ind in range(self.height):
                x_pos, y_pos = self.calc_grid_central_xy_position_from_xy_index(
                    x_ind, y_ind)

                flag = self.check_inside_polygon(x_pos, y_pos, pol_x, pol_y)

                if flag is inside:
                    self.set_value_from_xy_index(x_ind, y_ind, val)

    def calc_grid_index_from_xy_index(self, x_ind, y_ind):
        """Convert an (x, y) index pair to the flat row-major index."""
        grid_ind = int(y_ind * self.width + x_ind)
        return grid_ind

    def calc_xy_index_from_grid_index(self, grid_ind):
        """Convert a flat row-major index back to an (x, y) index pair."""
        y_ind, x_ind = divmod(grid_ind, self.width)
        return x_ind, y_ind

    def calc_grid_index_from_xy_pos(self, x_pos, y_pos):
        """calc_grid_index_from_xy_pos

        :param x_pos: x position [m]
        :param y_pos: y position [m]
        """
        x_ind = self.calc_xy_index_from_position(
            x_pos, self.left_lower_x, self.width)
        y_ind = self.calc_xy_index_from_position(
            y_pos, self.left_lower_y, self.height)

        return self.calc_grid_index_from_xy_index(x_ind, y_ind)

    def calc_grid_central_xy_position_from_grid_index(self, grid_ind):
        """Central (x, y) position [m] of the cell at a flat index."""
        x_ind, y_ind = self.calc_xy_index_from_grid_index(grid_ind)
        return self.calc_grid_central_xy_position_from_xy_index(x_ind, y_ind)

    def calc_grid_central_xy_position_from_xy_index(self, x_ind, y_ind):
        """Central (x, y) position [m] of the cell at an (x, y) index."""
        x_pos = self.calc_grid_central_xy_position_from_index(
            x_ind, self.left_lower_x)
        y_pos = self.calc_grid_central_xy_position_from_index(
            y_ind, self.left_lower_y)

        return x_pos, y_pos

    def calc_grid_central_xy_position_from_index(self, index, lower_pos):
        """Central position [m] of a cell along one axis."""
        return lower_pos + index * self.resolution + self.resolution / 2.0

    def calc_xy_index_from_position(self, pos, lower_pos, max_index):
        """Convert a 1D position [m] to a grid index; None when out of range."""
        ind = int(np.floor((pos - lower_pos) / self.resolution))
        # BUGFIX: valid indices are 0 .. max_index - 1; the previous
        # `ind <= max_index` accepted an index one cell past the end.
        if 0 <= ind < max_index:
            return ind
        return None

    def check_occupied_from_xy_index(self, x_ind, y_ind, occupied_val):
        """True when the cell value is >= occupied_val or the index is
        outside the map."""
        val = self.get_value_from_xy_index(x_ind, y_ind)

        if val is None or val >= occupied_val:
            return True
        return False

    def expand_grid(self, occupied_val=FloatGrid(1.0)):
        """Dilate occupied cells into their neighbor cells.

        :param occupied_val: threshold value regarded as occupied
        """
        x_inds, y_inds, values = [], [], []

        # collect occupied cells first so values written below do not
        # cascade within the same call
        for ix in range(self.width):
            for iy in range(self.height):
                if self.check_occupied_from_xy_index(ix, iy, occupied_val):
                    x_inds.append(ix)
                    y_inds.append(iy)
                    values.append(self.get_value_from_xy_index(ix, iy))

        for (ix, iy, value) in zip(x_inds, y_inds, values):
            # out-of-range neighbor writes are safely rejected by the setter
            self.set_value_from_xy_index(ix + 1, iy, val=value)
            self.set_value_from_xy_index(ix, iy + 1, val=value)
            self.set_value_from_xy_index(ix + 1, iy + 1, val=value)
            self.set_value_from_xy_index(ix - 1, iy, val=value)
            self.set_value_from_xy_index(ix, iy - 1, val=value)
            self.set_value_from_xy_index(ix - 1, iy - 1, val=value)

    @staticmethod
    def check_inside_polygon(iox, ioy, x, y):
        """Even-odd rule point-in-polygon test for a closed ring polygon."""
        n_point = len(x) - 1
        inside = False
        for i1 in range(n_point):
            i2 = (i1 + 1) % (n_point + 1)

            if x[i1] >= x[i2]:
                min_x, max_x = x[i2], x[i1]
            else:
                min_x, max_x = x[i1], x[i2]
            if not min_x <= iox < max_x:
                continue

            tmp1 = (y[i2] - y[i1]) / (x[i2] - x[i1])
            if (y[i1] + tmp1 * (iox - x[i1]) - ioy) > 0.0:
                inside = not inside
        return inside

    def print_grid_map_info(self):
        """Print the grid map geometry parameters."""
        print("width:", self.width)
        print("height:", self.height)
        print("resolution:", self.resolution)
        print("center_x:", self.center_x)
        print("center_y:", self.center_y)
        print("left_lower_x:", self.left_lower_x)
        print("left_lower_y:", self.left_lower_y)
        print("n_data:", self.n_data)

    def plot_grid_map(self, ax=None):
        """Plot the float values of all cells as a heat map.

        :param ax: matplotlib axes to draw on (new figure when None)
        :return: the created heat map artist
        """
        float_data_array = np.array([d.get_float_data() for d in self.data])
        grid_data = np.reshape(float_data_array, (self.height, self.width))
        if not ax:
            fig, ax = plt.subplots()
        heat_map = ax.pcolor(grid_data, cmap="Blues", vmin=0.0, vmax=1.0)
        plt.axis("equal")

        return heat_map
def polygon_set_demo():
    """Demo: mark every cell outside a polygon as occupied and plot it."""
    pol_x = [0.0, 4.35, 20.0, 50.0, 100.0, 130.0, 40.0]
    pol_y = [0.0, -4.15, -20.0, 0.0, 30.0, 60.0, 80.0]

    gmap = GridMap(600, 290, 0.7, 60.0, 30.5)
    gmap.set_value_from_polygon(pol_x, pol_y, FloatGrid(1.0), inside=False)
    gmap.plot_grid_map()

    plt.axis("equal")
    plt.grid(True)
def position_set_demo():
    """Demo: mark a handful of individual positions as occupied and plot."""
    gmap = GridMap(100, 120, 0.5, 10.0, -0.5)

    # occupied positions [m], set one by one
    occupied_positions = [
        (10.1, -1.1), (10.1, -0.1), (10.1, 1.1),
        (11.1, 0.1), (10.1, 0.1), (9.1, 0.1),
    ]
    for x_pos, y_pos in occupied_positions:
        gmap.set_value_from_xy_pos(x_pos, y_pos, FloatGrid(1.0))

    gmap.plot_grid_map()
    plt.axis("equal")
    plt.grid(True)
def main():
    """Run both grid map demos and show the resulting figures."""
    print("start!!")

    position_set_demo()
    polygon_set_demo()
    plt.show()

    print("done!!")
if __name__ == '__main__':
main()
``` | /content/code_sandbox/Mapping/grid_map_lib/grid_map_lib.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,505 |
```python
"""
Object shape recognition with circle fitting
author: Atsushi Sakai (@Atsushi_twi)
"""
import matplotlib.pyplot as plt
import math
import random
import numpy as np
show_animation = True
def circle_fitting(x, y):
    """
    Circle Fitting with least squared

    input: point x-y positions

    output  cxe x center position
            cye y center position
            re  radius of circle
            error: prediction error
    """
    n = len(x)
    s_x = sum(x)
    s_y = sum(y)
    s_xx = sum(ix ** 2 for ix in x)
    s_yy = sum(iy ** 2 for iy in y)
    s_xy = sum(ix * iy for (ix, iy) in zip(x, y))

    # normal equations of the algebraic circle fit
    F = np.array([[s_xx, s_xy, s_x],
                  [s_xy, s_yy, s_y],
                  [s_x, s_y, n]])

    G = np.array([[-sum(ix ** 3 + ix * iy ** 2 for (ix, iy) in zip(x, y))],
                  [-sum(ix ** 2 * iy + iy ** 3 for (ix, iy) in zip(x, y))],
                  [-sum(ix ** 2 + iy ** 2 for (ix, iy) in zip(x, y))]])

    T = np.linalg.inv(F).dot(G)

    cxe = float(T[0, 0] / -2)
    cye = float(T[1, 0] / -2)
    re = math.sqrt(cxe ** 2 + cye ** 2 - T[2, 0])

    # mean-free residual: sum of signed radial errors
    error = sum(np.hypot(cxe - ix, cye - iy) - re for (ix, iy) in zip(x, y))

    return (cxe, cye, re, error)
def get_sample_points(cx, cy, cr, angle_reso):
    """Sample noisy points on a circle and filter them by ray casting.

    :param cx: circle center x [m]
    :param cy: circle center y [m]
    :param cr: circle radius [m]
    :param angle_reso: sensor angle resolution [rad]
    :return: (rx, ry) lists of points that survive the ray casting filter
    """
    x, y, angle, r = [], [], [], []

    # points sampling
    for theta in np.arange(0.0, 2.0 * math.pi, angle_reso):
        nx = cx + cr * math.cos(theta)
        ny = cy + cr * math.sin(theta)
        nangle = math.atan2(ny, nx)
        # range measurement with +-5% multiplicative noise
        nr = math.hypot(nx, ny) * random.uniform(0.95, 1.05)

        x.append(nx)
        y.append(ny)
        angle.append(nangle)
        r.append(nr)

    # ray casting filter
    return ray_casting_filter(x, y, angle, r, angle_reso)
def ray_casting_filter(xl, yl, thetal, rangel, angle_reso):
    """Keep only the nearest range measurement per angular bin.

    Points are binned by their angle from the origin; within each bin only
    the minimum range survives (simulating occlusion seen from the origin).
    """
    n_bins = int(math.floor((math.pi * 2.0) / angle_reso)) + 1
    range_db = [float("inf")] * n_bins

    # nearest hit per angular bin
    for theta, rng in zip(thetal, rangel):
        bin_id = math.floor(theta / angle_reso)
        if rng < range_db[bin_id]:
            range_db[bin_id] = rng

    # convert surviving bins back to x-y points
    rx, ry = [], []
    for i, rng in enumerate(range_db):
        if rng != float("inf"):
            t = i * angle_reso
            rx.append(rng * math.cos(t))
            ry.append(rng * math.sin(t))

    return rx, ry
def plot_circle(x, y, size, color="-b"):  # pragma: no cover
    """Plot a circle outline of radius ``size`` centered at (x, y)."""
    angles = [np.deg2rad(d) for d in range(0, 360, 5)]
    angles.append(np.deg2rad(0))  # close the outline
    plt.plot([x + size * math.cos(a) for a in angles],
             [y + size * math.sin(a) for a in angles],
             color)
def main():
    """Simulate a moving circular obstacle, fit a circle to its noisy,
    occlusion-filtered samples at every step, and report the fit error."""
    # simulation parameters
    simtime = 15.0  # simulation time
    dt = 1.0  # time tick

    cx = -2.0  # initial x position of obstacle
    cy = -8.0  # initial y position of obstacle
    cr = 1.0  # obstacle radius
    theta = np.deg2rad(30.0)  # obstacle moving direction
    angle_reso = np.deg2rad(3.0)  # sensor angle resolution

    time = 0.0
    while time <= simtime:
        time += dt

        # BUGFIX: the y step used cos(theta); a heading of `theta`
        # advances the obstacle by (cos(theta), sin(theta)) per step.
        cx += math.cos(theta)
        cy += math.sin(theta)

        x, y = get_sample_points(cx, cy, cr, angle_reso)

        ex, ey, er, error = circle_fitting(x, y)
        print("Error:", error)

        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.axis("equal")
            plt.plot(0.0, 0.0, "*r")
            plot_circle(cx, cy, cr)
            plt.plot(x, y, "xr")
            plot_circle(ex, ey, er, "-r")
            plt.pause(dt)

    print("Done")
if __name__ == '__main__':
main()
``` | /content/code_sandbox/Mapping/circle_fitting/circle_fitting.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,170 |
```python
"""
Normal Distribution Transform (NDTGrid) mapping sample
"""
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
from Mapping.grid_map_lib.grid_map_lib import GridMap
from utils.plot import plot_covariance_ellipse
class NDTMap:
    """
    Normal Distribution Transform (NDT) map class

    :param ox: obstacle x position list
    :param oy: obstacle y position list
    :param resolution: grid resolution [m]
    """

    class NDTGrid:
        """
        NDT grid
        """

        def __init__(self):
            #: Number of points in the NDTGrid grid
            self.n_points = 0
            #: Mean x position of points in the NDTGrid cell
            self.mean_x = None
            #: Mean y position of points in the NDTGrid cell
            self.mean_y = None
            #: Center x position of the NDT grid
            self.center_grid_x = None
            #: Center y position of the NDT grid
            self.center_grid_y = None
            #: Covariance matrix of the NDT grid
            self.covariance = None
            #: Eigen vectors of the NDT grid
            self.eig_vec = None
            #: Eigen values of the NDT grid
            self.eig_values = None

    def __init__(self, ox, oy, resolution):
        #: Minimum number of points in the NDT grid
        self.min_n_points = 3
        #: Resolution of the NDT grid [m]
        self.resolution = resolution
        # rounding up + right and left margin
        width = int((max(ox) - min(ox)) / resolution) + 3
        height = int((max(oy) - min(oy)) / resolution) + 3
        center_x = np.mean(ox)
        center_y = np.mean(oy)
        self.ox = ox
        self.oy = oy

        # BUGFIX: the grid map must be created *before* the index map,
        # because _create_grid_index_map converts each point position to a
        # grid index through self.grid_map.  The previous ordering raised
        # AttributeError since self.grid_map did not exist yet.
        #: NDT grid map. Each grid contains an NDTGrid object
        self.grid_map = GridMap(width, height, resolution,
                                center_x, center_y, self.NDTGrid())
        #: NDT grid index map: grid index -> indices of contained points
        self.grid_index_map = self._create_grid_index_map(ox, oy)
        self._construct_grid_map(ox, oy)

    def _construct_grid_map(self, ox, oy):
        """Fill each occupied cell of self.grid_map with NDT statistics."""
        for grid_index, inds in self.grid_index_map.items():
            ndt = self.NDTGrid()
            ndt.n_points = len(inds)
            # statistics are only meaningful with enough points
            if ndt.n_points >= self.min_n_points:
                ndt.mean_x = np.mean(ox[inds])
                ndt.mean_y = np.mean(oy[inds])
                ndt.center_grid_x, ndt.center_grid_y = \
                    self.grid_map.calc_grid_central_xy_position_from_grid_index(
                        grid_index)
                ndt.covariance = np.cov(ox[inds], oy[inds])
                ndt.eig_values, ndt.eig_vec = np.linalg.eig(ndt.covariance)
            self.grid_map.data[grid_index] = ndt

    def _create_grid_index_map(self, ox, oy):
        """Map each occupied grid index to the list of its point indices."""
        grid_index_map = defaultdict(list)
        for i in range(len(ox)):
            grid_index = self.grid_map.calc_grid_index_from_xy_pos(ox[i], oy[i])
            grid_index_map[grid_index].append(i)
        return grid_index_map
def create_dummy_observation_data():
    """Create a synthetic corridor-shaped point cloud with random noise.

    :return: (ox, oy) numpy arrays of observation x/y positions
    """
    segments = [
        # left corridor: vertical wall at x = -20
        ([-20.0] * 100, list(range(-50, 50))),
        # right corridor 1: vertical wall at x = 20, lower half
        ([20.0] * 50, list(range(-50, 0))),
        # right corridor 2: horizontal wall at y = 0
        (list(range(20, 50)), [0] * 30),
        # right corridor 3: slanted wall y = x / 2 + 10
        (list(range(20, 50)), [x / 2.0 + 10 for x in range(20, 50)]),
        # right corridor 4: vertical wall at x = 20, upper part
        ([20] * 30, list(range(20, 50))),
    ]
    ox = np.array([x for seg_x, _ in segments for x in seg_x])
    oy = np.array([y for _, seg_y in segments for y in seg_y])

    # add uniform noise in [0, 1) to every coordinate
    ox += np.random.rand(len(ox)) * 1.0
    oy += np.random.rand(len(ox)) * 1.0

    return ox, oy
def main():
    """Build an NDT map from dummy observations and visualize it."""
    print(__file__ + " start!!")

    ox, oy = create_dummy_observation_data()
    grid_resolution = 10.0
    ndt_map = NDTMap(ox, oy, grid_resolution)

    # plot raw observation
    plt.plot(ox, oy, ".r")

    # plot grid clustering
    for inds in ndt_map.grid_index_map.values():
        plt.plot(ox[inds], oy[inds], "x")

    # plot ndt grid map
    for ndt in ndt_map.grid_map.data:
        if ndt.n_points > 0:
            plot_covariance_ellipse(ndt.mean_x, ndt.mean_y,
                                    ndt.covariance, color="-k")

    plt.axis("equal")
    plt.show()
if __name__ == '__main__':
main()
``` | /content/code_sandbox/Mapping/ndt_map/ndt_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,145 |
```python
"""
LIDAR to 2D grid map example
author: Erno Horvath, Csaba Hajdu based on Atsushi Sakai's scripts
"""
import math
from collections import deque
import matplotlib.pyplot as plt
import numpy as np
EXTEND_AREA = 1.0
def file_read(f):
    """
    Reading LIDAR laser beams (angles and corresponding distance data)
    """
    angles, distances = [], []
    with open(f) as data:
        # one comma-separated "angle,distance" pair per line
        for line in data:
            fields = line.split(",")
            angles.append(float(fields[0]))
            distances.append(float(fields[1]))
    return np.array(angles), np.array(distances)
def bresenham(start, end):
    """
    Implementation of Bresenham's line drawing algorithm

    See en.wikipedia.org/wiki/Bresenham's_line_algorithm
    Bresenham's Line Algorithm
    Produces a np.array from start and end (original from roguebasin.com)

    >>> points1 = bresenham((4, 4), (6, 10))
    >>> print(points1)
    np.array([[4,4], [4,5], [5,6], [5,7], [5,8], [6,9], [6,10]])
    """
    x1, y1 = start
    x2, y2 = end
    dx, dy = x2 - x1, y2 - y1

    # work in a frame where the line is shallow (|slope| <= 1)
    is_steep = abs(dy) > abs(dx)
    if is_steep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2

    # always march in the +x direction; remember if endpoints were swapped
    swapped = x1 > x2
    if swapped:
        x1, x2 = x2, x1
        y1, y2 = y2, y1

    dx, dy = x2 - x1, y2 - y1  # recalculate differentials
    error = int(dx / 2.0)
    y_step = 1 if y1 < y2 else -1

    # generate points between start and end
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        points.append((y, x) if is_steep else (x, y))
        error -= abs(dy)
        if error < 0:
            y += y_step
            error += dx

    if swapped:  # restore the original start-to-end order
        points.reverse()

    return np.array(points)
def calc_grid_map_config(ox, oy, xy_resolution):
    """
    Calculates the size, and the maximum distances according to the the
    measurement center
    """
    half_margin = EXTEND_AREA / 2.0
    min_x = round(min(ox) - half_margin)
    min_y = round(min(oy) - half_margin)
    max_x = round(max(ox) + half_margin)
    max_y = round(max(oy) + half_margin)
    # grid counts along each axis
    xw = int(round((max_x - min_x) / xy_resolution))
    yw = int(round((max_y - min_y) / xy_resolution))
    print("The grid map is ", xw, "x", yw, ".")
    return min_x, min_y, max_x, max_y, xw, yw
def atan_zero_to_twopi(y, x):
    """Return atan2(y, x) normalized to the range [0, 2*pi)."""
    angle = math.atan2(y, x)
    return angle + math.pi * 2.0 if angle < 0.0 else angle
def init_flood_fill(center_point, obstacle_points, xy_points, min_coord,
                    xy_resolution):
    """
    center_point: center point
    obstacle_points: detected obstacles points (x,y)
    xy_points: (x,y) point pairs
    """
    center_x, center_y = center_point
    prev_ix, prev_iy = center_x - 1, center_y
    ox, oy = obstacle_points
    xw, yw = xy_points
    min_x, min_y = min_coord

    # unknown (0.5) everywhere until proven free
    occupancy_map = (np.ones((xw, yw))) * 0.5

    for obs_x, obs_y in zip(ox, oy):
        # grid indices of the occupied cell
        ix = int(round((obs_x - min_x) / xy_resolution))
        iy = int(round((obs_y - min_y) / xy_resolution))
        # carve a free ray from the previous obstacle cell to this one
        for fx, fy in bresenham((prev_ix, prev_iy), (ix, iy)):
            occupancy_map[fx][fy] = 0  # free area 0.0
        prev_ix, prev_iy = ix, iy

    return occupancy_map
def flood_fill(center_point, occupancy_map):
    """
    center_point: starting point (x,y) of fill
    occupancy_map: occupancy map generated from Bresenham ray-tracing
    """
    # BFS over 4-connected neighbours, converting unknown (0.5) to free (0.0)
    sx, sy = occupancy_map.shape
    fringe = deque([center_point])
    while fringe:
        nx, ny = fringe.pop()
        # West, East, North, South
        for mx, my in ((nx - 1, ny), (nx + 1, ny),
                       (nx, ny - 1), (nx, ny + 1)):
            if 0 <= mx < sx and 0 <= my < sy \
                    and occupancy_map[mx, my] == 0.5:
                occupancy_map[mx, my] = 0.0
                fringe.appendleft((mx, my))
def generate_ray_casting_grid_map(ox, oy, xy_resolution, breshen=True):
    """
    The breshen boolean tells if it's computed with bresenham ray casting
    (True) or with flood fill (False)

    :param ox: obstacle x positions
    :param oy: obstacle y positions
    :param xy_resolution: grid resolution
    :param breshen: True -> Bresenham ray casting, False -> flood fill
    :return: (occupancy_map, min_x, max_x, min_y, max_y, xy_resolution);
        the map encodes 0.0 = free, 0.5 = unknown, 1.0 = occupied
    """
    min_x, min_y, max_x, max_y, x_w, y_w = calc_grid_map_config(
        ox, oy, xy_resolution)
    # default 0.5 -- [[0.5 for i in range(y_w)] for i in range(x_w)]
    occupancy_map = np.ones((x_w, y_w)) / 2
    # sensor origin cell (position (0, 0) mapped into grid indices)
    center_x = int(
        round(-min_x / xy_resolution))  # center x coordinate of the grid map
    center_y = int(
        round(-min_y / xy_resolution))  # center y coordinate of the grid map
    # occupancy grid computed with bresenham ray casting
    if breshen:
        for (x, y) in zip(ox, oy):
            # x coordinate of the the occupied area
            ix = int(round((x - min_x) / xy_resolution))
            # y coordinate of the the occupied area
            iy = int(round((y - min_y) / xy_resolution))
            laser_beams = bresenham((center_x, center_y), (
                ix, iy))  # line form the lidar to the occupied point
            for laser_beam in laser_beams:
                occupancy_map[laser_beam[0]][
                    laser_beam[1]] = 0.0  # free area 0.0
            # mark the obstacle cell and a 2x2 neighbourhood as occupied
            # (assumes ix + 1 / iy + 1 stay in bounds thanks to the
            # EXTEND_AREA margin -- TODO confirm for edge measurements)
            occupancy_map[ix][iy] = 1.0  # occupied area 1.0
            occupancy_map[ix + 1][iy] = 1.0  # extend the occupied area
            occupancy_map[ix][iy + 1] = 1.0  # extend the occupied area
            occupancy_map[ix + 1][iy + 1] = 1.0  # extend the occupied area
    # occupancy grid computed with with flood fill
    else:
        occupancy_map = init_flood_fill((center_x, center_y), (ox, oy),
                                        (x_w, y_w),
                                        (min_x, min_y), xy_resolution)
        flood_fill((center_x, center_y), occupancy_map)
        occupancy_map = np.array(occupancy_map, dtype=float)
        for (x, y) in zip(ox, oy):
            ix = int(round((x - min_x) / xy_resolution))
            iy = int(round((y - min_y) / xy_resolution))
            occupancy_map[ix][iy] = 1.0  # occupied area 1.0
            occupancy_map[ix + 1][iy] = 1.0  # extend the occupied area
            occupancy_map[ix][iy + 1] = 1.0  # extend the occupied area
            occupancy_map[ix + 1][iy + 1] = 1.0  # extend the occupied area
    return occupancy_map, min_x, max_x, min_y, max_y, xy_resolution
def main():
    """
    Example usage

    Reads "lidar01.csv" (one "angle,distance" pair per line), builds an
    occupancy grid with Bresenham ray casting, and shows the raw beams
    next to the resulting grid map.
    """
    print(__file__, "start")
    xy_resolution = 0.02  # x-y grid resolution
    ang, dist = file_read("lidar01.csv")
    # polar measurements -> Cartesian obstacle positions
    ox = np.sin(ang) * dist
    oy = np.cos(ang) * dist
    occupancy_map, min_x, max_x, min_y, max_y, xy_resolution = \
        generate_ray_casting_grid_map(ox, oy, xy_resolution, True)
    xy_res = np.array(occupancy_map).shape
    plt.figure(1, figsize=(10, 4))
    # right subplot: occupancy grid heat map
    plt.subplot(122)
    plt.imshow(occupancy_map, cmap="PiYG_r")
    # cmap = "binary" "PiYG_r" "PiYG_r" "bone" "bone_r" "RdYlGn_r"
    plt.clim(-0.4, 1.4)
    plt.gca().set_xticks(np.arange(-.5, xy_res[1], 1), minor=True)
    plt.gca().set_yticks(np.arange(-.5, xy_res[0], 1), minor=True)
    plt.grid(True, which="minor", color="w", linewidth=0.6, alpha=0.5)
    plt.colorbar()
    # left subplot: raw laser beams drawn from the sensor origin
    plt.subplot(121)
    plt.plot([oy, np.zeros(np.size(oy))], [ox, np.zeros(np.size(oy))], "ro-")
    plt.axis("equal")
    plt.plot(0.0, 0.0, "ob")
    plt.gca().set_aspect("equal", "box")
    bottom, top = plt.ylim()  # return the current y-lim
    plt.ylim((top, bottom))  # rescale y axis, to match the grid orientation
    plt.grid(True)
    plt.show()
if __name__ == '__main__':
main()
``` | /content/code_sandbox/Mapping/lidar_to_grid_map/lidar_to_grid_map.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,502 |
```python
"""
Point cloud sampling example codes. This code supports
- Voxel point sampling
- Farthest point sampling
- Poisson disk sampling
"""
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from collections import defaultdict
do_plot = True
def voxel_point_sampling(original_points: npt.NDArray, voxel_size: float):
    """
    Voxel Point Sampling function.
    This function sample N-dimensional points with voxel grid.
    Points in a same voxel grid will be merged by mean operation for sampling.

    Parameters
    ----------
    original_points :  (M, N) N-dimensional points for sampling.
                       The number of points is M.
    voxel_size : voxel grid size

    Returns
    -------
    sampled points (M', N)
    """
    voxel_dict = defaultdict(list)
    for point in original_points:
        # integer voxel coordinates serve as the grouping key
        voxel_index = tuple(point // voxel_size)
        voxel_dict[voxel_index].append(point)
    # one representative (mean) point per occupied voxel
    return np.vstack([np.mean(members, axis=0)
                      for members in voxel_dict.values()])
def farthest_point_sampling(orig_points: npt.NDArray,
                            n_points: int, seed: int):
    """
    Farthest point sampling function
    This function sample N-dimensional points with the farthest point policy.

    Parameters
    ----------
    orig_points :  (M, N) N-dimensional points for sampling.
                   The number of points is M.
    n_points : number of points for sampling
    seed : random seed number

    Returns
    -------
    sampled points (n_points, N)
    """
    rng = np.random.default_rng(seed)
    n_orig = orig_points.shape[0]

    min_distances = np.ones(n_orig) * float("inf")
    # first point is chosen at random
    selected_ids = [rng.choice(range(n_orig))]

    while len(selected_ids) < n_points:
        last_point = orig_points[selected_ids[-1], :]
        dists = np.linalg.norm(orig_points[np.newaxis, :] - last_point,
                               axis=2).flatten()
        min_distances = np.minimum(min_distances, dists)
        # pick the not-yet-selected point with the largest min-distance
        for candidate in np.argsort(-min_distances):
            if candidate not in selected_ids:
                selected_ids.append(candidate)
                break

    return orig_points[selected_ids, :]
def poisson_disk_sampling(orig_points: npt.NDArray, n_points: int,
                          min_distance: float, seed: int, MAX_ITER=1000):
    """
    Poisson disk sampling function
    This function sample N-dimensional points randomly until the number of
    points keeping minimum distance between selected points.

    Parameters
    ----------
    orig_points :  (M, N) N-dimensional points for sampling.
                   The number of points is M.
    n_points : number of points for sampling
    min_distance : minimum distance between selected points.
    seed : random seed number
    MAX_ITER : Maximum number of iteration. Default is 1000.

    Returns
    -------
    sampled points (n_points or less, N)
    """
    rng = np.random.default_rng(seed)
    candidate_id = rng.choice(range(orig_points.shape[0]))
    selected_ids = [candidate_id]

    loop = 0
    while len(selected_ids) < n_points and loop <= MAX_ITER:
        # draw a random candidate and keep it only if it is far enough
        # from every already-selected point
        candidate_id = rng.choice(range(orig_points.shape[0]))
        candidate = orig_points[candidate_id, :]
        dists = np.linalg.norm(
            orig_points[np.newaxis, selected_ids] - candidate,
            axis=2).flatten()
        if min(dists) >= min_distance:
            selected_ids.append(candidate_id)
        loop += 1

    if len(selected_ids) != n_points:
        print("Could not find the specified number of points...")

    return orig_points[selected_ids, :]
def plot_sampled_points(original_points, sampled_points, method_name):
    """Show original vs. sampled points in a 3D scatter plot."""
    ax = plt.figure().add_subplot(projection='3d')
    ax.scatter(original_points[:, 0], original_points[:, 1],
               original_points[:, 2], marker=".", label="Original points")
    ax.scatter(sampled_points[:, 0], sampled_points[:, 1],
               sampled_points[:, 2], marker="o",
               label="Filtered points")
    plt.legend()
    plt.title(method_name)
    plt.axis("equal")
def main():
    """Generate a random Gaussian cloud and compare three sampling methods."""
    n_points = 1000
    seed = 1234
    rng = np.random.default_rng(seed)

    # anisotropic Gaussian blob (flat along y)
    original_points = np.vstack((rng.normal(0.0, 10.0, n_points),
                                 rng.normal(0.0, 1.0, n_points),
                                 rng.normal(0.0, 10.0, n_points))).T
    print(f"{original_points.shape=}")

    print("Voxel point sampling")
    voxel_size = 20.0
    voxel_sampling_points = voxel_point_sampling(original_points, voxel_size)
    print(f"{voxel_sampling_points.shape=}")

    print("Farthest point sampling")
    n_points = 20
    farthest_sampling_points = farthest_point_sampling(original_points,
                                                       n_points, seed)
    print(f"{farthest_sampling_points.shape=}")

    print("Poisson disk sampling")
    n_points = 20
    min_distance = 10.0
    poisson_disk_points = poisson_disk_sampling(original_points, n_points,
                                                min_distance, seed)
    print(f"{poisson_disk_points.shape=}")

    if do_plot:
        for points, title in (
                (voxel_sampling_points, "Voxel point sampling"),
                (farthest_sampling_points, "Farthest point sampling"),
                (poisson_disk_points, "poisson disk sampling")):
            plot_sampled_points(original_points, points, title)
        plt.show()
if __name__ == '__main__':
main()
``` | /content/code_sandbox/Mapping/point_cloud_sampling/point_cloud_sampling.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,278 |
```python
"""
Class for plotting a quadrotor
Author: Daniel Ingram (daniel-s-ingram)
"""
from math import cos, sin
import numpy as np
import matplotlib.pyplot as plt
class Quadrotor():
    """Simple quadrotor model that keeps its pose and, optionally, draws
    the rotor cross and the flown path with matplotlib."""

    def __init__(self, x=0, y=0, z=0, roll=0, pitch=0, yaw=0, size=0.25,
                 show_animation=True):
        # rotor end points in the body frame (homogeneous coordinates)
        self.p1 = np.array([size / 2, 0, 0, 1]).T
        self.p2 = np.array([-size / 2, 0, 0, 1]).T
        self.p3 = np.array([0, size / 2, 0, 1]).T
        self.p4 = np.array([0, -size / 2, 0, 1]).T

        # flown path history
        self.x_data = []
        self.y_data = []
        self.z_data = []
        self.show_animation = show_animation

        if self.show_animation:
            plt.ion()
            fig = plt.figure()
            # for stopping simulation with the esc key.
            fig.canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])

            self.ax = fig.add_subplot(111, projection='3d')

        self.update_pose(x, y, z, roll, pitch, yaw)

    def update_pose(self, x, y, z, roll, pitch, yaw):
        """Store a new pose, append it to the path, redraw if animating."""
        self.x = x
        self.y = y
        self.z = z
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw
        self.x_data.append(x)
        self.y_data.append(y)
        self.z_data.append(z)

        if self.show_animation:
            self.plot()

    def transformation_matrix(self):
        """Return the 3x4 homogeneous body-to-world transform."""
        cr, sr = cos(self.roll), sin(self.roll)
        cp, sp = cos(self.pitch), sin(self.pitch)
        cy, sy = cos(self.yaw), sin(self.yaw)
        return np.array(
            [[cy * cp, -sy * cr + cy * sp * sr, sy * sr + cy * sp * cr,
              self.x],
             [sy * cp, cy * cr + sy * sp * sr, -cy * sr + sy * sp * cr,
              self.y],
             [-sp, cp * sr, cp * cr, self.z]])

    def plot(self):  # pragma: no cover
        """Redraw the quadrotor cross and the flown path."""
        T = self.transformation_matrix()

        p1_t = np.matmul(T, self.p1)
        p2_t = np.matmul(T, self.p2)
        p3_t = np.matmul(T, self.p3)
        p4_t = np.matmul(T, self.p4)

        plt.cla()

        # rotor tips
        self.ax.plot([p1_t[0], p2_t[0], p3_t[0], p4_t[0]],
                     [p1_t[1], p2_t[1], p3_t[1], p4_t[1]],
                     [p1_t[2], p2_t[2], p3_t[2], p4_t[2]], 'k.')
        # the two crossing arms
        self.ax.plot([p1_t[0], p2_t[0]], [p1_t[1], p2_t[1]],
                     [p1_t[2], p2_t[2]], 'r-')
        self.ax.plot([p3_t[0], p4_t[0]], [p3_t[1], p4_t[1]],
                     [p3_t[2], p4_t[2]], 'r-')
        # flown path
        self.ax.plot(self.x_data, self.y_data, self.z_data, 'b:')

        plt.xlim(-5, 5)
        plt.ylim(-5, 5)
        self.ax.set_zlim(0, 10)

        plt.pause(0.001)
``` | /content/code_sandbox/AerialNavigation/drone_3d_trajectory_following/Quadrotor.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 897 |
```python
"""
Simulate a quadrotor following a 3D trajectory
Author: Daniel Ingram (daniel-s-ingram)
"""
from math import cos, sin
import numpy as np
from Quadrotor import Quadrotor
from TrajectoryGenerator import TrajectoryGenerator
show_animation = True
# Simulation parameters
g = 9.81
m = 0.2
Ixx = 1
Iyy = 1
Izz = 1
T = 5
# Proportional coefficients
Kp_x = 1
Kp_y = 1
Kp_z = 1
Kp_roll = 25
Kp_pitch = 25
Kp_yaw = 25
# Derivative coefficients
Kd_x = 10
Kd_y = 10
Kd_z = 1
def quad_sim(x_c, y_c, z_c):
    """
    Calculates the necessary thrust and torques for the quadrotor to
    follow the trajectory described by the sets of coefficients
    x_c, y_c, and z_c.
    """
    # initial position / velocity / acceleration state
    x_pos = -5
    y_pos = -5
    z_pos = 5
    x_vel = 0
    y_vel = 0
    z_vel = 0
    x_acc = 0
    y_acc = 0
    z_acc = 0
    # initial attitude and body rates
    roll = 0
    pitch = 0
    yaw = 0
    roll_vel = 0
    pitch_vel = 0
    yaw_vel = 0

    des_yaw = 0

    dt = 0.1
    t = 0

    q = Quadrotor(x=x_pos, y=y_pos, z=z_pos, roll=roll,
                  pitch=pitch, yaw=yaw, size=1, show_animation=show_animation)

    i = 0  # index of the current trajectory segment (0..3)
    n_run = 8
    irun = 0

    while True:
        while t <= T:
            # desired state sampled from the current segment's quintic
            # des_x_pos = calculate_position(x_c[i], t)
            # des_y_pos = calculate_position(y_c[i], t)
            des_z_pos = calculate_position(z_c[i], t)
            # des_x_vel = calculate_velocity(x_c[i], t)
            # des_y_vel = calculate_velocity(y_c[i], t)
            des_z_vel = calculate_velocity(z_c[i], t)
            des_x_acc = calculate_acceleration(x_c[i], t)
            des_y_acc = calculate_acceleration(y_c[i], t)
            des_z_acc = calculate_acceleration(z_c[i], t)

            # altitude control: PD on z plus feed-forward acceleration
            thrust = m * (g + des_z_acc + Kp_z * (des_z_pos -
                                                  z_pos) + Kd_z * (des_z_vel - z_vel))

            # attitude control: desired roll/pitch derived from the
            # desired lateral accelerations
            roll_torque = Kp_roll * \
                (((des_x_acc * sin(des_yaw) - des_y_acc * cos(des_yaw)) / g) - roll)
            pitch_torque = Kp_pitch * \
                (((des_x_acc * cos(des_yaw) - des_y_acc * sin(des_yaw)) / g) - pitch)
            yaw_torque = Kp_yaw * (des_yaw - yaw)

            # integrate angular dynamics (explicit Euler)
            roll_vel += roll_torque * dt / Ixx
            pitch_vel += pitch_torque * dt / Iyy
            yaw_vel += yaw_torque * dt / Izz

            roll += roll_vel * dt
            pitch += pitch_vel * dt
            yaw += yaw_vel * dt

            # rotate body-frame thrust into the world frame, subtract
            # gravity, and integrate translational dynamics
            R = rotation_matrix(roll, pitch, yaw)
            acc = (np.matmul(R, np.array(
                [0, 0, thrust.item()]).T) - np.array([0, 0, m * g]).T) / m
            x_acc = acc[0]
            y_acc = acc[1]
            z_acc = acc[2]
            x_vel += x_acc * dt
            y_vel += y_acc * dt
            z_vel += z_acc * dt
            x_pos += x_vel * dt
            y_pos += y_vel * dt
            z_pos += z_vel * dt

            q.update_pose(x_pos, y_pos, z_pos, roll, pitch, yaw)

            t += dt

        # advance to the next trajectory segment (wraps around the square)
        t = 0
        i = (i + 1) % 4
        irun += 1
        if irun >= n_run:
            break

    print("Done")
def calculate_position(c, t):
    """
    Calculates a position given a set of quintic coefficients and a time.

    Args
        c: List of coefficients generated by a quintic polynomial
            trajectory generator.
        t: Time at which to calculate the position

    Returns
        Position
    """
    # c[0]*t^5 + c[1]*t^4 + ... + c[4]*t + c[5]
    return sum(coef * t ** power
               for coef, power in zip(c, range(5, -1, -1)))
def calculate_velocity(c, t):
    """
    Calculates a velocity given a set of quintic coefficients and a time.

    Args
        c: List of coefficients generated by a quintic polynomial
            trajectory generator.
        t: Time at which to calculate the velocity

    Returns
        Velocity
    """
    # first derivative: 5*c[0]*t^4 + 4*c[1]*t^3 + ... + c[4]
    return sum(k * coef * t ** (k - 1)
               for k, coef in zip(range(5, 0, -1), c))
def calculate_acceleration(c, t):
    """
    Calculates an acceleration given a set of quintic coefficients and a time.

    Args
        c: List of coefficients generated by a quintic polynomial
            trajectory generator.
        t: Time at which to calculate the acceleration

    Returns
        Acceleration
    """
    # second derivative: 20*c[0]*t^3 + 12*c[1]*t^2 + 6*c[2]*t + 2*c[3]
    return sum(f * coef * t ** p
               for f, coef, p in zip((20, 12, 6, 2), c, (3, 2, 1, 0)))
def rotation_matrix(roll_array, pitch_array, yaw):
    """
    Calculates the ZYX rotation matrix.

    Args
        roll_array: Angular position about the x-axis in radians, as a
            one-element array-like (only element 0 is used).
        pitch_array: Angular position about the y-axis in radians, as a
            one-element array-like (only element 0 is used).
        yaw: Angular position about the z-axis in radians (scalar).

    Returns
        3x3 rotation matrix as NumPy array
    """
    roll = roll_array[0]
    pitch = pitch_array[0]
    # BUGFIX: the (2, 2) entry was cos(pitch) * cos(yaw); for a ZYX Euler
    # rotation it must be cos(pitch) * cos(roll) (cf. the identical matrix
    # in Quadrotor.transformation_matrix).  The old matrix was not a valid
    # rotation (not orthogonal) for nonzero yaw.
    return np.array(
        [[cos(yaw) * cos(pitch),
          -sin(yaw) * cos(roll) + cos(yaw) * sin(pitch) * sin(roll),
          sin(yaw) * sin(roll) + cos(yaw) * sin(pitch) * cos(roll)],
         [sin(yaw) * cos(pitch),
          cos(yaw) * cos(roll) + sin(yaw) * sin(pitch) * sin(roll),
          -cos(yaw) * sin(roll) + sin(yaw) * sin(pitch) * cos(roll)],
         [-sin(pitch), cos(pitch) * sin(roll), cos(pitch) * cos(roll)]
         ])
def main():
    """
    Calculates the x, y, z coefficients for the four segments
    of the trajectory
    """
    x_coeffs = [[], [], [], []]
    y_coeffs = [[], [], [], []]
    z_coeffs = [[], [], [], []]
    waypoints = [[-5, -5, 5], [5, -5, 5], [5, 5, 5], [-5, 5, 5]]

    # one quintic segment per pair of consecutive waypoints (closed loop)
    for i, start in enumerate(waypoints):
        goal = waypoints[(i + 1) % 4]
        traj = TrajectoryGenerator(start, goal, T)
        traj.solve()
        x_coeffs[i] = traj.x_c
        y_coeffs[i] = traj.y_c
        z_coeffs[i] = traj.z_c

    quad_sim(x_coeffs, y_coeffs, z_coeffs)
if __name__ == "__main__":
main()
``` | /content/code_sandbox/AerialNavigation/drone_3d_trajectory_following/drone_3d_trajectory_following.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,701 |
```python
"""
Generates a quintic polynomial trajectory.
Author: Daniel Ingram (daniel-s-ingram)
"""
import numpy as np
class TrajectoryGenerator():
    """
    Generates quintic (5th-order) polynomial coefficients joining a start
    state to a goal state over a duration T, matching position, velocity
    and acceleration at both endpoints.

    Parameters
    ----------
    start_pos, des_pos : sequence of 3 floats
        Start and goal (x, y, z) positions.
    T : float
        Duration of the trajectory segment.
    start_vel, des_vel, start_acc, des_acc : sequence of 3 floats
        Boundary velocities and accelerations; default to rest.
    """

    # NOTE: boundary-condition defaults are tuples, not lists — mutable
    # default arguments are shared between calls and are a classic
    # Python pitfall (the previous code used lists).
    def __init__(self, start_pos, des_pos, T,
                 start_vel=(0, 0, 0), des_vel=(0, 0, 0),
                 start_acc=(0, 0, 0), des_acc=(0, 0, 0)):
        self.start_x = start_pos[0]
        self.start_y = start_pos[1]
        self.start_z = start_pos[2]
        self.des_x = des_pos[0]
        self.des_y = des_pos[1]
        self.des_z = des_pos[2]
        self.start_x_vel = start_vel[0]
        self.start_y_vel = start_vel[1]
        self.start_z_vel = start_vel[2]
        self.des_x_vel = des_vel[0]
        self.des_y_vel = des_vel[1]
        self.des_z_vel = des_vel[2]
        self.start_x_acc = start_acc[0]
        self.start_y_acc = start_acc[1]
        self.start_z_acc = start_acc[2]
        self.des_x_acc = des_acc[0]
        self.des_y_acc = des_acc[1]
        self.des_z_acc = des_acc[2]
        self.T = T

    def solve(self):
        """
        Solve the three independent 6x6 linear systems (one per axis)
        for the quintic coefficients; results are stored in self.x_c,
        self.y_c and self.z_c as 6x1 column vectors, highest order first.
        """
        # Rows: position, velocity and acceleration constraints at t=0
        # and t=T for p(t) = c0 t^5 + c1 t^4 + c2 t^3 + c3 t^2 + c4 t + c5.
        A = np.array(
            [[0, 0, 0, 0, 0, 1],
             [self.T**5, self.T**4, self.T**3, self.T**2, self.T, 1],
             [0, 0, 0, 0, 1, 0],
             [5 * self.T**4, 4 * self.T**3, 3 * self.T**2, 2 * self.T, 1, 0],
             [0, 0, 0, 2, 0, 0],
             [20 * self.T**3, 12 * self.T**2, 6 * self.T, 2, 0, 0]
             ])

        b_x = np.array(
            [[self.start_x],
             [self.des_x],
             [self.start_x_vel],
             [self.des_x_vel],
             [self.start_x_acc],
             [self.des_x_acc]
             ])

        b_y = np.array(
            [[self.start_y],
             [self.des_y],
             [self.start_y_vel],
             [self.des_y_vel],
             [self.start_y_acc],
             [self.des_y_acc]
             ])

        b_z = np.array(
            [[self.start_z],
             [self.des_z],
             [self.start_z_vel],
             [self.des_z_vel],
             [self.start_z_acc],
             [self.des_z_acc]
             ])

        self.x_c = np.linalg.solve(A, b_x)
        self.y_c = np.linalg.solve(A, b_y)
        self.z_c = np.linalg.solve(A, b_z)
``` | /content/code_sandbox/AerialNavigation/drone_3d_trajectory_following/TrajectoryGenerator.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 627 |
```python
"""
Bipedal Walking with modifying designated footsteps
author: Takayuki Murooka (takayuki5168)
"""
import numpy as np
import math
from matplotlib import pyplot as plt
import matplotlib.patches as pat
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
class BipedalPlanner(object):
    """
    Bipedal walking pattern generator based on the linear inverted
    pendulum model (LIPM): the center of mass is simulated as an
    inverted pendulum over each support foot, and the designated
    footstep positions are analytically modified so the COM tracks the
    reference gait.
    """

    def __init__(self):
        self.act_p = []  # actual footstep positions
        self.ref_p = []  # reference footstep positions
        self.com_trajectory = []  # sampled (x, y) COM positions
        self.ref_footsteps = None  # reference steps, [dx, dy, dtheta] per step
        self.g = 9.8  # gravitational acceleration [m/s^2]

    def set_ref_footsteps(self, ref_footsteps):
        """Store the reference footstep list ([dx, dy, dtheta] rows)."""
        self.ref_footsteps = ref_footsteps

    def inverted_pendulum(self, x, x_dot, px_star, y, y_dot, py_star, z_c,
                          time_width):
        """
        Integrate the LIPM dynamics (x_ddot = g/z_c * (x - p)) for
        time_width seconds with the pivot at (px_star, py_star) and COM
        height z_c, using explicit Euler with 100 sub-steps.

        Every 10th sample is appended to self.com_trajectory.
        Returns the updated (x, x_dot, y, y_dot).
        """
        time_split = 100
        for i in range(time_split):
            delta_time = time_width / time_split
            x_dot2 = self.g / z_c * (x - px_star)
            x += x_dot * delta_time
            x_dot += x_dot2 * delta_time
            y_dot2 = self.g / z_c * (y - py_star)
            y += y_dot * delta_time
            y_dot += y_dot2 * delta_time
            if i % 10 == 0:
                self.com_trajectory.append([x, y])
        return x, x_dot, y, y_dot

    def walk(self, t_sup=0.8, z_c=0.8, a=10, b=1, plot=False):
        """
        Generate (and optionally animate) the walking motion.

        t_sup: support duration per step [s]; z_c: COM height [m];
        a, b: weights of the COM position/velocity tracking terms used
        when modifying the footsteps; plot: enable the 3D animation.
        """
        if self.ref_footsteps is None:
            print("No footsteps")
            return
        # set up plotter
        if plot:
            fig = plt.figure()
            ax = Axes3D(fig)
            fig.add_axes(ax)
            com_trajectory_for_plot = []
        px, py = 0.0, 0.0  # reference footstep position
        px_star, py_star = px, py  # modified footstep position
        xi, xi_dot, yi, yi_dot = 0.0, 0.0, 0.01, 0.0  # COM state
        time = 0.0
        n = 0
        self.ref_p.append([px, py, 0])
        self.act_p.append([px, py, 0])
        for i in range(len(self.ref_footsteps)):
            # simulate x, y and those of dot of inverted pendulum
            xi, xi_dot, yi, yi_dot = self.inverted_pendulum(
                xi, xi_dot, px_star, yi, yi_dot, py_star, z_c, t_sup)
            # update time
            time += t_sup
            n += 1
            # calculate px, py, x_, y_, vx_, vy_
            f_x, f_y, f_theta = self.ref_footsteps[n - 1]
            rotate_mat = np.array([[math.cos(f_theta), -math.sin(f_theta)],
                                   [math.sin(f_theta), math.cos(f_theta)]])
            if n == len(self.ref_footsteps):
                f_x_next, f_y_next, f_theta_next = 0., 0., 0.
            else:
                f_x_next, f_y_next, f_theta_next = self.ref_footsteps[n]
            rotate_mat_next = np.array(
                [[math.cos(f_theta_next), -math.sin(f_theta_next)],
                 [math.sin(f_theta_next), math.cos(f_theta_next)]])
            # analytic LIPM constants over one support phase
            Tc = math.sqrt(z_c / self.g)
            C = math.cosh(t_sup / Tc)
            S = math.sinh(t_sup / Tc)
            # next reference footstep; (-1)^n alternates left/right foot
            px, py = list(np.array([px, py])
                          + np.dot(rotate_mat,
                                   np.array([f_x, -1 * math.pow(-1, n) * f_y])
                                   ))
            x_, y_ = list(np.dot(rotate_mat_next, np.array(
                [f_x_next / 2., math.pow(-1, n) * f_y_next / 2.])))
            vx_, vy_ = list(np.dot(rotate_mat_next, np.array(
                [(1 + C) / (Tc * S) * x_, (C - 1) / (Tc * S) * y_])))
            self.ref_p.append([px, py, f_theta])
            # calculate reference COM
            xd, xd_dot = px + x_, vx_
            yd, yd_dot = py + y_, vy_
            # calculate modified footsteps (closed-form minimizer of the
            # weighted COM position/velocity tracking error)
            D = a * math.pow(C - 1, 2) + b * math.pow(S / Tc, 2)
            px_star = -a * (C - 1) / D * (xd - C * xi - Tc * S * xi_dot) \
                - b * S / (Tc * D) * (xd_dot - S / Tc * xi - C * xi_dot)
            py_star = -a * (C - 1) / D * (yd - C * yi - Tc * S * yi_dot) \
                - b * S / (Tc * D) * (yd_dot - S / Tc * yi - C * yi_dot)
            self.act_p.append([px_star, py_star, f_theta])
            # plot
            if plot:
                self.plot_animation(ax, com_trajectory_for_plot, px_star,
                                    py_star, z_c)
        if plot:
            plt.show()

    def plot_animation(self, ax, com_trajectory_for_plot, px_star, py_star,
                       z_c):  # pragma: no cover
        """Incrementally animate the COM path, the pendulum and the
        reference/actual footstep rectangles on the given 3D axis."""
        # for plot trajectory, plot in for loop
        for c in range(len(self.com_trajectory)):
            if c > len(com_trajectory_for_plot):
                # set up plotter
                plt.cla()
                # for stopping simulation with the esc key.
                plt.gcf().canvas.mpl_connect(
                    'key_release_event',
                    lambda event:
                    [exit(0) if event.key == 'escape' else None])
                ax.set_zlim(0, z_c * 2)
                ax.set_xlim(0, 1)
                ax.set_ylim(-0.5, 0.5)
                # update com_trajectory_for_plot
                com_trajectory_for_plot.append(self.com_trajectory[c])
                # plot com
                ax.plot([p[0] for p in com_trajectory_for_plot],
                        [p[1] for p in com_trajectory_for_plot], [
                            0 for _ in com_trajectory_for_plot],
                        color="red")
                # plot inverted pendulum
                ax.plot([px_star, com_trajectory_for_plot[-1][0]],
                        [py_star, com_trajectory_for_plot[-1][1]],
                        [0, z_c], color="green", linewidth=3)
                ax.scatter([com_trajectory_for_plot[-1][0]],
                           [com_trajectory_for_plot[-1][1]],
                           [z_c], color="green", s=300)
                # foot rectangle for self.ref_p
                foot_width = 0.06
                foot_height = 0.04
                for j in range(len(self.ref_p)):
                    angle = self.ref_p[j][2] + \
                        math.atan2(foot_height,
                                   foot_width) - math.pi
                    r = math.sqrt(
                        math.pow(foot_width / 3., 2) + math.pow(
                            foot_height / 2., 2))
                    rec = pat.Rectangle(xy=(
                        self.ref_p[j][0] + r * math.cos(angle),
                        self.ref_p[j][1] + r * math.sin(angle)),
                        width=foot_width,
                        height=foot_height,
                        angle=self.ref_p[j][
                        2] * 180 / math.pi,
                        color="blue", fill=False,
                        ls=":")
                    ax.add_patch(rec)
                    art3d.pathpatch_2d_to_3d(rec, z=0, zdir="z")
                # foot rectangle for self.act_p
                for j in range(len(self.act_p)):
                    angle = self.act_p[j][2] + \
                        math.atan2(foot_height,
                                   foot_width) - math.pi
                    r = math.sqrt(
                        math.pow(foot_width / 3., 2) + math.pow(
                            foot_height / 2., 2))
                    rec = pat.Rectangle(xy=(
                        self.act_p[j][0] + r * math.cos(angle),
                        self.act_p[j][1] + r * math.sin(angle)),
                        width=foot_width,
                        height=foot_height,
                        angle=self.act_p[j][
                        2] * 180 / math.pi,
                        color="blue", fill=False)
                    ax.add_patch(rec)
                    art3d.pathpatch_2d_to_3d(rec, z=0, zdir="z")
                plt.draw()
                plt.pause(0.001)
if __name__ == "__main__":
    # Demo: five steps including a turning phase (non-zero dtheta).
    planner = BipedalPlanner()
    ref_steps = [[0.0, 0.2, 0.0],
                 [0.3, 0.2, 0.0],
                 [0.3, 0.2, 0.2],
                 [0.3, 0.2, 0.2],
                 [0.0, 0.2, 0.2]]
    planner.set_ref_footsteps(ref_steps)
    planner.walk(plot=True)
``` | /content/code_sandbox/Bipedal/bipedal_planner/bipedal_planner.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,037 |
```python
"""
Move to specified pose (with Robot class)
Author: Daniel Ingram (daniel-s-ingram)
Atsushi Sakai (@Atsushi_twi)
Seied Muhammad Yazdian (@Muhammad-Yazdian)
P.I. Corke, "Robotics, Vision & Control", Springer 2017, ISBN 978-3-319-54413-7
"""
import matplotlib.pyplot as plt
import numpy as np
import copy
from move_to_pose import PathFinderController
# Simulation parameters
TIME_DURATION = 1000  # total simulated time budget [s]
TIME_STEP = 0.01  # integration/animation step [s]
AT_TARGET_ACCEPTANCE_THRESHOLD = 0.01  # rho below which a robot counts as arrived
SHOW_ANIMATION = True
PLOT_WINDOW_SIZE_X = 20  # plot extent [m]
PLOT_WINDOW_SIZE_Y = 20  # plot extent [m]
PLOT_FONT_SIZE = 8
# Global simulation state, mutated by run_simulation().
simulation_running = True
all_robots_are_at_target = False
class Pose:
    """A planar pose: position (x, y) plus heading angle theta."""

    def __init__(self, x, y, theta):
        self.x, self.y, self.theta = x, y, theta
class Robot:
    """
    A 3-DOF wheeled robot navigating on a 2D plane.

    Parameters
    ----------
    name : (string)
        The name of the robot
    color : (string)
        The color used when drawing the robot
    max_linear_speed : (float)
        The maximum linear speed the robot can reach
    max_angular_speed : (float)
        The maximum angular speed the robot can rotate about its
        vertical axis
    path_finder_controller : (PathFinderController)
        A configurable controller that computes the commanded linear and
        angular velocities toward the target pose.
    """

    def __init__(self, name, color, max_linear_speed, max_angular_speed,
                 path_finder_controller):
        self.name = name
        self.color = color
        self.MAX_LINEAR_SPEED = max_linear_speed
        self.MAX_ANGULAR_SPEED = max_angular_speed
        self.path_finder_controller = path_finder_controller
        # Position history, extended on every move() call.
        self.x_traj = []
        self.y_traj = []
        self.pose = Pose(0, 0, 0)
        self.pose_start = Pose(0, 0, 0)
        self.pose_target = Pose(0, 0, 0)
        self.is_at_target = False

    def set_start_target_poses(self, pose_start, pose_target):
        """
        Set the start and target poses of the robot.

        Parameters
        ----------
        pose_start : (Pose)
            Start pose; a copy is kept in pose_start while the live pose
            aliases (and then mutates) the passed-in object
        pose_target : (Pose)
            Target pose of the robot
        """
        self.pose_start = copy.copy(pose_start)
        self.pose_target = pose_target
        self.pose = pose_start

    @staticmethod
    def _saturate(value, limit):
        """Clamp value to the symmetric interval [-limit, limit]."""
        if abs(value) > limit:
            return np.sign(value) * limit
        return value

    def move(self, dt):
        """
        Advance the robot by one time step.

        Parameters
        ----------
        dt : (float)
            time step
        """
        self.x_traj.append(self.pose.x)
        self.y_traj.append(self.pose.y)

        rho, linear_velocity, angular_velocity = \
            self.path_finder_controller.calc_control_command(
                self.pose_target.x - self.pose.x,
                self.pose_target.y - self.pose.y,
                self.pose.theta, self.pose_target.theta)

        if rho < AT_TARGET_ACCEPTANCE_THRESHOLD:
            self.is_at_target = True

        linear_velocity = self._saturate(linear_velocity,
                                         self.MAX_LINEAR_SPEED)
        angular_velocity = self._saturate(angular_velocity,
                                          self.MAX_ANGULAR_SPEED)

        # Explicit Euler step: the heading is updated first, so the
        # position update below uses the new heading.
        self.pose.theta = self.pose.theta + angular_velocity * dt
        self.pose.x = self.pose.x + linear_velocity * \
            np.cos(self.pose.theta) * dt
        self.pose.y = self.pose.y + linear_velocity * \
            np.sin(self.pose.theta) * dt
def run_simulation(robots):
    """
    Simulates all robots simultaneously.

    Steps every robot that has not yet reached the target, until either
    all robots report arrival or the TIME_DURATION budget is exhausted;
    optionally animates start/target arrows and the vehicles.

    Parameters
    ----------
    robots : list of Robot
        Robots to simulate; set_start_target_poses() must have been
        called on each beforehand.
    """
    global all_robots_are_at_target
    global simulation_running
    robot_names = []
    for instance in robots:
        robot_names.append(instance.name)
    time = 0
    while simulation_running and time < TIME_DURATION:
        time += TIME_STEP
        # Step only robots still en route; record arrival flags for all.
        robots_are_at_target = []
        for instance in robots:
            if not instance.is_at_target:
                instance.move(TIME_STEP)
            robots_are_at_target.append(instance.is_at_target)
        if all(robots_are_at_target):
            simulation_running = False
        if SHOW_ANIMATION:
            plt.cla()
            plt.xlim(0, PLOT_WINDOW_SIZE_X)
            plt.ylim(0, PLOT_WINDOW_SIZE_Y)
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.text(0.3, PLOT_WINDOW_SIZE_Y - 1,
                     'Time: {:.2f}'.format(time),
                     fontsize=PLOT_FONT_SIZE)
            plt.text(0.3, PLOT_WINDOW_SIZE_Y - 2,
                     'Reached target: {} = '.format(robot_names)
                     + str(robots_are_at_target),
                     fontsize=PLOT_FONT_SIZE)
            # Red arrow: start pose; green arrow: target pose.
            for instance in robots:
                plt.arrow(instance.pose_start.x,
                          instance.pose_start.y,
                          np.cos(instance.pose_start.theta),
                          np.sin(instance.pose_start.theta),
                          color='r',
                          width=0.1)
                plt.arrow(instance.pose_target.x,
                          instance.pose_target.y,
                          np.cos(instance.pose_target.theta),
                          np.sin(instance.pose_target.theta),
                          color='g',
                          width=0.1)
                plot_vehicle(instance.pose.x,
                             instance.pose.y,
                             instance.pose.theta,
                             instance.x_traj,
                             instance.y_traj, instance.color)
            plt.pause(TIME_STEP)
def plot_vehicle(x, y, theta, x_traj, y_traj, color):
    """Draw the robot as a triangle at pose (x, y, theta) plus its past
    trajectory as a dashed line."""
    # Triangle corners in the vehicle frame (homogeneous coordinates),
    # nose pointing along +x at 0 radians.
    nose = np.array([0.5, 0, 1]).T
    tail_left = np.array([-0.5, 0.25, 1]).T
    tail_right = np.array([-0.5, -0.25, 1]).T

    T = transformation_matrix(x, y, theta)
    p1, p2, p3 = T @ nose, T @ tail_left, T @ tail_right

    for a, b in ((p1, p2), (p2, p3), (p3, p1)):
        plt.plot([a[0], b[0]], [a[1], b[1]], color + '-')

    plt.plot(x_traj, y_traj, color + '--')
def transformation_matrix(x, y, theta):
    """Homogeneous 2D transform: rotation by theta followed by a
    translation of (x, y)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, -s, x],
        [s, c, y],
        [0, 0, 1],
    ])
def main():
    """Drive three robots with differently tuned controllers from the
    same start pose toward a single shared target pose."""
    target = Pose(15, 15, -1)
    starts = [Pose(5, 2, 0), Pose(5, 2, 0), Pose(5, 2, 0)]
    controllers = [PathFinderController(5, 8, 2),
                   PathFinderController(5, 16, 4),
                   PathFinderController(10, 25, 6)]
    specs = [("Yellow Robot", "y", 12, 5),
             ("Black Robot", "k", 16, 5),
             ("Blue Robot", "b", 20, 5)]

    robots: list[Robot] = []
    for (name, color, v_max, w_max), ctrl, start in zip(specs, controllers,
                                                        starts):
        bot = Robot(name, color, v_max, w_max, ctrl)
        bot.set_start_target_poses(start, target)
        robots.append(bot)

    run_simulation(robots)


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Control/move_to_pose/move_to_pose_robot.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,813 |
```python
"""
A rocket powered landing with successive convexification
author: Sven Niederberger
Atsushi Sakai
Ref:
- Python implementation of 'Successive Convexification for 6-DoF Mars Rocket Powered Landing with Free-Final-Time' paper
by Michael Szmuk and Behcet Ackmese.
- EmbersArc/SuccessiveConvexificationFreeFinalTime: Implementation of "Successive Convexification for 6-DoF Mars Rocket Powered Landing with Free-Final-Time" path_to_url
"""
import warnings
from time import time
import numpy as np
from scipy.integrate import odeint
import cvxpy
import matplotlib.pyplot as plt
# Trajectory points
K = 50
# Max solver iterations
iterations = 30
# Weight constants (objective terms of the convex subproblem)
W_SIGMA = 1  # flight time
W_DELTA = 1e-3  # difference in state/input
W_DELTA_SIGMA = 1e-1  # difference in flight time
W_NU = 1e5  # virtual control
solver = 'ECOS'  # cvxpy solver name
verbose_solver = False
show_animation = True
class Rocket_Model_6DoF:
    """
    A 6 degree of freedom rocket landing problem.

    State (n_x = 14): mass, inertial position r_I (3), inertial velocity
    v_I (3), body-to-inertial attitude quaternion q_B_I (4, scalar
    first), body angular rate w_B (3).  Input (n_u = 3): thrust vector
    expressed in the body frame.
    """

    def __init__(self, rng):
        """
        A large r_scale for a small scale problem will
        lead to numerical problems as parameters become excessively small
        and (it seems) precision is lost in the dynamics.

        rng: numpy random Generator (or None) used for the random
        initial state.
        """
        self.n_x = 14  # state dimension
        self.n_u = 3  # input dimension
        # Mass (nondimensionalized)
        self.m_wet = 3.0  # 30000 kg
        self.m_dry = 2.2  # 22000 kg
        # Flight time guess
        self.t_f_guess = 10.0  # 10 s
        # State constraints (terminal conditions)
        self.r_I_final = np.array((0., 0., 0.))
        self.v_I_final = np.array((-1e-1, 0., 0.))
        self.q_B_I_final = self.euler_to_quat((0, 0, 0))
        self.w_B_final = np.deg2rad(np.array((0., 0., 0.)))
        self.w_B_max = np.deg2rad(60)
        # Angles
        max_gimbal = 20
        max_angle = 90
        glidelslope_angle = 20
        self.tan_delta_max = np.tan(np.deg2rad(max_gimbal))
        self.cos_theta_max = np.cos(np.deg2rad(max_angle))
        self.tan_gamma_gs = np.tan(np.deg2rad(glidelslope_angle))
        # Thrust limits
        self.T_max = 5.0
        self.T_min = 0.3
        # Angular moment of inertia
        self.J_B = 1e-2 * np.diag([1., 1., 1.])
        # Gravity (x axis points "up": gravity acts along -x)
        self.g_I = np.array((-1, 0., 0.))
        # Fuel consumption
        self.alpha_m = 0.01
        # Vector from thrust point to CoM
        self.r_T_B = np.array([-1e-2, 0., 0.])
        self.set_random_initial_state(rng)
        self.x_init = np.concatenate(
            ((self.m_wet,), self.r_I_init, self.v_I_init, self.q_B_I_init, self.w_B_init))
        self.x_final = np.concatenate(
            ((self.m_dry,), self.r_I_final, self.v_I_final, self.q_B_I_final, self.w_B_final))
        self.r_scale = np.linalg.norm(self.r_I_init)
        self.m_scale = self.m_wet

    def set_random_initial_state(self, rng):
        """Draw a random initial position, velocity, attitude and body rate."""
        if rng is None:
            rng = np.random.default_rng()
        self.r_I_init = np.array((0., 0., 0.))
        self.r_I_init[0] = rng.uniform(3, 4)
        self.r_I_init[1:3] = rng.uniform(-2, 2, size=2)
        self.v_I_init = np.array((0., 0., 0.))
        self.v_I_init[0] = rng.uniform(-1, -0.5)
        self.v_I_init[1:3] = rng.uniform(-0.5, -0.2,
                                         size=2) * self.r_I_init[1:3]
        self.q_B_I_init = self.euler_to_quat((0,
                                              rng.uniform(-30, 30),
                                              rng.uniform(-30, 30)))
        self.w_B_init = np.deg2rad((0,
                                    rng.uniform(-20, 20),
                                    rng.uniform(-20, 20)))

    def f_func(self, x, u):
        """Closed-form nonlinear dynamics x_dot = f(x, u), returned as a
        14x1 column vector."""
        m, _, _, _, vx, vy, vz, q0, q1, q2, q3, wx, wy, wz = x[0], x[1], x[
            2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
        ux, uy, uz = u[0], u[1], u[2]
        return np.array([
            [-0.01 * np.sqrt(ux**2 + uy**2 + uz**2)],
            [vx],
            [vy],
            [vz],
            [(-1.0 * m - ux * (2 * q2**2 + 2 * q3**2 - 1) - 2 * uy
              * (q0 * q3 - q1 * q2) + 2 * uz * (q0 * q2 + q1 * q3)) / m],
            [(2 * ux * (q0 * q3 + q1 * q2) - uy * (2 * q1**2
                                                   + 2 * q3**2 - 1) - 2 * uz * (q0 * q1 - q2 * q3)) / m],
            [(-2 * ux * (q0 * q2 - q1 * q3) + 2 * uy
              * (q0 * q1 + q2 * q3) - uz * (2 * q1**2 + 2 * q2**2 - 1)) / m],
            [-0.5 * q1 * wx - 0.5 * q2 * wy - 0.5 * q3 * wz],
            [0.5 * q0 * wx + 0.5 * q2 * wz - 0.5 * q3 * wy],
            [0.5 * q0 * wy - 0.5 * q1 * wz + 0.5 * q3 * wx],
            [0.5 * q0 * wz + 0.5 * q1 * wy - 0.5 * q2 * wx],
            [0],
            [1.0 * uz],
            [-1.0 * uy]
        ])

    def A_func(self, x, u):
        """Hand-derived Jacobian df/dx evaluated at (x, u)."""
        m, _, _, _, _, _, _, q0, q1, q2, q3, wx, wy, wz = x[0], x[1], x[
            2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
        ux, uy, uz = u[0], u[1], u[2]
        return np.array([
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
            [(ux * (2 * q2**2 + 2 * q3**2 - 1) + 2 * uy * (q0 * q3 - q1 * q2) - 2 * uz * (q0 * q2 + q1 * q3)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (q2 * uz
                                                                                                                                             - q3 * uy) / m, 2 * (q2 * uy + q3 * uz) / m, 2 * (q0 * uz + q1 * uy - 2 * q2 * ux) / m, 2 * (-q0 * uy + q1 * uz - 2 * q3 * ux) / m, 0, 0, 0],
            [(-2 * ux * (q0 * q3 + q1 * q2) + uy * (2 * q1**2 + 2 * q3**2 - 1) + 2 * uz * (q0 * q1 - q2 * q3)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (-q1 * uz
                                                                                                                                              + q3 * ux) / m, 2 * (-q0 * uz - 2 * q1 * uy + q2 * ux) / m, 2 * (q1 * ux + q3 * uz) / m, 2 * (q0 * ux + q2 * uz - 2 * q3 * uy) / m, 0, 0, 0],
            [(2 * ux * (q0 * q2 - q1 * q3) - 2 * uy * (q0 * q1 + q2 * q3) + uz * (2 * q1**2 + 2 * q2**2 - 1)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (q1 * uy
                                                                                                                                             - q2 * ux) / m, 2 * (q0 * uy - 2 * q1 * uz + q3 * ux) / m, 2 * (-q0 * ux - 2 * q2 * uz + q3 * uy) / m, 2 * (q1 * ux + q2 * uy) / m, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, -0.5 * wx, -0.5 * wy,
             - 0.5 * wz, -0.5 * q1, -0.5 * q2, -0.5 * q3],
            [0, 0, 0, 0, 0, 0, 0, 0.5 * wx, 0, 0.5 * wz,
             - 0.5 * wy, 0.5 * q0, -0.5 * q3, 0.5 * q2],
            [0, 0, 0, 0, 0, 0, 0, 0.5 * wy, -0.5 * wz, 0,
             0.5 * wx, 0.5 * q3, 0.5 * q0, -0.5 * q1],
            [0, 0, 0, 0, 0, 0, 0, 0.5 * wz, 0.5 * wy,
             - 0.5 * wx, 0, -0.5 * q2, 0.5 * q1, 0.5 * q0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    def B_func(self, x, u):
        """Hand-derived Jacobian df/du evaluated at (x, u)."""
        m, _, _, _, _, _, _, q0, q1, q2, q3, _, _, _ = x[0], x[1], x[
            2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
        ux, uy, uz = u[0], u[1], u[2]
        return np.array([
            [-0.01 * ux / np.sqrt(ux**2 + uy**2 + uz**2),
             -0.01 * uy / np.sqrt(ux ** 2 + uy**2 + uz**2),
             -0.01 * uz / np.sqrt(ux**2 + uy**2 + uz**2)],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [(-2 * q2**2 - 2 * q3**2 + 1) / m, 2
             * (-q0 * q3 + q1 * q2) / m, 2 * (q0 * q2 + q1 * q3) / m],
            [2 * (q0 * q3 + q1 * q2) / m, (-2 * q1**2 - 2
                                           * q3**2 + 1) / m, 2 * (-q0 * q1 + q2 * q3) / m],
            [2 * (-q0 * q2 + q1 * q3) / m, 2 * (q0 * q1 + q2 * q3)
             / m, (-2 * q1**2 - 2 * q2**2 + 1) / m],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 1.0],
            [0, -1.0, 0]
        ])

    def euler_to_quat(self, a):
        """Convert Euler angles (degrees) to a unit quaternion
        (scalar-first order)."""
        a = np.deg2rad(a)
        cy = np.cos(a[1] * 0.5)
        sy = np.sin(a[1] * 0.5)
        cr = np.cos(a[0] * 0.5)
        sr = np.sin(a[0] * 0.5)
        cp = np.cos(a[2] * 0.5)
        sp = np.sin(a[2] * 0.5)
        q = np.zeros(4)
        q[0] = cy * cr * cp + sy * sr * sp
        q[1] = cy * sr * cp - sy * cr * sp
        q[3] = cy * cr * sp + sy * sr * cp
        q[2] = sy * cr * cp - cy * sr * sp
        return q

    def skew(self, v):
        """Skew-symmetric cross-product matrix of a 3-vector."""
        return np.array([
            [0, -v[2], v[1]],
            [v[2], 0, -v[0]],
            [-v[1], v[0], 0]
        ])

    def dir_cosine(self, q):
        """Direction cosine matrix corresponding to quaternion q."""
        return np.array([
            [1 - 2 * (q[2] ** 2 + q[3] ** 2), 2 * (q[1] * q[2]
                                                   + q[0] * q[3]), 2 * (q[1] * q[3] - q[0] * q[2])],
            [2 * (q[1] * q[2] - q[0] * q[3]), 1 - 2
             * (q[1] ** 2 + q[3] ** 2), 2 * (q[2] * q[3] + q[0] * q[1])],
            [2 * (q[1] * q[3] + q[0] * q[2]), 2 * (q[2] * q[3]
                                                   - q[0] * q[1]), 1 - 2 * (q[1] ** 2 + q[2] ** 2)]
        ])

    def omega(self, w):
        """Quaternion kinematics matrix for body angular rate w."""
        return np.array([
            [0, -w[0], -w[1], -w[2]],
            [w[0], 0, w[2], -w[1]],
            [w[1], -w[2], 0, w[0]],
            [w[2], w[1], -w[0], 0],
        ])

    def initialize_trajectory(self, X, U):
        """
        Initialize the trajectory with linear approximation.

        States are linearly interpolated between x_init and x_final
        (attitude fixed to identity); inputs are set to hover thrust.
        """
        K = X.shape[1]
        for k in range(K):
            alpha1 = (K - k) / K
            alpha2 = k / K
            m_k = (alpha1 * self.x_init[0] + alpha2 * self.x_final[0],)
            r_I_k = alpha1 * self.x_init[1:4] + alpha2 * self.x_final[1:4]
            v_I_k = alpha1 * self.x_init[4:7] + alpha2 * self.x_final[4:7]
            q_B_I_k = np.array([1, 0, 0, 0])
            w_B_k = alpha1 * self.x_init[11:14] + alpha2 * self.x_final[11:14]
            X[:, k] = np.concatenate((m_k, r_I_k, v_I_k, q_B_I_k, w_B_k))
            U[:, k] = m_k * -self.g_I
        return X, U

    def get_constraints(self, X_v, U_v, X_last_p, U_last_p):
        """
        Get model specific constraints.

        :param X_v: cvx variable for current states
        :param U_v: cvx variable for current inputs
        :param X_last_p: cvx parameter for last states
        :param U_last_p: cvx parameter for last inputs
        :return: A list of cvx constraints
        """
        # Boundary conditions:
        constraints = [
            X_v[0, 0] == self.x_init[0],
            X_v[1:4, 0] == self.x_init[1:4],
            X_v[4:7, 0] == self.x_init[4:7],
            # X_v[7:11, 0] == self.x_init[7:11], # initial orientation is free
            X_v[11:14, 0] == self.x_init[11:14],
            # X_[0, -1] final mass is free
            X_v[1:, -1] == self.x_final[1:],
            U_v[1:3, -1] == 0,
        ]
        constraints += [
            # State constraints:
            X_v[0, :] >= self.m_dry,  # minimum mass
            cvxpy.norm(X_v[2: 4, :], axis=0) <= X_v[1, :] / \
            self.tan_gamma_gs,  # glideslope
            cvxpy.norm(X_v[9:11, :], axis=0) <= np.sqrt(
                (1 - self.cos_theta_max) / 2),  # maximum angle
            # maximum angular velocity
            cvxpy.norm(X_v[11: 14, :], axis=0) <= self.w_B_max,
            # Control constraints:
            cvxpy.norm(U_v[1:3, :], axis=0) <= self.tan_delta_max * \
            U_v[0, :],  # gimbal angle constraint
            cvxpy.norm(U_v, axis=0) <= self.T_max,  # upper thrust constraint
        ]
        # linearized lower thrust constraint (nonconvex in original form)
        rhs = [U_last_p[:, k] / cvxpy.norm(U_last_p[:, k]) @ U_v[:, k]
               for k in range(X_v.shape[1])]
        constraints += [
            self.T_min <= cvxpy.vstack(rhs)
        ]
        return constraints
class Integrator:
    """
    First-order-hold discretization of the nonlinear dynamics around a
    reference trajectory, producing for each interval the matrices of
    x_{k+1} = A_bar x_k + B_bar u_k + C_bar u_{k+1} + S_bar*sigma + z_bar,
    where sigma is the (free) total flight time.
    """

    def __init__(self, m, K):
        self.K = K  # number of trajectory points
        self.m = m  # model providing f_func / A_func / B_func
        self.n_x = m.n_x
        self.n_u = m.n_u
        # Discretization matrices, stored column-flattened per interval.
        self.A_bar = np.zeros([m.n_x * m.n_x, K - 1])
        self.B_bar = np.zeros([m.n_x * m.n_u, K - 1])
        self.C_bar = np.zeros([m.n_x * m.n_u, K - 1])
        self.S_bar = np.zeros([m.n_x, K - 1])
        self.z_bar = np.zeros([m.n_x, K - 1])
        # vector indices for flat matrices
        x_end = m.n_x
        A_bar_end = m.n_x * (1 + m.n_x)
        B_bar_end = m.n_x * (1 + m.n_x + m.n_u)
        C_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u)
        S_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u + 1)
        z_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u + 2)
        self.x_ind = slice(0, x_end)
        self.A_bar_ind = slice(x_end, A_bar_end)
        self.B_bar_ind = slice(A_bar_end, B_bar_end)
        self.C_bar_ind = slice(B_bar_end, C_bar_end)
        self.S_bar_ind = slice(C_bar_end, S_bar_end)
        self.z_bar_ind = slice(S_bar_end, z_bar_end)
        self.f, self.A, self.B = m.f_func, m.A_func, m.B_func
        # integration initial condition (state transition starts at identity)
        self.V0 = np.zeros((m.n_x * (1 + m.n_x + m.n_u + m.n_u + 2),))
        self.V0[self.A_bar_ind] = np.eye(m.n_x).reshape(-1)
        self.dt = 1. / (K - 1)  # normalized interval length

    def calculate_discretization(self, X, U, sigma):
        """
        Calculate discretization for given states, inputs and total time.

        :param X: Matrix of states for all time points
        :param U: Matrix of inputs for all time points
        :param sigma: Total time
        :return: The discretization matrices
        """
        for k in range(self.K - 1):
            self.V0[self.x_ind] = X[:, k]
            V = np.array(odeint(self._ode_dVdt, self.V0, (0, self.dt),
                                args=(U[:, k], U[:, k + 1], sigma))[1, :])
            # using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
            # flatten matrices in column-major (Fortran) order for CVXPY
            Phi = V[self.A_bar_ind].reshape((self.n_x, self.n_x))
            self.A_bar[:, k] = Phi.flatten(order='F')
            self.B_bar[:, k] = np.matmul(Phi, V[self.B_bar_ind].reshape(
                (self.n_x, self.n_u))).flatten(order='F')
            self.C_bar[:, k] = np.matmul(Phi, V[self.C_bar_ind].reshape(
                (self.n_x, self.n_u))).flatten(order='F')
            self.S_bar[:, k] = np.matmul(Phi, V[self.S_bar_ind])
            self.z_bar[:, k] = np.matmul(Phi, V[self.z_bar_ind])
        return self.A_bar, self.B_bar, self.C_bar, self.S_bar, self.z_bar

    def _ode_dVdt(self, V, t, u_t0, u_t1, sigma):
        """
        ODE function to compute dVdt.

        :param V: Evaluation state V = [x, Phi_A, B_bar, C_bar, S_bar, z_bar]
        :param t: Evaluation time
        :param u_t0: Input at start of interval
        :param u_t1: Input at end of interval
        :param sigma: Total time
        :return: Derivative at current time and state dVdt
        """
        # First-order hold: interpolate the input linearly over the interval.
        alpha = (self.dt - t) / self.dt
        beta = t / self.dt
        x = V[self.x_ind]
        u = u_t0 + beta * (u_t1 - u_t0)
        # using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
        # and pre-multiplying with \Phi_A(\tau_{k+1},\tau_k) after integration
        Phi_A_xi = np.linalg.inv(
            V[self.A_bar_ind].reshape((self.n_x, self.n_x)))
        A_subs = sigma * self.A(x, u)
        B_subs = sigma * self.B(x, u)
        f_subs = self.f(x, u)
        dVdt = np.zeros_like(V)
        dVdt[self.x_ind] = sigma * f_subs.transpose()
        dVdt[self.A_bar_ind] = np.matmul(
            A_subs, V[self.A_bar_ind].reshape((self.n_x, self.n_x))).reshape(-1)
        dVdt[self.B_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * alpha
        dVdt[self.C_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * beta
        dVdt[self.S_bar_ind] = np.matmul(Phi_A_xi, f_subs).transpose()
        z_t = -np.matmul(A_subs, x) - np.matmul(B_subs, u)
        dVdt[self.z_bar_ind] = np.dot(Phi_A_xi, z_t.T).flatten()
        return dVdt
class SCProblem:
    """
    Defines a standard Successive Convexification problem and
    adds the model specific constraints and objectives.

    :param m: The model object
    :param K: Number of discretization points
    """

    def __init__(self, m, K):
        # Variables:
        self.var = dict()
        self.var['X'] = cvxpy.Variable((m.n_x, K))  # states
        self.var['U'] = cvxpy.Variable((m.n_u, K))  # inputs
        self.var['sigma'] = cvxpy.Variable(nonneg=True)  # total flight time
        self.var['nu'] = cvxpy.Variable((m.n_x, K - 1))  # virtual control
        self.var['delta_norm'] = cvxpy.Variable(nonneg=True)  # trust region
        self.var['sigma_norm'] = cvxpy.Variable(nonneg=True)  # time trust region
        # Parameters (filled in each iteration via set_parameters):
        self.par = dict()
        self.par['A_bar'] = cvxpy.Parameter((m.n_x * m.n_x, K - 1))
        self.par['B_bar'] = cvxpy.Parameter((m.n_x * m.n_u, K - 1))
        self.par['C_bar'] = cvxpy.Parameter((m.n_x * m.n_u, K - 1))
        self.par['S_bar'] = cvxpy.Parameter((m.n_x, K - 1))
        self.par['z_bar'] = cvxpy.Parameter((m.n_x, K - 1))
        self.par['X_last'] = cvxpy.Parameter((m.n_x, K))
        self.par['U_last'] = cvxpy.Parameter((m.n_u, K))
        self.par['sigma_last'] = cvxpy.Parameter(nonneg=True)
        self.par['weight_sigma'] = cvxpy.Parameter(nonneg=True)
        self.par['weight_delta'] = cvxpy.Parameter(nonneg=True)
        self.par['weight_delta_sigma'] = cvxpy.Parameter(nonneg=True)
        self.par['weight_nu'] = cvxpy.Parameter(nonneg=True)
        # Constraints:
        constraints = []
        # Model:
        constraints += m.get_constraints(
            self.var['X'], self.var['U'], self.par['X_last'], self.par['U_last'])
        # Dynamics:
        # x_t+1 = A_*x_t+B_*U_t+C_*U_T+1*S_*sigma+zbar+nu
        constraints += [
            self.var['X'][:, k + 1] ==
            cvxpy.reshape(self.par['A_bar'][:, k], (m.n_x, m.n_x)) @
            self.var['X'][:, k] +
            cvxpy.reshape(self.par['B_bar'][:, k], (m.n_x, m.n_u)) @
            self.var['U'][:, k] +
            cvxpy.reshape(self.par['C_bar'][:, k], (m.n_x, m.n_u)) @
            self.var['U'][:, k + 1] +
            self.par['S_bar'][:, k] * self.var['sigma'] +
            self.par['z_bar'][:, k] +
            self.var['nu'][:, k]
            for k in range(K - 1)
        ]
        # Trust regions:
        dx = cvxpy.sum(cvxpy.square(
            self.var['X'] - self.par['X_last']), axis=0)
        du = cvxpy.sum(cvxpy.square(
            self.var['U'] - self.par['U_last']), axis=0)
        ds = self.var['sigma'] - self.par['sigma_last']
        constraints += [cvxpy.norm(dx + du, 1) <= self.var['delta_norm']]
        constraints += [cvxpy.norm(ds, 'inf') <= self.var['sigma_norm']]
        # Flight time positive:
        constraints += [self.var['sigma'] >= 0.1]
        # Objective: minimize flight time, virtual control, and deviation
        # from the previous iterate (trust region penalties).
        sc_objective = cvxpy.Minimize(
            self.par['weight_sigma'] * self.var['sigma'] +
            self.par['weight_nu'] * cvxpy.norm(self.var['nu'], 'inf') +
            self.par['weight_delta'] * self.var['delta_norm'] +
            self.par['weight_delta_sigma'] * self.var['sigma_norm']
        )
        objective = sc_objective
        self.prob = cvxpy.Problem(objective, constraints)

    def set_parameters(self, **kwargs):
        """
        All parameters have to be filled before calling solve().
        Takes the following arguments as keywords:

        A_bar
        B_bar
        C_bar
        S_bar
        z_bar
        X_last
        U_last
        sigma_last
        E
        weight_sigma
        weight_nu
        radius_trust_region
        """
        for key in kwargs:
            if key in self.par:
                self.par[key].value = kwargs[key]
            else:
                print(f'Parameter \'{key}\' does not exist.')

    def get_variable(self, name):
        """Return the solved value of variable `name`, or None if unknown."""
        if name in self.var:
            return self.var[name].value
        else:
            print(f'Variable \'{name}\' does not exist.')
            return None

    def solve(self, **kwargs):
        """Solve the convex subproblem; returns a dict of solver stats
        with a 'solver_error' flag instead of raising on SolverError."""
        error = False
        try:
            with warnings.catch_warnings():  # For User warning from solver
                warnings.simplefilter('ignore')
                self.prob.solve(verbose=verbose_solver,
                                solver=solver)
        except cvxpy.SolverError:
            error = True
        stats = self.prob.solver_stats
        info = {
            'setup_time': stats.setup_time,
            'solver_time': stats.solve_time,
            'iterations': stats.num_iters,
            'solver_error': error
        }
        return info
def axis3d_equal(X, Y, Z, ax):
    """
    Give a 3D axis an equal aspect ratio by plotting the eight corners
    of a cube that bounds the data as invisible (white) points.
    """
    extents = [X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]
    half_range = 0.5 * max(extents)
    offsets = np.mgrid[-1:2:2, -1:2:2, -1:2:2]
    centers = (0.5 * (X.max() + X.min()),
               0.5 * (Y.max() + Y.min()),
               0.5 * (Z.max() + Z.min()))
    corner_coords = [half_range * off.flatten() + mid
                     for off, mid in zip(offsets, centers)]
    # Plot each fake bounding-box corner as a single white point.
    for xb, yb, zb in zip(*corner_coords):
        ax.plot([xb], [yb], [zb], 'w')
def plot_animation(X, U):  # pragma: no cover
    """
    Animate the solved landing trajectory in 3D.

    X: (14, K) state trajectory; U: (3, K) body-frame thrust inputs.
    Draws the position track, target point, attitude vector (black) and
    thrust vector (red) at each of the K trajectory points.
    """
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # for stopping simulation with the esc key.
    fig.canvas.mpl_connect('key_release_event',
                           lambda event: [exit(0) if event.key == 'escape' else None])
    for k in range(K):
        plt.cla()
        ax.plot(X[2, :], X[3, :], X[1, :])  # trajectory
        ax.scatter3D([0.0], [0.0], [0.0], c="r",
                     marker="x")  # target landing point
        axis3d_equal(X[2, :], X[3, :], X[1, :], ax)
        rx, ry, rz = X[1:4, k]
        # vx, vy, vz = X[4:7, k]
        qw, qx, qy, qz = X[7:11, k]
        # Direction cosine matrix from the attitude quaternion.
        CBI = np.array([
            [1 - 2 * (qy ** 2 + qz ** 2), 2 * (qx * qy + qw * qz),
             2 * (qx * qz - qw * qy)],
            [2 * (qx * qy - qw * qz), 1 - 2
             * (qx ** 2 + qz ** 2), 2 * (qy * qz + qw * qx)],
            [2 * (qx * qz + qw * qy), 2 * (qy * qz - qw * qx),
             1 - 2 * (qx ** 2 + qy ** 2)]
        ])
        Fx, Fy, Fz = np.dot(np.transpose(CBI), U[:, k])
        dx, dy, dz = np.dot(np.transpose(CBI), np.array([1., 0., 0.]))
        # attitude vector
        ax.quiver(ry, rz, rx, dy, dz, dx, length=0.5, linewidth=3.0,
                  arrow_length_ratio=0.0, color='black')
        # thrust vector
        ax.quiver(ry, rz, rx, -Fy, -Fz, -Fx, length=0.1,
                  arrow_length_ratio=0.0, color='red')
        ax.set_title("Rocket powered landing")
        plt.pause(0.5)
def main(rng=None):
    """
    Run successive convexification: repeatedly linearize/discretize the
    dynamics around the current trajectory, solve the convex subproblem,
    and stop once the trust-region and virtual-control norms are small.

    rng: optional numpy random Generator for the random initial state.
    """
    print("start!!")
    m = Rocket_Model_6DoF(rng)
    # state and input list
    X = np.empty(shape=[m.n_x, K])
    U = np.empty(shape=[m.n_u, K])
    # INITIALIZATION
    sigma = m.t_f_guess
    X, U = m.initialize_trajectory(X, U)
    integrator = Integrator(m, K)
    problem = SCProblem(m, K)
    converged = False
    w_delta = W_DELTA
    for it in range(iterations):
        t0_it = time()
        print('-' * 18 + f' Iteration {str(it + 1).zfill(2)} ' + '-' * 18)
        A_bar, B_bar, C_bar, S_bar, z_bar = integrator.calculate_discretization(
            X, U, sigma)
        problem.set_parameters(A_bar=A_bar, B_bar=B_bar, C_bar=C_bar, S_bar=S_bar, z_bar=z_bar,
                               X_last=X, U_last=U, sigma_last=sigma,
                               weight_sigma=W_SIGMA, weight_nu=W_NU,
                               weight_delta=w_delta, weight_delta_sigma=W_DELTA_SIGMA)
        problem.solve()
        X = problem.get_variable('X')
        U = problem.get_variable('U')
        sigma = problem.get_variable('sigma')
        delta_norm = problem.get_variable('delta_norm')
        sigma_norm = problem.get_variable('sigma_norm')
        nu_norm = np.linalg.norm(problem.get_variable('nu'), np.inf)
        print('delta_norm', delta_norm)
        print('sigma_norm', sigma_norm)
        print('nu_norm', nu_norm)
        # Convergence: small step, small time change, negligible virtual control.
        if delta_norm < 1e-3 and sigma_norm < 1e-3 and nu_norm < 1e-7:
            converged = True
        # Tighten the trust region as the iterates settle.
        w_delta *= 1.5
        print('Time for iteration', time() - t0_it, 's')
        if converged:
            print(f'Converged after {it + 1} iterations.')
            break
    if show_animation:  # pragma: no cover
        plot_animation(X, U)
    print("done!!")


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/AerialNavigation/rocket_powered_landing/rocket_powered_landing.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 8,366 |
```python
"""
Inverted Pendulum LQR control
author: Trung Kien - letrungkien.k53.hut@gmail.com
"""
import math
import time
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv, eig
# Model parameters
l_bar = 2.0  # length of bar [m]
M = 1.0  # [kg] cart mass
m = 0.3  # [kg] pendulum mass
g = 9.8  # [m/s^2] gravitational acceleration

nx = 4  # number of state (cart position, cart velocity, pole angle, pole rate)
nu = 1  # number of input (cart force)
Q = np.diag([0.0, 1.0, 1.0, 0.0])  # state cost matrix
R = np.diag([0.01])  # input cost matrix

delta_t = 0.1  # time tick [s]
sim_time = 5.0  # simulation time [s]

show_animation = True
def main():
    """Simulate LQR stabilization of an inverted pendulum on a cart.

    Starts from a small pole offset (theta = 0.3 rad), steps the closed
    loop until ``sim_time`` elapses, and optionally animates each step.
    """
    x0 = np.array([
        [0.0],
        [0.0],
        [0.3],
        [0.0]
    ])

    x = np.copy(x0)
    # named `t` (not `time`) so the imported `time` module is not shadowed
    t = 0.0

    while sim_time > t:
        t += delta_t

        # calc control input
        u = lqr_control(x)

        # simulate inverted pendulum cart
        x = simulation(x, u)

        if show_animation:
            plt.clf()
            px = float(x[0, 0])
            theta = float(x[2, 0])
            plot_cart(px, theta)
            plt.xlim([-5.0, 2.0])
            plt.pause(0.001)

    print("Finish")
    print(f"x={float(x[0, 0]):.2f} [m] , theta={math.degrees(x[2, 0]):.2f} [deg]")
    if show_animation:
        plt.show()
def simulation(x, u):
    """Advance the cart-pendulum state one time step: x' = A x + B u."""
    A, B = get_model_matrix()
    return np.dot(A, x) + np.dot(B, u)
def solve_DARE(A, B, Q, R, maxiter=150, eps=0.01):
    """
    Solve the discrete-time algebraic Riccati equation (DARE) by
    fixed-point iteration, stopping once the largest element-wise
    change between iterates drops below `eps`.
    """
    P = Q
    for _ in range(maxiter):
        P_next = A.T @ P @ A \
            - A.T @ P @ B @ inv(R + B.T @ P @ B) @ B.T @ P @ A + Q
        if np.abs(P_next - P).max() < eps:
            break
        P = P_next

    return P_next
def dlqr(A, B, Q, R):
    """
    Solve the discrete-time LQR problem for x[k+1] = A x[k] + B u[k]
    with cost sum x[k].T*Q*x[k] + u[k].T*R*u[k].

    Returns the feedback gain K, the Riccati solution P and the
    closed-loop eigenvalues. (ref Bertsekas, p.151)
    """
    # Riccati solution
    P = solve_DARE(A, B, Q, R)

    # optimal feedback gain
    gain = inv(B.T @ P @ B + R) @ (B.T @ P @ A)

    # closed-loop poles, returned for inspection
    poles, _ = eig(A - B @ gain)

    return gain, P, poles
def lqr_control(x):
    """Compute the LQR input u = -K x for the current state, printing the
    time spent on the gain computation."""
    A, B = get_model_matrix()
    t_start = time.time()
    K, _, _ = dlqr(A, B, Q, R)
    u = -K @ x
    print(f"calc time:{time.time() - t_start:.6f} [sec]")
    return u
def get_numpy_array_from_matrix(x):
    """Return *x* copied into a flat 1-D numpy array."""
    return np.array(x).ravel()
def get_model_matrix():
    """Build the discrete linear cart-pendulum model.

    Returns (A, B) such that x[k+1] = A x[k] + B u[k], obtained by
    forward-Euler discretization of the dynamics linearized about the
    upright equilibrium.
    """
    # continuous-time linearized dynamics
    A_c = np.array([
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, m * g / M, 0.0],
        [0.0, 0.0, 0.0, 1.0],
        [0.0, 0.0, g * (M + m) / (l_bar * M), 0.0]
    ])
    B_c = np.array([
        [0.0],
        [1.0 / M],
        [0.0],
        [1.0 / (l_bar * M)]
    ])

    # forward-Euler discretization with step delta_t
    A = np.eye(nx) + delta_t * A_c
    B = delta_t * B_c
    return A, B
def flatten(a):
    """Flatten *a* into a 1-D numpy array."""
    return np.asarray(a).flatten()
def plot_cart(xt, theta):
    """Draw the cart, pole, wheels and pole-tip mass at cart position
    *xt* [m] and pole angle *theta* [rad] on the current axes."""
    cart_w = 1.0
    cart_h = 0.5
    radius = 0.1

    # cart body outline (closed polygon), lifted by the wheel diameter
    cx = np.array([-cart_w / 2.0, cart_w / 2.0, cart_w /
                   2.0, -cart_w / 2.0, -cart_w / 2.0])
    cy = np.array([0.0, 0.0, cart_h, cart_h, 0.0])
    cy += radius * 2.0

    cx = cx + xt

    # pole segment from the cart top to the tip mass
    bx = np.array([0.0, l_bar * math.sin(-theta)])
    bx += xt
    by = np.array([cart_h, l_bar * math.cos(-theta) + cart_h])
    by += radius * 2.0

    # circle samples reused for both wheels and the pole-tip mass
    angles = np.arange(0.0, math.pi * 2.0, math.radians(3.0))
    ox = np.array([radius * math.cos(a) for a in angles])
    oy = np.array([radius * math.sin(a) for a in angles])

    rwx = np.copy(ox) + cart_w / 4.0 + xt
    rwy = np.copy(oy) + radius
    lwx = np.copy(ox) - cart_w / 4.0 + xt
    lwy = np.copy(oy) + radius

    wx = np.copy(ox) + bx[-1]
    wy = np.copy(oy) + by[-1]

    plt.plot(flatten(cx), flatten(cy), "-b")
    plt.plot(flatten(bx), flatten(by), "-k")
    plt.plot(flatten(rwx), flatten(rwy), "-k")
    plt.plot(flatten(lwx), flatten(lwy), "-k")
    plt.plot(flatten(wx), flatten(wy), "-k")
    plt.title(f"x: {xt:.2f} , theta: {math.degrees(theta):.2f}")

    # for stopping simulation with the esc key.
    plt.gcf().canvas.mpl_connect(
        'key_release_event',
        lambda event: [exit(0) if event.key == 'escape' else None])

    plt.axis("equal")


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Control/inverted_pendulum/inverted_pendulum_lqr_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,497 |
```python
"""
Move to specified pose
Author: Daniel Ingram (daniel-s-ingram)
Atsushi Sakai (@Atsushi_twi)
Seied Muhammad Yazdian (@Muhammad-Yazdian)
P. I. Corke, "Robotics, Vision & Control", Springer 2017, ISBN 978-3-319-54413-7
"""
import matplotlib.pyplot as plt
import numpy as np
from random import random
from utils.angle import angle_mod
class PathFinderController:
    """
    Feedback controller that navigates a 3-DOF wheeled robot on a 2D
    plane toward a goal pose.

    Parameters
    ----------
    Kp_rho : The linear velocity gain to translate the robot along a line
             towards the goal
    Kp_alpha : The angular velocity gain to rotate the robot towards the goal
    Kp_beta : The offset angular velocity gain accounting for smooth merging to
              the goal angle (i.e., it helps the robot heading to be parallel
              to the target angle.)
    """

    def __init__(self, Kp_rho, Kp_alpha, Kp_beta):
        self.Kp_rho = Kp_rho
        self.Kp_alpha = Kp_alpha
        self.Kp_beta = Kp_beta

    def calc_control_command(self, x_diff, y_diff, theta, theta_goal):
        """
        Compute the control command toward the goal pose.

        Parameters
        ----------
        x_diff : goal position relative to the robot, x direction
        y_diff : goal position relative to the robot, y direction
        theta : current robot heading w.r.t. the x axis
        theta_goal : target heading w.r.t. the x axis

        Returns
        -------
        rho : distance between robot and goal position
        v : command linear velocity
        w : command angular velocity
        """
        # alpha: bearing of the goal relative to the robot heading.
        # beta: residual orientation error at the goal.
        # Kp_rho*rho and Kp_alpha*alpha drive the robot along a line toward
        # the goal; Kp_beta*beta rotates that line parallel to theta_goal.
        # Both angle differences are wrapped to [-pi, pi] so small heading
        # changes never look like near-2*pi jumps.
        rho = np.hypot(x_diff, y_diff)
        alpha = angle_mod(np.arctan2(y_diff, x_diff) - theta)
        beta = angle_mod(theta_goal - theta - alpha)

        v = self.Kp_rho * rho
        w = self.Kp_alpha * alpha - self.Kp_beta * beta

        # goal is behind the robot: drive backwards instead of spinning round
        if abs(alpha) > np.pi / 2:
            v = -v

        return rho, v, w
# simulation parameters
controller = PathFinderController(9, 15, 3)  # shared controller instance
dt = 0.01  # integration time step [s]

# Robot specifications
MAX_LINEAR_SPEED = 15  # saturation limit for v
MAX_ANGULAR_SPEED = 7  # saturation limit for w

show_animation = True
def move_to_pose(x_start, y_start, theta_start, x_goal, y_goal, theta_goal):
    """Drive the simulated robot from the start pose to the goal pose,
    integrating the controller commands with an Euler step of `dt` and
    optionally animating the trajectory. Stops when the remaining
    distance drops below 1 mm."""
    x = x_start
    y = y_start
    theta = theta_start

    x_diff = x_goal - x
    y_diff = y_goal - y

    x_traj, y_traj = [], []

    rho = np.hypot(x_diff, y_diff)
    while rho > 0.001:
        x_traj.append(x)
        y_traj.append(y)

        x_diff = x_goal - x
        y_diff = y_goal - y

        rho, v, w = controller.calc_control_command(
            x_diff, y_diff, theta, theta_goal)

        # saturate commands to the robot's speed limits
        if abs(v) > MAX_LINEAR_SPEED:
            v = np.sign(v) * MAX_LINEAR_SPEED

        if abs(w) > MAX_ANGULAR_SPEED:
            w = np.sign(w) * MAX_ANGULAR_SPEED

        # unicycle kinematics, forward-Euler integration
        theta = theta + w * dt
        x = x + v * np.cos(theta) * dt
        y = y + v * np.sin(theta) * dt

        if show_animation:  # pragma: no cover
            plt.cla()
            plt.arrow(x_start, y_start, np.cos(theta_start),
                      np.sin(theta_start), color='r', width=0.1)
            plt.arrow(x_goal, y_goal, np.cos(theta_goal),
                      np.sin(theta_goal), color='g', width=0.1)
            plot_vehicle(x, y, theta, x_traj, y_traj)
def plot_vehicle(x, y, theta, x_traj, y_traj):  # pragma: no cover
    """Draw the robot as a triangle at pose (x, y, theta) plus its
    trajectory so far."""
    # Corners of triangular vehicle when pointing to the right (0 radians)
    p1_i = np.array([0.5, 0, 1]).T
    p2_i = np.array([-0.5, 0.25, 1]).T
    p3_i = np.array([-0.5, -0.25, 1]).T

    # transform the template corners into the world frame
    T = transformation_matrix(x, y, theta)
    p1 = np.matmul(T, p1_i)
    p2 = np.matmul(T, p2_i)
    p3 = np.matmul(T, p3_i)

    plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'k-')
    plt.plot([p2[0], p3[0]], [p2[1], p3[1]], 'k-')
    plt.plot([p3[0], p1[0]], [p3[1], p1[1]], 'k-')

    plt.plot(x_traj, y_traj, 'b--')

    # for stopping simulation with the esc key.
    plt.gcf().canvas.mpl_connect(
        'key_release_event',
        lambda event: [exit(0) if event.key == 'escape' else None])

    plt.xlim(0, 20)
    plt.ylim(0, 20)

    plt.pause(dt)
def transformation_matrix(x, y, theta):
    """Homogeneous 2D transform: rotation by *theta* then translation (x, y)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, -s, x],
        [s, c, y],
        [0, 0, 1]
    ])
def main():
    """Run five move-to-pose trials with random start and goal poses
    inside a 20 x 20 workspace."""
    for i in range(5):
        x_start = 20.0 * random()
        y_start = 20.0 * random()
        theta_start: float = 2 * np.pi * random() - np.pi
        x_goal = 20 * random()
        y_goal = 20 * random()
        theta_goal = 2 * np.pi * random() - np.pi
        print(f"Initial x: {round(x_start, 2)} m\nInitial y: {round(y_start, 2)} m\nInitial theta: {round(theta_start, 2)} rad\n")
        print(f"Goal x: {round(x_goal, 2)} m\nGoal y: {round(y_goal, 2)} m\nGoal theta: {round(theta_goal, 2)} rad\n")
        move_to_pose(x_start, y_start, theta_start, x_goal, y_goal, theta_goal)


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Control/move_to_pose/move_to_pose.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,628 |
```python
"""
Inverted Pendulum MPC control
author: Atsushi Sakai
"""
import math
import time
import cvxpy
import matplotlib.pyplot as plt
import numpy as np
# Model parameters
l_bar = 2.0  # length of bar [m]
M = 1.0  # [kg] cart mass
m = 0.3  # [kg] pendulum mass
g = 9.8  # [m/s^2] gravitational acceleration

nx = 4  # number of state (cart position, cart velocity, pole angle, pole rate)
nu = 1  # number of input (cart force)
Q = np.diag([0.0, 1.0, 1.0, 0.0])  # state cost matrix
R = np.diag([0.01])  # input cost matrix

T = 30  # Horizon length
delta_t = 0.1  # time tick [s]
sim_time = 5.0  # simulation time [s]

show_animation = True
def main():
    """Simulate MPC stabilization of an inverted pendulum on a cart.

    Starts from a small pole offset (theta = 0.3 rad), applies the first
    input of each MPC solution, and steps until ``sim_time`` elapses,
    optionally animating each step.
    """
    x0 = np.array([
        [0.0],
        [0.0],
        [0.3],
        [0.0]
    ])

    x = np.copy(x0)
    # named `t` (not `time`) so the imported `time` module is not shadowed
    t = 0.0

    while sim_time > t:
        t += delta_t

        # calc control input
        opt_x, opt_delta_x, opt_theta, opt_delta_theta, opt_input = \
            mpc_control(x)

        # get input: apply only the first command of the horizon
        u = opt_input[0]

        # simulate inverted pendulum cart
        x = simulation(x, u)

        if show_animation:
            plt.clf()
            px = float(x[0, 0])
            theta = float(x[2, 0])
            plot_cart(px, theta)
            plt.xlim([-5.0, 2.0])
            plt.pause(0.001)

    print("Finish")
    print(f"x={float(x[0, 0]):.2f} [m] , theta={math.degrees(x[2, 0]):.2f} [deg]")
    if show_animation:
        plt.show()
def simulation(x, u):
    """Advance the cart-pendulum state one time step: x' = A x + B u."""
    A, B = get_model_matrix()
    return A @ x + B @ u
def mpc_control(x0):
    """Solve a finite-horizon MPC problem from the initial state *x0*.

    Builds a convex QP over horizon T with quadratic state/input costs and
    the linear dynamics as equality constraints, then solves it with cvxpy.

    Returns the optimal state trajectories (ox, dx, theta, d_theta) and
    input sequence ou as flat arrays, or a tuple of Nones when the solver
    does not report an optimal solution.
    """
    x = cvxpy.Variable((nx, T + 1))
    u = cvxpy.Variable((nu, T))

    A, B = get_model_matrix()

    cost = 0.0
    constr = []
    # stage costs and dynamics constraints over the horizon
    for t in range(T):
        cost += cvxpy.quad_form(x[:, t + 1], Q)
        cost += cvxpy.quad_form(u[:, t], R)
        constr += [x[:, t + 1] == A @ x[:, t] + B @ u[:, t]]

    # initial-state constraint
    constr += [x[:, 0] == x0[:, 0]]
    prob = cvxpy.Problem(cvxpy.Minimize(cost), constr)

    start = time.time()
    prob.solve(verbose=False)
    elapsed_time = time.time() - start
    print(f"calc time:{elapsed_time:.6f} [sec]")

    if prob.status == cvxpy.OPTIMAL:
        ox = get_numpy_array_from_matrix(x.value[0, :])
        dx = get_numpy_array_from_matrix(x.value[1, :])
        theta = get_numpy_array_from_matrix(x.value[2, :])
        d_theta = get_numpy_array_from_matrix(x.value[3, :])

        ou = get_numpy_array_from_matrix(u.value[0, :])
    else:
        ox, dx, theta, d_theta, ou = None, None, None, None, None

    return ox, dx, theta, d_theta, ou
def get_numpy_array_from_matrix(x):
    """Return *x* copied into a flat 1-D numpy array."""
    return np.array(x).ravel()
def get_model_matrix():
    """Build the discrete linear cart-pendulum model.

    Returns (A, B) such that x[k+1] = A x[k] + B u[k], obtained by
    forward-Euler discretization of the dynamics linearized about the
    upright equilibrium.
    """
    # continuous-time linearized dynamics
    A_c = np.array([
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, m * g / M, 0.0],
        [0.0, 0.0, 0.0, 1.0],
        [0.0, 0.0, g * (M + m) / (l_bar * M), 0.0]
    ])
    B_c = np.array([
        [0.0],
        [1.0 / M],
        [0.0],
        [1.0 / (l_bar * M)]
    ])

    # forward-Euler discretization with step delta_t
    A = np.eye(nx) + delta_t * A_c
    B = delta_t * B_c
    return A, B
def flatten(a):
    """Flatten *a* into a 1-D numpy array."""
    return np.asarray(a).flatten()
def plot_cart(xt, theta):
    """Draw the cart, pole, wheels and pole-tip mass at cart position
    *xt* [m] and pole angle *theta* [rad] on the current axes."""
    cart_w = 1.0
    cart_h = 0.5
    radius = 0.1

    # cart body outline (closed polygon), lifted by the wheel diameter
    cx = np.array([-cart_w / 2.0, cart_w / 2.0, cart_w /
                   2.0, -cart_w / 2.0, -cart_w / 2.0])
    cy = np.array([0.0, 0.0, cart_h, cart_h, 0.0])
    cy += radius * 2.0

    cx = cx + xt

    # pole segment from the cart top to the tip mass
    bx = np.array([0.0, l_bar * math.sin(-theta)])
    bx += xt
    by = np.array([cart_h, l_bar * math.cos(-theta) + cart_h])
    by += radius * 2.0

    # circle samples reused for both wheels and the pole-tip mass
    angles = np.arange(0.0, math.pi * 2.0, math.radians(3.0))
    ox = np.array([radius * math.cos(a) for a in angles])
    oy = np.array([radius * math.sin(a) for a in angles])

    rwx = np.copy(ox) + cart_w / 4.0 + xt
    rwy = np.copy(oy) + radius
    lwx = np.copy(ox) - cart_w / 4.0 + xt
    lwy = np.copy(oy) + radius

    wx = np.copy(ox) + bx[-1]
    wy = np.copy(oy) + by[-1]

    plt.plot(flatten(cx), flatten(cy), "-b")
    plt.plot(flatten(bx), flatten(by), "-k")
    plt.plot(flatten(rwx), flatten(rwy), "-k")
    plt.plot(flatten(lwx), flatten(lwy), "-k")
    plt.plot(flatten(wx), flatten(wy), "-k")
    plt.title(f"x: {xt:.2f} , theta: {math.degrees(theta):.2f}")

    # for stopping simulation with the esc key.
    plt.gcf().canvas.mpl_connect(
        'key_release_event',
        lambda event: [exit(0) if event.key == 'escape' else None])

    plt.axis("equal")


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/Control/inverted_pendulum/inverted_pendulum_mpc_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,491 |
```python
import numpy as np
from scipy.spatial.transform import Rotation as Rot
def rot_mat_2d(angle):
    """
    Create 2D rotation matrix from an angle

    Parameters
    ----------
    angle : float
        rotation angle [rad]

    Returns
    -------
    A 2x2 rotation matrix (ndarray)

    Examples
    --------
    >>> bool(np.allclose(rot_mat_2d(0.0), np.eye(2)))
    True
    """
    # take the upper-left 2x2 block of the 3D z-axis rotation
    return Rot.from_euler('z', angle).as_matrix()[0:2, 0:2]
def angle_mod(x, zero_2_2pi=False, degree=False):
    """
    Wrap an angle (or array of angles) into a canonical range.

    Default range is [-pi, pi); with ``zero_2_2pi`` it is [0, 2pi).

    Parameters
    ----------
    x : float or array_like
        Angle(s) to wrap. Array input is flattened for the calculation;
        a float input yields a float result.
    zero_2_2pi : bool, optional
        Wrap into [0, 2pi) instead of [-pi, pi). Default is False.
    degree : bool, optional
        If True, *x* is given (and returned) in degrees. Default is False.

    Returns
    -------
    ret : float or ndarray
        The wrapped angle(s).

    Examples
    --------
    >>> bool(abs(angle_mod(-4.0) - 2.28318531) < 1e-8)
    True
    >>> bool(np.allclose(angle_mod([-150.0, 190.0, 350], degree=True),
    ...                  [-150.0, -170.0, -10.0]))
    True
    """
    # float input is special-cased so the caller gets a plain float back
    is_scalar_float = isinstance(x, float)

    angles = np.asarray(x).flatten()
    if degree:
        angles = np.deg2rad(angles)

    if zero_2_2pi:
        wrapped = angles % (2 * np.pi)
    else:
        wrapped = (angles + np.pi) % (2 * np.pi) - np.pi

    if degree:
        wrapped = np.rad2deg(wrapped)

    return wrapped.item() if is_scalar_float else wrapped
``` | /content/code_sandbox/utils/angle.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 479 |
```python
"""
Matplotlib based plotting utilities
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import art3d
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d.proj3d import proj_transform
from mpl_toolkits.mplot3d import Axes3D
from utils.angle import rot_mat_2d
def plot_covariance_ellipse(x, y, cov, chi2=3.0, color="-r", ax=None):
    """
    This function plots an ellipse that represents a covariance matrix. The ellipse is centered at (x, y) and its shape, size and rotation are determined by the covariance matrix.

    Parameters:
    x : (float) The x-coordinate of the center of the ellipse.
    y : (float) The y-coordinate of the center of the ellipse.
    cov : (numpy.ndarray) A 2x2 covariance matrix that determines the shape, size, and rotation of the ellipse.
    chi2 : (float, optional) A scalar value that scales the ellipse size. This value is typically set based on chi-squared distribution quantiles to achieve certain confidence levels (e.g., 3.0 corresponds to ~95% confidence for a 2D Gaussian). Defaults to 3.0.
    color : (str, optional) The color and line style of the ellipse plot, following matplotlib conventions. Defaults to "-r" (a red solid line).
    ax : (matplotlib.axes.Axes, optional) The Axes object to draw the ellipse on. If None (default), a new figure and axes are created.

    Returns:
    None. This function plots the covariance ellipse on the specified axes.
    """
    eig_val, eig_vec = np.linalg.eig(cov)

    # pick the larger eigenvalue as the major-axis direction
    if eig_val[0] >= eig_val[1]:
        big_ind = 0
        small_ind = 1
    else:
        big_ind = 1
        small_ind = 0

    # NOTE(review): assumes cov is positive semi-definite — math.sqrt
    # raises ValueError on a negative eigenvalue.
    a = math.sqrt(chi2 * eig_val[big_ind])
    b = math.sqrt(chi2 * eig_val[small_ind])
    # rotation of the major axis relative to the x axis
    angle = math.atan2(eig_vec[1, big_ind], eig_vec[0, big_ind])
    plot_ellipse(x, y, a, b, angle, color=color, ax=ax)
def plot_ellipse(x, y, a, b, angle, color="-r", ax=None, **kwargs):
    """
    This function plots an ellipse based on the given parameters.

    Parameters
    ----------
    x : (float) The x-coordinate of the center of the ellipse.
    y : (float) The y-coordinate of the center of the ellipse.
    a : (float) The length of the semi-major axis of the ellipse.
    b : (float) The length of the semi-minor axis of the ellipse.
    angle : (float) The rotation angle of the ellipse, in radians.
    color : (str, optional) The color and line style of the ellipse plot, following matplotlib conventions. Defaults to "-r" (a red solid line).
    ax : (matplotlib.axes.Axes, optional) The Axes object to draw the ellipse on. If None (default), a new figure and axes are created.
    **kwargs: Additional keyword arguments to pass to plt.plot or ax.plot.

    Returns
    ---------
    None. This function plots the ellipse based on the specified parameters.
    """
    # sample an axis-aligned ellipse parametrically
    t = np.arange(0, 2 * math.pi + 0.1, 0.1)
    px = [a * math.cos(it) for it in t]
    py = [b * math.sin(it) for it in t]
    # rotate by `angle`, then translate to the center (x, y)
    fx = rot_mat_2d(angle) @ (np.array([px, py]))
    px = np.array(fx[0, :] + x).flatten()
    py = np.array(fx[1, :] + y).flatten()
    if ax is None:
        plt.plot(px, py, color, **kwargs)
    else:
        ax.plot(px, py, color, **kwargs)
def plot_arrow(x, y, yaw, arrow_length=1.0,
               origin_point_plot_style="xr",
               head_width=0.1, fc="r", ec="k", **kwargs):
    """
    Plot an arrow or arrows based on 2D state (x, y, yaw)

    All optional settings of matplotlib.pyplot.arrow can be used
    (see the matplotlib.pyplot.arrow documentation).

    Parameters
    ----------
    x : a float or array_like
        a value or a list of arrow origin x position.
    y : a float or array_like
        a value or a list of arrow origin y position.
    yaw : a float or array_like
        a value or a list of arrow yaw angle (orientation).
    arrow_length : a float (optional)
        arrow length. default is 1.0
    origin_point_plot_style : str (optional)
        origin point plot style. If None, not plotting.
    head_width : a float (optional)
        arrow head width. default is 0.1
    fc : string (optional)
        face color
    ec : string (optional)
        edge color
    """
    # array-like input: recurse once per (x, y, yaw) triple
    if not isinstance(x, float):
        for (i_x, i_y, i_yaw) in zip(x, y, yaw):
            plot_arrow(i_x, i_y, i_yaw, head_width=head_width,
                       fc=fc, ec=ec, **kwargs)
    else:
        plt.arrow(x, y,
                  arrow_length * math.cos(yaw),
                  arrow_length * math.sin(yaw),
                  head_width=head_width,
                  fc=fc, ec=ec,
                  **kwargs)
        if origin_point_plot_style is not None:
            plt.plot(x, y, origin_point_plot_style)
def plot_curvature(x_list, y_list, heading_list, curvature,
                   k=0.01, c="-c", label="Curvature"):
    """
    Plot curvature on 2D path. This plot is a line from the original path,
    the lateral distance from the original path shows curvature magnitude.
    Left turning shows right side plot, right turning shows left side plot.
    For straight path, the curvature plot will be on the path, because
    curvature is 0 on the straight path.

    Parameters
    ----------
    x_list : array_like
        x position list of the path
    y_list : array_like
        y position list of the path
    heading_list : array_like
        heading list of the path
    curvature : array_like
        curvature list of the path
    k : float
        curvature scale factor to calculate distance from the original path
    c : string
        color of the plot
    label : string
        label of the plot
    """
    # offset each path point laterally (perpendicular to the heading)
    # by a distance proportional to the local curvature
    cx = [x + d * k * np.cos(yaw - np.pi / 2.0) for x, y, yaw, d in
          zip(x_list, y_list, heading_list, curvature)]
    cy = [y + d * k * np.sin(yaw - np.pi / 2.0) for x, y, yaw, d in
          zip(x_list, y_list, heading_list, curvature)]

    plt.plot(cx, cy, c, label=label)
    # connect each original point with its offset point
    for ix, iy, icx, icy in zip(x_list, y_list, cx, cy):
        plt.plot([ix, icx], [iy, icy], c)
class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch that stores a 3D origin and direction and projects
    them onto the 2D canvas at draw time."""

    def __init__(self, x, y, z, dx, dy, dz, *args, **kwargs):
        # initialize with dummy 2D endpoints; real positions are set on draw
        super().__init__((0, 0), (0, 0), *args, **kwargs)
        self._xyz = (x, y, z)       # arrow origin in 3D
        self._dxdydz = (dx, dy, dz)  # arrow direction/extent in 3D

    def draw(self, renderer):
        """Project the stored 3D endpoints into display space and draw."""
        x1, y1, z1 = self._xyz
        dx, dy, dz = self._dxdydz
        x2, y2, z2 = (x1 + dx, y1 + dy, z1 + dz)

        xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        super().draw(renderer)

    def do_3d_projection(self, renderer=None):
        """Update the 2D positions and return the depth used for z-sorting."""
        x1, y1, z1 = self._xyz
        dx, dy, dz = self._dxdydz
        x2, y2, z2 = (x1 + dx, y1 + dy, z1 + dz)

        xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))

        return np.min(zs)
def _arrow3D(ax, x, y, z, dx, dy, dz, *args, **kwargs):
    '''Add an 3d arrow to an `Axes3D` instance.'''
    arrow = Arrow3D(x, y, z, dx, dy, dz, *args, **kwargs)
    ax.add_artist(arrow)


def plot_3d_vector_arrow(ax, p1, p2):
    """Draw a 3D arrow from point *p1* to point *p2* on axis *ax*."""
    # monkey-patch _arrow3D onto Axes3D so it can be called as a method
    setattr(Axes3D, 'arrow3D', _arrow3D)
    ax.arrow3D(p1[0], p1[1], p1[2],
               p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2],
               mutation_scale=20,
               arrowstyle="-|>",
               )
def plot_triangle(p1, p2, p3, ax):
    """Draw a filled blue triangle with 3D vertices p1, p2, p3 on *ax*."""
    ax.add_collection3d(art3d.Poly3DCollection([[p1, p2, p3]], color='b'))
def set_equal_3d_axis(ax, x_lims, y_lims, z_lims):
    """Helper function to set equal axis

    Args:
        ax (Axes3DSubplot): matplotlib 3D axis, created by
            `ax = fig.add_subplot(projection='3d')`
        x_lims (np.array): array containing min and max value of x
        y_lims (np.array): array containing min and max value of y
        z_lims (np.array): array containing min and max value of z
    """
    x_lims = np.asarray(x_lims)
    y_lims = np.asarray(y_lims)
    z_lims = np.asarray(z_lims)

    # half of the largest span over the three axes
    spans = np.array([lims.max() - lims.min()
                      for lims in (x_lims, y_lims, z_lims)])
    half_range = spans.max() / 2.0

    # mid-point of each axis
    mid_x, mid_y, mid_z = [(lims.max() + lims.min()) * 0.5
                           for lims in (x_lims, y_lims, z_lims)]

    # give every axis the same total span, centered on its own mid-point
    ax.set_xlim(mid_x - half_range, mid_x + half_range)
    ax.set_ylim(mid_y - half_range, mid_y + half_range)
    ax.set_zlim(mid_z - half_range, mid_z + half_range)
if __name__ == '__main__':
    # simple visual self-check: draw a rotated ellipse
    plot_ellipse(0, 0, 1, 2, np.deg2rad(15))
    plt.axis('equal')
    plt.show()
``` | /content/code_sandbox/utils/plot.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,505 |
```python
"""
Extended Kalman Filter SLAM example
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.angle import angle_mod
# EKF state covariance (process noise on x, y, yaw)
Cx = np.diag([0.5, 0.5, np.deg2rad(30.0)]) ** 2

# Simulation parameter
Q_sim = np.diag([0.2, np.deg2rad(1.0)]) ** 2  # observation noise (range, bearing)
R_sim = np.diag([1.0, np.deg2rad(10.0)]) ** 2  # input noise (v, yaw rate)

DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
MAX_RANGE = 20.0  # maximum observation range
M_DIST_TH = 2.0  # Threshold of Mahalanobis distance for data association.
STATE_SIZE = 3  # State size [x,y,yaw]
LM_SIZE = 2  # LM state size [x,y]

show_animation = True
def ekf_slam(xEst, PEst, u, z):
    """One EKF-SLAM predict/update cycle.

    Parameters
    ----------
    xEst : state vector [x, y, yaw, lm1_x, lm1_y, ...] as a column vector
    PEst : covariance matrix matching xEst
    u : control input [v, yaw_rate] column vector
    z : observation matrix, one row [range, bearing, landmark_index] each

    Returns
    -------
    (xEst, PEst) updated (and possibly extended by new landmarks).
    """
    # Predict
    G, Fx = jacob_motion(xEst, u)
    xEst[0:STATE_SIZE] = motion_model(xEst[0:STATE_SIZE], u)
    PEst = G.T @ PEst @ G + Fx.T @ Cx @ Fx
    initP = np.eye(2)  # initial covariance for a newly registered landmark

    # Update
    for iz in range(len(z[:, 0])):  # for each observation
        min_id = search_correspond_landmark_id(xEst, PEst, z[iz, 0:2])

        nLM = calc_n_lm(xEst)
        # association returned index == current landmark count -> new landmark
        if min_id == nLM:
            print("New LM")
            # Extend state and covariance matrix
            xAug = np.vstack((xEst, calc_landmark_position(xEst, z[iz, :])))
            PAug = np.vstack((np.hstack((PEst, np.zeros((len(xEst), LM_SIZE)))),
                              np.hstack((np.zeros((LM_SIZE, len(xEst))), initP))))
            xEst = xAug
            PEst = PAug
        lm = get_landmark_position_from_state(xEst, min_id)
        y, S, H = calc_innovation(lm, xEst, PEst, z[iz, 0:2], min_id)

        # standard EKF measurement update
        K = (PEst @ H.T) @ np.linalg.inv(S)
        xEst = xEst + (K @ y)
        PEst = (np.eye(len(xEst)) - (K @ H)) @ PEst

    xEst[2] = pi_2_pi(xEst[2])

    return xEst, PEst
def calc_input():
    """Constant control input: 1.0 m/s forward, 0.1 rad/s yaw rate,
    returned as a 2x1 column vector."""
    v = 1.0  # [m/s]
    yaw_rate = 0.1  # [rad/s]
    return np.array([[v], [yaw_rate]])
def observation(xTrue, xd, u, RFID):
    """Simulate one motion + sensing step.

    Moves the true pose by *u*, builds noisy range-bearing observations of
    every landmark within MAX_RANGE, and advances the dead-reckoning pose
    *xd* with a noise-corrupted copy of the input.

    Returns (xTrue, z, xd, ud) where z rows are [range, bearing, lm_index].
    """
    xTrue = motion_model(xTrue, u)

    # add noise to gps x-y
    z = np.zeros((0, 3))

    for i in range(len(RFID[:, 0])):

        dx = RFID[i, 0] - xTrue[0, 0]
        dy = RFID[i, 1] - xTrue[1, 0]
        d = math.hypot(dx, dy)
        angle = pi_2_pi(math.atan2(dy, dx) - xTrue[2, 0])
        if d <= MAX_RANGE:
            # noise std is sqrt of the configured variance
            dn = d + np.random.randn() * Q_sim[0, 0] ** 0.5  # add noise
            angle_n = angle + np.random.randn() * Q_sim[1, 1] ** 0.5  # add noise
            zi = np.array([dn, angle_n, i])
            z = np.vstack((z, zi))

    # add noise to input
    ud = np.array([[
        u[0, 0] + np.random.randn() * R_sim[0, 0] ** 0.5,
        u[1, 0] + np.random.randn() * R_sim[1, 1] ** 0.5]]).T

    xd = motion_model(xd, ud)
    return xTrue, z, xd, ud
def motion_model(x, u):
    """Differential-drive motion model: advance pose x = [x, y, yaw]^T by
    input u = [v, yaw_rate]^T over one DT step."""
    F = np.eye(3)
    B = np.array([[DT * math.cos(x[2, 0]), 0],
                  [DT * math.sin(x[2, 0]), 0],
                  [0.0, DT]])
    return (F @ x) + (B @ u)
def calc_n_lm(x):
    """Number of landmarks currently stored in the state vector *x*."""
    landmark_entries = len(x) - STATE_SIZE
    return int(landmark_entries / LM_SIZE)
def jacob_motion(x, u):
    """Jacobian of the motion model for the full SLAM state.

    Returns (G, Fx): G is the state-transition Jacobian over the whole
    state, Fx the projection matrix that maps the 3-DOF robot pose into
    the full (pose + landmarks) state.
    """
    # projection from robot pose onto the full state vector
    Fx = np.hstack((np.eye(STATE_SIZE), np.zeros(
        (STATE_SIZE, LM_SIZE * calc_n_lm(x)))))

    # partial derivatives of the pose motion w.r.t. the pose itself
    jF = np.array([[0.0, 0.0, -DT * u[0, 0] * math.sin(x[2, 0])],
                   [0.0, 0.0, DT * u[0, 0] * math.cos(x[2, 0])],
                   [0.0, 0.0, 0.0]], dtype=float)

    G = np.eye(len(x)) + Fx.T @ jF @ Fx

    return G, Fx,
def calc_landmark_position(x, z):
    """Convert a range-bearing measurement z = [d, angle, ...] taken from
    pose *x* into an absolute landmark position (2x1 column vector)."""
    rng, bearing = z[0], z[1]
    heading = x[2, 0] + bearing

    zp = np.zeros((2, 1))
    zp[0, 0] = x[0, 0] + rng * math.cos(heading)
    zp[1, 0] = x[1, 0] + rng * math.sin(heading)
    return zp
def get_landmark_position_from_state(x, ind):
    """Slice landmark *ind*'s (x, y) sub-vector out of the full state."""
    start = STATE_SIZE + LM_SIZE * ind
    return x[start:start + LM_SIZE, :]
def search_correspond_landmark_id(xAug, PAug, zi):
    """
    Landmark association with Mahalanobis distance.

    Returns the index of the known landmark with the smallest Mahalanobis
    distance to observation *zi*, or `calc_n_lm(xAug)` (one past the last
    landmark) when every distance exceeds M_DIST_TH, signalling a new
    landmark.
    """
    nLM = calc_n_lm(xAug)

    min_dist = []

    for i in range(nLM):
        lm = get_landmark_position_from_state(xAug, i)
        y, S, H = calc_innovation(lm, xAug, PAug, zi, i)
        min_dist.append(y.T @ np.linalg.inv(S) @ y)

    min_dist.append(M_DIST_TH)  # new landmark

    min_id = min_dist.index(min(min_dist))

    return min_id
def calc_innovation(lm, xEst, PEst, z, LMid):
    """Compute the EKF innovation for one landmark observation.

    Returns (y, S, H): the innovation vector (measured minus predicted
    range/bearing, bearing wrapped), its covariance, and the measurement
    Jacobian.
    """
    delta = lm - xEst[0:2]
    q = (delta.T @ delta)[0, 0]  # squared distance to the landmark
    z_angle = math.atan2(delta[1, 0], delta[0, 0]) - xEst[2, 0]
    zp = np.array([[math.sqrt(q), pi_2_pi(z_angle)]])  # predicted measurement
    y = (z - zp).T
    y[1] = pi_2_pi(y[1])
    H = jacob_h(q, delta, xEst, LMid + 1)
    S = H @ PEst @ H.T + Cx[0:2, 0:2]

    return y, S, H
def jacob_h(q, delta, x, i):
    """Jacobian of the range-bearing measurement w.r.t. the full state.

    Parameters
    ----------
    q : squared distance to the landmark
    delta : 2x1 vector from robot to landmark
    x : full state vector
    i : 1-based landmark index

    Returns the 2 x len(x) measurement Jacobian H.
    """
    sq = math.sqrt(q)
    # derivatives w.r.t. [robot pose, this landmark's position]
    G = np.array([[-sq * delta[0, 0], - sq * delta[1, 0], 0, sq * delta[0, 0], sq * delta[1, 0]],
                  [delta[1, 0], - delta[0, 0], - q, - delta[1, 0], delta[0, 0]]])

    G = G / q
    nLM = calc_n_lm(x)
    # projection that places the 5-column local Jacobian into the full state
    F1 = np.hstack((np.eye(3), np.zeros((3, 2 * nLM))))
    F2 = np.hstack((np.zeros((2, 3)), np.zeros((2, 2 * (i - 1))),
                    np.eye(2), np.zeros((2, 2 * nLM - 2 * i))))

    F = np.vstack((F1, F2))

    H = G @ F

    return H
def pi_2_pi(angle):
    """Wrap *angle* to the range [-pi, pi)."""
    return angle_mod(angle)
def main():
    """Run the EKF-SLAM simulation: true pose, dead reckoning and EKF
    estimate are stepped together until SIM_TIME, with optional live
    animation of the trajectories and landmark estimates."""
    print(__file__ + " start!!")

    time = 0.0

    # RFID positions [x, y]
    RFID = np.array([[10.0, -2.0],
                     [15.0, 10.0],
                     [3.0, 15.0],
                     [-5.0, 20.0]])

    # State Vector [x y yaw v]'
    xEst = np.zeros((STATE_SIZE, 1))
    xTrue = np.zeros((STATE_SIZE, 1))
    PEst = np.eye(STATE_SIZE)

    xDR = np.zeros((STATE_SIZE, 1))  # Dead reckoning

    # history (each column is one time step)
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue

    while SIM_TIME >= time:
        time += DT
        u = calc_input()

        xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID)

        xEst, PEst = ekf_slam(xEst, PEst, ud, z)

        x_state = xEst[0:STATE_SIZE]

        # store data history
        hxEst = np.hstack((hxEst, x_state))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))

        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])

            plt.plot(RFID[:, 0], RFID[:, 1], "*k")
            plt.plot(xEst[0], xEst[1], ".r")

            # plot landmark
            for i in range(calc_n_lm(xEst)):
                plt.plot(xEst[STATE_SIZE + i * 2],
                         xEst[STATE_SIZE + i * 2 + 1], "xg")

            plt.plot(hxTrue[0, :],
                     hxTrue[1, :], "-b")
            plt.plot(hxDR[0, :],
                     hxDR[1, :], "-k")
            plt.plot(hxEst[0, :],
                     hxEst[1, :], "-r")
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)


if __name__ == '__main__':
    main()
``` | /content/code_sandbox/SLAM/EKFSLAM/ekf_slam.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,423 |
```python
"""
Graph based SLAM example
author: Atsushi Sakai (@Atsushi_twi)
Ref
[A Tutorial on Graph-Based SLAM]
(path_to_url~stachnis/pdf/grisetti10titsmag.pdf)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import copy
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.transform import Rotation as Rot
from utils.angle import angle_mod
# Simulation parameter
Q_sim = np.diag([0.2, np.deg2rad(1.0)]) ** 2  # observation noise (range, bearing)
R_sim = np.diag([0.1, np.deg2rad(10.0)]) ** 2  # input noise (v, yaw rate)

DT = 2.0  # time tick [s]
SIM_TIME = 100.0  # simulation time [s]
MAX_RANGE = 30.0  # maximum observation range
STATE_SIZE = 3  # State size [x,y,yaw]

# Covariance parameter of Graph Based SLAM
C_SIGMA1 = 0.1  # range observation sigma
C_SIGMA2 = 0.1
C_SIGMA3 = np.deg2rad(1.0)  # bearing observation sigma

MAX_ITR = 20  # Maximum iteration

show_graph_d_time = 20.0  # [s] interval between optimization runs

show_animation = True
class Edge:
    """Pose-graph edge: error vector, information matrix and the raw
    range/bearing observations that link two poses (id1, id2)."""

    def __init__(self):
        self.e = np.zeros((3, 1))  # error vector
        self.omega = np.zeros((3, 3))  # information matrix
        # range, robot yaw and landmark bearing at each of the two poses
        self.d1, self.d2 = 0.0, 0.0
        self.yaw1, self.yaw2 = 0.0, 0.0
        self.angle1, self.angle2 = 0.0, 0.0
        # time indices of the two poses this edge connects
        self.id1, self.id2 = 0, 0
def cal_observation_sigma():
    """Diagonal observation covariance built from the C_SIGMA* constants."""
    return np.diag([C_SIGMA1 ** 2, C_SIGMA2 ** 2, C_SIGMA3 ** 2])
def calc_3d_rotational_matrix(angle):
    """3x3 rotation matrix for a rotation of *angle* [rad] about the z axis."""
    rotation = Rot.from_euler('z', angle)
    return rotation.as_matrix()
def calc_edge(x1, y1, yaw1, x2, y2, yaw2, d1,
              angle1, d2, angle2, t1, t2):
    """Build one pose-graph edge from two observations of the same landmark.

    Poses (x1, y1, yaw1) at time t1 and (x2, y2, yaw2) at time t2 each saw
    the landmark at range/bearing (d1, angle1) and (d2, angle2). The edge
    error is the mismatch between the landmark positions implied by the
    two observations; the information matrix combines both observation
    covariances rotated into the world frame.
    """
    edge = Edge()

    # bearing of the landmark in the world frame as seen from each pose
    tangle1 = pi_2_pi(yaw1 + angle1)
    tangle2 = pi_2_pi(yaw2 + angle2)

    tmp1 = d1 * math.cos(tangle1)
    tmp2 = d2 * math.cos(tangle2)
    tmp3 = d1 * math.sin(tangle1)
    tmp4 = d2 * math.sin(tangle2)

    # position mismatch between the two implied landmark locations;
    # the third (yaw) component is not constrained by these observations
    edge.e[0, 0] = x2 - x1 - tmp1 + tmp2
    edge.e[1, 0] = y2 - y1 - tmp3 + tmp4
    edge.e[2, 0] = 0

    Rt1 = calc_3d_rotational_matrix(tangle1)
    Rt2 = calc_3d_rotational_matrix(tangle2)

    sig1 = cal_observation_sigma()
    sig2 = cal_observation_sigma()

    # information matrix = inverse of the combined rotated covariances
    edge.omega = np.linalg.inv(Rt1 @ sig1 @ Rt1.T + Rt2 @ sig2 @ Rt2.T)

    edge.d1, edge.d2 = d1, d2
    edge.yaw1, edge.yaw2 = yaw1, yaw2
    edge.angle1, edge.angle2 = angle1, angle2
    edge.id1, edge.id2 = t1, t2

    return edge
def calc_edges(x_list, z_list):
    """Create every pose-graph edge implied by shared landmark sightings.

    For each pair of time steps, any landmark (matched by its id in
    column 3) observed at both steps produces one edge. Also prints the
    accumulated squared error (cost) over all edges.
    """
    edges = []
    cost = 0.0
    z_ids = list(itertools.combinations(range(len(z_list)), 2))

    for (t1, t2) in z_ids:
        x1, y1, yaw1 = x_list[0, t1], x_list[1, t1], x_list[2, t1]
        x2, y2, yaw2 = x_list[0, t2], x_list[1, t2], x_list[2, t2]

        if z_list[t1] is None or z_list[t2] is None:
            continue  # No observation

        for iz1 in range(len(z_list[t1][:, 0])):
            for iz2 in range(len(z_list[t2][:, 0])):
                # column 3 holds the landmark id: same id -> same landmark
                if z_list[t1][iz1, 3] == z_list[t2][iz2, 3]:
                    d1 = z_list[t1][iz1, 0]
                    angle1, _ = z_list[t1][iz1, 1], z_list[t1][iz1, 2]
                    d2 = z_list[t2][iz2, 0]
                    angle2, _ = z_list[t2][iz2, 1], z_list[t2][iz2, 2]

                    edge = calc_edge(x1, y1, yaw1, x2, y2, yaw2, d1,
                                     angle1, d2, angle2, t1, t2)

                    edges.append(edge)
                    cost += (edge.e.T @ edge.omega @ edge.e)[0, 0]

    print("cost:", cost, ",n_edge:", len(edges))
    return edges
def calc_jacobian(edge):
    """Return (A, B): Jacobians of the edge error with respect to the
    first and second pose, respectively."""
    bearing1 = edge.yaw1 + edge.angle1
    dx1 = edge.d1 * math.cos(bearing1)
    dy1 = edge.d1 * math.sin(bearing1)
    A = np.array([[-1.0, 0.0, dy1],
                  [0.0, -1.0, -dx1],
                  [0.0, 0.0, 0.0]])

    bearing2 = edge.yaw2 + edge.angle2
    dx2 = edge.d2 * math.cos(bearing2)
    dy2 = edge.d2 * math.sin(bearing2)
    B = np.array([[1.0, 0.0, -dy2],
                  [0.0, 1.0, dx2],
                  [0.0, 0.0, 0.0]])

    return A, B
def fill_H_and_b(H, b, edge):
    """Accumulate one edge's contribution into the normal equations.

    Adds the Gauss-Newton terms J^T Omega J into H and J^T Omega e into b
    at the block positions of the edge's two poses, and returns (H, b).
    """
    A, B = calc_jacobian(edge)

    # block offsets of the two poses in the stacked state vector
    id1 = edge.id1 * STATE_SIZE
    id2 = edge.id2 * STATE_SIZE

    H[id1:id1 + STATE_SIZE, id1:id1 + STATE_SIZE] += A.T @ edge.omega @ A
    H[id1:id1 + STATE_SIZE, id2:id2 + STATE_SIZE] += A.T @ edge.omega @ B
    H[id2:id2 + STATE_SIZE, id1:id1 + STATE_SIZE] += B.T @ edge.omega @ A
    H[id2:id2 + STATE_SIZE, id2:id2 + STATE_SIZE] += B.T @ edge.omega @ B

    b[id1:id1 + STATE_SIZE] += (A.T @ edge.omega @ edge.e)
    b[id2:id2 + STATE_SIZE] += (B.T @ edge.omega @ edge.e)

    return H, b
def graph_based_slam(x_init, hz):
    """Optimize the trajectory with graph-based SLAM.

    Repeatedly builds the pose-graph edges from the observation history,
    assembles the Gauss-Newton normal equations H dx = -b, solves for the
    correction and applies it, until the update is small or MAX_ITR is
    reached.

    Parameters
    ----------
    x_init : 3xN array of initial pose estimates (e.g. dead reckoning)
    hz : list of per-step observation arrays (or None for no observation)

    Returns
    -------
    3xN array of optimized poses.
    """
    print("start graph based slam")

    z_list = copy.deepcopy(hz)

    x_opt = copy.deepcopy(x_init)
    nt = x_opt.shape[1]
    n = nt * STATE_SIZE

    for itr in range(MAX_ITR):
        edges = calc_edges(x_opt, z_list)

        H = np.zeros((n, n))
        b = np.zeros((n, 1))

        for edge in edges:
            H, b = fill_H_and_b(H, b, edge)

        # to fix origin (otherwise H is singular up to a rigid transform)
        H[0:STATE_SIZE, 0:STATE_SIZE] += np.identity(STATE_SIZE)

        # solve H dx = -b directly; faster and numerically more stable
        # than forming the explicit inverse of H
        dx = -np.linalg.solve(H, b)

        for i in range(nt):
            x_opt[0:3, i] += dx[i * 3:i * 3 + 3, 0]

        diff = (dx.T @ dx)[0, 0]
        print("iteration: %d, diff: %f" % (itr + 1, diff))
        if diff < 1.0e-5:
            break

    return x_opt
def calc_input():
    """Constant control input: 1.0 m/s forward, 0.1 rad/s yaw rate,
    as a 2x1 column vector."""
    v = 1.0  # [m/s]
    yaw_rate = 0.1  # [rad/s]
    return np.array([[v], [yaw_rate]])
def observation(xTrue, xd, u, RFID):
    """Simulate one time step.

    Propagates the true pose, produces noisy range/bearing observations of
    the in-range RFID landmarks, and propagates the dead-reckoning pose
    with a noisy control input.

    Returns ``(xTrue, z, xd, ud)`` where ``z`` is an (n_obs, 4) array of
    ``[range, bearing, global angle, landmark id]`` rows.
    """
    xTrue = motion_model(xTrue, u)
    z = np.zeros((0, 4))
    for i in range(len(RFID[:, 0])):
        dx = RFID[i, 0] - xTrue[0, 0]
        dy = RFID[i, 1] - xTrue[1, 0]
        d = math.hypot(dx, dy)
        bearing = pi_2_pi(math.atan2(dy, dx)) - xTrue[2, 0]
        global_angle = pi_2_pi(math.atan2(dy, dx))
        if d > MAX_RANGE:
            continue
        # NOTE(review): the noise scale uses Q_sim[i, i] directly; the
        # FastSLAM examples use Q_SIM[i, i] ** 0.5 (a standard deviation).
        # Confirm whether Q_sim here holds variances or std devs.
        noisy_d = d + np.random.randn() * Q_sim[0, 0]
        angle_noise = np.random.randn() * Q_sim[1, 1]
        z = np.vstack((z, np.array([noisy_d,
                                    bearing + angle_noise,
                                    global_angle + angle_noise,
                                    i])))
    # add noise to the control input for dead reckoning
    ud1 = u[0, 0] + np.random.randn() * R_sim[0, 0]
    ud2 = u[1, 0] + np.random.randn() * R_sim[1, 1]
    ud = np.array([[ud1, ud2]]).T
    xd = motion_model(xd, ud)
    return xTrue, z, xd, ud
def motion_model(x, u):
    """Advance state ``x = [x, y, yaw]^T`` one DT step with input ``u = [v, yaw_rate]^T``."""
    yaw = x[2, 0]
    F = np.identity(3)
    B = np.array([[DT * math.cos(yaw), 0.0],
                  [DT * math.sin(yaw), 0.0],
                  [0.0, DT]])
    return F @ x + B @ u
def pi_2_pi(angle):
    """Normalize an angle into the +/- pi range via ``utils.angle.angle_mod``."""
    return angle_mod(angle)
def main():
    """Run the graph based SLAM simulation and (optionally) animate it."""
    print(__file__ + " start!!")
    time = 0.0
    # RFID positions [x, y, yaw]
    RFID = np.array([[10.0, -2.0, 0.0],
                     [15.0, 10.0, 0.0],
                     [3.0, 15.0, 0.0],
                     [-5.0, 20.0, 0.0],
                     [-5.0, 5.0, 0.0]
                     ])
    # State Vector [x y yaw v]'
    xTrue = np.zeros((STATE_SIZE, 1))
    xDR = np.zeros((STATE_SIZE, 1))  # Dead reckoning
    # history
    hxTrue = []
    hxDR = []
    hz = []
    d_time = 0.0
    init = False
    while SIM_TIME >= time:
        if not init:
            # First iteration: seed both histories with the initial pose.
            hxTrue = xTrue
            hxDR = xTrue
            init = True
        else:
            hxDR = np.hstack((hxDR, xDR))
            hxTrue = np.hstack((hxTrue, xTrue))
        time += DT
        d_time += DT
        u = calc_input()
        xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID)
        hz.append(z)
        # Re-optimize the whole trajectory every show_graph_d_time seconds.
        if d_time >= show_graph_d_time:
            x_opt = graph_based_slam(hxDR, hz)
            d_time = 0.0
            if show_animation:  # pragma: no cover
                plt.cla()
                # for stopping simulation with the esc key.
                plt.gcf().canvas.mpl_connect(
                    'key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
                plt.plot(RFID[:, 0], RFID[:, 1], "*k")
                plt.plot(hxTrue[0, :].flatten(),
                         hxTrue[1, :].flatten(), "-b")
                plt.plot(hxDR[0, :].flatten(),
                         hxDR[1, :].flatten(), "-k")
                plt.plot(x_opt[0, :].flatten(),
                         x_opt[1, :].flatten(), "-r")
                plt.axis("equal")
                plt.grid(True)
                plt.title("Time" + str(time)[0:5])
                plt.pause(1.0)
# Entry point: run the graph based SLAM demo as a script.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graph_based_slam.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,809 |
```python
"""
FastSLAM 2.0 example
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.angle import angle_mod
# Fast SLAM covariance
Q = np.diag([3.0, np.deg2rad(10.0)]) ** 2  # measurement noise [range, bearing]
R = np.diag([1.0, np.deg2rad(20.0)]) ** 2  # input noise [v, yaw_rate]
# Simulation parameter (noise actually injected by the simulator)
Q_SIM = np.diag([0.3, np.deg2rad(2.0)]) ** 2  # simulated measurement noise
R_SIM = np.diag([0.5, np.deg2rad(10.0)]) ** 2  # simulated input noise
OFFSET_YAW_RATE_NOISE = 0.01  # constant yaw-rate bias added to dead reckoning
DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
MAX_RANGE = 20.0  # maximum observation range
M_DIST_TH = 2.0  # Threshold of Mahalanobis distance for data association.
STATE_SIZE = 3  # State size [x,y,yaw]
LM_SIZE = 2  # LM state size [x,y]
N_PARTICLE = 100  # number of particle
NTH = N_PARTICLE / 1.5  # Number of particle for re-sampling
show_animation = True  # set False to run headless (e.g. in tests)
class Particle:
    """One FastSLAM 2.0 particle: a pose hypothesis plus per-landmark EKF estimates."""

    def __init__(self, n_landmark):
        self.w = 1.0 / N_PARTICLE  # importance weight (uniform at start)
        self.x = 0.0  # pose x [m]
        self.y = 0.0  # pose y [m]
        self.yaw = 0.0  # pose yaw [rad]
        self.P = np.eye(3)  # pose covariance used by the proposal sampling
        # landmark x-y positions
        self.lm = np.zeros((n_landmark, LM_SIZE))
        # landmark position covariance
        self.lmP = np.zeros((n_landmark * LM_SIZE, LM_SIZE))
def fast_slam2(particles, u, z):
    """Run one FastSLAM 2.0 cycle: predict, update with observations, resample."""
    predicted = predict_particles(particles, u)
    updated = update_with_observation(predicted, z)
    return resampling(updated)
def normalize_weight(particles):
    """Normalize particle weights to sum to one.

    Falls back to uniform weights when the total weight is zero
    (all-zero weights would otherwise divide by zero).
    """
    total = sum(p.w for p in particles)
    if total == 0.0:
        for i in range(N_PARTICLE):
            particles[i].w = 1.0 / N_PARTICLE
    else:
        for i in range(N_PARTICLE):
            particles[i].w /= total
    return particles
def calc_final_state(particles):
    """Return the weighted-mean state ``[x, y, yaw]^T`` of the particle set."""
    particles = normalize_weight(particles)
    x_est = np.zeros((STATE_SIZE, 1))
    for i in range(N_PARTICLE):
        p = particles[i]
        x_est[0, 0] += p.w * p.x
        x_est[1, 0] += p.w * p.y
        x_est[2, 0] += p.w * p.yaw
    x_est[2, 0] = pi_2_pi(x_est[2, 0])
    return x_est
def predict_particles(particles, u):
    """Sample a new pose for every particle from the noisy motion model."""
    for i in range(N_PARTICLE):
        state = np.zeros((STATE_SIZE, 1))
        state[0, 0] = particles[i].x
        state[1, 0] = particles[i].y
        state[2, 0] = particles[i].yaw
        noisy_u = u + (np.random.randn(1, 2) @ R ** 0.5).T  # add control noise
        state = motion_model(state, noisy_u)
        particles[i].x = state[0, 0]
        particles[i].y = state[1, 0]
        particles[i].yaw = state[2, 0]
    return particles
def add_new_lm(particle, z, Q_cov):
    """Initialize a newly observed landmark for this particle.

    The landmark position is set by inverting the range/bearing
    measurement; its covariance is obtained by propagating ``Q_cov``
    through the inverse measurement Jacobian.
    """
    r, b, lm_id = z[0], z[1], int(z[2])
    s = math.sin(pi_2_pi(particle.yaw + b))
    c = math.cos(pi_2_pi(particle.yaw + b))
    particle.lm[lm_id, 0] = particle.x + r * c
    particle.lm[lm_id, 1] = particle.y + r * s
    # covariance via the measurement Jacobian w.r.t. the landmark position
    dx, dy = r * c, r * s
    d2 = dx ** 2 + dy ** 2
    d = math.sqrt(d2)
    Gz = np.array([[dx / d, dy / d],
                   [-dy / d2, dx / d2]])
    particle.lmP[2 * lm_id:2 * lm_id + 2] = np.linalg.inv(
        Gz) @ Q_cov @ np.linalg.inv(Gz.T)
    return particle
def compute_jacobians(particle, xf, Pf, Q_cov):
    """Return the predicted measurement and measurement Jacobians.

    Returns
    -------
    zp : np.ndarray
        Predicted ``[range, bearing]^T`` (2x1).
    Hv : np.ndarray
        Jacobian w.r.t. the vehicle pose (2x3).
    Hf : np.ndarray
        Jacobian w.r.t. the landmark position (2x2).
    Sf : np.ndarray
        Innovation covariance (2x2).
    """
    dx = xf[0, 0] - particle.x
    dy = xf[1, 0] - particle.y
    sq_dist = dx ** 2 + dy ** 2
    dist = math.sqrt(sq_dist)
    zp = np.array(
        [dist, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]).reshape(2, 1)
    Hv = np.array([[-dx / dist, -dy / dist, 0.0],
                   [dy / sq_dist, -dx / sq_dist, -1.0]])
    Hf = np.array([[dx / dist, dy / dist],
                   [-dy / sq_dist, dx / sq_dist]])
    Sf = Hf @ Pf @ Hf.T + Q_cov
    return zp, Hv, Hf, Sf
def update_kf_with_cholesky(xf, Pf, v, Q_cov, Hf):
    """Kalman-filter measurement update using a Cholesky factorization.

    Factorizing the innovation covariance instead of inverting it keeps
    the update numerically well conditioned.

    Parameters
    ----------
    xf : np.ndarray
        Prior state mean (column vector).
    Pf : np.ndarray
        Prior state covariance.
    v : np.ndarray
        Measurement innovation (column vector).
    Q_cov : np.ndarray
        Measurement noise covariance.
    Hf : np.ndarray
        Measurement Jacobian.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        Updated mean and covariance.
    """
    cross_cov = Pf @ Hf.T
    innovation_cov = Hf @ cross_cov + Q_cov
    # Symmetrize before factorizing to guard against round-off.
    innovation_cov = (innovation_cov + innovation_cov.T) * 0.5
    chol_upper = np.linalg.cholesky(innovation_cov).T
    chol_inv = np.linalg.inv(chol_upper)
    half_gain = cross_cov @ chol_inv
    gain = half_gain @ chol_inv.T
    return xf + gain @ v, Pf - half_gain @ half_gain.T
def update_landmark(particle, z, Q_cov):
    """EKF-update the observed landmark of one particle (in place).

    Parameters
    ----------
    particle : Particle
        The particle whose landmark estimate is updated.
    z : np.ndarray
        One observation ``[range, bearing, landmark_id]``.
    Q_cov : np.ndarray
        Measurement noise covariance (2x2).

    Returns
    -------
    Particle
        The updated particle.
    """
    lm_id = int(z[2])
    xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
    Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2])
    zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q_cov)
    dz = z[0:2].reshape(2, 1) - zp
    dz[1, 0] = pi_2_pi(dz[1, 0])
    # Bug fix: pass the Q_cov argument instead of the module-level Q so
    # the caller-supplied measurement covariance is actually honored.
    xf, Pf = update_kf_with_cholesky(xf, Pf, dz, Q_cov, Hf)
    particle.lm[lm_id, :] = xf.T
    particle.lmP[2 * lm_id:2 * lm_id + 2, :] = Pf
    return particle
def compute_weight(particle, z, Q_cov):
    """Compute the particle's importance weight for one observation.

    The weight is the Gaussian likelihood of the measurement innovation
    under the innovation covariance ``Sf``.

    Parameters
    ----------
    particle : Particle
        The particle being weighted.
    z : np.ndarray
        One observation ``[range, bearing, landmark_id]``.
    Q_cov : np.ndarray
        Measurement noise covariance (2x2).

    Returns
    -------
    float
        The likelihood weight (1.0 when ``Sf`` is singular).
    """
    lm_id = int(z[2])
    xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
    Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2])
    zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q_cov)
    dz = z[0:2].reshape(2, 1) - zp
    dz[1, 0] = pi_2_pi(dz[1, 0])
    try:
        invS = np.linalg.inv(Sf)
    except np.linalg.LinAlgError:
        # Bug fix: np.linalg.linalg was a private alias removed in
        # NumPy 2.0; np.linalg.LinAlgError is the public exception path.
        # Singular innovation covariance: fall back to a neutral weight.
        return 1.0
    num = np.exp(-0.5 * dz.T @ invS @ dz)[0, 0]
    den = 2.0 * math.pi * math.sqrt(np.linalg.det(Sf))
    return num / den
def proposal_sampling(particle, z, Q_cov):
    """Refine the particle pose with the optimal proposal distribution.

    Fuses the current observation into the particle's pose estimate --
    the FastSLAM 2.0 improvement over sampling from the motion model only.
    """
    lm_id = int(z[2])
    xf = particle.lm[lm_id, :].reshape(2, 1)
    Pf = particle.lmP[2 * lm_id:2 * lm_id + 2]
    # current pose estimate and covariance
    pose = np.array([particle.x, particle.y, particle.yaw]).reshape(3, 1)
    P = particle.P
    zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q_cov)
    Sfi = np.linalg.inv(Sf)
    innovation = z[0:2].reshape(2, 1) - zp
    innovation[1] = pi_2_pi(innovation[1])
    Pi = np.linalg.inv(P)
    particle.P = np.linalg.inv(Hv.T @ Sfi @ Hv + Pi)  # proposal covariance
    pose += particle.P @ Hv.T @ Sfi @ innovation  # proposal mean
    particle.x = pose[0, 0]
    particle.y = pose[1, 0]
    particle.yaw = pose[2, 0]
    return particle
def update_with_observation(particles, z):
    """Update every particle with every observation in ``z`` (3 x n_obs).

    Landmarks whose stored position is still (numerically) at the origin
    are treated as unseen and initialized; known landmarks contribute to
    the weight, are EKF-updated, and refine the pose proposal.
    """
    for iz in range(len(z[0, :])):
        landmark_id = int(z[2, iz])
        for ip in range(N_PARTICLE):
            if abs(particles[ip].lm[landmark_id, 0]) <= 0.01:
                # new landmark: initialize it for this particle
                particles[ip] = add_new_lm(particles[ip], z[:, iz], Q)
            else:
                # known landmark: weight, EKF update, then pose proposal
                particles[ip].w *= compute_weight(particles[ip], z[:, iz], Q)
                particles[ip] = update_landmark(particles[ip], z[:, iz], Q)
                particles[ip] = proposal_sampling(particles[ip], z[:, iz], Q)
    return particles
def resampling(particles):
    """Low variance re-sampling.

    Resamples the particle set in place when the effective particle
    number drops below ``NTH``.

    Bug fix: the original copied attributes from a *shallow* list copy
    while mutating the same objects, so a source particle could be
    clobbered before it was read, and it assigned numpy views
    (``lm[:, :]``) so duplicated particles shared landmark arrays.  Here
    every particle's state is snapshotted first and the arrays are
    genuinely copied.

    Returns
    -------
    list[Particle]
        The (possibly resampled) particle list.
    """
    particles = normalize_weight(particles)
    pw = np.array([p.w for p in particles])
    n_eff = 1.0 / (pw @ pw.T)  # Effective particle number
    if n_eff < NTH:  # resampling
        w_cum = np.cumsum(pw)
        base = np.cumsum(pw * 0.0 + 1 / N_PARTICLE) - 1 / N_PARTICLE
        resample_id = base + np.random.rand(base.shape[0]) / N_PARTICLE
        indexes = []
        index = 0
        for ip in range(N_PARTICLE):
            while (index < w_cum.shape[0] - 1) \
                    and (resample_id[ip] > w_cum[index]):
                index += 1
            indexes.append(index)
        # Snapshot every particle's state before overwriting anything so
        # later reads cannot see already-overwritten values.
        snapshots = [(p.x, p.y, p.yaw, p.lm, p.lmP) for p in particles]
        for i, idx in enumerate(indexes):
            x, y, yaw, lm, lmP = snapshots[idx]
            particles[i].x = x
            particles[i].y = y
            particles[i].yaw = yaw
            particles[i].lm = lm.copy()  # real copies, not shared views
            particles[i].lmP = lmP.copy()
            particles[i].w = 1.0 / N_PARTICLE
    return particles
def calc_input(time):
    """Return the control input ``[v, yaw_rate]^T`` for the given time.

    The robot stands still for the first 3 seconds, then drives on a
    constant arc.
    """
    if time <= 3.0:  # initial stand-still phase
        velocity, yaw_rate = 0.0, 0.0
    else:
        velocity, yaw_rate = 1.0, 0.1  # [m/s], [rad/s]
    return np.array([velocity, yaw_rate]).reshape(2, 1)
def observation(x_true, xd, u, rfid):
    """Simulate one step: true motion, noisy landmark observations, and
    dead reckoning with a noisy (and biased) control input.

    Returns ``(x_true, z, xd, ud)``; ``z`` is 3 x n_obs with rows
    ``[range, bearing, landmark id]``.
    """
    # calc true state
    x_true = motion_model(x_true, u)
    z = np.zeros((3, 0))
    for i in range(len(rfid[:, 0])):
        dx = rfid[i, 0] - x_true[0, 0]
        dy = rfid[i, 1] - x_true[1, 0]
        d = math.hypot(dx, dy)
        angle = pi_2_pi(math.atan2(dy, dx) - x_true[2, 0])
        if d > MAX_RANGE:
            continue
        noisy_d = d + np.random.randn() * Q_SIM[0, 0] ** 0.5  # add noise
        noisy_angle = angle + np.random.randn() * Q_SIM[1, 1] ** 0.5
        zi = np.array([noisy_d, pi_2_pi(noisy_angle), i]).reshape(3, 1)
        z = np.hstack((z, zi))
    # noisy control input; the yaw rate also carries a constant bias
    ud1 = u[0, 0] + np.random.randn() * R_SIM[0, 0] ** 0.5
    ud2 = u[1, 0] + np.random.randn() * R_SIM[
        1, 1] ** 0.5 + OFFSET_YAW_RATE_NOISE
    ud = np.array([ud1, ud2]).reshape(2, 1)
    xd = motion_model(xd, ud)
    return x_true, z, xd, ud
def motion_model(x, u):
    """Differential-drive motion model: advance state ``x`` one DT step,
    wrapping the resulting yaw into the +/- pi range."""
    yaw = x[2, 0]
    F = np.identity(3)
    B = np.array([[DT * math.cos(yaw), 0.0],
                  [DT * math.sin(yaw), 0.0],
                  [0.0, DT]])
    x = F @ x + B @ u
    x[2, 0] = pi_2_pi(x[2, 0])
    return x
def pi_2_pi(angle):
    """Normalize an angle into the +/- pi range via ``utils.angle.angle_mod``."""
    return angle_mod(angle)
def main():
    """Run the FastSLAM 2.0 simulation and (optionally) animate it."""
    print(__file__ + " start!!")
    time = 0.0
    # RFID positions [x, y]
    rfid = np.array([[10.0, -2.0],
                     [15.0, 10.0],
                     [15.0, 15.0],
                     [10.0, 20.0],
                     [3.0, 15.0],
                     [-5.0, 20.0],
                     [-5.0, 5.0],
                     [-10.0, 15.0]
                     ])
    n_landmark = rfid.shape[0]
    # State Vector [x y yaw v]'
    x_est = np.zeros((STATE_SIZE, 1))  # SLAM estimation
    x_true = np.zeros((STATE_SIZE, 1))  # True state
    x_dr = np.zeros((STATE_SIZE, 1))  # Dead reckoning
    # history
    hist_x_est = x_est
    hist_x_true = x_true
    hist_x_dr = x_dr
    particles = [Particle(n_landmark) for _ in range(N_PARTICLE)]
    while SIM_TIME >= time:
        time += DT
        u = calc_input(time)
        x_true, z, x_dr, ud = observation(x_true, x_dr, u, rfid)
        particles = fast_slam2(particles, ud, z)
        x_est = calc_final_state(particles)
        x_state = x_est[0: STATE_SIZE]
        # store data history
        hist_x_est = np.hstack((hist_x_est, x_state))
        hist_x_dr = np.hstack((hist_x_dr, x_dr))
        hist_x_true = np.hstack((hist_x_true, x_true))
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.plot(rfid[:, 0], rfid[:, 1], "*k")
            # NOTE(review): this loop runs over len(z[:, 0]) == 3 (the row
            # count), not the number of observations z.shape[1]; it can
            # index out of bounds when fewer than 3 landmarks are visible
            # and ignores extra observations -- confirm and fix.
            for iz in range(len(z[:, 0])):
                landmark_id = int(z[2, iz])
                plt.plot([x_est[0][0], rfid[landmark_id, 0]], [
                    x_est[1][0], rfid[landmark_id, 1]], "-k")
            for i in range(N_PARTICLE):
                plt.plot(particles[i].x, particles[i].y, ".r")
                plt.plot(particles[i].lm[:, 0], particles[i].lm[:, 1], "xb")
            plt.plot(hist_x_true[0, :], hist_x_true[1, :], "-b")
            plt.plot(hist_x_dr[0, :], hist_x_dr[1, :], "-k")
            plt.plot(hist_x_est[0, :], hist_x_est[1, :], "-r")
            plt.plot(x_est[0], x_est[1], "xk")
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
# Entry point: run the FastSLAM 2.0 demo as a script.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/SLAM/FastSLAM2/fast_slam2.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 3,740 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
"""Functions for loading graphs.
"""
import logging
import numpy as np
from .edge.edge_odometry import EdgeOdometry
from .graph import Graph
from .pose.se2 import PoseSE2
from .util import upper_triangular_matrix_to_full_matrix
from .vertex import Vertex
_LOGGER = logging.getLogger(__name__)
def load_g2o_se2(infile):
    """Load an :math:`SE(2)` graph from a .g2o file.

    Parameters
    ----------
    infile : str
        The path to the .g2o file

    Returns
    -------
    Graph
        The loaded graph
    """
    edges = []
    vertices = []
    with open(infile) as f:
        for line in f:
            if line.startswith("VERTEX_SE2"):
                fields = line[10:].split()
                values = np.array([float(tok) for tok in fields[1:]],
                                  dtype=float)
                pose = PoseSE2(values[:2], values[2])
                vertices.append(Vertex(int(fields[0]), pose))
            elif line.startswith("EDGE_SE2"):
                fields = line[9:].split()
                values = np.array([float(tok) for tok in fields[2:]],
                                  dtype=float)
                vertex_ids = [int(fields[0]), int(fields[1])]
                estimate = PoseSE2(values[:2], values[2])
                information = upper_triangular_matrix_to_full_matrix(
                    values[3:], 3)
                edges.append(EdgeOdometry(vertex_ids, information, estimate))
            elif line.strip():
                # Non-empty line with an unrecognized tag.
                _LOGGER.warning("Line not supported -- '%s'", line.rstrip())
    return Graph(edges, vertices)
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/load.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 382 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
"""Graph SLAM solver in Python.
"""
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/__init__.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 31 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
"""Utility functions used throughout the package.
"""
import numpy as np
TWO_PI = 2 * np.pi
def neg_pi_to_pi(angle):
    r"""Normalize ``angle`` to be in :math:`[-\pi, \pi)`.

    Parameters
    ----------
    angle : float
        An angle (in radians)

    Returns
    -------
    float
        The angle normalized to :math:`[-\pi, \pi)`
    """
    full_turn = 2.0 * np.pi
    shifted = (angle + np.pi) % full_turn
    return shifted - np.pi
def solve_for_edge_dimensionality(n):
    r"""Solve for the dimensionality of an edge.

    In a .g2o file an edge stores ``<estimate> <information matrix>`` with
    only the upper-triangular portion of the matrix, so the total element
    count is :math:`d + d(d + 1)/2 = n`.  This inverts that quadratic.

    Returns
    -------
    int
        The dimensionality of the edge
    """
    # Positive root of d^2/2 + 3d/2 - n = 0.
    root = np.sqrt(2 * n + 2.25) - 1.5
    return int(round(root))
def upper_triangular_matrix_to_full_matrix(arr, n):
    """Expand a flattened upper-triangular matrix into a full symmetric matrix.

    Parameters
    ----------
    arr : np.ndarray
        The upper triangular portion of the matrix (row-major order)
    n : int
        The size of the matrix

    Returns
    -------
    mat : np.ndarray
        The full ``n x n`` symmetric matrix
    """
    mat = np.zeros((n, n), dtype=float)
    mat[np.triu_indices(n, 0)] = arr
    # Mirror the strict upper triangle into the lower triangle.
    lower = np.tril_indices(n, -1)
    mat[lower] = mat.T[lower]
    return mat
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/util.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 405 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
r"""A ``Graph`` class that stores the edges and vertices required for Graph SLAM.
"""
import warnings
from collections import defaultdict
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import SparseEfficiencyWarning, lil_matrix
from scipy.sparse.linalg import spsolve
warnings.simplefilter("ignore", SparseEfficiencyWarning)
warnings.filterwarnings("ignore", category=SparseEfficiencyWarning)
# pylint: disable=too-few-public-methods
class _Chi2GradientHessian:
    r"""Accumulator for the total :math:`\chi^2` error, gradient, and Hessian.

    Parameters
    ----------
    dim : int
        The compact dimensionality of the poses

    Attributes
    ----------
    chi2 : float
        The accumulated :math:`\chi^2` error
    dim : int
        The compact dimensionality of the poses
    gradient : defaultdict
        Per-vertex contributions to the gradient vector
    hessian : defaultdict
        Per-vertex-pair contributions to the Hessian matrix
    """

    def __init__(self, dim):
        self.chi2 = 0.
        self.dim = dim
        self.gradient = defaultdict(lambda: np.zeros(dim))
        self.hessian = defaultdict(lambda: np.zeros((dim, dim)))

    @staticmethod
    def update(chi2_grad_hess, incoming):
        r"""Fold one edge's contribution into the accumulator.

        Parameters
        ----------
        chi2_grad_hess : _Chi2GradientHessian
            The accumulator that will be updated
        incoming : tuple
            ``(chi2, gradient_dict, hessian_dict)`` from an edge
        """
        chi2_contrib, grad_contribs, hess_contribs = \
            incoming[0], incoming[1], incoming[2]
        chi2_grad_hess.chi2 += chi2_contrib
        for idx, contrib in grad_contribs.items():
            chi2_grad_hess.gradient[idx] += contrib
        # Keep only upper-triangular (idx1 <= idx2) blocks; transpose
        # contributions that arrive in the lower triangle.
        for (idx1, idx2), contrib in hess_contribs.items():
            if idx1 <= idx2:
                chi2_grad_hess.hessian[idx1, idx2] += contrib
            else:
                chi2_grad_hess.hessian[idx2, idx1] += np.transpose(contrib)
        return chi2_grad_hess
class Graph(object):
    r"""A graph that will be optimized via Graph SLAM.

    Parameters
    ----------
    edges : list[graphslam.edge.edge_odometry.EdgeOdometry]
        A list of the edges (i.e., constraints) in the graph
    vertices : list[graphslam.vertex.Vertex]
        A list of the vertices in the graph

    Attributes
    ----------
    _chi2 : float, None
        The current :math:`\chi^2` error, or ``None`` if it has not yet been computed
    _edges : list[graphslam.edge.edge_odometry.EdgeOdometry]
        A list of the edges (i.e., constraints) in the graph
    _gradient : numpy.ndarray, None
        The gradient :math:`\mathbf{b}` of the :math:`\chi^2` error, or ``None`` if it has not yet been computed
    _hessian : scipy.sparse.lil_matrix, None
        The Hessian matrix :math:`H`, or ``None`` if it has not yet been computed
    _vertices : list[graphslam.vertex.Vertex]
        A list of the vertices in the graph
    """

    def __init__(self, edges, vertices):
        # The vertices and edges lists
        self._edges = edges
        self._vertices = vertices
        # The chi^2 error, gradient, and Hessian (computed lazily)
        self._chi2 = None
        self._gradient = None
        self._hessian = None
        self._link_edges()

    def _link_edges(self):
        """Fill in the ``vertices`` attributes for the graph's edges.

        Also assigns each vertex its ``index`` (its position in the
        ``vertices`` list), which the solver uses to locate the vertex's
        block in the gradient vector and Hessian matrix.
        """
        index_id_dict = {i: v.id for i, v in enumerate(self._vertices)}
        id_index_dict = {v_id: v_index for v_index, v_id in
                         index_id_dict.items()}
        # Fill in the vertices' `index` attribute
        for v in self._vertices:
            v.index = id_index_dict[v.id]
        # Resolve each edge's vertex IDs into actual Vertex objects.
        for e in self._edges:
            e.vertices = [self._vertices[id_index_dict[v_id]] for v_id in
                          e.vertex_ids]

    def calc_chi2(self):
        r"""Calculate the :math:`\chi^2` error for the ``Graph``.

        Returns
        -------
        float
            The :math:`\chi^2` error
        """
        self._chi2 = sum((e.calc_chi2() for e in self._edges))
        return self._chi2

    def _calc_chi2_gradient_hessian(self):
        r"""Calculate the :math:`\chi^2` error, the gradient :math:`\mathbf{b}`, and the Hessian :math:`H`.
        """
        n = len(self._vertices)
        dim = len(self._vertices[0].pose.to_compact())
        # Aggregate every edge's contribution into a single accumulator.
        chi2_gradient_hessian = reduce(_Chi2GradientHessian.update,
                                       (e.calc_chi2_gradient_hessian()
                                        for e in self._edges),
                                       _Chi2GradientHessian(dim))
        self._chi2 = chi2_gradient_hessian.chi2
        # Fill in the gradient vector
        self._gradient = np.zeros(n * dim, dtype=float)
        for idx, cont in chi2_gradient_hessian.gradient.items():
            self._gradient[idx * dim: (idx + 1) * dim] += cont
        # Fill in the Hessian matrix; the accumulator stores only the
        # upper-triangular blocks, so mirror them into the lower triangle.
        self._hessian = lil_matrix((n * dim, n * dim), dtype=float)
        for (row_idx, col_idx), cont in chi2_gradient_hessian.hessian.items():
            x_start = row_idx * dim
            x_end = (row_idx + 1) * dim
            y_start = col_idx * dim
            y_end = (col_idx + 1) * dim
            self._hessian[x_start:x_end, y_start:y_end] = cont
            if row_idx != col_idx:
                x_start = col_idx * dim
                x_end = (col_idx + 1) * dim
                y_start = row_idx * dim
                y_end = (row_idx + 1) * dim
                self._hessian[x_start:x_end, y_start:y_end] = \
                    np.transpose(cont)

    def optimize(self, tol=1e-4, max_iter=20, fix_first_pose=True):
        r"""Optimize the :math:`\chi^2` error for the ``Graph`` (Gauss-Newton).

        Parameters
        ----------
        tol : float
            If the relative decrease in the :math:`\chi^2` error between iterations is less than ``tol``, we will stop
        max_iter : int
            The maximum number of iterations
        fix_first_pose : bool
            If ``True``, we will fix the first pose
        """
        n = len(self._vertices)
        dim = len(self._vertices[0].pose.to_compact())
        # Previous iteration's chi^2 error
        chi2_prev = -1.
        # For displaying the optimization progress
        print("\nIteration chi^2 rel. change")
        print("--------- ----- -----------")
        for i in range(max_iter):
            self._calc_chi2_gradient_hessian()
            # Check for convergence (from the previous iteration); this avoids having to calculate chi^2 twice
            if i > 0:
                rel_diff = (chi2_prev - self._chi2) / (
                        chi2_prev + np.finfo(float).eps)
                print(
                    "{:9d} {:20.4f} {:18.6f}".format(i, self._chi2, -rel_diff))
                if self._chi2 < chi2_prev and rel_diff < tol:
                    return
            else:
                print("{:9d} {:20.4f}".format(i, self._chi2))
            # Update the previous iteration's chi^2 error
            chi2_prev = self._chi2
            # Hold the first pose fixed: zero its rows/columns, pin its
            # diagonal block to the identity, and zero its gradient block.
            if fix_first_pose:
                self._hessian[:dim, :] = 0.
                self._hessian[:, :dim] = 0.
                self._hessian[:dim, :dim] += np.eye(dim)
                self._gradient[:dim] = 0.
            # Solve for the updates
            dx = spsolve(self._hessian, -self._gradient)
            # Apply the updates
            for v, dx_i in zip(self._vertices, np.split(dx, n)):
                v.pose += dx_i
        # If we reached the maximum number of iterations, print out the final iteration's results
        self.calc_chi2()
        rel_diff = (chi2_prev - self._chi2) / (chi2_prev + np.finfo(float).eps)
        print("{:9d} {:20.4f} {:18.6f}".format(
            max_iter, self._chi2, -rel_diff))

    def to_g2o(self, outfile):
        """Save the graph in .g2o format.

        Parameters
        ----------
        outfile : str
            The path where the graph will be saved
        """
        with open(outfile, 'w') as f:
            for v in self._vertices:
                f.write(v.to_g2o())
            for e in self._edges:
                f.write(e.to_g2o())

    def plot(self, vertex_color='r', vertex_marker='o', vertex_markersize=3,
             edge_color='b', title=None):
        """Plot the graph.

        Parameters
        ----------
        vertex_color : str
            The color that will be used to plot the vertices
        vertex_marker : str
            The marker that will be used to plot the vertices
        vertex_markersize : int
            The size of the plotted vertices
        edge_color : str
            The color that will be used to plot the edges
        title : str, None
            The title that will be used for the plot
        """
        plt.figure()
        for e in self._edges:
            e.plot(edge_color)
        for v in self._vertices:
            v.plot(vertex_color, vertex_marker, vertex_markersize)
        if title:
            plt.title(title)
        plt.show()
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/graph.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,225 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
"""A ``Vertex`` class.
"""
import matplotlib.pyplot as plt
# pylint: disable=too-few-public-methods
class Vertex:
    """A class for representing a vertex in Graph SLAM.

    Parameters
    ----------
    vertex_id : int
        The vertex's unique ID
    pose : graphslam.pose.se2.PoseSE2
        The pose associated with the vertex
    vertex_index : int, None
        The vertex's index in the graph's ``vertices`` list

    Attributes
    ----------
    id : int
        The vertex's unique ID
    index : int, None
        The vertex's index in the graph's ``vertices`` list
    pose : graphslam.pose.se2.PoseSE2
        The pose associated with the vertex
    """

    def __init__(self, vertex_id, pose, vertex_index=None):
        self.id = vertex_id
        self.pose = pose
        self.index = vertex_index

    def to_g2o(self):
        """Export the vertex to the .g2o format.

        Returns
        -------
        str
            The vertex in .g2o format
        """
        x, y, yaw = self.pose[0], self.pose[1], self.pose[2]
        return "VERTEX_SE2 {} {} {} {}\n".format(self.id, x, y, yaw)

    def plot(self, color='r', marker='o', markersize=3):
        """Plot the vertex.

        Parameters
        ----------
        color : str
            The color that will be used to plot the vertex
        marker : str
            The marker that will be used to plot the vertex
        markersize : int
            The size of the plotted vertex
        """
        px, py = self.pose.position
        plt.plot(px, py, color=color, marker=marker, markersize=markersize)
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/vertex.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 404 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/pose/__init__.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 22 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
r"""Representation of a pose in :math:`SE(2)`.
"""
import math
import numpy as np
from ..util import neg_pi_to_pi
class PoseSE2(np.ndarray):
    r"""A representation of a pose in :math:`SE(2)`.

    Stored as a 3-element ndarray ``[x, y, theta]`` with the orientation
    normalized to :math:`[-\pi, \pi)` on construction.

    Parameters
    ----------
    position : np.ndarray, list
        The position in :math:`\mathbb{R}^2`
    orientation : float
        The angle of the pose (in radians)
    """

    def __new__(cls, position, orientation):
        # View-cast a plain float array onto this subclass.
        obj = np.array([position[0], position[1], neg_pi_to_pi(orientation)],
                       dtype=float).view(cls)
        return obj

    # pylint: disable=arguments-differ
    def copy(self):
        """Return a copy of the pose.

        Returns
        -------
        PoseSE2
            A copy of the pose
        """
        return PoseSE2(self[:2], self[2])

    def to_array(self):
        """Return the pose as a numpy array.

        Returns
        -------
        np.ndarray
            The pose as a numpy array
        """
        return np.array(self)

    def to_compact(self):
        """Return the pose as a compact numpy array.

        For :math:`SE(2)` the compact representation coincides with the
        full ``[x, y, theta]`` array.

        Returns
        -------
        np.ndarray
            The pose as a compact numpy array
        """
        return np.array(self)

    def to_matrix(self):
        """Return the pose as an :math:`SE(2)` matrix.

        Returns
        -------
        np.ndarray
            The pose as a 3x3 homogeneous-transform matrix
        """
        return np.array([[np.cos(self[2]), -np.sin(self[2]), self[0]],
                         [np.sin(self[2]), np.cos(self[2]), self[1]],
                         [0., 0., 1.]], dtype=float)

    @classmethod
    def from_matrix(cls, matrix):
        """Build a `PoseSE2` from an :math:`SE(2)` matrix.

        Parameters
        ----------
        matrix : np.ndarray
            The :math:`SE(2)` matrix that will be converted to a `PoseSE2` instance

        Returns
        -------
        PoseSE2
            The matrix as a `PoseSE2` object
        """
        return cls([matrix[0, 2], matrix[1, 2]],
                   math.atan2(matrix[1, 0], matrix[0, 0]))

    # ======================================================================= #
    #                                                                         #
    #                                Properties                               #
    #                                                                         #
    # ======================================================================= #
    @property
    def position(self):
        """Return the pose's position.

        Returns
        -------
        np.ndarray
            The position portion of the pose
        """
        return np.array(self[:2])

    @property
    def orientation(self):
        """Return the pose's orientation.

        Returns
        -------
        float
            The angle of the pose
        """
        return self[2]

    @property
    def inverse(self):
        """Return the pose's inverse.

        The inverse position is :math:`-R(\\theta)^T p` and the inverse
        angle is :math:`-\\theta`.

        Returns
        -------
        PoseSE2
            The pose's inverse
        """
        return PoseSE2([-self[0] * np.cos(self[2]) - self[1] * np.sin(self[2]),
                        self[0] * np.sin(self[2]) - self[1] * np.cos(self[2])],
                       -self[2])

    # ======================================================================= #
    #                                                                         #
    #                              Magic Methods                              #
    #                                                                         #
    # ======================================================================= #
    def __add__(self, other):
        r"""Add poses (i.e., pose composition): :math:`p_1 \oplus p_2`.

        The other pose is expressed in this pose's frame and composed
        onto it (this is NOT elementwise vector addition).

        Parameters
        ----------
        other : PoseSE2
            The other pose

        Returns
        -------
        PoseSE2
            The result of pose composition
        """
        return PoseSE2(
            [self[0] + other[0] * np.cos(self[2]) - other[1] * np.sin(self[2]),
             self[1] + other[0] * np.sin(self[2]) + other[1] * np.cos(self[2])
             ], neg_pi_to_pi(self[2] + other[2]))

    def __sub__(self, other):
        r"""Subtract poses (i.e., inverse pose composition): :math:`p_1 \ominus p_2`.

        Returns this pose expressed in ``other``'s frame (this is NOT
        elementwise vector subtraction).

        Parameters
        ----------
        other : PoseSE2
            The other pose

        Returns
        -------
        PoseSE2
            The result of inverse pose composition
        """
        return PoseSE2([(self[0] - other[0]) * np.cos(other[2]) + (
                self[1] - other[1]) * np.sin(other[2]),
                        (other[0] - self[0]) * np.sin(other[2]) + (
                                self[1] - other[1]) * np.cos(other[2])],
                       neg_pi_to_pi(self[2] - other[2]))
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/pose/se2.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,076 |
```python
#
# This file originated from the `graphslam` package:
#
# path_to_url
r"""A class for odometry edges.
"""
import numpy as np
import matplotlib.pyplot as plt
#: The difference that will be used for numerical differentiation
EPSILON = 1e-6
class EdgeOdometry:
    r"""A class for representing odometry edges in Graph SLAM.

    Parameters
    ----------
    vertex_ids : list[int]
        The IDs of the two vertices constrained by the edge
    information : np.ndarray
        The information matrix :math:`\Omega_j` associated with the edge
    estimate : graphslam.pose.se2.PoseSE2
        The expected measurement :math:`\mathbf{z}_j`
    vertices : list[graphslam.vertex.Vertex], None
        The resolved vertex objects (filled in by ``Graph._link_edges``)

    Attributes
    ----------
    vertex_ids : list[int]
        The IDs of the two vertices constrained by the edge
    vertices : list[graphslam.vertex.Vertex]
        A list of the vertices constrained by the edge
    information : np.ndarray
        The information matrix :math:`\Omega_j` associated with the edge
    estimate : PoseSE2
        The expected measurement :math:`\mathbf{z}_j`
    """

    def __init__(self, vertex_ids, information, estimate, vertices=None):
        self.vertex_ids = vertex_ids
        self.information = information
        self.estimate = estimate
        self.vertices = vertices

    def calc_error(self):
        r"""Calculate the error for the edge: :math:`\mathbf{e}_j \in \mathbb{R}^\bullet`.

        .. math::

            \mathbf{e}_j = \mathbf{z}_j - (p_2 \ominus p_1)

        Returns
        -------
        np.ndarray
            The error for the edge
        """
        # PoseSE2 arithmetic: "-" is inverse pose composition, not
        # elementwise subtraction.
        return (self.estimate - (self.vertices[1].pose - self.vertices[0].pose)).to_compact()

    def calc_chi2(self):
        r"""Calculate the :math:`\chi^2` error for the edge.

        .. math::

            \mathbf{e}_j^T \Omega_j \mathbf{e}_j

        Returns
        -------
        float
            The :math:`\chi^2` error for the edge
        """
        err = self.calc_error()
        return np.dot(np.dot(np.transpose(err), self.information), err)

    def calc_chi2_gradient_hessian(self):
        r"""Calculate the edge's contributions to the graph's :math:`\chi^2` error, gradient (:math:`\mathbf{b}`), and Hessian (:math:`H`).

        Returns
        -------
        float
            The :math:`\chi^2` error for the edge
        dict
            The edge's contribution(s) to the gradient, keyed by vertex index
        dict
            The edge's contribution(s) to the Hessian, keyed by (index, index)
        """
        chi2 = self.calc_chi2()
        err = self.calc_error()
        jacobians = self.calc_jacobians()
        # Gradient blocks: e^T Omega J_i per vertex; Hessian blocks:
        # J_i^T Omega J_j for the upper-triangular pairs (j >= i).
        return chi2, {v.index: np.dot(np.dot(np.transpose(err), self.information), jacobian) for v, jacobian in zip(self.vertices, jacobians)}, {(self.vertices[i].index, self.vertices[j].index): np.dot(np.dot(np.transpose(jacobians[i]), self.information), jacobians[j]) for i in range(len(jacobians)) for j in range(i, len(jacobians))}

    def calc_jacobians(self):
        r"""Calculate the Jacobian of the edge's error with respect to each constrained pose.

        .. math::

            \frac{\partial}{\partial \Delta \mathbf{x}^k} \left[ \mathbf{e}_j(\mathbf{x}^k \boxplus \Delta \mathbf{x}^k) \right]

        Returns
        -------
        list[np.ndarray]
            The Jacobian matrices for the edge with respect to each constrained pose
        """
        err = self.calc_error()
        # The dimensionality of the compact pose representation
        dim = len(self.vertices[0].pose.to_compact())
        return [self._calc_jacobian(err, dim, i) for i in range(len(self.vertices))]

    def _calc_jacobian(self, err, dim, vertex_index):
        r"""Calculate the Jacobian of the edge with respect to the specified vertex's pose.

        Uses forward numerical differentiation on the pose manifold: each
        coordinate is perturbed via the :math:`\boxplus` operator
        (``PoseSE2.__add__``) by EPSILON, and the pose is restored afterwards.

        Parameters
        ----------
        err : np.ndarray
            The current error for the edge (see :meth:`EdgeOdometry.calc_error`)
        dim : int
            The dimensionality of the compact pose representation
        vertex_index : int
            The index of the vertex (pose) for which we are computing the Jacobian

        Returns
        -------
        np.ndarray
            The Jacobian of the edge with respect to the specified vertex's pose
        """
        jacobian = np.zeros(err.shape + (dim,))
        p0 = self.vertices[vertex_index].pose.copy()
        for d in range(dim):
            # update the pose (perturb only coordinate d)
            delta_pose = np.zeros(dim)
            delta_pose[d] = EPSILON
            self.vertices[vertex_index].pose += delta_pose
            # compute the numerical derivative
            jacobian[:, d] = (self.calc_error() - err) / EPSILON
            # restore the pose
            self.vertices[vertex_index].pose = p0.copy()
        return jacobian

    def to_g2o(self):
        """Export the edge to the .g2o format.

        Returns
        -------
        str
            The edge in .g2o format
        """
        return "EDGE_SE2 {} {} {} {} {} ".format(self.vertex_ids[0], self.vertex_ids[1], self.estimate[0], self.estimate[1], self.estimate[2]) + " ".join([str(x) for x in self.information[np.triu_indices(3, 0)]]) + "\n"

    def plot(self, color='b'):
        """Plot the edge.

        Parameters
        ----------
        color : str
            The color that will be used to plot the edge
        """
        xy = np.array([v.pose.position for v in self.vertices])
        plt.plot(xy[:, 0], xy[:, 1], color=color)
``` | /content/code_sandbox/SLAM/GraphBasedSLAM/graphslam/edge/edge_odometry.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,305 |
```python
"""
FastSLAM 1.0 example
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.angle import angle_mod
# Fast SLAM covariance
Q = np.diag([3.0, np.deg2rad(10.0)]) ** 2
R = np.diag([1.0, np.deg2rad(20.0)]) ** 2
# Simulation parameter
Q_SIM = np.diag([0.3, np.deg2rad(2.0)]) ** 2
R_SIM = np.diag([0.5, np.deg2rad(10.0)]) ** 2
OFFSET_YAW_RATE_NOISE = 0.01
DT = 0.1 # time tick [s]
SIM_TIME = 50.0 # simulation time [s]
MAX_RANGE = 20.0 # maximum observation range
M_DIST_TH = 2.0 # Threshold of Mahalanobis distance for data association.
STATE_SIZE = 3 # State size [x,y,yaw]
LM_SIZE = 2 # LM state size [x,y]
N_PARTICLE = 100 # number of particle
NTH = N_PARTICLE / 1.5 # Number of particle for re-sampling
show_animation = True
class Particle:
    """One FastSLAM particle: a robot pose hypothesis plus per-landmark EKFs."""

    def __init__(self, n_landmark):
        # start every particle with a uniform weight
        self.w = 1.0 / N_PARTICLE
        # robot pose hypothesis carried by this particle
        self.x = 0.0
        self.y = 0.0
        self.yaw = 0.0
        # landmark x-y position estimates, one row per landmark
        self.lm = np.zeros((n_landmark, LM_SIZE))
        # stacked 2x2 landmark position covariance blocks
        self.lmP = np.zeros((n_landmark * LM_SIZE, LM_SIZE))
def fast_slam1(particles, u, z):
    """One FastSLAM 1.0 cycle: predict, observation update, resample."""
    predicted = predict_particles(particles, u)
    updated = update_with_observation(predicted, z)
    return resampling(updated)
def normalize_weight(particles):
    """Normalize particle weights so they sum to one.

    If the total weight has collapsed to zero, reset every particle to the
    uniform weight instead.
    """
    total = sum(p.w for p in particles)
    try:
        for particle in particles:
            particle.w /= total
    except ZeroDivisionError:
        for particle in particles:
            particle.w = 1.0 / N_PARTICLE
    return particles
def calc_final_state(particles):
    """Weighted-mean state estimate [x, y, yaw]^T over all particles."""
    particles = normalize_weight(particles)
    x_est = np.zeros((STATE_SIZE, 1))
    for particle in particles:
        x_est[0, 0] += particle.w * particle.x
        x_est[1, 0] += particle.w * particle.y
        x_est[2, 0] += particle.w * particle.yaw
    # keep the averaged heading inside the principal interval
    x_est[2, 0] = pi_2_pi(x_est[2, 0])
    return x_est
def predict_particles(particles, u):
    """Propagate every particle through the motion model with noisy input."""
    for particle in particles:
        state = np.array([[particle.x], [particle.y], [particle.yaw]])
        # perturb the commanded input with motion noise R (sampling step)
        noisy_u = u + (np.random.randn(1, 2) @ R ** 0.5).T
        state = motion_model(state, noisy_u)
        particle.x = state[0, 0]
        particle.y = state[1, 0]
        particle.yaw = state[2, 0]
    return particles
def add_new_landmark(particle, z, Q_cov):
    """Initialize a landmark estimate from its first observation.

    z is [range, bearing, landmark_id]. The landmark mean is placed at the
    observed position; its covariance is obtained by mapping the measurement
    covariance Q_cov through the inverse of the observation Jacobian Gz.
    """
    r = z[0]
    b = z[1]
    lm_id = int(z[2])
    s = math.sin(pi_2_pi(particle.yaw + b))
    c = math.cos(pi_2_pi(particle.yaw + b))
    # landmark position implied by the range/bearing observation
    particle.lm[lm_id, 0] = particle.x + r * c
    particle.lm[lm_id, 1] = particle.y + r * s
    # covariance
    dx = r * c
    dy = r * s
    d2 = dx**2 + dy**2
    d = math.sqrt(d2)
    # observation Jacobian w.r.t. the landmark position
    Gz = np.array([[dx / d, dy / d],
                   [-dy / d2, dx / d2]])
    # landmark covariance = Gz^-1 Q_cov Gz^-T
    particle.lmP[2 * lm_id:2 * lm_id + 2] = np.linalg.inv(
        Gz) @ Q_cov @ np.linalg.inv(Gz.T)
    return particle
def compute_jacobians(particle, xf, Pf, Q_cov):
    """Predicted measurement, Jacobians and innovation covariance for a landmark.

    Returns (zp, Hv, Hf, Sf): predicted [range, bearing], Jacobian w.r.t. the
    vehicle state, Jacobian w.r.t. the landmark, innovation covariance.
    """
    dx = xf[0, 0] - particle.x
    dy = xf[1, 0] - particle.y
    dist_sq = dx ** 2 + dy ** 2
    dist = math.sqrt(dist_sq)
    zp = np.array(
        [dist, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]).reshape(2, 1)
    # Jacobian w.r.t. the vehicle state [x, y, yaw]
    Hv = np.array([[-dx / dist, -dy / dist, 0.0],
                   [dy / dist_sq, -dx / dist_sq, -1.0]])
    # Jacobian w.r.t. the landmark position
    Hf = np.array([[dx / dist, dy / dist],
                   [-dy / dist_sq, dx / dist_sq]])
    Sf = Hf @ Pf @ Hf.T + Q_cov
    return zp, Hv, Hf, Sf
def update_kf_with_cholesky(xf, Pf, v, Q_cov, Hf):
    """Kalman update of a landmark using a Cholesky-factored innovation covariance."""
    PHt = Pf @ Hf.T
    S = Hf @ PHt + Q_cov
    S = (S + S.T) * 0.5  # enforce symmetry before factoring
    chol = np.linalg.cholesky(S).T
    chol_inv = np.linalg.inv(chol)
    W1 = PHt @ chol_inv
    gain = W1 @ chol_inv.T
    x_new = xf + gain @ v
    P_new = Pf - W1 @ W1.T
    return x_new, P_new
def update_landmark(particle, z, Q_cov):
    """EKF-update the landmark referenced by observation z = [range, bearing, id].

    Bug fix: the Jacobians/innovation covariance were computed with the
    module-level ``Q`` instead of the ``Q_cov`` argument, silently ignoring
    the parameter. All current callers pass ``Q``, so behavior is unchanged
    for them, but the parameter is now honored.
    """
    lm_id = int(z[2])
    xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
    Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2, :])
    # use the Q_cov parameter (previously the global Q was used here)
    zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q_cov)
    dz = z[0:2].reshape(2, 1) - zp
    dz[1, 0] = pi_2_pi(dz[1, 0])  # wrap the bearing innovation
    xf, Pf = update_kf_with_cholesky(xf, Pf, dz, Q_cov, Hf)
    particle.lm[lm_id, :] = xf.T
    particle.lmP[2 * lm_id:2 * lm_id + 2, :] = Pf
    return particle
def compute_weight(particle, z, Q_cov):
    """Gaussian likelihood weight of observation z under this particle's map.

    Bug fix: ``np.linalg.linalg.LinAlgError`` is a private alias that was
    removed in NumPy 2.0; the public ``np.linalg.LinAlgError`` is used instead.
    """
    lm_id = int(z[2])
    xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
    Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2])
    zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q_cov)
    dx = z[0:2].reshape(2, 1) - zp
    dx[1, 0] = pi_2_pi(dx[1, 0])  # wrap the bearing innovation
    try:
        invS = np.linalg.inv(Sf)
    except np.linalg.LinAlgError:
        print("singular")
        return 1.0
    # Gaussian likelihood of the innovation dx with covariance Sf
    num = np.exp(-0.5 * (dx.T @ invS @ dx))[0, 0]
    den = 2.0 * math.pi * math.sqrt(np.linalg.det(Sf))
    w = num / den
    return w
def update_with_observation(particles, z):
    """Update all particles with the observation set z (one column per sighting)."""
    for iz in range(z.shape[1]):
        landmark_id = int(z[2, iz])
        for particle in particles:
            if abs(particle.lm[landmark_id, 0]) <= 0.01:
                # new landmark: first sighting, initialize its EKF
                particle = add_new_landmark(particle, z[:, iz], Q)
            else:
                # known landmark: reweight, then EKF-update
                particle.w *= compute_weight(particle, z[:, iz], Q)
                particle = update_landmark(particle, z[:, iz], Q)
    return particles
def resampling(particles):
    """Low variance re-sampling.

    Bug fix: the previous implementation took a shallow copy of the particle
    list (``particles[:]``) and then overwrote particles in place, so a source
    particle could be clobbered before a later slot copied from it, and the
    ``lm``/``lmP`` slice assignments shared numpy storage between duplicated
    particles. The selected states are now snapshotted (with array copies)
    before any particle is overwritten.
    """
    particles = normalize_weight(particles)
    pw = np.array([p.w for p in particles])
    n_eff = 1.0 / (pw @ pw.T)  # Effective particle number
    if n_eff < NTH:  # resampling
        w_cum = np.cumsum(pw)
        base = np.cumsum(pw * 0.0 + 1 / N_PARTICLE) - 1 / N_PARTICLE
        resample_id = base + np.random.rand(base.shape[0]) / N_PARTICLE
        indexes = []
        index = 0
        for ip in range(N_PARTICLE):
            while (index < w_cum.shape[0] - 1) \
                    and (resample_id[ip] > w_cum[index]):
                index += 1
            indexes.append(index)
        # snapshot the selected source states BEFORE mutating any particle so
        # duplicated particles do not alias each other's arrays
        snapshots = [(particles[i].x, particles[i].y, particles[i].yaw,
                      particles[i].lm.copy(), particles[i].lmP.copy())
                     for i in indexes]
        for particle, (x, y, yaw, lm, lmP) in zip(particles, snapshots):
            particle.x = x
            particle.y = y
            particle.yaw = yaw
            particle.lm = lm
            particle.lmP = lmP
            particle.w = 1.0 / N_PARTICLE
    return particles
def calc_input(time):
    """Open-loop input [v, yaw_rate]^T: stand still for 3 s, then arc forward."""
    if time <= 3.0:  # wait at first
        v, yaw_rate = 0.0, 0.0
    else:
        v, yaw_rate = 1.0, 0.1  # [m/s], [rad/s]
    return np.array([v, yaw_rate]).reshape(2, 1)
def observation(x_true, xd, u, rfid):
    """Simulate one step: advance the truth, sense landmarks, corrupt the input.

    Returns (x_true, z, xd, ud) where z stacks noisy [range, bearing, id]
    columns for landmarks within MAX_RANGE, and xd is dead reckoning driven
    by the noisy input ud.
    """
    # calc true state
    x_true = motion_model(x_true, u)
    z = np.zeros((3, 0))
    for i in range(rfid.shape[0]):
        dx = rfid[i, 0] - x_true[0, 0]
        dy = rfid[i, 1] - x_true[1, 0]
        d = math.hypot(dx, dy)
        angle = pi_2_pi(math.atan2(dy, dx) - x_true[2, 0])
        if d <= MAX_RANGE:
            # add observation noise drawn from Q_SIM
            dn = d + np.random.randn() * Q_SIM[0, 0] ** 0.5
            angle_with_noize = angle + np.random.randn() * Q_SIM[
                1, 1] ** 0.5
            zi = np.array([dn, pi_2_pi(angle_with_noize), i]).reshape(3, 1)
            z = np.hstack((z, zi))
    # add noise (and a yaw-rate bias) to the input for dead reckoning
    ud1 = u[0, 0] + np.random.randn() * R_SIM[0, 0] ** 0.5
    ud2 = u[1, 0] + np.random.randn() * R_SIM[
        1, 1] ** 0.5 + OFFSET_YAW_RATE_NOISE
    ud = np.array([ud1, ud2]).reshape(2, 1)
    xd = motion_model(xd, ud)
    return x_true, z, xd, ud
def motion_model(x, u):
    """Advance x = [x, y, yaw]^T by input u = [v, yaw_rate]^T over one DT."""
    F = np.eye(3)
    B = np.array([[DT * math.cos(x[2, 0]), 0.0],
                  [DT * math.sin(x[2, 0]), 0.0],
                  [0.0, DT]])
    x = F @ x + B @ u
    x[2, 0] = pi_2_pi(x[2, 0])  # keep the heading wrapped
    return x
def pi_2_pi(angle):
    """Normalize an angle into the principal interval (delegates to utils.angle.angle_mod)."""
    return angle_mod(angle)
def main():
    """Run the FastSLAM 1.0 simulation loop and (optionally) animate it."""
    print(__file__ + " start!!")

    time = 0.0

    # RFID positions [x, y]
    rfid = np.array([[10.0, -2.0],
                     [15.0, 10.0],
                     [15.0, 15.0],
                     [10.0, 20.0],
                     [3.0, 15.0],
                     [-5.0, 20.0],
                     [-5.0, 5.0],
                     [-10.0, 15.0]
                     ])
    n_landmark = rfid.shape[0]

    # State Vector [x y yaw v]'
    x_est = np.zeros((STATE_SIZE, 1))  # SLAM estimation
    x_true = np.zeros((STATE_SIZE, 1))  # True state
    x_dr = np.zeros((STATE_SIZE, 1))  # Dead reckoning

    # history
    hist_x_est = x_est
    hist_x_true = x_true
    hist_x_dr = x_dr

    particles = [Particle(n_landmark) for _ in range(N_PARTICLE)]

    while SIM_TIME >= time:
        time += DT
        u = calc_input(time)

        x_true, z, x_dr, ud = observation(x_true, x_dr, u, rfid)

        particles = fast_slam1(particles, ud, z)

        x_est = calc_final_state(particles)

        x_state = x_est[0: STATE_SIZE]

        # store data history
        hist_x_est = np.hstack((hist_x_est, x_state))
        hist_x_dr = np.hstack((hist_x_dr, x_dr))
        hist_x_true = np.hstack((hist_x_true, x_true))

        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event', lambda event:
                [exit(0) if event.key == 'escape' else None])
            plt.plot(rfid[:, 0], rfid[:, 1], "*k")

            # particle poses (red) and their landmark estimates (blue)
            for i in range(N_PARTICLE):
                plt.plot(particles[i].x, particles[i].y, ".r")
                plt.plot(particles[i].lm[:, 0], particles[i].lm[:, 1], "xb")

            plt.plot(hist_x_true[0, :], hist_x_true[1, :], "-b")
            plt.plot(hist_x_dr[0, :], hist_x_dr[1, :], "-k")
            plt.plot(hist_x_est[0, :], hist_x_est[1, :], "-r")
            plt.plot(x_est[0], x_est[1], "xk")
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)
# Run the simulation when executed as a script.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/SLAM/FastSLAM1/fast_slam1.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 3,405 |
```python
"""
Iterative Closest Point (ICP) SLAM example
author: Atsushi Sakai (@Atsushi_twi), Gktu Karakal, Shamil Gemuev
"""
import math
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
# ICP parameters
EPS = 0.0001
MAX_ITER = 100
show_animation = True
def icp_matching(previous_points, current_points):
    """
    Iterative Closest Point matching
    - input
    previous_points: 2D or 3D points in the previous frame
    current_points: 2D or 3D points in the current frame
    - output
    R: Rotation matrix
    T: Translation vector

    Iterates associate -> estimate -> apply until the residual stops
    improving by more than EPS, accumulating the per-iteration transforms
    into one homogeneous matrix H.
    """
    H = None  # homogeneous transformation matrix
    dError = np.inf
    preError = np.inf
    count = 0

    if show_animation:
        fig = plt.figure()
        if previous_points.shape[0] == 3:
            fig.add_subplot(111, projection='3d')

    while dError >= EPS:
        count += 1

        if show_animation:  # pragma: no cover
            plot_points(previous_points, current_points, fig)
            plt.pause(0.1)

        indexes, error = nearest_neighbor_association(previous_points, current_points)
        Rt, Tt = svd_motion_estimation(previous_points[:, indexes], current_points)
        # update current points
        current_points = (Rt @ current_points) + Tt[:, np.newaxis]

        dError = preError - error
        print("Residual:", error)

        # residual got worse: keep the last accepted H and stop
        if dError < 0:  # prevent matrix H changing, exit loop
            print("Not Converge...", preError, dError, count)
            break

        preError = error
        H = update_homogeneous_matrix(H, Rt, Tt)

        if dError <= EPS:
            print("Converge", error, dError, count)
            break
        elif MAX_ITER <= count:
            print("Not Converge...", error, dError, count)
            break

    # split the accumulated homogeneous matrix back into R and T
    R = np.array(H[0:-1, 0:-1])
    T = np.array(H[0:-1, -1])

    return R, T
def update_homogeneous_matrix(Hin, R, T):
    """Pack (R, T) into a homogeneous matrix and chain it onto Hin."""
    n = R.shape[0]
    H = np.zeros((n + 1, n + 1))
    H[:n, :n] = R
    H[:n, n] = T
    H[n, n] = 1.0
    # first call starts the chain; later calls accumulate on the right
    return H if Hin is None else Hin @ H
def nearest_neighbor_association(previous_points, current_points):
    """Nearest-neighbor indexes into previous_points and the index-wise residual.

    The residual is the sum of distances between same-index columns (assumes
    both sets have the same number of points).
    """
    # calc the sum of residual errors between corresponding columns
    error = float(np.sum(np.linalg.norm(previous_points - current_points,
                                        axis=0)))
    # full pairwise distance matrix: entry [i, j] = ||current_i - previous_j||
    diff = current_points[:, :, np.newaxis] - previous_points[:, np.newaxis, :]
    dist = np.linalg.norm(diff, axis=0)
    indexes = np.argmin(dist, axis=1)
    return indexes, error
def svd_motion_estimation(previous_points, current_points):
    """Rigid motion (R, t) aligning current_points onto previous_points via SVD."""
    prev_mean = np.mean(previous_points, axis=1)
    curr_mean = np.mean(current_points, axis=1)
    # center both clouds on their centroids
    prev_centered = previous_points - prev_mean[:, np.newaxis]
    curr_centered = current_points - curr_mean[:, np.newaxis]
    cross_cov = curr_centered @ prev_centered.T
    u, _, vh = np.linalg.svd(cross_cov)
    R = (u @ vh).T
    t = prev_mean - (R @ curr_mean)
    return R, t
def plot_points(previous_points, current_points, figure):
    """Draw both point sets (red = previous, blue = current), in 2D or 3D."""
    # for stopping simulation with the esc key.
    plt.gcf().canvas.mpl_connect(
        'key_release_event',
        lambda event: [exit(0) if event.key == 'escape' else None])
    if previous_points.shape[0] == 3:
        # 3D case: rebuild the 3D axes each frame
        plt.clf()
        axes = figure.add_subplot(111, projection='3d')
        axes.scatter(previous_points[0, :], previous_points[1, :],
                     previous_points[2, :], c="r", marker=".")
        axes.scatter(current_points[0, :], current_points[1, :],
                     current_points[2, :], c="b", marker=".")
        axes.scatter(0.0, 0.0, 0.0, c="r", marker="x")
        figure.canvas.draw()
    else:
        plt.cla()
        plt.plot(previous_points[0, :], previous_points[1, :], ".r")
        plt.plot(current_points[0, :], current_points[1, :], ".b")
        plt.plot(0.0, 0.0, "xr")
        plt.axis("equal")
def main():
    """2D ICP demo: match random clouds displaced by a known rigid motion."""
    print(__file__ + " start!!")

    # simulation parameters
    nPoint = 1000
    fieldLength = 50.0
    motion = [0.5, 2.0, np.deg2rad(-10.0)]  # movement [x[m],y[m],yaw[deg]]

    nsim = 3  # number of simulation

    for _ in range(nsim):

        # previous points
        px = (np.random.rand(nPoint) - 0.5) * fieldLength
        py = (np.random.rand(nPoint) - 0.5) * fieldLength
        previous_points = np.vstack((px, py))

        # current points: previous points moved by the known rigid motion
        cx = [math.cos(motion[2]) * x - math.sin(motion[2]) * y + motion[0]
              for (x, y) in zip(px, py)]
        cy = [math.sin(motion[2]) * x + math.cos(motion[2]) * y + motion[1]
              for (x, y) in zip(px, py)]
        current_points = np.vstack((cx, cy))

        R, T = icp_matching(previous_points, current_points)
        print("R:", R)
        print("T:", T)
def main_3d_points():
    """3D ICP demo: match random clouds displaced by a known rigid motion."""
    print(__file__ + " start!!")

    # simulation parameters for 3d point set
    nPoint = 1000
    fieldLength = 50.0
    motion = [0.5, 2.0, -5, np.deg2rad(-10.0)]  # [x[m],y[m],z[m],roll[deg]]

    nsim = 3  # number of simulation

    for _ in range(nsim):

        # previous points
        px = (np.random.rand(nPoint) - 0.5) * fieldLength
        py = (np.random.rand(nPoint) - 0.5) * fieldLength
        pz = (np.random.rand(nPoint) - 0.5) * fieldLength
        previous_points = np.vstack((px, py, pz))

        # current points: rotate about the y axis and translate
        cx = [math.cos(motion[3]) * x - math.sin(motion[3]) * z + motion[0]
              for (x, z) in zip(px, pz)]
        cy = [y + motion[1] for y in py]
        cz = [math.sin(motion[3]) * x + math.cos(motion[3]) * z + motion[2]
              for (x, z) in zip(px, pz)]
        current_points = np.vstack((cx, cy, cz))

        R, T = icp_matching(previous_points, current_points)
        print("R:", R)
        print("T:", T)
# Run both the 2D and 3D demos when executed as a script.
if __name__ == '__main__':
    main()
    main_3d_points()
``` | /content/code_sandbox/SLAM/iterative_closest_point/iterative_closest_point.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,706 |
```python
"""
Path tracking simulation with pure pursuit steering and PID speed control.
author: Atsushi Sakai (@Atsushi_twi)
Guillaume Jacquenot (@Gjacquenot)
"""
import numpy as np
import math
import matplotlib.pyplot as plt
# Parameters
k = 0.1 # look forward gain
Lfc = 2.0 # [m] look-ahead distance
Kp = 1.0 # speed proportional gain
dt = 0.1 # [s] time tick
WB = 2.9 # [m] wheel base of vehicle
show_animation = True
class State:
    """Kinematic bicycle state; the rear-axle position is kept in sync."""

    def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
        self.x = x
        self.y = y
        self.yaw = yaw
        self.v = v
        self._sync_rear_axle()

    def _sync_rear_axle(self):
        # the rear axle sits half a wheelbase behind the reference point
        self.rear_x = self.x - ((WB / 2) * math.cos(self.yaw))
        self.rear_y = self.y - ((WB / 2) * math.sin(self.yaw))

    def update(self, a, delta):
        """Advance the state one time tick with acceleration a and steering delta."""
        self.x += self.v * math.cos(self.yaw) * dt
        self.y += self.v * math.sin(self.yaw) * dt
        self.yaw += self.v / WB * math.tan(delta) * dt
        self.v += a * dt
        self._sync_rear_axle()

    def calc_distance(self, point_x, point_y):
        """Euclidean distance from the rear axle to (point_x, point_y)."""
        return math.hypot(self.rear_x - point_x, self.rear_y - point_y)
class States:
    """Time-indexed history of vehicle states."""

    def __init__(self):
        self.x, self.y, self.yaw, self.v, self.t = [], [], [], [], []

    def append(self, t, state):
        """Record *state* (x, y, yaw, v) at time *t*."""
        self.x.append(state.x)
        self.y.append(state.y)
        self.yaw.append(state.yaw)
        self.v.append(state.v)
        self.t.append(t)
def proportional_control(target, current):
    """P-controller: acceleration proportional to the speed error."""
    return Kp * (target - current)
class TargetCourse:
    """Reference course with a stateful nearest-point / look-ahead search."""

    def __init__(self, cx, cy):
        self.cx = cx
        self.cy = cy
        # cache of the last nearest index so later searches resume from it
        self.old_nearest_point_index = None

    def search_target_index(self, state):
        """Return (look-ahead target index, look-ahead distance Lf)."""
        # To speed up nearest point search, doing it at only first time.
        if self.old_nearest_point_index is None:
            # search nearest point index
            dx = [state.rear_x - icx for icx in self.cx]
            dy = [state.rear_y - icy for icy in self.cy]
            d = np.hypot(dx, dy)
            ind = np.argmin(d)
            self.old_nearest_point_index = ind
        else:
            ind = self.old_nearest_point_index
            distance_this_index = state.calc_distance(self.cx[ind],
                                                      self.cy[ind])
            # walk forward while the next point is not farther than this one
            # NOTE(review): self.cx[ind + 1] can raise IndexError when ind is
            # already the last point — confirm callers stop before the end.
            while True:
                distance_next_index = state.calc_distance(self.cx[ind + 1],
                                                          self.cy[ind + 1])
                if distance_this_index < distance_next_index:
                    break
                ind = ind + 1 if (ind + 1) < len(self.cx) else ind
                distance_this_index = distance_next_index
            self.old_nearest_point_index = ind

        Lf = k * state.v + Lfc  # update look ahead distance

        # search look ahead target point index
        while Lf > state.calc_distance(self.cx[ind], self.cy[ind]):
            if (ind + 1) >= len(self.cx):
                break  # not exceed goal
            ind += 1

        return ind, Lf
def pure_pursuit_steer_control(state, trajectory, pind):
    """Pure-pursuit steering angle toward the look-ahead point; returns (delta, index)."""
    ind, Lf = trajectory.search_target_index(state)

    # never fall back behind the previously reported index
    if pind >= ind:
        ind = pind

    if ind < len(trajectory.cx):
        tx, ty = trajectory.cx[ind], trajectory.cy[ind]
    else:
        # past the end of the course: steer toward the final point
        tx, ty = trajectory.cx[-1], trajectory.cy[-1]
        ind = len(trajectory.cx) - 1

    # heading error to the target seen from the rear axle
    alpha = math.atan2(ty - state.rear_y, tx - state.rear_x) - state.yaw
    delta = math.atan2(2.0 * WB * math.sin(alpha) / Lf, 1.0)

    return delta, ind
def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
    """
    Plot an arrow at (x, y) pointing along yaw; sequences plot one arrow per pose.
    """
    if not isinstance(x, float):
        # sequence input: recurse on each pose
        for ix, iy, iyaw in zip(x, y, yaw):
            plot_arrow(ix, iy, iyaw)
    else:
        plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
                  fc=fc, ec=ec, head_width=width, head_length=width)
        plt.plot(x, y)
def main():
    """Track a sinusoidal course with pure pursuit + PID speed control."""
    # target course
    cx = np.arange(0, 50, 0.5)
    cy = [math.sin(ix / 5.0) * ix / 2.0 for ix in cx]

    target_speed = 10.0 / 3.6  # [m/s]

    T = 100.0  # max simulation time

    # initial state
    state = State(x=-0.0, y=-3.0, yaw=0.0, v=0.0)

    lastIndex = len(cx) - 1
    time = 0.0
    states = States()
    states.append(time, state)
    target_course = TargetCourse(cx, cy)
    target_ind, _ = target_course.search_target_index(state)

    while T >= time and lastIndex > target_ind:

        # Calc control input
        ai = proportional_control(target_speed, state.v)
        di, target_ind = pure_pursuit_steer_control(
            state, target_course, target_ind)

        state.update(ai, di)  # Control vehicle

        time += dt
        states.append(time, state)

        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plot_arrow(state.x, state.y, state.yaw)
            plt.plot(cx, cy, "-r", label="course")
            plt.plot(states.x, states.y, "-b", label="trajectory")
            plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
            plt.axis("equal")
            plt.grid(True)
            plt.title("Speed[km/h]:" + str(state.v * 3.6)[:4])
            plt.pause(0.001)

    # Test
    assert lastIndex >= target_ind, "Cannot goal"

    if show_animation:  # pragma: no cover
        plt.cla()
        plt.plot(cx, cy, ".r", label="course")
        plt.plot(states.x, states.y, "-b", label="trajectory")
        plt.legend()
        plt.xlabel("x[m]")
        plt.ylabel("y[m]")
        plt.axis("equal")
        plt.grid(True)

        plt.subplots(1)
        plt.plot(states.t, [iv * 3.6 for iv in states.v], "-r")
        plt.xlabel("Time[s]")
        plt.ylabel("Speed[km/h]")
        plt.grid(True)
        plt.show()
# Run the tracking demo when executed as a script.
if __name__ == '__main__':
    print("Pure pursuit path tracking simulation start")
    main()
``` | /content/code_sandbox/PathTracking/pure_pursuit/pure_pursuit.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 1,678 |
```python
"""
Path tracking simulation with LQR steering control and PID speed control.
author Atsushi Sakai (@Atsushi_twi)
"""
import scipy.linalg as la
import matplotlib.pyplot as plt
import math
import numpy as np
import sys
import pathlib
from utils.angle import angle_mod
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
from PathPlanning.CubicSpline import cubic_spline_planner
Kp = 1.0 # speed proportional gain
# LQR parameter
Q = np.eye(4)
R = np.eye(1)
# parameters
dt = 0.1 # time tick[s]
L = 0.5 # Wheelbase of the vehicle [m]
max_steer = np.deg2rad(45.0) # maximum steering angle[rad]
show_animation = True
# show_animation = False
class State:
    """Vehicle state: position (x, y), heading yaw and speed v."""

    def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
        self.x, self.y, self.yaw, self.v = x, y, yaw, v
def update(state, a, delta):
    """Advance *state* one time step with acceleration a and steering delta."""
    # saturate the steering command at the physical limit
    if delta >= max_steer:
        delta = max_steer
    if delta <= - max_steer:
        delta = - max_steer

    state.x += state.v * math.cos(state.yaw) * dt
    state.y += state.v * math.sin(state.yaw) * dt
    state.yaw += state.v / L * math.tan(delta) * dt
    state.v += a * dt

    return state
def pid_control(target, current):
    """P-controller: acceleration proportional to the speed error."""
    return Kp * (target - current)
def pi_2_pi(angle):
    """Normalize an angle into the principal interval (delegates to utils.angle.angle_mod)."""
    return angle_mod(angle)
def solve_DARE(A, B, Q, R):
    """
    solve a discrete time_Algebraic Riccati equation (DARE)

    Fixed-point iteration starting from Q, stopping when the elementwise
    update falls below a tolerance or the iteration budget runs out.
    """
    max_iter = 150
    eps = 0.01

    X = Q
    result = Q
    for _ in range(max_iter):
        inner = la.inv(R + B.T @ X @ B)
        result = A.T @ X @ A - A.T @ X @ B @ inner @ B.T @ X @ A + Q
        if np.abs(result - X).max() < eps:
            break
        X = result

    return result
def dlqr(A, B, Q, R):
    """Solve the discrete time lqr controller.

    x[k+1] = A x[k] + B u[k]
    cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]
    Returns (gain K, Riccati solution X, closed-loop eigenvalues).
    # ref Bertsekas, p.151
    """
    # first, solve the Riccati equation
    X = solve_DARE(A, B, Q, R)

    # then derive the LQR gain from it
    K = la.inv(B.T @ X @ B + R) @ (B.T @ X @ A)

    eig_result = la.eig(A - B @ K)

    return K, X, eig_result[0]
def lqr_steering_control(state, cx, cy, cyaw, ck, pe, pth_e):
    """LQR steering command for course tracking.

    The error state is x = [e, e_dot, th_e, th_e_dot] where e is the signed
    lateral error and th_e the heading error; pe and pth_e are the previous
    errors used for the finite-difference rates.

    Returns (delta, nearest_index, e, th_e).
    """
    ind, e = calc_nearest_index(state, cx, cy, cyaw)

    k = ck[ind]  # course curvature at the nearest point
    v = state.v
    th_e = pi_2_pi(state.yaw - cyaw[ind])

    # error dynamics linearized at the current speed
    A = np.zeros((4, 4))
    A[0, 0] = 1.0
    A[0, 1] = dt
    A[1, 2] = v
    A[2, 2] = 1.0
    A[2, 3] = dt
    # print(A)

    B = np.zeros((4, 1))
    B[3, 0] = v / L

    K, _, _ = dlqr(A, B, Q, R)

    x = np.zeros((4, 1))

    x[0, 0] = e
    x[1, 0] = (e - pe) / dt
    x[2, 0] = th_e
    x[3, 0] = (th_e - pth_e) / dt

    # feedforward steering from curvature, feedback from the LQR gain
    ff = math.atan2(L * k, 1)
    fb = pi_2_pi((-K @ x)[0, 0])

    delta = ff + fb

    return delta, ind, e, th_e
def calc_nearest_index(state, cx, cy, cyaw):
    """Index of and signed distance to the course point nearest to *state*."""
    d = [(state.x - icx) ** 2 + (state.y - icy) ** 2
         for icx, icy in zip(cx, cy)]

    mind = min(d)
    ind = d.index(mind)
    mind = math.sqrt(mind)

    # the sign of the distance encodes which side of the path the vehicle is on
    dxl = cx[ind] - state.x
    dyl = cy[ind] - state.y
    angle = pi_2_pi(cyaw[ind] - math.atan2(dyl, dxl))
    if angle < 0:
        mind *= -1

    return ind, mind
def closed_loop_prediction(cx, cy, cyaw, ck, speed_profile, goal):
    """Simulate closed-loop tracking of the course with LQR steering + PID speed.

    Returns the trajectory histories (t, x, y, yaw, v).
    """
    T = 500.0  # max simulation time
    goal_dis = 0.3
    stop_speed = 0.05

    state = State(x=-0.0, y=-0.0, yaw=0.0, v=0.0)

    time = 0.0
    x = [state.x]
    y = [state.y]
    yaw = [state.yaw]
    v = [state.v]
    t = [0.0]

    e, e_th = 0.0, 0.0  # previous lateral / heading errors for the LQR rates

    while T >= time:
        dl, target_ind, e, e_th = lqr_steering_control(
            state, cx, cy, cyaw, ck, e, e_th)

        ai = pid_control(speed_profile[target_ind], state.v)
        state = update(state, ai, dl)

        # nudge the target forward when (nearly) stopped to avoid stalling
        if abs(state.v) <= stop_speed:
            target_ind += 1

        time = time + dt

        # check goal
        dx = state.x - goal[0]
        dy = state.y - goal[1]
        if math.hypot(dx, dy) <= goal_dis:
            print("Goal")
            break

        x.append(state.x)
        y.append(state.y)
        yaw.append(state.yaw)
        v.append(state.v)
        t.append(time)

        if target_ind % 1 == 0 and show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.plot(cx, cy, "-r", label="course")
            plt.plot(x, y, "ob", label="trajectory")
            plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
            plt.axis("equal")
            plt.grid(True)
            plt.title("speed[km/h]:" + str(round(state.v * 3.6, 2))
                      + ",target index:" + str(target_ind))
            plt.pause(0.0001)

    return t, x, y, yaw, v
def calc_speed_profile(cx, cy, cyaw, target_speed):
    """Speed profile along the course; sign flips across switch-back segments.

    A yaw jump in [pi/4, pi/2) between consecutive points is treated as a
    direction switch: the speed is zeroed at the switch point and negated for
    the reversed stretch. The final point is always a stop.
    """
    speed_profile = [target_speed] * len(cx)

    direction = 1.0
    for i in range(len(cx) - 1):
        dyaw = abs(cyaw[i + 1] - cyaw[i])
        switch = math.pi / 4.0 <= dyaw < math.pi / 2.0

        if switch:
            direction *= -1

        speed_profile[i] = target_speed if direction == 1.0 else - target_speed

        if switch:
            speed_profile[i] = 0.0  # stop point at the direction change

    speed_profile[-1] = 0.0

    return speed_profile
def main():
    """Build a spline course through waypoints and track it with LQR steering."""
    print("LQR steering control tracking start!!")
    ax = [0.0, 6.0, 12.5, 10.0, 7.5, 3.0, -1.0]
    ay = [0.0, -3.0, -5.0, 6.5, 3.0, 5.0, -2.0]
    goal = [ax[-1], ay[-1]]

    cx, cy, cyaw, ck, s = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=0.1)
    target_speed = 10.0 / 3.6  # simulation parameter km/h -> m/s

    sp = calc_speed_profile(cx, cy, cyaw, target_speed)

    t, x, y, yaw, v = closed_loop_prediction(cx, cy, cyaw, ck, sp, goal)

    if show_animation:  # pragma: no cover
        plt.close()
        plt.subplots(1)
        plt.plot(ax, ay, "xb", label="input")
        plt.plot(cx, cy, "-r", label="spline")
        plt.plot(x, y, "-g", label="tracking")
        plt.grid(True)
        plt.axis("equal")
        plt.xlabel("x[m]")
        plt.ylabel("y[m]")
        plt.legend()

        plt.subplots(1)
        plt.plot(s, [np.rad2deg(iyaw) for iyaw in cyaw], "-r", label="yaw")
        plt.grid(True)
        plt.legend()
        plt.xlabel("line length[m]")
        plt.ylabel("yaw angle[deg]")

        plt.subplots(1)
        plt.plot(s, ck, "-r", label="curvature")
        plt.grid(True)
        plt.legend()
        plt.xlabel("line length[m]")
        plt.ylabel("curvature [1/m]")

        plt.show()
# Run the tracking demo when executed as a script.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/PathTracking/lqr_steer_control/lqr_steer_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 2,207 |
```python
"""
Path tracking simulation with iterative linear model predictive control for speed and steer control
author: Atsushi Sakai (@Atsushi_twi)
"""
import matplotlib.pyplot as plt
import time
import cvxpy
import math
import numpy as np
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
from utils.angle import angle_mod
from PathPlanning.CubicSpline import cubic_spline_planner
NX = 4 # x = x, y, v, yaw
NU = 2 # a = [accel, steer]
T = 5 # horizon length
# mpc parameters
R = np.diag([0.01, 0.01]) # input cost matrix
Rd = np.diag([0.01, 1.0]) # input difference cost matrix
Q = np.diag([1.0, 1.0, 0.5, 0.5]) # state cost matrix
Qf = Q # state final matrix
GOAL_DIS = 1.5 # goal distance
STOP_SPEED = 0.5 / 3.6 # stop speed
MAX_TIME = 500.0 # max simulation time
# iterative paramter
MAX_ITER = 3 # Max iteration
DU_TH = 0.1 # iteration finish param
TARGET_SPEED = 10.0 / 3.6 # [m/s] target speed
N_IND_SEARCH = 10 # Search index number
DT = 0.2 # [s] time tick
# Vehicle parameters
LENGTH = 4.5 # [m]
WIDTH = 2.0 # [m]
BACKTOWHEEL = 1.0 # [m]
WHEEL_LEN = 0.3 # [m]
WHEEL_WIDTH = 0.2 # [m]
TREAD = 0.7 # [m]
WB = 2.5 # [m]
MAX_STEER = np.deg2rad(45.0) # maximum steering angle [rad]
MAX_DSTEER = np.deg2rad(30.0) # maximum steering speed [rad/s]
MAX_SPEED = 55.0 / 3.6 # maximum speed [m/s]
MIN_SPEED = -20.0 / 3.6 # minimum speed [m/s]
MAX_ACCEL = 1.0 # maximum accel [m/ss]
show_animation = True
class State:
    """
    vehicle state class

    Holds position (x, y), heading yaw, speed v, and the previous steering
    command predelta (None until the first control step).
    """

    def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
        self.x = x
        self.y = y
        self.yaw = yaw
        self.v = v
        self.predelta = None
def pi_2_pi(angle):
    """Normalize an angle into the principal interval (delegates to utils.angle.angle_mod)."""
    return angle_mod(angle)
def get_linear_model_matrix(v, phi, delta):
    """Discrete-time linearization of the bicycle model around (v, phi, delta).

    Returns (A, B, C) such that x[k+1] = A x[k] + B u[k] + C for the state
    x = [x, y, v, yaw] and input u = [accel, steer].
    """
    cos_phi = math.cos(phi)
    sin_phi = math.sin(phi)
    cos_delta_sq = math.cos(delta) ** 2

    A = np.eye(NX)
    A[0, 2] = DT * cos_phi
    A[0, 3] = - DT * v * sin_phi
    A[1, 2] = DT * sin_phi
    A[1, 3] = DT * v * cos_phi
    A[3, 2] = DT * math.tan(delta) / WB

    B = np.zeros((NX, NU))
    B[2, 0] = DT
    B[3, 1] = DT * v / (WB * cos_delta_sq)

    # constant term produced by linearizing around the operating point
    C = np.zeros(NX)
    C[0] = DT * v * sin_phi * phi
    C[1] = - DT * v * cos_phi * phi
    C[3] = - DT * v * delta / (WB * cos_delta_sq)

    return A, B, C
def plot_car(x, y, yaw, steer=0.0, cabcolor="-r", truckcolor="-k"):  # pragma: no cover
    """Draw the vehicle outline and four wheels at pose (x, y, yaw) with steering *steer*."""
    # body outline and wheel rectangles in the vehicle frame
    outline = np.array([[-BACKTOWHEEL, (LENGTH - BACKTOWHEEL), (LENGTH - BACKTOWHEEL), -BACKTOWHEEL, -BACKTOWHEEL],
                        [WIDTH / 2, WIDTH / 2, - WIDTH / 2, -WIDTH / 2, WIDTH / 2]])

    fr_wheel = np.array([[WHEEL_LEN, -WHEEL_LEN, -WHEEL_LEN, WHEEL_LEN, WHEEL_LEN],
                         [-WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD]])

    rr_wheel = np.copy(fr_wheel)

    fl_wheel = np.copy(fr_wheel)
    fl_wheel[1, :] *= -1
    rl_wheel = np.copy(rr_wheel)
    rl_wheel[1, :] *= -1

    # Rot1: vehicle yaw, Rot2: steering angle for the front wheels
    Rot1 = np.array([[math.cos(yaw), math.sin(yaw)],
                     [-math.sin(yaw), math.cos(yaw)]])
    Rot2 = np.array([[math.cos(steer), math.sin(steer)],
                     [-math.sin(steer), math.cos(steer)]])

    # steer the front wheels, then shift them to the front axle
    fr_wheel = (fr_wheel.T.dot(Rot2)).T
    fl_wheel = (fl_wheel.T.dot(Rot2)).T
    fr_wheel[0, :] += WB
    fl_wheel[0, :] += WB

    # rotate everything into the world frame
    fr_wheel = (fr_wheel.T.dot(Rot1)).T
    fl_wheel = (fl_wheel.T.dot(Rot1)).T

    outline = (outline.T.dot(Rot1)).T
    rr_wheel = (rr_wheel.T.dot(Rot1)).T
    rl_wheel = (rl_wheel.T.dot(Rot1)).T

    # translate to the vehicle position
    outline[0, :] += x
    outline[1, :] += y
    fr_wheel[0, :] += x
    fr_wheel[1, :] += y
    rr_wheel[0, :] += x
    rr_wheel[1, :] += y
    fl_wheel[0, :] += x
    fl_wheel[1, :] += y
    rl_wheel[0, :] += x
    rl_wheel[1, :] += y

    plt.plot(np.array(outline[0, :]).flatten(),
             np.array(outline[1, :]).flatten(), truckcolor)
    plt.plot(np.array(fr_wheel[0, :]).flatten(),
             np.array(fr_wheel[1, :]).flatten(), truckcolor)
    plt.plot(np.array(rr_wheel[0, :]).flatten(),
             np.array(rr_wheel[1, :]).flatten(), truckcolor)
    plt.plot(np.array(fl_wheel[0, :]).flatten(),
             np.array(fl_wheel[1, :]).flatten(), truckcolor)
    plt.plot(np.array(rl_wheel[0, :]).flatten(),
             np.array(rl_wheel[1, :]).flatten(), truckcolor)
    plt.plot(x, y, "*")
def update_state(state, a, delta):
    """Advance *state* one step, clamping steering and speed to their limits."""
    # input check: clamp the steering command
    delta = max(min(delta, MAX_STEER), -MAX_STEER)

    state.x = state.x + state.v * math.cos(state.yaw) * DT
    state.y = state.y + state.v * math.sin(state.yaw) * DT
    state.yaw = state.yaw + state.v / WB * math.tan(delta) * DT
    state.v = state.v + a * DT

    # clamp the speed to the physical limits
    state.v = max(min(state.v, MAX_SPEED), MIN_SPEED)

    return state
def get_nparray_from_matrix(x):
    """Flatten a matrix-like input into a fresh 1-D numpy array."""
    arr = np.array(x)  # copy so the caller's buffer is never aliased
    return arr.ravel()
def calc_nearest_index(state, cx, cy, cyaw, pind):
    """Nearest course index within N_IND_SEARCH points after pind, with signed distance."""
    d = [(state.x - icx) ** 2 + (state.y - icy) ** 2
         for icx, icy in zip(cx[pind:(pind + N_IND_SEARCH)],
                             cy[pind:(pind + N_IND_SEARCH)])]

    mind = min(d)
    ind = d.index(mind) + pind
    mind = math.sqrt(mind)

    # the sign of the distance encodes which side of the path the vehicle is on
    dxl = cx[ind] - state.x
    dyl = cy[ind] - state.y
    angle = pi_2_pi(cyaw[ind] - math.atan2(dyl, dxl))
    if angle < 0:
        mind *= -1

    return ind, mind
def predict_motion(x0, oa, od, xref):
    """Roll the vehicle model forward from x0 under inputs (oa, od) over the horizon."""
    xbar = xref * 0.0
    for i, value in enumerate(x0):
        xbar[i, 0] = value

    # note the state ordering: x0 = [x, y, v, yaw]
    state = State(x=x0[0], y=x0[1], yaw=x0[3], v=x0[2])
    for ai, di, i in zip(oa, od, range(1, T + 1)):
        state = update_state(state, ai, di)
        xbar[0, i] = state.x
        xbar[1, i] = state.y
        xbar[2, i] = state.v
        xbar[3, i] = state.yaw

    return xbar
def iterative_linear_mpc_control(xref, x0, dref, oa, od):
    """
    MPC control with updating operational point iteratively

    Re-linearizes around the predicted trajectory and re-solves the QP until
    the input change du falls below DU_TH or MAX_ITER is reached.
    """
    ox, oy, oyaw, ov = None, None, None, None

    if oa is None or od is None:
        # no warm start available: begin from zero inputs
        oa = [0.0] * T
        od = [0.0] * T

    for i in range(MAX_ITER):
        xbar = predict_motion(x0, oa, od, xref)
        poa, pod = oa[:], od[:]
        oa, od, ox, oy, oyaw, ov = linear_mpc_control(xref, xbar, x0, dref)
        du = sum(abs(oa - poa)) + sum(abs(od - pod))  # calc u change value
        if du <= DU_TH:
            break
    else:
        # for-else: reached only when the loop exhausted MAX_ITER without break
        print("Iterative is max iter")

    return oa, od, ox, oy, oyaw, ov
def linear_mpc_control(xref, xbar, x0, dref):
    """
    linear mpc control

    xref: reference point
    xbar: operational point
    x0: initial state
    dref: reference steer angle

    Builds and solves a convex QP over the horizon T; returns the optimal
    input sequences and predicted states, or all-None on solver failure.
    """
    x = cvxpy.Variable((NX, T + 1))
    u = cvxpy.Variable((NU, T))

    cost = 0.0
    constraints = []

    for t in range(T):
        cost += cvxpy.quad_form(u[:, t], R)

        if t != 0:
            cost += cvxpy.quad_form(xref[:, t] - x[:, t], Q)

        # dynamics constraint linearized around the operating point xbar
        A, B, C = get_linear_model_matrix(
            xbar[2, t], xbar[3, t], dref[0, t])
        constraints += [x[:, t + 1] == A @ x[:, t] + B @ u[:, t] + C]

        if t < (T - 1):
            cost += cvxpy.quad_form(u[:, t + 1] - u[:, t], Rd)
            # limit the steering rate between consecutive steps
            constraints += [cvxpy.abs(u[1, t + 1] - u[1, t]) <=
                            MAX_DSTEER * DT]

    # terminal state cost
    cost += cvxpy.quad_form(xref[:, T] - x[:, T], Qf)

    constraints += [x[:, 0] == x0]
    constraints += [x[2, :] <= MAX_SPEED]
    constraints += [x[2, :] >= MIN_SPEED]
    constraints += [cvxpy.abs(u[0, :]) <= MAX_ACCEL]
    constraints += [cvxpy.abs(u[1, :]) <= MAX_STEER]

    prob = cvxpy.Problem(cvxpy.Minimize(cost), constraints)
    prob.solve(solver=cvxpy.CLARABEL, verbose=False)

    if prob.status == cvxpy.OPTIMAL or prob.status == cvxpy.OPTIMAL_INACCURATE:
        ox = get_nparray_from_matrix(x.value[0, :])
        oy = get_nparray_from_matrix(x.value[1, :])
        ov = get_nparray_from_matrix(x.value[2, :])
        oyaw = get_nparray_from_matrix(x.value[3, :])
        oa = get_nparray_from_matrix(u.value[0, :])
        odelta = get_nparray_from_matrix(u.value[1, :])
    else:
        print("Error: Cannot solve mpc..")
        oa, odelta, ox, oy, oyaw, ov = None, None, None, None, None, None

    return oa, odelta, ox, oy, oyaw, ov
def calc_ref_trajectory(state, cx, cy, cyaw, ck, sp, dl, pind):
    """
    Build the reference trajectory over the prediction horizon.

    Samples the course ahead of the nearest point at the distance the
    vehicle is expected to travel per step, clamping at the course end.
    Returns (xref, ind, dref) where dref (reference steer) is all zeros.
    """
    xref = np.zeros((NX, T + 1))
    dref = np.zeros((1, T + 1))  # steer operational point should be 0
    ncourse = len(cx)

    ind, _ = calc_nearest_index(state, cx, cy, cyaw, pind)
    if pind >= ind:
        # never move the target index backwards along the course
        ind = pind

    travel = 0.0
    for i in range(T + 1):
        travel += abs(state.v) * DT
        dind = int(round(travel / dl))
        # clamp to the last course point once the horizon runs past the end
        j = min(ind + dind, ncourse - 1)
        xref[0, i] = cx[j]
        xref[1, i] = cy[j]
        xref[2, i] = sp[j]
        xref[3, i] = cyaw[j]

    return xref, ind, dref
def check_goal(state, goal, tind, nind):
    """Return True when the vehicle has come to a stop at the goal."""
    # close enough to the goal position?
    d = math.hypot(state.x - goal[0], state.y - goal[1])
    isgoal = (d <= GOAL_DIS)

    # also require the tracked index to be near the end of the course
    if abs(tind - nind) >= 5:
        isgoal = False

    # effectively stopped?
    isstop = (abs(state.v) <= STOP_SPEED)

    return isgoal and isstop
def do_simulation(cx, cy, cyaw, ck, sp, dl, initial_state):
    """
    Simulation

    cx: course x position list
    cy: course y position list
    cyaw: course yaw position list
    ck: course curvature list
    sp: speed profile
    dl: course tick [m]
    initial_state: State at the start of the run

    Returns the logged histories (t, x, y, yaw, v, d, a).
    """
    goal = [cx[-1], cy[-1]]

    state = initial_state

    # initial yaw compensation
    if state.yaw - cyaw[0] >= math.pi:
        state.yaw -= math.pi * 2.0
    elif state.yaw - cyaw[0] <= -math.pi:
        state.yaw += math.pi * 2.0

    # logged histories, seeded with the initial state
    time = 0.0
    x = [state.x]
    y = [state.y]
    yaw = [state.yaw]
    v = [state.v]
    t = [0.0]
    d = [0.0]
    a = [0.0]
    target_ind, _ = calc_nearest_index(state, cx, cy, cyaw, 0)

    # warm-start sequences for the iterative MPC (None = cold start)
    odelta, oa = None, None

    # remove 2*pi discontinuities so yaw tracking errors stay small
    cyaw = smooth_yaw(cyaw)

    while MAX_TIME >= time:
        xref, target_ind, dref = calc_ref_trajectory(
            state, cx, cy, cyaw, ck, sp, dl, target_ind)

        x0 = [state.x, state.y, state.v, state.yaw]  # current state

        oa, odelta, ox, oy, oyaw, ov = iterative_linear_mpc_control(
            xref, x0, dref, oa, odelta)

        # apply the first control of the optimized sequence (receding horizon);
        # fall back to zero input if the solver failed
        di, ai = 0.0, 0.0
        if odelta is not None:
            di, ai = odelta[0], oa[0]
            state = update_state(state, ai, di)
        time = time + DT

        x.append(state.x)
        y.append(state.y)
        yaw.append(state.yaw)
        v.append(state.v)
        t.append(time)
        d.append(di)
        a.append(ai)

        if check_goal(state, goal, target_ind, len(cx)):
            print("Goal")
            break

        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
            if ox is not None:
                plt.plot(ox, oy, "xr", label="MPC")
            plt.plot(cx, cy, "-r", label="course")
            plt.plot(x, y, "ob", label="trajectory")
            plt.plot(xref[0, :], xref[1, :], "xk", label="xref")
            plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
            plot_car(state.x, state.y, state.yaw, steer=di)
            plt.axis("equal")
            plt.grid(True)
            plt.title("Time[s]:" + str(round(time, 2))
                      + ", speed[km/h]:" + str(round(state.v * 3.6, 2)))
            plt.pause(0.0001)

    return t, x, y, yaw, v, d, a
def calc_speed_profile(cx, cy, cyaw, target_speed):
    """
    Build a per-point speed profile for the course.

    Points driven in reverse (segment heading opposing the course yaw)
    get a negative target speed; the last point is set to 0 for stopping.
    """
    speed_profile = [target_speed] * len(cx)
    direction = 1.0  # forward

    # Set stop point
    for i in range(len(cx) - 1):
        dx = cx[i + 1] - cx[i]
        dy = cy[i + 1] - cy[i]

        move_direction = math.atan2(dy, dx)

        if dx != 0.0 and dy != 0.0:
            dangle = abs(pi_2_pi(move_direction - cyaw[i]))
            # heading opposes course yaw -> this segment is driven backwards
            direction = -1.0 if dangle >= math.pi / 4.0 else 1.0

        speed_profile[i] = target_speed if direction == 1.0 else -target_speed

    speed_profile[-1] = 0.0

    return speed_profile
def smooth_yaw(yaw):
    """
    Unwrap a yaw sequence in place.

    Shifts each sample by multiples of 2*pi so consecutive samples never
    differ by pi/2 or more, removing wrap-around discontinuities.
    Returns the same (mutated) list.
    """
    for i in range(len(yaw) - 1):
        while yaw[i + 1] - yaw[i] >= math.pi / 2.0:
            yaw[i + 1] -= math.pi * 2.0
        while yaw[i + 1] - yaw[i] <= -math.pi / 2.0:
            yaw[i + 1] += math.pi * 2.0
    return yaw
def get_straight_course(dl):
    """Straight 50 m course along the x axis, resampled every dl [m]."""
    ax = [0.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0]
    ay = [0.0] * len(ax)
    cx, cy, cyaw, ck, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)
    return cx, cy, cyaw, ck
def get_straight_course2(dl):
    """Gently weaving course heading in the -x direction, resampled every dl [m]."""
    ax = [0.0, -10.0, -20.0, -40.0, -50.0, -60.0, -70.0]
    ay = [0.0, -1.0, 1.0, 0.0, -1.0, 1.0, 0.0]
    cx, cy, cyaw, ck, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)
    return cx, cy, cyaw, ck
def get_straight_course3(dl):
    """Same course as get_straight_course2 but with yaw flipped by pi
    (course intended to be driven in reverse)."""
    ax = [0.0, -10.0, -20.0, -40.0, -50.0, -60.0, -70.0]
    ay = [0.0, -1.0, 1.0, 0.0, -1.0, 1.0, 0.0]
    cx, cy, cyaw, ck, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)
    cyaw = [yaw_i - math.pi for yaw_i in cyaw]
    return cx, cy, cyaw, ck
def get_forward_course(dl):
    """Long forward-only demo course, resampled every dl [m]."""
    ax = [0.0, 60.0, 125.0, 50.0, 75.0, 30.0, -10.0]
    ay = [0.0, 0.0, 50.0, 65.0, 30.0, 50.0, -20.0]
    cx, cy, cyaw, ck, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)
    return cx, cy, cyaw, ck
def get_switch_back_course(dl):
    """Course with a direction switch: a forward leg followed by a
    reverse leg (yaw of the second leg is flipped by pi)."""
    # forward leg
    ax = [0.0, 30.0, 6.0, 20.0, 35.0]
    ay = [0.0, 0.0, 20.0, 35.0, 20.0]
    cx, cy, cyaw, ck, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)

    # reverse leg, driven backwards from the end of the forward leg
    ax = [35.0, 10.0, 0.0, 0.0]
    ay = [20.0, 30.0, 5.0, 0.0]
    cx2, cy2, cyaw2, ck2, _ = cubic_spline_planner.calc_spline_course(
        ax, ay, ds=dl)
    cyaw2 = [yaw_i - math.pi for yaw_i in cyaw2]

    cx += cx2
    cy += cy2
    cyaw += cyaw2
    ck += ck2

    return cx, cy, cyaw, ck
def main():
    """Run the MPC tracking demo on the switch-back course and plot results."""
    print(__file__ + " start!!")
    start = time.time()

    dl = 1.0  # course tick
    # Alternative demo courses:
    # cx, cy, cyaw, ck = get_straight_course(dl)
    # cx, cy, cyaw, ck = get_straight_course2(dl)
    # cx, cy, cyaw, ck = get_straight_course3(dl)
    # cx, cy, cyaw, ck = get_forward_course(dl)
    cx, cy, cyaw, ck = get_switch_back_course(dl)

    sp = calc_speed_profile(cx, cy, cyaw, TARGET_SPEED)
    initial_state = State(x=cx[0], y=cy[0], yaw=cyaw[0], v=0.0)

    t, x, y, yaw, v, d, a = do_simulation(
        cx, cy, cyaw, ck, sp, dl, initial_state)

    elapsed_time = time.time() - start
    print(f"calc time:{elapsed_time:.6f} [sec]")

    if show_animation:  # pragma: no cover
        plt.close("all")

        # course vs. driven trajectory
        plt.subplots()
        plt.plot(cx, cy, "-r", label="spline")
        plt.plot(x, y, "-g", label="tracking")
        plt.grid(True)
        plt.axis("equal")
        plt.xlabel("x[m]")
        plt.ylabel("y[m]")
        plt.legend()

        # speed history
        plt.subplots()
        plt.plot(t, v, "-r", label="speed")
        plt.grid(True)
        plt.xlabel("Time [s]")
        plt.ylabel("Speed [kmh]")

        plt.show()
def main2():
    """Run the MPC tracking demo on a straight course driven in reverse."""
    print(__file__ + " start!!")
    start = time.time()

    dl = 1.0  # course tick
    cx, cy, cyaw, ck = get_straight_course3(dl)

    sp = calc_speed_profile(cx, cy, cyaw, TARGET_SPEED)
    initial_state = State(x=cx[0], y=cy[0], yaw=0.0, v=0.0)

    t, x, y, yaw, v, d, a = do_simulation(
        cx, cy, cyaw, ck, sp, dl, initial_state)

    elapsed_time = time.time() - start
    print(f"calc time:{elapsed_time:.6f} [sec]")

    if show_animation:  # pragma: no cover
        plt.close("all")

        # course vs. driven trajectory
        plt.subplots()
        plt.plot(cx, cy, "-r", label="spline")
        plt.plot(x, y, "-g", label="tracking")
        plt.grid(True)
        plt.axis("equal")
        plt.xlabel("x[m]")
        plt.ylabel("y[m]")
        plt.legend()

        # speed history
        plt.subplots()
        plt.plot(t, v, "-r", label="speed")
        plt.grid(True)
        plt.xlabel("Time [s]")
        plt.ylabel("Speed [kmh]")

        plt.show()
if __name__ == '__main__':
    main()
    # main2()  # alternative demo: straight course driven in reverse
``` | /content/code_sandbox/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py | python | 2016-03-21T09:34:43 | 2024-08-16T19:00:08 | PythonRobotics | AtsushiSakai/PythonRobotics | 22,516 | 5,805 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.