repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/src/easy_markers/generator.py | people/wu_ros_tools/easy_markers/src/easy_markers/generator.py | import roslib; roslib.load_manifest('easy_markers')
import tf
import rospy
from visualization_msgs.msg import Marker, MarkerArray
from geometry_msgs.msg import Point, Quaternion
def get_point(position, scale=1.0):
    """Convert *position* into a Point message, dividing each component
    by *scale*.

    Accepts None (origin), any object exposing x/y/z attributes, or an
    indexable [x, y, z] sequence.
    """
    pt = Point()
    if position is None:
        pt.x, pt.y, pt.z = 0.0, 0.0, 0.0
    elif 'x' in dir(position):
        # Duck-typed message (e.g. Point/Vector3): copy its fields.
        pt.x = position.x
        pt.y = position.y
        pt.z = position.z
    else:
        # Plain sequence [x, y, z].
        pt.x = position[0]
        pt.y = position[1]
        pt.z = position[2]
    # NOTE: components are divided (not multiplied) by the scale factor.
    pt.x /= scale
    pt.y /= scale
    pt.z /= scale
    return pt
def get_quat(orientation):
    """Convert *orientation* into a Quaternion message.

    Accepts None (identity), an object with x/y/z/w attributes, a
    4-sequence [x, y, z, w], or Euler angles [roll, pitch, yaw].
    """
    quat = Quaternion()
    if orientation is None:
        # Identity rotation.
        quat.x, quat.y, quat.z, quat.w = 0.0, 0.0, 0.0, 1.0
    elif 'x' in dir(orientation):
        quat.w = orientation.w
        quat.x = orientation.x
        quat.y = orientation.y
        quat.z = orientation.z
    elif len(orientation) == 4:
        quat.x, quat.y, quat.z, quat.w = (
            orientation[0], orientation[1], orientation[2], orientation[3])
    else:
        # Three values: treat as Euler angles and convert.
        ex, ey, ez = orientation[0], orientation[1], orientation[2]
        q = tf.transformations.quaternion_from_euler(ex, ey, ez)
        quat.x, quat.y, quat.z, quat.w = q[0], q[1], q[2], q[3]
    return quat
class MarkerGenerator:
    """Factory that stamps out visualization Markers from a set of stored
    defaults (frame, namespace, type, scale, color, lifetime, ...)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore all defaults; ``counter`` feeds ``Marker.id``."""
        self.counter = 0
        self.frame_id = ''
        self.ns = 'marker'
        self.type = 0
        self.action = Marker.ADD
        self.scale = [1.0] * 3
        self.color = [1.0] * 4
        self.points = []
        self.colors = []
        self.text = ''
        self.lifetime = 0.0

    def marker(self, position=None, orientation=None, points=None,
               colors=None, scale=1.0):
        """Build one Marker from the stored defaults plus the given
        pose/points; each call gets a fresh, auto-incremented id."""
        m = Marker()
        m.header.stamp = rospy.Time.now()
        m.header.frame_id = self.frame_id
        m.ns = self.ns
        m.type = self.type
        m.id = self.counter
        m.action = self.action
        m.scale.x = self.scale[0]
        m.scale.y = self.scale[1]
        m.scale.z = self.scale[2]
        m.color.r = self.color[0]
        m.color.g = self.color[1]
        m.color.b = self.color[2]
        m.color.a = self.color[3]
        m.lifetime = rospy.Duration(self.lifetime)
        if points is not None:
            m.points = [get_point(p, scale) for p in points]
        if colors is not None:
            m.colors = colors
        if position is not None or orientation is not None:
            m.pose.position = get_point(position, scale)
            m.pose.orientation = get_quat(orientation)
        self.counter += 1
        return m
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/src/easy_markers/interactive.py | people/wu_ros_tools/easy_markers/src/easy_markers/interactive.py | import roslib; roslib.load_manifest('easy_markers')
from easy_markers.generator import MarkerGenerator
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from visualization_msgs.msg import InteractiveMarkerControl
# Maps a control name to [orientation w, x, y, z, interaction mode].
# The quaternion selects the axis the control rotates about / moves along.
TYPEDATA = {
    'rotate_x': [1, 1, 0, 0, InteractiveMarkerControl.ROTATE_AXIS],
    'move_x':   [1, 1, 0, 0, InteractiveMarkerControl.MOVE_AXIS],
    'rotate_z': [1, 0, 1, 0, InteractiveMarkerControl.ROTATE_AXIS],
    'move_z':   [1, 0, 1, 0, InteractiveMarkerControl.MOVE_AXIS],
    'rotate_y': [1, 0, 0, 1, InteractiveMarkerControl.ROTATE_AXIS],
    'move_y':   [1, 0, 0, 1, InteractiveMarkerControl.MOVE_AXIS],
}
def default_callback(feedback):
    """Fallback feedback handler: just dump the feedback message.

    Uses the print() function form (works on both Python 2 and 3); the
    original Python 2 ``print feedback`` statement is a syntax error on 3.
    """
    print(feedback)
class InteractiveGenerator:
    """Convenience wrapper around InteractiveMarkerServer for building
    interactive markers with standard move/rotate controls."""

    def __init__(self, name="interactive_markers"):
        self.server = InteractiveMarkerServer(name)
        self.mg = MarkerGenerator()
        self.mg.type = 1
        self.mg.scale = [.25] * 3
        self.c = 0          # counter for auto-generated marker names
        self.markers = {}   # name -> InteractiveMarker inserted so far

    def makeMarker(self, callback=None, marker=None, pose=None, controls=(),
                   fixed=False, name=None, frame="/map", description="",
                   imode=0, rot=None):
        """Insert one interactive marker into the server.

        callback: feedback handler (default: print the feedback)
        marker:   visual marker (default: one from the generator)
        pose:     [x, y, z] position (default origin)
        controls: iterable of TYPEDATA keys adding move/rotate handles
        fixed:    if True, control axes stay fixed in the parent frame
        rot:      [x, y, z, w] orientation (default identity)

        FIX: ``pose``/``rot`` previously used mutable list defaults, which
        are shared across calls; None sentinels keep the same effective
        defaults without that hazard.
        """
        if pose is None:
            pose = [0, 0, 0]
        if rot is None:
            rot = [0, 0, 0, 1]
        if marker is None:
            marker = self.mg.marker()
        if callback is None:
            callback = default_callback
        if name is None:
            name = "control%d" % self.c
            self.c += 1

        int_marker = InteractiveMarker()
        int_marker.header.frame_id = frame
        int_marker.pose.position.x = pose[0]
        int_marker.pose.position.y = pose[1]
        int_marker.pose.position.z = pose[2]
        int_marker.pose.orientation.x = rot[0]
        int_marker.pose.orientation.y = rot[1]
        int_marker.pose.orientation.z = rot[2]
        int_marker.pose.orientation.w = rot[3]
        int_marker.scale = 1
        int_marker.name = name
        int_marker.description = description

        # Base control carrying the visible marker itself.
        control = InteractiveMarkerControl()
        control.always_visible = True
        control.interaction_mode = imode
        control.markers.append(marker)
        int_marker.controls.append(control)

        # One additional control per requested move/rotate axis.
        for control_name in controls:
            data = TYPEDATA[control_name]
            control = InteractiveMarkerControl()
            control.orientation.w = data[0]
            control.orientation.x = data[1]
            control.orientation.y = data[2]
            control.orientation.z = data[3]
            control.name = control_name
            control.interaction_mode = data[4]
            if fixed:
                control.orientation_mode = InteractiveMarkerControl.FIXED
            int_marker.controls.append(control)

        self.server.insert(int_marker, callback)
        self.markers[name] = int_marker
        self.server.applyChanges()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/src/easy_markers/__init__.py | people/wu_ros_tools/easy_markers/src/easy_markers/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/kalman_filter/setup.py | people/wu_ros_tools/kalman_filter/setup.py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# Standard catkin boilerplate: package metadata comes from package.xml.
setup_args = generate_distutils_setup(
    packages=['kalman_filter'],
    package_dir={'': 'src'},
)
setup(**setup_args)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/kalman_filter/src/kalman_filter/__init__.py | people/wu_ros_tools/kalman_filter/src/kalman_filter/__init__.py | #!/usr/bin/python
class Kalman:
    """Minimal Kalman filter treating each vector dimension independently.

    Q: process-noise variance, R: measurement-noise variance,
    P: initial estimate variance for every dimension.
    """

    def __init__(self, Q=.002, R=1.0, P=.01):
        self.x = None   # current estimate (list); None until first update
        self.Q = Q
        self.R = R
        self.P = P
        self.p = None   # per-dimension estimate variance

    def update(self, values):
        """Fold one measurement vector into the running estimate.

        FIX: the first measurement is now *copied* — the original stored a
        reference to the caller's list (``self.x = values``) and then
        mutated it in place on later updates.
        """
        N = len(values)
        if self.x is None:
            self.x = list(values)
            self.p = [self.P] * N
        else:
            for i in range(N):
                self.p[i] += self.Q                       # predict: variance grows
                k = self.p[i] / (self.p[i] + self.R)      # Kalman gain
                self.x[i] += k * (values[i] - self.x[i])  # correct estimate
                self.p[i] = (1 - k) * self.p[i]           # shrink variance

    def values(self):
        """Return the current estimate (None before any update)."""
        return self.x
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/people_velocity_tracker/scripts/tracker.py | people/people_velocity_tracker/scripts/tracker.py | #!/usr/bin/python
import roslib; roslib.load_manifest('people_velocity_tracker')
import rospy
import tf
import math
from geometry_msgs.msg import Point, Vector3
from geometry_msgs.msg import Twist, TwistWithCovariance
from people_msgs.msg import PositionMeasurementArray, Person, People
from easy_markers.generator import MarkerGenerator, Marker
from kalman_filter import Kalman
def distance(leg1, leg2):
    """Euclidean distance between two point-like objects (x/y/z attrs)."""
    dx = leg1.x - leg2.x
    dy = leg1.y - leg2.y
    dz = leg1.z - leg2.z
    return math.sqrt(dx * dx + dy * dy + dz * dz)
def average(leg1, leg2):
    """Midpoint of two point-like objects, returned as a Point."""
    mid_x = (leg1.x + leg2.x) / 2
    mid_y = (leg1.y + leg2.y) / 2
    mid_z = (leg1.z + leg2.z) / 2
    return Point(mid_x, mid_y, mid_z)
def add(v1, v2):
    """Component-wise sum of two vectors, as a new Vector3."""
    return Vector3(v2.x + v1.x, v2.y + v1.y, v2.z + v1.z)
def subtract(v1, v2):
    """Component-wise difference v1 - v2, as a new Vector3."""
    return Vector3(v1.x - v2.x,
                   v1.y - v2.y,
                   v1.z - v2.z)
def scale(v, s):
    """Multiply vector *v* in place by scalar *s* (returns None)."""
    for axis in ('x', 'y', 'z'):
        setattr(v, axis, getattr(v, axis) * s)
def printv(v):
    """Debug-print a vector's components on one line, no newline.

    FIX: the original Python 2 ``print "..." % (...),`` statement (trailing
    comma suppresses the newline) is a syntax error under Python 3; a plain
    ``sys.stdout.write`` with a trailing space is version-agnostic and
    keeps the output stream-appendable.
    """
    import sys
    sys.stdout.write("%.2f %.2f %.2f " % (v.x, v.y, v.z))
class PersonEstimate(object):
    """Tracks one measured person and estimates their velocity with a
    per-axis Kalman filter."""

    def __init__(self, msg):
        self.pos = msg            # latest position measurement message
        self.reliability = 0.1
        self.k = Kalman()

    def update(self, msg):
        """Fold in a new measurement; update the velocity estimate."""
        last = self.pos
        self.pos = msg
        self.reliability = max(self.reliability, msg.reliability)
        # Instantaneous velocity = displacement / elapsed time.
        ivel = subtract(self.pos.pos, last.pos)
        dt = (self.pos.header.stamp - last.header.stamp).to_sec()
        scale(ivel, 1.0 / dt)
        self.k.update([ivel.x, ivel.y, ivel.z])

    def age(self):
        # Timestamp of the most recent measurement.
        return self.pos.header.stamp

    def id(self):
        return self.pos.object_id

    def velocity(self):
        """Current filtered velocity (zero vector before any update)."""
        k = self.k.values()
        # FIX: was `k == None`; identity comparison is the correct idiom
        # and avoids invoking __eq__ on arbitrary values.
        if k is None:
            return Vector3()
        return Vector3(k[0], k[1], k[2])

    def publish_markers(self, pub):
        """Publish an arrow marker from the position along the velocity.

        NOTE(review): relies on the module-level `gen` MarkerGenerator
        created in the script's __main__ section.
        """
        gen.scale = [.1, .3, 0]
        gen.color = [1, 1, 1, 1]
        vel = self.velocity()
        m = gen.marker(points=[self.pos.pos, add(self.pos.pos, vel)])
        m.header = self.pos.header
        pub.publish(m)

    def get_person(self):
        """Return (frame_id, Person message) for the /people topic."""
        p = Person()
        p.name = self.id()
        p.position = self.pos.pos
        p.velocity = self.velocity()
        p.reliability = self.reliability
        return self.pos.header.frame_id, p
class VelocityTracker(object):
    """Aggregates people_tracker measurements into a /people topic with
    per-person filtered velocities plus visualization markers."""

    def __init__(self):
        self.people = {}    # object_id -> PersonEstimate
        self.robot_vel_on_map = Twist()
        self.TIMEOUT = rospy.Duration(rospy.get_param('~timeout', 1.0))
        self.sub = rospy.Subscriber('/people_tracker_measurements',
                                    PositionMeasurementArray,
                                    self.pm_cb)
        self.mpub = rospy.Publisher('/visualization_marker',
                                    Marker,
                                    queue_size=10)
        self.ppub = rospy.Publisher('/people',
                                    People,
                                    queue_size=10)

    def pm_cb(self, msg):
        """Create or update a PersonEstimate for each measurement."""
        for pm in msg.people:
            if pm.object_id in self.people:
                self.people[pm.object_id].update(pm)
            else:
                self.people[pm.object_id] = PersonEstimate(pm)

    def spin(self):
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            # Drop people older than the timeout param.
            # FIX: iterate a snapshot — deleting from a dict while
            # iterating its live values view raises RuntimeError on
            # Python 3 (and is fragile in general).
            now = rospy.Time.now()
            for p in list(self.people.values()):
                if now - p.age() > self.TIMEOUT:
                    del self.people[p.id()]
            self.publish()
            rate.sleep()

    def publish(self):
        gen.counter = 0
        pl = People()
        # Overwritten below with the frame of the last person published.
        pl.header.frame_id = None
        for p in self.people.values():
            p.publish_markers(self.mpub)
            frame, person = p.get_person()
            pl.header.frame_id = frame
            pl.people.append(person)
        self.ppub.publish(pl)
if __name__ == '__main__':
    # Module-level generator shared by PersonEstimate/VelocityTracker.
    gen = MarkerGenerator()
    gen.type = Marker.ARROW
    gen.ns = 'people_velocities'
    gen.lifetime = .5
    rospy.init_node("people_velocity_tracker")
    vt = VelocityTracker()
    vt.spin()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/people_velocity_tracker/scripts/static.py | people/people_velocity_tracker/scripts/static.py | #!/usr/bin/python
import roslib; roslib.load_manifest('people_velocity_tracker')
import rospy
import sys
from people_msgs.msg import Person, People
class VelocityTracker(object):
    """Publishes one static person, with position/velocity from argv."""

    def __init__(self):
        self.ppub = rospy.Publisher('/people', People, queue_size=10)

    def spin(self):
        """Publish the same Person message at 10 Hz until shutdown."""
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            person = Person()
            msg = People()
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = '/base_link'
            # Position x/y and velocity x/y come from the command line.
            person.position.x = float(sys.argv[1])
            person.position.y = float(sys.argv[2])
            person.position.z = .5
            person.velocity.x = float(sys.argv[3])
            person.velocity.y = float(sys.argv[4])
            person.name = 'asdf'
            person.reliability = .90
            msg.people.append(person)
            self.ppub.publish(msg)
            rate.sleep()
# Script entry: static people publisher.
rospy.init_node("people_velocity_tracker")
vt = VelocityTracker()
vt.spin()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/face_detector/scripts/face_detector_action_client.py | people/face_detector/scripts/face_detector_action_client.py | #! /usr/bin/env python
import roslib; roslib.load_manifest('face_detector')
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the messages used by the face_detector action, including the
# goal message and the result message.
import face_detector.msg
def face_detector_client():
    """Send an empty FaceDetectorGoal and block until the result arrives.

    Returns the FaceDetectorResult reported by the action server.
    """
    client = actionlib.SimpleActionClient(
        'face_detector_action', face_detector.msg.FaceDetectorAction)
    # Block until the action server is up and listening for goals.
    client.wait_for_server()
    # Empty goal: the action takes no parameters.
    client.send_goal(face_detector.msg.FaceDetectorGoal())
    # Wait for the server to finish performing the action.
    client.wait_for_result()
    return client.get_result()
if __name__ == '__main__':
    try:
        # Initializes a rospy node so that the SimpleActionClient can
        # publish and subscribe over ROS.
        rospy.init_node('face_detector_action_client')
        result = face_detector_client()
        # FIX: print() function form (Python 2 print statements break
        # under Python 3).
        print("Done action")
    except rospy.ROSInterruptException:
        print("Program interrupted before completion")
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/laser_filters/test/fake_laser.py | laser_filters/test/fake_laser.py | #!/usr/bin/python
PKG = 'laser_filters' # this package name
import roslib; roslib.load_manifest(PKG)
import rospy
from sensor_msgs.msg import LaserScan
from Numeric import ones
def laser_test():
    """Publish a fixed fake LaserScan on 'laser_scan' at 10 Hz."""
    pub = rospy.Publisher('laser_scan', LaserScan)
    rospy.init_node('laser_test')
    scan = LaserScan()
    scan.header.frame_id = 'laser'
    scan.angle_min = -1.5
    scan.angle_max = 1.5
    scan.angle_increment = 0.1
    scan.time_increment = 0.1
    scan.scan_time = 0.1
    scan.range_min = 0.5
    scan.range_max = 1.5
    # Mostly 1.0 m returns with three outliers for exercising filters.
    scan.ranges = [1.0] * 30
    scan.ranges[6] = 9.0
    scan.ranges[15] = 0.1
    scan.ranges[28] = 5.0
    # Intensities deliberately alias the ranges list, as before.
    scan.intensities = scan.ranges
    rate = rospy.Rate(10)  # 10 Hz
    while not rospy.is_shutdown():
        scan.header.stamp = rospy.get_rostime()
        pub.publish(scan)
        rate.sleep()


if __name__ == '__main__':
    try:
        laser_test()
    except rospy.ROSInterruptException:
        pass
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/Python-RVO2/setup.py | Python-RVO2/setup.py | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from Cython.Build import cythonize
class BuildRvo2Ext(_build_ext):
    """build_ext subclass that builds the RVO2 C++ library first."""

    def run(self):
        import os
        import os.path
        import subprocess

        # Configure and build the bundled RVO2 CMake project so the
        # Cython extension can link against libRVO.
        build_dir = os.path.abspath('build/RVO2')
        if not os.path.exists(build_dir):
            os.makedirs(build_dir)
        subprocess.check_call(
            ['cmake', '../..', '-DCMAKE_CXX_FLAGS=-fPIC'], cwd=build_dir)
        subprocess.check_call(['cmake', '--build', '.'], cwd=build_dir)
        # Then let setuptools build the extension itself.
        _build_ext.run(self)
# Cython extension linking against the RVO2 library built by BuildRvo2Ext.
extensions = [
    Extension(
        'rvo2',
        ['src/*.pyx'],
        include_dirs=['src'],
        libraries=['RVO'],
        library_dirs=['build/RVO2/src'],
        extra_compile_args=['-fPIC'],
    ),
]

setup(
    name="pyrvo2",
    ext_modules=cythonize(extensions),
    cmdclass={'build_ext': BuildRvo2Ext},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Information Technology',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Cython',
        'Topic :: Games/Entertainment :: Simulation',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/Python-RVO2/example.py | Python-RVO2/example.py | #!/usr/bin/env python
import rvo2

# Time step 1/60 s; remaining args are the simulator-wide defaults:
# neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed.
sim = rvo2.PyRVOSimulator(1/60., 1.5, 5, 1.5, 2, 0.4, 2)

# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1), 1.5, 5, 1.5, 2, 0.4, 2, (0, 0))

# Obstacles are also supported.
o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1)])
sim.processObstacles()

# Each agent heads diagonally toward the opposite corner.
for agent_no, pref in ((a0, (1, 1)), (a1, (-1, 1)),
                       (a2, (-1, -1)), (a3, (1, -1))):
    sim.setAgentPrefVelocity(agent_no, pref)

print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))
print('Running simulation')

for step in range(20):
    sim.doStep()
    positions = ['(%5.3f, %5.3f)' % sim.getAgentPosition(agent_no)
                 for agent_no in (a0, a1, a2, a3)]
    print('step=%2i t=%.3f %s' % (step, sim.getGlobalTime(), ' '.join(positions)))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/base_local_planner/setup.py | navigation/base_local_planner/setup.py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# Catkin boilerplate: package metadata is sourced from package.xml.
setup(**generate_distutils_setup(
    packages=['local_planner_limits'],
    package_dir={'': 'src'},
))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/base_local_planner/src/local_planner_limits/__init__.py | navigation/base_local_planner/src/local_planner_limits/__init__.py | # Generic set of parameters to use with base local planners
# To use:
#
# from local_planner_limits import add_generic_localplanner_params
# gen = ParameterGenerator()
# add_generic_localplanner_params(gen)
# ...
#
# Using these standard parameters instead of your own allows easier switching of local planners
# need this only for dataype declarations
from dynamic_reconfigure.parameter_generator_catkin import double_t, bool_t
def add_generic_localplanner_params(gen):
    """Register the generic local-planner limit parameters on *gen*.

    Using this shared set instead of per-planner copies makes switching
    local planners easier.  Each tuple is (name, type, level, description,
    default, extra positional min/max args).
    """
    params = (
        # velocities
        ("max_vel_trans", double_t, 0, "The absolute value of the maximum translational velocity for the robot in m/s", 0.55, (0,)),
        ("min_vel_trans", double_t, 0, "The absolute value of the minimum translational velocity for the robot in m/s", 0.1, (0,)),
        ("max_vel_x", double_t, 0, "The maximum x velocity for the robot in m/s", 0.55, ()),
        ("min_vel_x", double_t, 0, "The minimum x velocity for the robot in m/s", 0.0, ()),
        ("max_vel_y", double_t, 0, "The maximum y velocity for the robot in m/s", 0.1, ()),
        ("min_vel_y", double_t, 0, "The minimum y velocity for the robot in m/s", -0.1, ()),
        ("max_vel_theta", double_t, 0, "The absolute value of the maximum rotational velocity for the robot in rad/s", 1.0, (0,)),
        ("min_vel_theta", double_t, 0, "The absolute value of the minimum rotational velocity for the robot in rad/s", 0.4, (0,)),
        # acceleration
        ("acc_lim_x", double_t, 0, "The acceleration limit of the robot in the x direction", 2.5, (0, 20.0)),
        ("acc_lim_y", double_t, 0, "The acceleration limit of the robot in the y direction", 2.5, (0, 20.0)),
        ("acc_lim_theta", double_t, 0, "The acceleration limit of the robot in the theta direction", 3.2, (0, 20.0)),
        ("acc_lim_trans", double_t, 0, "The absolute value of the maximum translational acceleration for the robot in m/s^2", 0.1, (0,)),
        ("prune_plan", bool_t, 0, "Start following closest point of global plan, not first point (if different).", False, ()),
        ("xy_goal_tolerance", double_t, 0, "Within what maximum distance we consider the robot to be in goal", 0.1, ()),
        ("yaw_goal_tolerance", double_t, 0, "Within what maximum angle difference we consider the robot to face goal direction", 0.1, ()),
        ("trans_stopped_vel", double_t, 0, "Below what maximum velocity we consider the robot to be stopped in translation", 0.1, ()),
        ("theta_stopped_vel", double_t, 0, "Below what maximum rotation velocity we consider the robot to be stopped in rotation", 0.1, ()),
    )
    for name, ptype, level, desc, default, extra in params:
        gen.add(name, ptype, level, desc, default, *extra)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/amcl/test/basic_localization.py | navigation/amcl/test/basic_localization.py | #!/usr/bin/env python
import sys
import time
from math import fmod, pi
import unittest
import rospy
import rostest
from tf2_msgs.msg import TFMessage
import PyKDL
from std_srvs.srv import Empty
class TestBasicLocalization(unittest.TestCase):
    """End-to-end amcl localization check; targets/tolerances from argv."""

    def setUp(self):
        self.tf = None        # latest map transform seen on /tf
        self.target_x = None
        self.target_y = None
        self.target_a = None

    def tf_cb(self, msg):
        """Record the latest 'map' transform and log progress.

        FIX: Python 2 print statements replaced with the print() function
        so the module also parses under Python 3.
        """
        for t in msg.transforms:
            if t.header.frame_id == 'map':
                self.tf = t.transform
                (a_curr, a_diff) = self.compute_angle_diff()
                print('Curr:\t %16.6f %16.6f %16.6f' % (self.tf.translation.x, self.tf.translation.y, a_curr))
                print('Target:\t %16.6f %16.6f %16.6f' % (self.target_x, self.target_y, self.target_a))
                print('Diff:\t %16.6f %16.6f %16.6f' % (
                    abs(self.tf.translation.x - self.target_x), abs(self.tf.translation.y - self.target_y), a_diff))

    def compute_angle_diff(self):
        """Return (current yaw, |difference from target yaw| wrapped to pi)."""
        rot = self.tf.rotation
        a = PyKDL.Rotation.Quaternion(rot.x, rot.y, rot.z, rot.w).GetRPY()[2]
        d_a = self.target_a
        # fmod(... + 5*pi, 2*pi) - pi wraps the difference into [-pi, pi).
        return (a, abs(fmod(a - d_a + 5*pi, 2*pi) - pi))

    def test_basic_localization(self):
        global_localization = int(sys.argv[1])
        self.target_x = float(sys.argv[2])
        self.target_y = float(sys.argv[3])
        self.target_a = float(sys.argv[4])
        tolerance_d = float(sys.argv[5])
        tolerance_a = float(sys.argv[6])
        target_time = float(sys.argv[7])
        if global_localization == 1:
            rospy.wait_for_service('global_localization')
            global_localization = rospy.ServiceProxy('global_localization', Empty)
            global_localization()
        rospy.init_node('test', anonymous=True)
        # Wait for initial time publication.
        while rospy.rostime.get_time() == 0.0:
            time.sleep(0.1)
        start_time = rospy.rostime.get_time()
        # TODO: This should be replace by a pytf listener
        rospy.Subscriber('/tf', TFMessage, self.tf_cb)
        # Let amcl run until the target wall-clock duration has elapsed.
        while (rospy.rostime.get_time() - start_time) < target_time:
            time.sleep(0.1)
        (a_curr, a_diff) = self.compute_angle_diff()
        print('Curr:\t %16.6f %16.6f %16.6f' % (self.tf.translation.x, self.tf.translation.y, a_curr))
        print('Target:\t %16.6f %16.6f %16.6f' % (self.target_x, self.target_y, self.target_a))
        print('Diff:\t %16.6f %16.6f %16.6f' % (
            abs(self.tf.translation.x - self.target_x), abs(self.tf.translation.y - self.target_y), a_diff))
        self.assertNotEquals(self.tf, None)
        self.assertTrue(abs(self.tf.translation.x - self.target_x) <= tolerance_d)
        self.assertTrue(abs(self.tf.translation.y - self.target_y) <= tolerance_d)
        self.assertTrue(a_diff <= tolerance_a)
if __name__ == '__main__':
    # Run under rostest with the amcl localization test id.
    rostest.run('amcl', 'amcl_localization', TestBasicLocalization, sys.argv)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/amcl/test/set_pose.py | navigation/amcl/test/set_pose.py | #!/usr/bin/env python
import rospy
import math
import PyKDL
from geometry_msgs.msg import PoseWithCovarianceStamped
class PoseSetter(rospy.SubscribeListener):
    """Publishes an initial pose estimate as soon as a peer subscribes."""

    def __init__(self, pose, stamp, publish_time):
        self.pose = pose                  # (x, y, yaw)
        self.stamp = stamp                # header stamp for the message
        self.publish_time = publish_time  # earliest time to publish

    def peer_subscribe(self, topic_name, topic_publish, peer_publish):
        msg = PoseWithCovarianceStamped()
        msg.header.frame_id = "map"
        msg.header.stamp = self.stamp
        msg.pose.pose.position.x = self.pose[0]
        msg.pose.pose.position.y = self.pose[1]
        (msg.pose.pose.orientation.x,
         msg.pose.pose.orientation.y,
         msg.pose.pose.orientation.z,
         msg.pose.pose.orientation.w) = PyKDL.Rotation.RPY(0, 0, self.pose[2]).GetQuaternion()
        # Diagonal covariance: 0.5 m std-dev in x/y, pi/12 rad in yaw.
        msg.pose.covariance[6*0+0] = 0.5 * 0.5
        msg.pose.covariance[6*1+1] = 0.5 * 0.5
        msg.pose.covariance[6*3+3] = math.pi/12.0 * math.pi/12.0
        # Wait for the desired publish time before sending.
        while rospy.get_rostime() < self.publish_time:
            rospy.sleep(0.01)
        peer_publish(msg)
if __name__ == '__main__':
    # FIX: wrap map() in list() — under Python 3 map() is lazy, and
    # PoseSetter indexes pose[0..2].
    pose = list(map(float, rospy.myargv()[1:4]))
    t_stamp = rospy.Time()
    t_publish = rospy.Time()
    if len(rospy.myargv()) > 4:
        t_stamp = rospy.Time.from_sec(float(rospy.myargv()[4]))
    if len(rospy.myargv()) > 5:
        t_publish = rospy.Time.from_sec(float(rospy.myargv()[5]))
    rospy.init_node('pose_setter', anonymous=True)
    rospy.loginfo("Going to publish pose {} with stamp {} at {}".format(pose, t_stamp.to_sec(), t_publish.to_sec()))
    pub = rospy.Publisher("initialpose", PoseWithCovarianceStamped, PoseSetter(pose, stamp=t_stamp, publish_time=t_publish), queue_size=1)
    rospy.spin()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/map_server/test/consumer.py | navigation/map_server/test/consumer.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
PKG = 'static_map_server'
NAME = 'consumer'
import sys
import unittest
import time
import rospy
import rostest
from nav_msgs.srv import GetMap
class TestConsumer(unittest.TestCase):
    """Calls the static_map service once and checks that it answers."""

    def __init__(self, *args):
        super(TestConsumer, self).__init__(*args)
        self.success = False

    def callback(self, data):
        print(rospy.get_caller_id(), "I heard %s" % data.data)
        self.success = data.data and data.data.startswith('hello world')
        rospy.signal_shutdown('test done')

    def test_consumer(self):
        rospy.wait_for_service('static_map')
        map_service = rospy.ServiceProxy('static_map', GetMap)
        response = map_service()
        self.success = True
        print(response)
        # NOTE(review): loop is effectively dead — success is already True;
        # the timeout_t mentioned upstream never existed.
        while not rospy.is_shutdown() and not self.success:
            time.sleep(0.1)
        self.assert_(self.success)
        rospy.signal_shutdown('test done')
if __name__ == '__main__':
    # Run under rostest using the module-level package/test names.
    rostest.rosrun(PKG, NAME, TestConsumer, sys.argv)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/navigation/fake_localization/static_odom_broadcaster.py | navigation/fake_localization/static_odom_broadcaster.py | #!/usr/bin/python
#
# Similar to static_transform_broadcaster, this node constantly publishes
# static odometry information (Odometry msg and tf). This can be used
# with fake_localization to evaluate planning algorithms without running
# an actual robot with odometry or localization
#
# Author: Armin Hornung
# License: BSD
import rospy
import tf
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose, Quaternion, Point
def publishOdom():
    """Continuously publish an identity odometry message plus matching tf.

    Lets fake_localization run planning evaluations without a real robot.
    """
    rospy.init_node('fake_odom')
    base_frame_id = rospy.get_param("~base_frame_id", "base_link")
    odom_frame_id = rospy.get_param("~odom_frame_id", "odom")
    publish_frequency = rospy.get_param("~publish_frequency", 10.0)
    pub = rospy.Publisher('odom', Odometry)
    tf_pub = tf.TransformBroadcaster()
    # TODO: static pose could be made configurable (cmd.line or parameters)
    quat = tf.transformations.quaternion_from_euler(0, 0, 0)
    odom = Odometry()
    odom.header.frame_id = odom_frame_id
    odom.pose.pose = Pose(Point(0, 0, 0), Quaternion(*quat))
    rospy.loginfo("Publishing static odometry from \"%s\" to \"%s\"",
                  odom_frame_id, base_frame_id)
    rate = rospy.Rate(publish_frequency)
    while not rospy.is_shutdown():
        odom.header.stamp = rospy.Time.now()
        pub.publish(odom)
        tf_pub.sendTransform((0, 0, 0), quat,
                             odom.header.stamp, base_frame_id, odom_frame_id)
        rate.sleep()


if __name__ == '__main__':
    try:
        publishOdom()
    except rospy.ROSInterruptException:
        pass
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/turtlebot_apps/turtlebot_calibration/src/turtlebot_calibration/scan_to_angle.py | turtlebot_apps/turtlebot_calibration/src/turtlebot_calibration/scan_to_angle.py | #! /usr/bin/python
#***********************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Wim Meeussen
from __future__ import with_statement
import roslib; roslib.load_manifest('turtlebot_calibration')
import yaml
import rospy
from sensor_msgs.msg import LaserScan
from turtlebot_calibration.msg import ScanAngle
from math import *
class ScanToAngle:
    """Fits a line to the laser returns inside an angular window and
    publishes the resulting wall angle on 'scan_angle'."""

    def __init__(self):
        self.min_angle = rospy.get_param('min_angle', -0.3)
        self.max_angle = rospy.get_param('max_angle', 0.3)
        self.pub = rospy.Publisher('scan_angle', ScanAngle)
        self.sub = rospy.Subscriber('scan', LaserScan, self.scan_cb)

    def scan_cb(self, msg):
        """Least-squares line fit over the beams inside the window."""
        angle = msg.angle_min
        d_angle = msg.angle_increment
        sum_x = 0
        sum_y = 0
        sum_xx = 0
        sum_xy = 0
        num = 0
        for r in msg.ranges:
            # Only beams inside the window with a valid (finite) return.
            if self.min_angle < angle < self.max_angle and r < msg.range_max:
                x = sin(angle) * r
                y = cos(angle) * r
                sum_x += x
                sum_y += y
                sum_xx += x * x
                sum_xy += x * y
                num += 1
            angle += d_angle
        if num > 0:
            # Regression slope expressed as an angle via atan2(slope, 1).
            slope = (-sum_x * sum_y + num * sum_xy) / (num * sum_xx - sum_x * sum_x)
            res = ScanAngle()
            res.header = msg.header
            res.scan_angle = atan2(slope, 1)
            self.pub.publish(res)
        else:
            rospy.logerr("Please point me at a wall.")
def main():
    """Entry point: start the scan_to_angle node and process scans until shutdown."""
    rospy.init_node('scan_to_angle')
    converter = ScanToAngle()  # keeps the subscriber alive for the node's lifetime
    rospy.spin()
if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/turtlebot_apps/turtlebot_calibration/src/turtlebot_calibration/calibrate.py | turtlebot_apps/turtlebot_calibration/src/turtlebot_calibration/calibrate.py | #! /usr/bin/python
#***********************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Wim Meeussen
from __future__ import with_statement
import roslib; roslib.load_manifest('turtlebot_calibration')
import yaml
import PyKDL
import rospy
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from turtlebot_calibration.msg import ScanAngle
from math import *
import threading
import dynamic_reconfigure.client
import os
import subprocess
import yaml
def quat_to_angle(quat):
    """Return the yaw (rotation about Z, radians) of a quaternion message."""
    rotation = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)
    _, _, yaw = rotation.GetRPY()
    return yaw
def normalize_angle(angle):
    """Wrap *angle* (radians) into the interval [-pi, pi]."""
    wrapped = angle
    two_pi = 2.0 * pi
    while wrapped > pi:
        wrapped -= two_pi
    while wrapped < -pi:
        wrapped += two_pi
    return wrapped
class CalibrateRobot:
    """Drive the calibration routine: spin the robot a full turn and compare
    odometry/IMU yaw deltas against the wall angle measured by the laser fit.

    All sensor callbacks update shared state under self.lock.
    """
    def __init__(self):
        self.lock = threading.Lock()
        # gyro is optional; without it only odometry gets calibrated
        self.has_gyro = rospy.get_param("turtlebot_node/has_gyro")
        rospy.loginfo('has_gyro %s'%self.has_gyro)
        if self.has_gyro:
            self.sub_imu = rospy.Subscriber('imu', Imu, self.imu_cb)
        self.sub_odom = rospy.Subscriber('odom', Odometry, self.odom_cb)
        self.sub_scan = rospy.Subscriber('scan_angle', ScanAngle, self.scan_cb)
        self.cmd_pub = rospy.Publisher('cmd_vel', Twist)
        self.imu_time = rospy.Time()
        self.odom_time = rospy.Time()
        self.scan_time = rospy.Time()
        # params
        self.inital_wall_angle = rospy.get_param("inital_wall_angle", 0.1)
        self.imu_calibrate_time = rospy.get_param("imu_calibrate_time", 10.0)
        self.imu_angle = 0
        self.imu_time = rospy.Time.now()
        self.scan_angle = 0
        self.scan_time = rospy.Time.now()
        self.odom_angle = 0
        self.odom_time = rospy.Time.now()
    def calibrate(self, speed, imu_drift=0):
        """Rotate 360 degrees at *speed*; return (imu_ratio, odom_ratio).

        Ratios are sensor-measured rotation over laser-measured rotation;
        imu_ratio is None without a gyro.  Returns None (implicit) if ROS
        shuts down mid-turn -- callers unpacking the result should beware.
        """
        # rotate 360 degrees
        (imu_start_angle, odom_start_angle, scan_start_angle,
            imu_start_time, odom_start_time, scan_start_time) = self.sync_timestamps()
        last_angle = odom_start_angle
        turn_angle = 0
        while turn_angle < 2*pi:
            if rospy.is_shutdown():
                return
            cmd = Twist()
            cmd.angular.z = speed
            self.cmd_pub.publish(cmd)
            rospy.sleep(0.1)
            with self.lock:
                delta_angle = normalize_angle(self.odom_angle - last_angle)
            turn_angle += delta_angle
            last_angle = self.odom_angle
        self.cmd_pub.publish(Twist())
        (imu_end_angle, odom_end_angle, scan_end_angle,
            imu_end_time, odom_end_time, scan_end_time) = self.sync_timestamps()
        # the robot made a full revolution, so add 2*pi back onto the wrapped delta
        scan_delta = 2*pi + normalize_angle(scan_end_angle - scan_start_angle)
        odom_delta = 2*pi + normalize_angle(odom_end_angle - odom_start_angle)
        rospy.loginfo('Odom error: %f percent'%(100.0*((odom_delta/scan_delta)-1.0)))
        if self.has_gyro:
            # subtract the drift the gyro accumulated over the duration of the turn
            imu_delta = 2*pi + normalize_angle(imu_end_angle - imu_start_angle) - imu_drift*(imu_end_time - imu_start_time).to_sec()
            rospy.loginfo('Imu error: %f percent'%(100.0*((imu_delta/scan_delta)-1.0)))
            imu_result = imu_delta/scan_delta
        else:
            imu_result = None
        return (imu_result, odom_delta/scan_delta)
    def imu_drift(self):
        """Estimate gyro drift (rad/s) by holding still for imu_calibrate_time."""
        if not self.has_gyro:
            return 0
        # estimate imu drift
        rospy.loginfo('Estimating imu drift')
        (imu_start_angle, odom_start_angle, scan_start_angle,
            imu_start_time, odom_start_time, scan_start_time) = self.sync_timestamps()
        rospy.sleep(self.imu_calibrate_time)
        (imu_end_angle, odom_end_angle, scan_end_angle,
            imu_end_time, odom_end_time, scan_end_time) = self.sync_timestamps()
        imu_drift = normalize_angle(imu_end_angle - imu_start_angle) / ((imu_end_time - imu_start_time).to_sec())
        rospy.loginfo(' ... imu drift is %f degrees per second'%(imu_drift*180.0/pi))
        return imu_drift
    def align(self):
        """Rotate in place until the laser-fit wall angle is inside inital_wall_angle."""
        self.sync_timestamps()
        rospy.loginfo("Aligning base with wall")
        with self.lock:
            angle = self.scan_angle
        cmd = Twist()
        while angle < -self.inital_wall_angle or angle > self.inital_wall_angle:
            if rospy.is_shutdown():
                exit(0)
            # turn toward zero wall angle
            if angle > 0:
                cmd.angular.z = -0.3
            else:
                cmd.angular.z = 0.3
            self.cmd_pub.publish(cmd)
            rospy.sleep(0.05)
            with self.lock:
                angle = self.scan_angle
    def sync_timestamps(self, start_time=None):
        """Block until imu/odom/scan have all updated after *start_time*.

        Returns (imu_angle, odom_angle, scan_angle, imu_time, odom_time,
        scan_time); exits the process on ROS shutdown.
        """
        if not start_time:
            start_time = rospy.Time.now() + rospy.Duration(0.5)
        while not rospy.is_shutdown():
            rospy.sleep(0.3)
            with self.lock:
                if self.imu_time < start_time and self.has_gyro:
                    rospy.loginfo("Still waiting for imu")
                elif self.odom_time < start_time:
                    rospy.loginfo("Still waiting for odom")
                elif self.scan_time < start_time:
                    rospy.loginfo("Still waiting for scan")
                else:
                    return (self.imu_angle, self.odom_angle, self.scan_angle,
                            self.imu_time, self.odom_time, self.scan_time)
        exit(0)
    def imu_cb(self, msg):
        """Cache the latest IMU yaw and its stamp."""
        with self.lock:
            angle = quat_to_angle(msg.orientation)
            self.imu_angle = angle
            self.imu_time = msg.header.stamp
    def odom_cb(self, msg):
        """Cache the latest odometry yaw and its stamp."""
        with self.lock:
            angle = quat_to_angle(msg.pose.pose.orientation)
            self.odom_angle = angle
            self.odom_time = msg.header.stamp
    def scan_cb(self, msg):
        """Cache the latest wall angle from the laser line fit."""
        with self.lock:
            angle = msg.scan_angle
            self.scan_angle = angle
            self.scan_time = msg.header.stamp
def get_usb_to_serial_id():
    """Best-effort unique ID for the USB-to-serial converter on ttyUSB0.

    Resolves the sysfs path of /sys/class/tty/ttyUSB0 and concatenates the
    USB device's serial number with its vendor/product IDs.  Returns the
    ID string, or None when the device or its attributes cannot be read.
    """
    usbpath = subprocess.check_output("readlink -f /sys/class/tty/ttyUSB0", shell=True)
    usbpath = usbpath.strip()
    if len(usbpath) == 0:
        return None
    serialid = ""
    # Not every converter exposes these sysfs attributes, so read failures are
    # expected and ignored -- but catch only I/O errors; the original bare
    # 'except:' also swallowed SystemExit/KeyboardInterrupt.  'with' guarantees
    # the descriptor is closed even when read() raises (the original leaked it).
    try:
        with open(usbpath + "/../../../../serial", "r") as f:
            serialid = f.read().strip()
    except (IOError, OSError):
        pass
    try:
        with open(usbpath + "/../../../../idVendor", "r") as f:
            serialid += f.read().strip()
        with open(usbpath + "/../../../../idProduct", "r") as f:
            serialid += f.read().strip()
    except (IOError, OSError):
        pass
    if len(serialid.strip()) == 0:
        return None
    return serialid
def get_kinect_serial():
    """Return the Kinect's USB serial number via lsusb, or None if absent."""
    output = subprocess.check_output("lsusb -v -d 045e:02ae | grep Serial | awk '{print $3}'", shell=True)
    if len(output) == 0:
        return None
    return output.strip()
def getCurrentParams(drclient):
    """Read the three calibration parameters from the dynamic_reconfigure client.

    Returns a tuple: (gyro_scale_correction, odom_angular_scale_correction,
    gyro_measurement_range).
    """
    cfg = drclient.get_configuration()
    return (cfg['gyro_scale_correction'],
            cfg['odom_angular_scale_correction'],
            cfg['gyro_measurement_range'])
def writeParams(drclient, newparams):
    """Push *newparams* into the running turtlebot_node via dynamic_reconfigure."""
    drclient.update_configuration(newparams)
    rospy.loginfo("Automatically updated the params in the current running instance of ROS, no need to restart.")
def writeParamsToCalibrationFile(newparams):
    """Persist *newparams* into the per-robot YAML calibration file.

    The file lives under $ROS_HOME/turtlebot_create/ (default ~/.ros) and is
    named after the Kinect serial, falling back to the USB-serial converter
    ID.  Existing entries are preserved and merged with the new values.
    Returns silently when no device ID can be determined.
    """
    kinect_serial = get_kinect_serial()
    if kinect_serial is None:
        kinect_serial = get_usb_to_serial_id() # can't find a kinect, attempt to use the usb to serial convert's id as a backup
    if kinect_serial is None:
        return
    ros_home = os.environ.get('ROS_HOME')
    if ros_home is None:
        ros_home = "~/.ros"
    calib_dir = os.path.expanduser(ros_home +"/turtlebot_create/")
    calib_file = calib_dir +str(kinect_serial) + ".yaml"
    # if the file exists, load into a dict, update the new params, and then save
    if os.path.isfile(calib_file):
        # NOTE(review): yaml.load_all can construct arbitrary objects; the file
        # is locally generated, but yaml.safe_load_all would be safer.
        # 'with' closes the handle even if parsing raises (original leaked it),
        # and next(docs) works on Python 2 and 3 where docs.next() was 2.x-only.
        with open(calib_file, 'r') as f:
            docs = yaml.load_all(f)
            d = next(docs)
        for k, v in newparams.items():
            d[k] = v
        newparams = d
    try:
        os.makedirs(calib_dir)
    except OSError:
        # directory already exists (a genuine write failure surfaces below)
        pass
    with open(calib_file, 'w') as outfile:
        outfile.write( yaml.dump(newparams, default_flow_style=False) )
    rospy.loginfo("Saved the params to the calibration file: %s" % calib_file)
def writeParamsToLaunchFile(gyro, odom, gyro_range):
    """Rewrite the three calibration <param> lines in /etc/ros/distro/turtlebot.launch.

    Substitutes the gyro scale, odometry angular scale and gyro measurement
    range in place, keeping every other line.  Logs (rather than raises) when
    the launch file cannot be read or written -- e.g. missing system install
    or insufficient permissions.
    """
    try:
        # this is totally NOT the best way to solve this problem.
        # 'with' guarantees the handles are closed even on error (the original
        # leaked them), and the narrow except no longer swallows everything.
        patched = []
        with open("/etc/ros/distro/turtlebot.launch", "r") as f:
            for lines in f:
                if "turtlebot_node/gyro_scale_correction" in lines:
                    patched.append("  <param name=\"turtlebot_node/gyro_scale_correction\" value=\"%f\"/>\n" % gyro)
                elif "turtlebot_node/odom_angular_scale_correction" in lines:
                    patched.append("  <param name=\"turtlebot_node/odom_angular_scale_correction\" value=\"%f\"/>\n" % odom)
                elif "turtlebot_node/gyro_measurement_range" in lines:
                    patched.append("  <param name=\"turtlebot_node/gyro_measurement_range\" value=\"%f\"/>\n" % gyro_range)
                else:
                    patched.append(lines)
        # and... write!
        with open("/etc/ros/distro/turtlebot.launch", "w") as f:
            f.writelines(patched)
        rospy.loginfo("Automatically updated turtlebot.launch, please restart the turtlebot service.")
    except (IOError, OSError):
        # best-effort update: a missing/unwritable launch file is not fatal
        rospy.loginfo("Could not automatically update turtlebot.launch, please manually update it.")
def warnAboutGyroRange(drclient):
    """Remind the operator to verify the gyro range parameter before calibrating."""
    current = getCurrentParams(drclient)
    rospy.logwarn("***** If you have not manually set the gyro range parameter you must do so before running calibration. Cancel this run and see http://wiki.ros.org/turtlebot_calibration/Tutorials/Calibrate%20Odometry%20and%20Gyro")
    rospy.logwarn("******* turtlebot_node/gyro_measurement_range is currently set to: %d ******" % current[2])
def main():
    """Run the full calibration: estimate gyro drift, perform four timed
    turns at increasing speeds, then write the corrected scale factors to
    the running node, the launch file and the calibration YAML."""
    rospy.init_node('scan_to_angle')
    robot = CalibrateRobot()
    imu_res = 1.0
    drclient = dynamic_reconfigure.client.Client("turtlebot_node")
    warnAboutGyroRange(drclient)
    imu_drift = robot.imu_drift()
    imu_corr = []
    odom_corr = []
    # calibrate at several speeds and average the correction ratios
    for speed in (0.3, 0.7, 1.0, 1.5):
        robot.align()
        # NOTE(review): calibrate() returns None on mid-turn shutdown, which
        # would make this unpacking raise TypeError.
        (imu, odom) = robot.calibrate(speed, imu_drift)
        if imu:
            imu_corr.append(imu)
        odom_corr.append(odom)
    (prev_gyro, prev_odom, gyro_range) = getCurrentParams(drclient)
    if len(imu_corr)>0:
        # new scale = old scale / mean measured ratio
        imu_res = prev_gyro * (1.0/(sum(imu_corr)/len(imu_corr)))
        rospy.loginfo("Set the 'turtlebot_node/gyro_scale_correction' parameter to %f"%imu_res)
    odom_res = prev_odom * (1.0/(sum(odom_corr)/len(odom_corr)))
    rospy.loginfo("Set the 'turtlebot_node/odom_angular_scale_correction' parameter to %f"%odom_res)
    writeParamsToLaunchFile(imu_res, odom_res, gyro_range)
    newparams = {'gyro_scale_correction' : imu_res, 'odom_angular_scale_correction' : odom_res, 'gyro_measurement_range' : gyro_range}
    writeParamsToCalibrationFile(newparams)
    writeParams(drclient, newparams)
if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/turtlebot_apps/turtlebot_actions/scripts/test_move.py | turtlebot_apps/turtlebot_actions/scripts/test_move.py | #!/usr/bin/env python
import roslib
roslib.load_manifest('turtlebot_actions')
import rospy
import os
import sys
import time
import math
from turtlebot_actions.msg import *
from actionlib_msgs.msg import *
import actionlib
'''
Very simple move action test - commands the robot to turn 45 degrees and travel 0.5 metres forward.
'''
def main():
    """Send one TurtlebotMove goal (turn -45 degrees, drive 0.5 m) and report the outcome."""
    rospy.init_node("test_move_action_client")
    rospy.loginfo("Starting action client...")
    client = actionlib.SimpleActionClient('turtlebot_move', TurtlebotMoveAction)
    client.wait_for_server()
    rospy.loginfo("Action client connected to action server.")
    rospy.loginfo("Calling the action server...")
    goal = TurtlebotMoveGoal()
    goal.turn_distance = -math.pi/4.0  # radians
    goal.forward_distance = 0.5  # metres
    status = client.send_goal_and_wait(goal, rospy.Duration(50.0), rospy.Duration(50.0))
    if status == GoalStatus.SUCCEEDED:
        rospy.loginfo('Call to action server succeeded')
    else:
        rospy.logerr('Call to action server failed')
if __name__ == "__main__":
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/turtlebot_apps/turtlebot_actions/scripts/test_fiducial.py | turtlebot_apps/turtlebot_actions/scripts/test_fiducial.py | #!/usr/bin/env python
import roslib
roslib.load_manifest('turtlebot_actions')
import rospy
import os
import sys
import time
from turtlebot_actions.msg import *
from actionlib_msgs.msg import *
import actionlib
def main():
    """Request detection of a 7x6 chessboard fiducial and report the outcome."""
    rospy.init_node("find_fiducial_pose_test")
    rospy.loginfo("Starting action client...")
    client = actionlib.SimpleActionClient('find_fiducial_pose', FindFiducialAction)
    client.wait_for_server()
    rospy.loginfo("Action client connected to action server.")
    rospy.loginfo("Calling the action server...")
    goal = FindFiducialGoal()
    goal.camera_name = "/camera/rgb"
    goal.pattern_width = 7
    goal.pattern_height = 6
    goal.pattern_size = 0.027  # metres between corners
    goal.pattern_type = 1 # CHESSBOARD
    status = client.send_goal_and_wait(goal, rospy.Duration(50.0), rospy.Duration(50.0))
    if status == GoalStatus.SUCCEEDED:
        rospy.loginfo('Call to action server succeeded')
    else:
        rospy.logerr('Call to action server failed')
if __name__ == "__main__":
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/turtlebot_apps/turtlebot_follower/scripts/switch.py | turtlebot_apps/turtlebot_follower/scripts/switch.py | #!/usr/bin/env python
# license removed for brevity
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
# This script will listen for joystick button 5 being toggled and
# send zero speed messages to the mux to disable the follower until
# button 5 is pressed again.
class BehaviorSwitch(object):
    """Toggle the follower on/off with joystick button 5.

    While enabled, publishes zero-velocity Twist messages to the mux switch
    topic at 10 Hz so the follower's output is overridden.
    """
    def __init__(self):
        # start disabled; flipped by button presses
        self.running = False
    def callback(self, joy_msg):
        """Joy callback: flip the running flag on a button-5 press."""
        # Guard the index: joysticks with fewer than 6 buttons would
        # otherwise raise IndexError inside the subscriber thread.
        if len(joy_msg.buttons) > 5 and joy_msg.buttons[5] == 1:
            self.running = not self.running
        rospy.loginfo(repr(joy_msg))
    def run(self):
        """Node main loop: publish empty Twists while enabled."""
        rospy.init_node('behavior_switch', anonymous=True)
        pub = rospy.Publisher('cmd_vel_mux/input/switch', Twist, queue_size=10)
        rospy.Subscriber('joy', Joy, self.callback)
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self.running:
                empty_msg = Twist()
                pub.publish(empty_msg)
            rate.sleep()
if __name__ == '__main__':
    try:
        behavior_switch = BehaviorSwitch()
        behavior_switch.run()
    except rospy.ROSInterruptException:
        # normal shutdown (Ctrl-C / node kill) while sleeping -- nothing to do
        pass
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/scripts/sarl_original_node.py | sarl_star_ros/scripts/sarl_original_node.py | #!/usr/bin/python2.7
# Author: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import logging
import os
import torch
import numpy as np
from nav_msgs.msg import Odometry, OccupancyGrid
import configparser
import gym
import tf
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.state import ObservableState, FullState, JointState
import rospy
from geometry_msgs.msg import Point, Vector3, Twist, Pose, PoseStamped, PoseWithCovarianceStamped, TwistWithCovariance
from std_msgs.msg import Int32, ColorRGBA
from people_msgs.msg import Person, People
from visualization_msgs.msg import Marker, MarkerArray
HUMAN_RADIUS = 0.3
ROBOT_RADIUS = 0.3
ROBOT_V_PREF = 0.5
DISCOMFORT_DIST = 0.1
FAKE_HUMAN_PX = -1.7
FAKE_HUMAN_PY = 14.3
TIME_LIMIT = 120
GOAL_TOLERANCE = 0.5
def add(v1, v2):
    """Return the component-wise sum of two Vector3 messages."""
    sx = v1.x + v2.x
    sy = v1.y + v2.y
    sz = v1.z + v2.z
    return Vector3(sx, sy, sz)
class Robot(object):
    """Minimal kinematic robot model fed to the RL policy.

    Holds pose, velocity and goal on the map frame; radius and preferred
    speed come from the module-level constants.
    """
    def __init__(self):
        self.v_pref = ROBOT_V_PREF
        self.radius = ROBOT_RADIUS
        # pose, goal and velocity are unknown until set() is called
        self.px = None
        self.py = None
        self.gx = None
        self.gy = None
        self.vx = None
        self.vy = None
        self.theta = None
    def set(self, px, py, gx, gy, vx, vy, theta):
        """Refresh the full robot state in one call."""
        self.px, self.py = px, py
        self.gx, self.gy = gx, gy
        self.vx, self.vy = vx, vy
        self.theta = theta
    def get_full_state(self):
        """Pack the state into the FullState type consumed by the policy."""
        return FullState(self.px, self.py, self.vx, self.vy, self.radius,
                         self.gx, self.gy, self.v_pref, self.theta)
    def get_position(self):
        return self.px, self.py
    def get_goal_position(self):
        return self.gx, self.gy
    def reached_destination(self):
        """True when the robot is within GOAL_TOLERANCE of its goal."""
        offset = np.array(self.get_position()) - np.array(self.get_goal_position())
        return np.linalg.norm(offset) < GOAL_TOLERANCE
class Human(object):
    """Observed pedestrian with a fixed radius (HUMAN_RADIUS)."""
    def __init__(self, px, py, vx, vy):
        self.radius = HUMAN_RADIUS
        self.px, self.py = px, py
        self.vx, self.vy = vx, vy
    def get_observable_state(self):
        """Convert to the ObservableState type the policy expects."""
        return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)
class RobotAction(object):
    """ROS glue for the SARL policy: subscribes to localisation, odometry,
    people and costmap topics, publishes velocity commands plus RViz markers.

    planner() is driven from the main loop once goal/pose/obstacles have all
    arrived.  NOTE(review): callbacks reference the module-level 'robot',
    'policy' and 'listener_v' objects created in the __main__ block.
    """
    def __init__(self):
        # handshake flags: the main loop waits until goal/pose/obstacles arrive
        self.Is_goal_received = False
        self.IsAMCLReceived = False
        self.IsObReceived = False
        self.Is_gc_Received = False
        self.getStartPoint = False
        self.Is_goal_reached = False
        self.received_gx = None
        self.received_gy = None
        # robot pose/velocity/goal on the map frame
        self.px = None
        self.py = None
        self.vx = None
        self.vy = None
        self.gx = None
        self.gy = None
        self.v_pref = None
        self.theta = None
        self.humans = None
        self.ob = None
        self.state = None
        self.cmd_vel = Twist()
        self.plan_counter = 0
        self.num_pos = 0
        self.start_px = None
        self.start_py = None
        # subscribers
        self.robot_pose_sub = rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.update_robot_pos)
        self.robot_odom_sub = rospy.Subscriber('/odom', Odometry, self.robot_vel_on_map_calculator)
        self.people_sub = rospy.Subscriber('/people', People, self.update_humans)
        self.goal_sub = rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.get_goal_on_map)
        self.global_costmap_sub = rospy.Subscriber('/move_base/global_costmap/costmap', OccupancyGrid, self.get_gc)
        # publishers
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=1)
        self.goal_marker_pub = rospy.Publisher('/goal_marker', Marker, queue_size=1)
        self.action_marker_pub = rospy.Publisher('/action_marker', Marker, queue_size=1)
        self.trajectory_marker_pub = rospy.Publisher('/trajectory_marker', Marker, queue_size=1)
        self.vehicle_marker_pub = rospy.Publisher('/vehicle_marker', Marker, queue_size=1)
    def update_robot_pos(self, msg):
        """AMCL callback: cache the robot position and yaw on /map."""
        self.IsAMCLReceived = True
        self.num_pos += 1
        position = msg.pose.pose.position
        orientation = msg.pose.pose.orientation
        self.px = msg.pose.pose.position.x
        self.py = msg.pose.pose.position.y
        q = msg.pose.pose.orientation
        # yaw extracted directly from the quaternion
        self.theta = np.arctan2(2.0*(q.w*q.z + q.x*q.y), 1-2*(q.y*q.y+q.z*q.z)) # bounded by [-pi, pi]
        if not self.getStartPoint:
            rospy.loginfo("Start point is:(%s,%s)" % (self.px,self.py))
            self.getStartPoint = True
        self.visualize_trajectory(position, orientation)
    def robot_vel_on_map_calculator(self, msg):
        """Odometry callback: rotate the base-frame linear velocity into /map."""
        vel_linear = msg.twist.twist.linear
        # NOTE(review): uses module-level listener_v created in __main__; a
        # callback fired before it exists would raise NameError.
        listener_v.waitForTransform('/map', '/base_footprint', rospy.Time(), rospy.Duration(4))
        trans, rot = listener_v.lookupTransform('/map', '/base_footprint', rospy.Time())
        # rotate vector 'vel_linear' by quaternion 'rot'
        q1 = rot
        q2 = list()
        q2.append(vel_linear.x)
        q2.append(vel_linear.y)
        q2.append(vel_linear.z)
        q2.append(0.0)
        # v' = q * v * q_conjugate (treating v as a pure quaternion)
        output_vel = tf.transformations.quaternion_multiply(
            tf.transformations.quaternion_multiply(q1, q2),
            tf.transformations.quaternion_conjugate(q1)
        )[:3]
        self.vx = output_vel[0]
        self.vy = output_vel[1]
    def update_humans(self, msg):
        """People callback: rebuild the observable-state list for the policy."""
        # observable state: px,py,vx,vy,radius
        self.IsObReceived = True
        self.humans = list()
        self.ob = list()
        for p in msg.people:
            # dist = np.linalg.norm(np.array([self.px,self.py])-np.array([p.position.x,p.position.y]))
            human = Human(p.position.x, p.position.y, p.velocity.x, p.velocity.y)
            self.humans.append(human)
        for human in self.humans:
            self.ob.append(human.get_observable_state())
    def get_goal_on_map(self, msg):
        """RViz goal callback: store the requested goal position on /map."""
        self.Is_goal_received = True
        self.received_gx = msg.pose.position.x
        self.received_gy = msg.pose.position.y
    def get_gc(self, msg):
        """Costmap callback: hand the global costmap to the policy (only once)."""
        if not self.Is_gc_Received:
            policy.gc = msg.data
            policy.gc_resolution = msg.info.resolution
            policy.gc_width = msg.info.width
            policy.gc_ox = msg.info.origin.position.x
            policy.gc_oy = msg.info.origin.position.y
            # print(policy.gc_resolution, policy.gc_width, policy.gc_ox, policy.gc_oy)
            print("************ Global costmap is received. **************")
            self.Is_gc_Received = True
    def visualize_goal(self):
        """Publish a red cube marker at the current goal."""
        # red cube for goal
        marker = Marker()
        marker.header.frame_id = "/map"
        marker.header.stamp = rospy.Time.now()
        marker.ns = "goal"
        marker.id = 0
        marker.type = marker.CUBE
        marker.action = marker.ADD
        marker.pose.position.x = self.gx
        marker.pose.position.y = self.gy
        marker.pose.position.z = 0.2
        marker.scale = Vector3(x=0.1, y=0.1, z=0.1)
        marker.color = ColorRGBA(r=1.0, a=1.0)
        marker.lifetime = rospy.Duration()
        self.goal_marker_pub.publish(marker)
    def visualize_trajectory(self, position, orientation):
        """Publish one purple cylinder per pose update to trace the path."""
        # Purple track for robot trajectory over time
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = '/map'
        marker.ns = 'robot'
        marker.id = self.num_pos
        marker.type = marker.CYLINDER
        marker.action = marker.ADD
        marker.pose.position = position
        marker.pose.orientation = orientation
        marker.scale = Vector3(x=0.1, y=0.1, z=0.1)
        marker.color = ColorRGBA(r=0.5, b=0.8, a=1.0)
        marker.lifetime = rospy.Duration()
        self.trajectory_marker_pub.publish(marker)
    def visualize_action(self):
        """Publish a green arrow showing the commanded velocity direction."""
        robot_pos = Point(x=self.px, y=self.py, z=0)
        # project the (v, r) command one step ahead to get a map-frame vector
        next_theta = self.theta + self.cmd_vel.angular.z
        next_vx = self.cmd_vel.linear.x * np.cos(next_theta)
        next_vy = self.cmd_vel.linear.x * np.sin(next_theta)
        action = Vector3(x=next_vx, y=next_vy, z=0)
        # green arrow for action (command velocity)
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = "/map"
        marker.ns = "action"
        marker.id = 0
        marker.type = marker.ARROW
        marker.action = marker.ADD
        marker.points = [robot_pos, add(robot_pos, action)]
        marker.scale = Vector3(x=0.1, y=0.3, z=0)
        marker.color = ColorRGBA(g=1.0, a=1.0)
        marker.lifetime = rospy.Duration(0.5)
        self.action_marker_pub.publish(marker)
    def planner(self):
        """One planning step: update the robot model, query the policy,
        publish the resulting command velocity (zero once the goal is reached)."""
        # update robot
        robot.set(self.px, self.py, self.gx, self.gy, self.vx, self.vy, self.theta)
        dist_to_goal = np.linalg.norm(np.array(robot.get_position()) - np.array(robot.get_goal_position()))
        # compute command velocity
        if robot.reached_destination():
            self.cmd_vel.linear.x = 0
            self.cmd_vel.linear.y = 0
            self.cmd_vel.linear.z = 0
            self.cmd_vel.angular.x = 0
            self.cmd_vel.angular.y = 0
            self.cmd_vel.angular.z = 0
            self.Is_goal_reached = True
        else:
            """
            self state: FullState(px, py, vx, vy, radius, gx, gy, v_pref, theta)
            ob:[ObservableState(px1, py1, vx1, vy1, radius1),
                ObservableState(px1, py1, vx1, vy1, radius1),
                   .......
                ObservableState(pxn, pyn, vxn, vyn, radiusn)]
            """
            # with no detections, feed a fixed far-away placeholder human
            if len(self.ob)==0:
                self.ob = [ObservableState(FAKE_HUMAN_PX, FAKE_HUMAN_PY, 0, 0, HUMAN_RADIUS)]
            self.state = JointState(robot.get_full_state(), self.ob)
            action = policy.predict(self.state)  # max_action
            self.cmd_vel.linear.x = action.v
            self.cmd_vel.linear.y = 0
            self.cmd_vel.linear.z = 0
            self.cmd_vel.angular.x = 0
            self.cmd_vel.angular.y = 0
            self.cmd_vel.angular.z = action.r
        ########### for debug ##########
        # dist_to_goal = np.linalg.norm(np.array(robot.get_position()) - np.array(robot.get_goal_position()))
        # if self.plan_counter % 10 == 0:
        #     rospy.loginfo("robot position:(%s,%s)" % (self.px, self.py))
        #     rospy.loginfo("Distance to goal is %s" % dist_to_goal)
        #     rospy.loginfo("self state:\n %s" % self.state.self_state)
        #     for i in range(len(self.state.human_states)):
        #         rospy.loginfo("human %s :\n %s" % (i+1, self.state.human_states[i]))
        #     rospy.loginfo("%s-th action is planned: \n v: %s m/s \n r: %s rad/s"
        #                   % (self.plan_counter, self.cmd_vel.linear.x, self.cmd_vel.angular.z))
        # publish velocity
        self.cmd_vel_pub.publish(self.cmd_vel)
        self.plan_counter += 1
        self.visualize_action()
if __name__ == '__main__':
    begin_travel = False
    # set file dirs
    model_dir = '/home/likeyu/sarl_ws/src/sarl_star/CrowdNav/crowd_nav/data/output/'
    env_config_file = '/home/likeyu/sarl_ws/src/sarl_star/CrowdNav/crowd_nav/data/output/env.config'
    policy_config_file = '/home/likeyu/sarl_ws/src/sarl_star/CrowdNav/crowd_nav/data/output/policy.config'
    # prefer a resumed checkpoint when one exists
    if os.path.exists(os.path.join(model_dir, 'resumed_rl_model.pth')):
        model_weights = os.path.join(model_dir, 'resumed_rl_model.pth')
    else:
        model_weights = os.path.join(model_dir, 'rl_model.pth')
    # configure logging and device
    logging.basicConfig(level=logging.INFO, format='%(asctime)s, x%(levelname)s: %(message)s',
                        datefmt="%Y-%m-%d %H:%M:%S")
    device = torch.device("cpu")
    logging.info('Using device: %s', device)
    # configure RL policy
    policy = 'sarl'
    phase = 'test'
    env_config = configparser.RawConfigParser()
    env_config.read(env_config_file)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    env.discomfort_dist = DISCOMFORT_DIST
    policy = policy_factory[policy]()
    policy_config = configparser.RawConfigParser()
    policy_config.read(policy_config_file)
    policy.configure(policy_config)
    policy.with_costmap = True
    # use constant velocity model to predict next state
    policy.query_env = False
    policy.get_model().load_state_dict(torch.load(model_weights))
    policy.set_phase(phase)
    policy.set_device(device)
    policy.set_env(env)
    policy.time_step = 0.25
    policy.gc = []
    robot = Robot()
    try:
        rospy.init_node('sarl_original_node', anonymous=True)
        rate = rospy.Rate(4)  # 4 Hz, time_step=0.25
        robot_act = RobotAction()
        listener_v = tf.TransformListener()
        while not rospy.is_shutdown():
            if robot_act.Is_goal_reached:
                finish_travel_time = rospy.get_time()
                t = finish_travel_time - begin_travel_time
                rospy.loginfo("Goal is reached. Travel time: %s s." % t)
                break
            # wait for msgs of goal, AMCL and ob
            if robot_act.Is_goal_received and robot_act.IsAMCLReceived and robot_act.IsObReceived:
                # travel time
                if not begin_travel:
                    begin_travel_time = rospy.get_time()
                    begin_travel = True
                # update goal (gx,gy)
                robot_act.gx = robot_act.received_gx
                robot_act.gy = robot_act.received_gy
                robot_act.visualize_goal()
                robot_act.planner()
                finish_travel_time = rospy.get_time()
                t = finish_travel_time - begin_travel_time
                if t > TIME_LIMIT:
                    rospy.loginfo("Timeout. Travel time: %s s." % t)
                    break
            rate.sleep()
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax; it is
    # valid on Python 2.6+ as well, so the file stays parseable under Python 3.
    except rospy.ROSInterruptException as e:
        raise e
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/scripts/__init__.py | sarl_star_ros/scripts/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/scripts/sarl_star_node.py | sarl_star_ros/scripts/sarl_star_node.py | #!/usr/bin/python2.7
# Author: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import logging
import os
import torch
import numpy as np
from nav_msgs.msg import Odometry, OccupancyGrid
import configparser
import gym
import tf
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.state import ObservableState, FullState, JointState
import rospy
from geometry_msgs.msg import Point, Vector3, Twist, Pose, PoseStamped, PoseWithCovarianceStamped, TwistWithCovariance
from std_msgs.msg import Int32, ColorRGBA
from people_msgs.msg import Person, People
from visualization_msgs.msg import Marker, MarkerArray
HUMAN_RADIUS = 0.3
ROBOT_RADIUS = 0.3
ROBOT_V_PREF = 0.5
DISCOMFORT_DIST = 0.5
FAKE_HUMAN_PX = -1.7
FAKE_HUMAN_PY = 14.3
TIME_LIMIT = 120
GOAL_TOLERANCE = 0.6
def add(v1, v2):
    """Component-wise sum of two Vector3 messages."""
    return Vector3(v1.x + v2.x,
                   v1.y + v2.y,
                   v1.z + v2.z)
class Robot(object):
    """Kinematic robot model supplied to the RL policy.

    Stores map-frame pose, velocity and goal; radius and preferred speed
    come from the module-level constants.
    """
    def __init__(self):
        self.v_pref = ROBOT_V_PREF
        self.radius = ROBOT_RADIUS
        # state is populated later via set()
        self.px = None
        self.py = None
        self.gx = None
        self.gy = None
        self.vx = None
        self.vy = None
        self.theta = None
    def set(self, px, py, gx, gy, vx, vy, theta):
        """Replace the whole robot state at once."""
        self.px, self.py = px, py
        self.gx, self.gy = gx, gy
        self.vx, self.vy = vx, vy
        self.theta = theta
    def get_full_state(self):
        """Bundle the state into the policy's FullState type."""
        return FullState(self.px, self.py, self.vx, self.vy, self.radius,
                         self.gx, self.gy, self.v_pref, self.theta)
    def get_position(self):
        return self.px, self.py
    def get_goal_position(self):
        return self.gx, self.gy
    def reached_destination(self):
        # Euclidean distance || position - goal position || against the tolerance
        delta = np.array(self.get_position()) - np.array(self.get_goal_position())
        return np.linalg.norm(delta) < GOAL_TOLERANCE
class Human(object):
    """Tracked pedestrian with the fixed HUMAN_RADIUS footprint."""
    def __init__(self, px, py, vx, vy):
        self.radius = HUMAN_RADIUS
        self.px, self.py = px, py
        self.vx, self.vy = vx, vy
    def get_observable_state(self):
        """Return this human as the policy's ObservableState type."""
        return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)
class RobotAction(object):
    """SARL navigation node logic.

    Fuses AMCL pose, odometry, tracked people and local goals, runs the
    (module-level) SARL ``policy`` to compute a command velocity, and
    publishes the command plus RViz markers.

    NOTE(review): relies on module-level globals created in ``__main__``
    (``policy``, ``robot``, ``listener_v``, ``listener_g``) — instances must
    be created after those globals exist.
    """
    def __init__(self):
        # Input flags: flipped to True once the corresponding topic delivers.
        self.Is_lg_Received = False
        self.IsAMCLReceived = False
        self.IsObReceived = False
        self.Is_gc_Received = False
        self.getStartPoint = False
        # Goal-progress flags (local goal / global goal reached).
        self.Is_lg_Reached = False
        self.Is_gg_Reached = False
        # Latest local goal received on /local_goal (map frame).
        self.received_gx = None
        self.received_gy = None
        # Robot state on the map: position, velocity, current goal, heading.
        self.px = None
        self.py = None
        self.vx = None
        self.vy = None
        self.gx = None
        self.gy = None
        self.v_pref = None
        self.theta = None
        # Tracked humans and their observable states fed to the policy.
        self.humans = None
        self.ob = None
        self.state = None
        self.cmd_vel = Twist()
        # Counters used for marker ids and debug logging.
        self.plan_counter = 0
        self.num_pos = 0
        self.num_lg = 0
        self.start_px = None
        self.start_py = None
        # ROS subscribers
        self.robot_pose_sub = rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.update_robot_pos)
        self.robot_odom_sub = rospy.Subscriber('/odom', Odometry, self.robot_vel_on_map_calculator)
        self.people_sub = rospy.Subscriber('/people', People, self.update_humans)
        self.goal_sub = rospy.Subscriber('/local_goal', PoseStamped, self.get_goal_on_map)
        self.global_costmap_sub = rospy.Subscriber('/move_base/global_costmap/costmap', OccupancyGrid, self.get_gc)
        # ROS publishers
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=1)
        self.goal_marker_pub = rospy.Publisher('/goal_marker', Marker, queue_size=1)
        self.action_marker_pub = rospy.Publisher('/action_marker', Marker, queue_size=1)
        self.trajectory_marker_pub = rospy.Publisher('/trajectory_marker', Marker, queue_size=1)
        self.vehicle_marker_pub = rospy.Publisher('/vehicle_marker', Marker, queue_size=1)

    def update_robot_pos(self, msg):
        """AMCL callback: cache robot pose (px, py, theta) on the map and
        append a trajectory marker. Logs the start point once."""
        self.IsAMCLReceived = True
        self.num_pos += 1
        position = msg.pose.pose.position
        orientation = msg.pose.pose.orientation
        self.px = msg.pose.pose.position.x
        self.py = msg.pose.pose.position.y
        q = msg.pose.pose.orientation
        # Yaw extracted from the quaternion.
        self.theta = np.arctan2(2.0*(q.w*q.z + q.x*q.y), 1-2*(q.y*q.y+q.z*q.z)) # bounded by [-pi, pi]
        if not self.getStartPoint:
            rospy.loginfo("Start point is:(%s,%s)" % (self.px,self.py))
            self.getStartPoint = True
        self.visualize_trajectory(position, orientation)

    def robot_vel_on_map_calculator(self, msg):
        """Odometry callback: rotate the body-frame linear velocity into the
        /map frame using the current /map -> /base_footprint transform."""
        vel_linear = msg.twist.twist.linear
        listener_v.waitForTransform('/map', '/base_footprint', rospy.Time(0), rospy.Duration(10))
        trans, rot = listener_v.lookupTransform('/map', '/base_footprint', rospy.Time(0))
        # rotate vector 'vel_linear' by quaternion 'rot'
        q1 = rot
        # Velocity embedded as a pure quaternion (x, y, z, 0) for v' = q*v*q^-1.
        q2 = list()
        q2.append(vel_linear.x)
        q2.append(vel_linear.y)
        q2.append(vel_linear.z)
        q2.append(0.0)
        output_vel = tf.transformations.quaternion_multiply(
            tf.transformations.quaternion_multiply(q1, q2),
            tf.transformations.quaternion_conjugate(q1)
        )[:3]
        self.vx = output_vel[0]
        self.vy = output_vel[1]

    def update_humans(self, msg):
        """/people callback: rebuild the human list and their observable
        states used as policy input."""
        # observable state: px,py,vx,vy,radius
        self.IsObReceived = True
        self.humans = list()
        self.ob = list()
        for p in msg.people:
            # dist = np.linalg.norm(np.array([self.px,self.py])-np.array([p.position.x,p.position.y]))
            human = Human(p.position.x, p.position.y, p.velocity.x, p.velocity.y)
            self.humans.append(human)
        for human in self.humans:
            self.ob.append(human.get_observable_state())

    def get_goal_on_map(self, msg):
        """/local_goal callback: transform the goal pose into /map and cache
        it. NOTE(review): waits on /map -> /odom, i.e. it assumes the incoming
        pose is stamped in /odom — confirm against the publisher."""
        self.Is_lg_Received = True
        listener_g.waitForTransform('/map', '/odom', rospy.Time(0), rospy.Duration(10))
        tfmsg = listener_g.transformPose("/map", msg)
        self.received_gx = tfmsg.pose.position.x
        self.received_gy = tfmsg.pose.position.y

    def get_gc(self, msg):
        """Global-costmap callback: copy grid data and geometry into the
        module-level policy exactly once (first message wins)."""
        if not self.Is_gc_Received:
            policy.gc = msg.data
            policy.gc_resolution = msg.info.resolution
            policy.gc_width = msg.info.width
            policy.gc_ox = msg.info.origin.position.x
            policy.gc_oy = msg.info.origin.position.y
            # print(policy.gc_resolution, policy.gc_width, policy.gc_ox, policy.gc_oy)
            print("************ Global costmap is received. **************")
            self.Is_gc_Received = True

    def visualize_goal(self):
        """Publish the current local goal (self.gx, self.gy) as a marker."""
        # red cube for local goals
        marker = Marker()
        marker.header.frame_id = "/map"
        marker.header.stamp = rospy.Time.now()
        marker.ns = "goal"
        marker.id = self.num_lg
        marker.type = marker.CUBE
        marker.action = marker.ADD
        marker.pose.position.x = self.gx
        marker.pose.position.y = self.gy
        marker.pose.position.z = 0.2
        marker.scale = Vector3(x=0.1, y=0.1, z=0.1)
        marker.color = ColorRGBA(r=1.0, a=1.0)
        marker.lifetime = rospy.Duration()
        self.goal_marker_pub.publish(marker)

    def visualize_trajectory(self, position, orientation):
        """Publish one breadcrumb marker at the given pose; each AMCL update
        adds a new marker id so the trail persists."""
        # Purple track for robot trajectory over time
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = '/map'
        marker.ns = 'robot'
        marker.id = self.num_pos
        marker.type = marker.CYLINDER
        marker.action = marker.ADD
        marker.pose.position = position
        marker.pose.orientation = orientation
        marker.scale = Vector3(x=0.1, y=0.1, z=0.1)
        marker.color = ColorRGBA(r=0.5, b=0.8, a=1.0)
        marker.lifetime = rospy.Duration()
        self.trajectory_marker_pub.publish(marker)

    def visualize_action(self):
        """Publish an arrow from the robot along the velocity implied by the
        current command (linear.x rotated by theta + angular.z).
        NOTE(review): uses a module-level helper ``add`` defined elsewhere."""
        robot_pos = Point(x=self.px, y=self.py, z=0)
        next_theta = self.theta + self.cmd_vel.angular.z
        next_vx = self.cmd_vel.linear.x * np.cos(next_theta)
        next_vy = self.cmd_vel.linear.x * np.sin(next_theta)
        action = Vector3(x=next_vx, y=next_vy, z=0)
        # green arrow for action (command velocity)
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = "/map"
        marker.ns = "action"
        marker.id = 0
        marker.type = marker.ARROW
        marker.action = marker.ADD
        marker.points = [robot_pos, add(robot_pos, action)]
        marker.scale = Vector3(x=0.1, y=0.3, z=0)
        marker.color = ColorRGBA(g=1.0, a=1.0)
        marker.lifetime = rospy.Duration(0.5)
        self.action_marker_pub.publish(marker)

    def planner(self):
        """One planning step: push the cached state into the global ``robot``
        model, stop if the goal is reached, otherwise query the policy for
        (v, r) and publish the resulting Twist."""
        # update robot
        robot.set(self.px, self.py, self.gx, self.gy, self.vx, self.vy, self.theta)
        # compute command velocity
        if robot.reached_destination():
            # Stop completely at the local goal.
            self.cmd_vel.linear.x = 0
            self.cmd_vel.linear.y = 0
            self.cmd_vel.linear.z = 0
            self.cmd_vel.angular.x = 0
            self.cmd_vel.angular.y = 0
            self.cmd_vel.angular.z = 0
            self.Is_lg_Reached = True
            # If the local goal coincides with the last received goal, the
            # global goal is considered reached (ends the main loop).
            if self.gx == self.received_gx and self.gy == self.received_gy:
                self.Is_gg_Reached = True
        else:
            """
            self state: FullState(px, py, vx, vy, radius, gx, gy, v_pref, theta)
            ob:[ObservableState(px1, py1, vx1, vy1, radius1),
                ObservableState(px1, py1, vx1, vy1, radius1),
                   .......
                ObservableState(pxn, pyn, vxn, vyn, radiusn)]
            """
            # The policy needs at least one human; feed a distant fake one
            # when no people are currently tracked.
            if len(self.ob)==0:
                self.ob = [ObservableState(FAKE_HUMAN_PX, FAKE_HUMAN_PY, 0, 0, HUMAN_RADIUS)]
            self.state = JointState(robot.get_full_state(), self.ob)
            action = policy.predict(self.state)  # max_action
            self.cmd_vel.linear.x = action.v
            self.cmd_vel.linear.y = 0
            self.cmd_vel.linear.z = 0
            self.cmd_vel.angular.x = 0
            self.cmd_vel.angular.y = 0
            self.cmd_vel.angular.z = action.r
        ########### for debug ##########
        # dist_to_goal = np.linalg.norm(np.array(robot.get_position()) - np.array(robot.get_goal_position()))
        # if self.plan_counter % 10 == 0:
        #     rospy.loginfo("robot position:(%s,%s)" % (self.px, self.py))
        #     rospy.loginfo("Distance to goal is %s" % dist_to_goal)
        #     rospy.loginfo("self state:\n %s" % self.state.self_state)
        #     for i in range(len(self.state.human_states)):
        #         rospy.loginfo("human %s :\n %s" % (i+1, self.state.human_states[i]))
        #     rospy.loginfo("%s-th action is planned: \n v: %s m/s \n r: %s rad/s"
        #                   % (self.plan_counter, self.cmd_vel.linear.x, self.cmd_vel.angular.z))
        # publish command velocity
        self.cmd_vel_pub.publish(self.cmd_vel)
        self.plan_counter += 1
        self.visualize_action()
if __name__ == '__main__':
    begin_travel = False
    # set file dirs
    model_dir = '/sarl_star_ros/CrowdNav/crowd_nav/data/output/'
    env_config_file = '/sarl_star_ros/CrowdNav/crowd_nav/data/output/env.config'
    policy_config_file = '/sarl_star_ros/CrowdNav/crowd_nav/data/output/policy.config'
    # prefer a resumed checkpoint when one exists
    if os.path.exists(os.path.join(model_dir, 'resumed_rl_model.pth')):
        model_weights = os.path.join(model_dir, 'resumed_rl_model.pth')
    else:
        model_weights = os.path.join(model_dir, 'rl_model.pth')
    # configure logging and device
    logging.basicConfig(level=logging.INFO, format='%(asctime)s, x%(levelname)s: %(message)s',
                        datefmt="%Y-%m-%d %H:%M:%S")
    device = torch.device("cpu")
    logging.info('Using device: %s', device)
    # configure RL policy
    policy = 'sarl'
    phase = 'test'
    env_config = configparser.RawConfigParser()
    env_config.read(env_config_file)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    env.discomfort_dist = DISCOMFORT_DIST
    policy = policy_factory[policy]()
    policy_config = configparser.RawConfigParser()
    policy_config.read(policy_config_file)
    policy.configure(policy_config)
    policy.with_costmap = True
    # use constant velocity model to predict next state
    policy.query_env = False
    policy.get_model().load_state_dict(torch.load(model_weights))
    policy.set_phase(phase)
    policy.set_device(device)
    policy.set_env(env)
    policy.time_step = 0.25
    policy.gc = []
    robot = Robot()
    try:
        rospy.init_node('sarl_star_node', anonymous=True)
        rate = rospy.Rate(4)  # 4Hz, time_step=0.25
        robot_act = RobotAction()
        listener_v = tf.TransformListener()
        listener_g = tf.TransformListener()
        while not rospy.is_shutdown():
            if robot_act.Is_gg_Reached:
                finish_travel_time = rospy.get_time()
                t = finish_travel_time - begin_travel_time
                rospy.loginfo("Goal is reached. Travel time: %s s." % t)
                break
            # wait for msgs of goal, AMCL and ob
            if robot_act.Is_lg_Received and robot_act.IsAMCLReceived and robot_act.IsObReceived:
                # travel time
                if not begin_travel:
                    begin_travel_time = rospy.get_time()
                    begin_travel = True
                # update local goal (gx,gy)
                robot_act.gx = robot_act.received_gx
                robot_act.gy = robot_act.received_gy
                robot_act.num_lg += 1
                robot_act.visualize_goal()
                robot_act.planner()
                finish_travel_time = rospy.get_time()
                t = finish_travel_time - begin_travel_time
                if t > TIME_LIMIT:
                    rospy.loginfo("Timeout. Travel time: %s s." % t)
                    break
            rate.sleep()
    except rospy.ROSInterruptException:
        # Fixed: `except rospy.ROSInterruptException, e` is Python-2-only
        # syntax (a parse error on Python 3), and `raise e` discarded the
        # original traceback; a bare `raise` re-raises with full context
        # and works on both interpreters.
        raise
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/setup.py | sarl_star_ros/CrowdNav/setup.py | from setuptools import setup
# Packages shipped with the crowdnav distribution.
_PACKAGES = [
    'crowd_nav',
    'crowd_nav.configs',
    'crowd_nav.policy',
    'crowd_nav.utils',
    'crowd_sim',
    'crowd_sim.envs',
    'crowd_sim.envs.policy',
    'crowd_sim.envs.utils',
]

# Runtime dependencies; test-only tools live under the 'test' extra.
_REQUIRES = [
    'gitpython',
    'gym',
    'matplotlib',
    'numpy',
    'scipy',
    'torch',
    'torchvision',
]

setup(
    name='crowdnav',
    version='0.0.1',
    packages=_PACKAGES,
    install_requires=_REQUIRES,
    extras_require={
        'test': [
            'pylint',
            'pytest',
        ],
    },
)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/__init__.py | from gym.envs.registration import register
# Make the crowd simulator constructible via gym.make('CrowdSim-v0').
register(id='CrowdSim-v0', entry_point='crowd_sim.envs:CrowdSim')
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/__init__.py | from .crowd_sim import CrowdSim
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/crowd_sim.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/crowd_sim.py | import logging
import gym
import matplotlib.lines as mlines
import numpy as np
import rvo2
import torch
from matplotlib import patches
from numpy.linalg import norm
from crowd_sim.envs.utils.human import Human
from crowd_sim.envs.utils.info import *
from crowd_sim.envs.utils.utils import point_to_segment_dist
class CrowdSim(gym.Env):
    metadata = {'render.modes': ['human']}

    def __init__(self):
        """
        Movement simulation for n+1 agents
        Agent can either be human or robot.
        humans are controlled by a unknown and fixed policy.
        robot is controlled by a known and learnable policy.
        """
        self.time_limit = None
        self.time_step = None
        self.robot = None
        self.humans = None
        self.global_time = None
        self.human_times = None
        # reward function
        self.success_reward = None
        self.collision_penalty = None
        self.discomfort_dist = None
        self.discomfort_penalty_factor = None
        # simulation configuration
        self.config = None
        self.case_capacity = None
        self.case_size = None
        self.case_counter = None
        self.randomize_attributes = None
        self.train_val_sim = None
        self.test_sim = None
        self.square_width = None
        self.circle_radius = None
        self.human_num = None
        # for visualization
        self.states = None
        self.action_values = None
        self.attention_weights = None

    def set_human_num(self, human_num):
        """Override the number of simulated humans."""
        self.human_num = human_num

    def set_humans(self, humans):
        """Inject a pre-built list of Human agents."""
        self.humans = humans

    def configure(self, config):
        """Read env/reward/sim sections from a ConfigParser instance."""
        self.config = config
        self.time_limit = config.getint('env', 'time_limit')
        self.time_step = config.getfloat('env', 'time_step')
        self.randomize_attributes = config.getboolean('env', 'randomize_attributes')
        self.success_reward = config.getfloat('reward', 'success_reward')
        self.collision_penalty = config.getfloat('reward', 'collision_penalty')
        self.discomfort_dist = config.getfloat('reward', 'discomfort_dist')
        self.discomfort_penalty_factor = config.getfloat('reward', 'discomfort_penalty_factor')
        if self.config.get('humans', 'policy') == 'orca':
            self.case_capacity = {'train': np.iinfo(np.uint32).max - 2000, 'val': 1000, 'test': 1000}
            self.case_size = {'train': np.iinfo(np.uint32).max - 2000, 'val': config.getint('env', 'val_size'),
                              'test': config.getint('env', 'test_size')}
            self.train_val_sim = config.get('sim', 'train_val_sim')
            self.test_sim = config.get('sim', 'test_sim')
            self.square_width = config.getfloat('sim', 'square_width')
            self.circle_radius = config.getfloat('sim', 'circle_radius')
            self.human_num = config.getint('sim', 'human_num')
        else:
            raise NotImplementedError
        self.case_counter = {'train': 0, 'test': 0, 'val': 0}
        logging.info('human number: {}'.format(self.human_num))
        if self.randomize_attributes:
            logging.info("Randomize human's radius and preferred speed")
        else:
            logging.info("Not randomize human's radius and preferred speed")
        logging.info('Training simulation: {}, test simulation: {}'.format(self.train_val_sim, self.test_sim))
        logging.info('Square width: {}, circle width: {}'.format(self.square_width, self.circle_radius))

    def set_robot(self, robot):
        """Attach the controllable robot agent."""
        self.robot = robot

    def generate_random_human_position(self, human_num, rule):
        """
        Generate human position according to certain rule
        Rule square_crossing: generate start/goal position at two sides of y-axis
        Rule circle_crossing: generate start position on a circle, goal position is at the opposite side
        :param human_num:
        :param rule:
        :return:
        """
        # initial min separation distance to avoid danger penalty at beginning
        if rule == 'square_crossing':
            self.humans = []
            for i in range(human_num):
                self.humans.append(self.generate_square_crossing_human())
        elif rule == 'circle_crossing':
            self.humans = []
            for i in range(human_num):
                self.humans.append(self.generate_circle_crossing_human())
        elif rule == 'mixed':
            # mix different raining simulation with certain distribution
            static_human_num = {0: 0.05, 1: 0.2, 2: 0.2, 3: 0.3, 4: 0.1, 5: 0.15}
            dynamic_human_num = {1: 0.3, 2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1}
            static = True if np.random.random() < 0.2 else False
            prob = np.random.random()
            # Sample the human count from the discrete distribution above.
            for key, value in sorted(static_human_num.items() if static else dynamic_human_num.items()):
                if prob - value <= 0:
                    human_num = key
                    break
                else:
                    prob -= value
            self.human_num = human_num
            self.humans = []
            if static:
                # randomly initialize static objects in a square of (width, height)
                width = 4
                height = 8
                if human_num == 0:
                    human = Human(self.config, 'humans')
                    human.set(0, -10, 0, -10, 0, 0, 0)
                    self.humans.append(human)
                for i in range(human_num):
                    human = Human(self.config, 'humans')
                    if np.random.random() > 0.5:
                        sign = -1
                    else:
                        sign = 1
                    while True:
                        px = np.random.random() * width * 0.5 * sign
                        py = (np.random.random() - 0.5) * height
                        collide = False
                        for agent in [self.robot] + self.humans:
                            if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
                                collide = True
                                break
                        if not collide:
                            break
                    # static humans: goal == start, zero velocity
                    human.set(px, py, px, py, 0, 0, 0)
                    self.humans.append(human)
            else:
                # the first 2 two humans will be in the circle crossing scenarios
                # the rest humans will have a random starting and end position
                for i in range(human_num):
                    if i < 2:
                        human = self.generate_circle_crossing_human()
                    else:
                        human = self.generate_square_crossing_human()
                    self.humans.append(human)
        else:
            raise ValueError("Rule doesn't exist")

    def generate_circle_crossing_human(self):
        """Sample a human on the circle whose goal is the antipodal point,
        rejecting positions that collide with existing agents."""
        human = Human(self.config, 'humans')
        if self.randomize_attributes:
            human.sample_random_attributes()
        while True:
            angle = np.random.random() * np.pi * 2
            # add some noise to simulate all the possible cases robot could meet with human
            px_noise = (np.random.random() - 0.5) * human.v_pref
            py_noise = (np.random.random() - 0.5) * human.v_pref
            px = self.circle_radius * np.cos(angle) + px_noise
            py = self.circle_radius * np.sin(angle) + py_noise
            collide = False
            for agent in [self.robot] + self.humans:
                min_dist = human.radius + agent.radius + self.discomfort_dist
                if norm((px - agent.px, py - agent.py)) < min_dist or \
                        norm((px - agent.gx, py - agent.gy)) < min_dist:
                    collide = True
                    break  # jump out of 'for' loop
            if not collide:
                break  # jump out of 'while' loop
        human.set(px, py, -px, -py, 0, 0, 0)
        return human

    def generate_square_crossing_human(self):
        """Sample a human start on one side of the square and a goal on the
        other side, rejecting positions that collide with existing agents."""
        human = Human(self.config, 'humans')
        if self.randomize_attributes:
            human.sample_random_attributes()
        if np.random.random() > 0.5:
            sign = -1
        else:
            sign = 1
        while True:
            px = np.random.random() * self.square_width * 0.5 * sign
            py = (np.random.random() - 0.5) * self.square_width
            collide = False
            for agent in [self.robot] + self.humans:
                if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
                    collide = True
                    break  # jump out of 'for' loop
            if not collide:
                break  # jump out of 'while' loop
        while True:
            gx = np.random.random() * self.square_width * 0.5 * -sign
            gy = (np.random.random() - 0.5) * self.square_width
            collide = False
            for agent in [self.robot] + self.humans:
                if norm((gx - agent.gx, gy - agent.gy)) < human.radius + agent.radius + self.discomfort_dist:
                    collide = True
                    break
            if not collide:
                break
        human.set(px, py, gx, gy, 0, 0, 0)
        return human

    def get_human_times(self):
        """
        Run the whole simulation to the end and compute the average time for human to reach goal.
        Once an agent reaches the goal, it stops moving and becomes an obstacle
        (doesn't need to take half responsibility to avoid collision).
        :return:
        """
        # centralized orca simulator for all humans
        if not self.robot.reached_destination():
            raise ValueError('Episode is not done yet')
        params = (10, 10, 5, 5)
        sim = rvo2.PyRVOSimulator(self.time_step, params[0], params[1], params[2], params[3], 0.3, 1)
        sim.addAgent(self.robot.get_position(), params[0], params[1], params[2], params[3], self.robot.radius, self.robot.v_pref,
                     self.robot.get_velocity())
        for human in self.humans:
            sim.addAgent(human.get_position(), params[0], params[1], params[2], params[3], human.radius, human.v_pref, human.get_velocity())
        max_time = 1000
        while not all(self.human_times):
            for i, agent in enumerate([self.robot] + self.humans):
                vel_pref = np.array(agent.get_goal_position()) - np.array(agent.get_position())
                if norm(vel_pref) > 1:
                    vel_pref /= norm(vel_pref)
                sim.setAgentPrefVelocity(i, tuple(vel_pref))
            sim.doStep()
            self.global_time += self.time_step
            if self.global_time > max_time:
                logging.warning('Simulation cannot terminate!')
            for i, human in enumerate(self.humans):
                if self.human_times[i] == 0 and human.reached_destination():
                    self.human_times[i] = self.global_time
            # for visualization
            self.robot.set_position(sim.getAgentPosition(0))
            for i, human in enumerate(self.humans):
                human.set_position(sim.getAgentPosition(i + 1))
            self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
        del sim
        return self.human_times

    def reset(self, phase='test', test_case=None):
        """
        Set px, py, gx, gy, vx, vy, theta for robot and humans
        :return:
        """
        if self.robot is None:
            raise AttributeError('robot has to be set!')
        assert phase in ['train', 'val', 'test']
        if test_case is not None:
            self.case_counter[phase] = test_case
        self.global_time = 0
        if phase == 'test':
            self.human_times = [0] * self.human_num
        else:
            self.human_times = [0] * (self.human_num if self.robot.policy.multiagent_training else 1)
        if not self.robot.policy.multiagent_training:
            self.train_val_sim = 'circle_crossing'
        if self.config.get('humans', 'policy') == 'trajnet':
            raise NotImplementedError
        else:
            counter_offset = {'train': self.case_capacity['val'] + self.case_capacity['test'],
                              'val': 0, 'test': self.case_capacity['val']}
            self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0, np.pi / 2)
            if self.case_counter[phase] >= 0:
                # Seed from the case counter so test cases are reproducible.
                np.random.seed(counter_offset[phase] + self.case_counter[phase])
                if phase in ['train', 'val']:
                    human_num = self.human_num if self.robot.policy.multiagent_training else 1
                    self.generate_random_human_position(human_num=human_num, rule=self.train_val_sim)
                else:
                    self.generate_random_human_position(human_num=self.human_num, rule=self.test_sim)
                # case_counter is always between 0 and case_size[phase]
                self.case_counter[phase] = (self.case_counter[phase] + 1) % self.case_size[phase]
            else:
                assert phase == 'test'
                if self.case_counter[phase] == -1:
                    # for debugging purposes
                    self.human_num = 3
                    self.humans = [Human(self.config, 'humans') for _ in range(self.human_num)]
                    self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2)
                    self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2)
                    self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2)
                else:
                    raise NotImplementedError
        for agent in [self.robot] + self.humans:
            agent.time_step = self.time_step
            agent.policy.time_step = self.time_step
        self.states = list()
        if hasattr(self.robot.policy, 'action_values'):
            self.action_values = list()
        if hasattr(self.robot.policy, 'get_attention_weights'):
            self.attention_weights = list()
        # get current observation
        if self.robot.sensor == 'coordinates':
            ob = [human.get_observable_state() for human in self.humans]
        elif self.robot.sensor == 'RGB':
            raise NotImplementedError
        return ob

    def onestep_lookahead(self, action):
        """Simulate one step without mutating the environment."""
        return self.step(action, update=False)

    def step(self, action, update=True):
        """
        Compute actions for all agents, detect collision, update environment and return (ob, reward, done, info)
        """
        human_actions = []
        for human in self.humans:
            # observation for humans is always coordinates
            ob = [other_human.get_observable_state() for other_human in self.humans if other_human != human]
            if self.robot.visible:
                ob += [self.robot.get_observable_state()]
            human_actions.append(human.act(ob))
        # collision detection
        dmin = float('inf')  # zheng wu qiong
        collision = False
        for i, human in enumerate(self.humans):
            px = human.px - self.robot.px
            py = human.py - self.robot.py
            if self.robot.kinematics == 'holonomic':
                vx = human.vx - action.vx
                vy = human.vy - action.vy
            else:
                vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
                vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
            ex = px + vx * self.time_step
            ey = py + vy * self.time_step
            # closest distance between boundaries of two agents
            closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius
            if closest_dist < 0:
                collision = True
                # logging.debug("Collision: distance between robot and p{} is {:.2E}".format(i, closest_dist))
                break
            elif closest_dist < dmin:
                dmin = closest_dist
        # collision detection between humans
        human_num = len(self.humans)
        for i in range(human_num):
            for j in range(i + 1, human_num):
                dx = self.humans[i].px - self.humans[j].px
                dy = self.humans[i].py - self.humans[j].py
                # Fixed: exponent was `(1 / 2)`, which is 0 under Python 2
                # integer division (this build targets 2.7), so `dist` always
                # evaluated to 1 - r_i - r_j instead of the Euclidean distance.
                dist = (dx ** 2 + dy ** 2) ** 0.5 - self.humans[i].radius - self.humans[j].radius
                if dist < 0:
                    # detect collision but don't take humans' collision into account
                    logging.debug('Collision happens between humans in step()')
        # check if reaching the goal
        end_position = np.array(self.robot.compute_position(action, self.time_step))
        reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius
        if self.global_time >= self.time_limit - 1:
            reward = 0
            done = True
            info = Timeout()
        elif collision:
            reward = self.collision_penalty
            done = True
            info = Collision()
        elif reaching_goal:
            reward = self.success_reward
            done = True
            info = ReachGoal()
        elif dmin < self.discomfort_dist:
            # only penalize agent for getting too close if it's visible
            # adjust the reward based on FPS
            reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step
            done = False
            info = Danger(dmin)
        else:
            reward = 0
            done = False
            info = Nothing()
        if update:
            # store state, action value and attention weights
            self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
            if hasattr(self.robot.policy, 'action_values'):
                self.action_values.append(self.robot.policy.action_values)
            if hasattr(self.robot.policy, 'get_attention_weights'):
                self.attention_weights.append(self.robot.policy.get_attention_weights())
            # update all agents
            self.robot.step(action)
            for i, human_action in enumerate(human_actions):
                self.humans[i].step(human_action)
            self.global_time += self.time_step
            for i, human in enumerate(self.humans):
                # only record the first time the human reaches the goal
                if self.human_times[i] == 0 and human.reached_destination():
                    self.human_times[i] = self.global_time
            # compute the observation
            if self.robot.sensor == 'coordinates':
                ob = [human.get_observable_state() for human in self.humans]
            elif self.robot.sensor == 'RGB':
                raise NotImplementedError
        else:
            if self.robot.sensor == 'coordinates':
                ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]
            elif self.robot.sensor == 'RGB':
                raise NotImplementedError
        return ob, reward, done, info

    def render(self, mode='human', output_file=None):
        """Visualize the episode: 'human' = live snapshot, 'traj' = static
        trajectory plot, 'video' = animation (optionally saved to file)."""
        from matplotlib import animation
        import matplotlib.pyplot as plt
        plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
        x_offset = 0.11
        y_offset = 0.11
        cmap = plt.cm.get_cmap('hsv', 10)
        robot_color = 'yellow'
        goal_color = 'red'
        arrow_color = 'red'
        arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)
        if mode == 'human':
            fig, ax = plt.subplots(figsize=(7, 7))
            ax.set_xlim(-4, 4)
            ax.set_ylim(-4, 4)
            for human in self.humans:
                human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
                ax.add_artist(human_circle)
            ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
            plt.show()
        elif mode == 'traj':
            fig, ax = plt.subplots(figsize=(7, 7))
            ax.tick_params(labelsize=16)
            ax.set_xlim(-5, 5)
            ax.set_ylim(-5, 5)
            ax.set_xlabel('x(m)', fontsize=16)
            ax.set_ylabel('y(m)', fontsize=16)
            robot_positions = [self.states[i][0].position for i in range(len(self.states))]
            human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
                               for i in range(len(self.states))]
            for k in range(len(self.states)):
                if k % 4 == 0 or k == len(self.states) - 1:
                    robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
                    humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
                              for i in range(len(self.humans))]
                    ax.add_artist(robot)
                    for human in humans:
                        ax.add_artist(human)
                # add time annotation
                global_time = k * self.time_step
                if global_time % 4 == 0 or k == len(self.states) - 1:
                    agents = humans + [robot]
                    times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
                                      '{:.1f}'.format(global_time),
                                      color='black', fontsize=14) for i in range(self.human_num + 1)]
                    for time in times:
                        ax.add_artist(time)
                if k != 0:
                    nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
                                               (self.states[k - 1][0].py, self.states[k][0].py),
                                               color=robot_color, ls='solid')
                    human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
                                                   (self.states[k - 1][1][i].py, self.states[k][1][i].py),
                                                   color=cmap(i), ls='solid')
                                        for i in range(self.human_num)]
                    ax.add_artist(nav_direction)
                    for human_direction in human_directions:
                        ax.add_artist(human_direction)
            plt.legend([robot], ['Robot'], fontsize=16)
            plt.show()
        elif mode == 'video':
            fig, ax = plt.subplots(figsize=(7, 7))
            ax.tick_params(labelsize=16)
            ax.set_xlim(-6, 6)
            ax.set_ylim(-6, 6)
            ax.set_xlabel('x(m)', fontsize=16)
            ax.set_ylabel('y(m)', fontsize=16)
            # add robot and its goal
            robot_positions = [state[0].position for state in self.states]
            goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
            robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
            ax.add_artist(robot)
            ax.add_artist(goal)
            plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16)
            # add humans and their numbers
            human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
            humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
                      for i in range(len(self.humans))]
            human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
                                      color='black', fontsize=12) for i in range(len(self.humans))]
            for i, human in enumerate(humans):
                ax.add_artist(human)
                ax.add_artist(human_numbers[i])
            # add time annotation
            time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
            ax.add_artist(time)
            # compute attention scores
            if self.attention_weights is not None:
                attention_scores = [
                    plt.text(-5.5, 5 - 0.5 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
                             fontsize=16) for i in range(len(self.humans))]
            # compute orientation in each step and use arrow to show the direction
            radius = self.robot.radius
            if self.robot.kinematics == 'unicycle':
                orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
                                                             state[0].py + radius * np.sin(state[0].theta))) for state
                               in self.states]
                orientations = [orientation]
            else:
                orientations = []
                for i in range(self.human_num + 1):
                    orientation = []
                    for state in self.states:
                        if i == 0:
                            agent_state = state[0]
                        else:
                            agent_state = state[1][i - 1]
                        theta = np.arctan2(agent_state.vy, agent_state.vx)
                        orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
                                                                               agent_state.py + radius * np.sin(theta))))
                    orientations.append(orientation)
            # arrows is a one-element list so the update() closure below can
            # replace its contents (Python 2 has no `nonlocal`).
            arrows = [[patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
                       for orientation in orientations]]
            for arrow in arrows[0]:
                ax.add_artist(arrow)
            global_step = [0]

            def update(frame_num):
                # global_step / arrows are single-element lists mutated in
                # place (closure workaround for the missing `nonlocal`).
                global_step[0] = frame_num
                robot.center = robot_positions[frame_num]
                for i, human in enumerate(humans):
                    human.center = human_positions[frame_num][i]
                    human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
                    if self.attention_weights is not None:
                        # Fixed: this block previously sat after the loop and
                        # only recolored the last human via the leaked loop vars.
                        human.set_color(str(self.attention_weights[frame_num][i]))
                        attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))
                # Fixed: `arrows = [...]` made `arrows` local to update(),
                # raising UnboundLocalError on the removal loop above it;
                # mutate arrows[0] instead so the outer list is reused.
                for arrow in arrows[0]:
                    arrow.remove()
                arrows[0] = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
                                                     arrowstyle=arrow_style) for orientation in orientations]
                for arrow in arrows[0]:
                    ax.add_artist(arrow)
                time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))

            def plot_value_heatmap():
                assert self.robot.kinematics == 'holonomic'
                # Fixed: global_step is a one-element list; index with [0]
                # instead of the list itself.
                step = global_step[0]
                for agent in [self.states[step][0]] + self.states[step][1]:
                    print(('{:.4f}, ' * 6 + '{:.4f}').format(agent.px, agent.py, agent.gx, agent.gy,
                                                             agent.vx, agent.vy, agent.theta))
                # when any key is pressed draw the action value plot
                fig, axis = plt.subplots()
                speeds = [0] + self.robot.policy.speeds
                rotations = self.robot.policy.rotations + [np.pi * 2]
                r, th = np.meshgrid(speeds, rotations)
                z = np.array(self.action_values[step % len(self.states)][1:])
                z = (z - np.min(z)) / (np.max(z) - np.min(z))
                z = np.reshape(z, (16, 5))
                polar = plt.subplot(projection="polar")
                polar.tick_params(labelsize=16)
                mesh = plt.pcolormesh(th, r, z, vmin=0, vmax=1)
                plt.plot(rotations, r, color='k', ls='none')
                plt.grid()
                cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
                cbar = plt.colorbar(mesh, cax=cbaxes)
                cbar.ax.tick_params(labelsize=16)
                plt.show()

            def on_click(event):
                # toggle pause/resume; show the value heatmap while paused
                anim.running ^= True
                if anim.running:
                    anim.event_source.stop()
                    if hasattr(self.robot.policy, 'action_values'):
                        plot_value_heatmap()
                else:
                    anim.event_source.start()

            fig.canvas.mpl_connect('key_press_event', on_click)
            anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
            anim.running = True
            if output_file is not None:
                ffmpeg_writer = animation.writers['ffmpeg']
                writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
                anim.save(output_file, writer=writer)
            else:
                plt.show()
        else:
            raise NotImplementedError
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
class FullState(object):
    """Complete agent state: position, velocity, radius, goal, preferred
    speed and heading, plus cached (x, y) convenience tuples."""

    def __init__(self, px, py, vx, vy, radius, gx, gy, v_pref, theta):
        self.px = px
        self.py = py
        self.vx = vx
        self.vy = vy
        self.radius = radius
        self.gx = gx
        self.gy = gy
        self.v_pref = v_pref
        self.theta = theta
        # Tuples derived from the scalar fields above.
        self.position = (px, py)
        self.goal_position = (gx, gy)
        self.velocity = (vx, vy)

    def __add__(self, other):
        # `other` must be a tuple; this state's fields are appended to it.
        return other + (self.px, self.py, self.vx, self.vy, self.radius,
                        self.gx, self.gy, self.v_pref, self.theta)

    def __str__(self):
        fields = (self.px, self.py, self.vx, self.vy, self.radius,
                  self.gx, self.gy, self.v_pref, self.theta)
        return ' '.join(str(f) for f in fields)
class ObservableState(object):
    """Agent state visible to other agents: position, velocity and radius."""

    def __init__(self, px, py, vx, vy, radius):
        self.px = px
        self.py = py
        self.vx = vx
        self.vy = vy
        self.radius = radius
        # Tuples derived from the scalar fields above.
        self.position = (px, py)
        self.velocity = (vx, vy)

    # ObservableState(...) + ObservableState(...)
    def __add__(self, other):
        # `other` must be a tuple; this state's fields are appended to it.
        return other + (self.px, self.py, self.vx, self.vy, self.radius)

    def __str__(self):
        values = (self.px, self.py, self.vx, self.vy, self.radius)
        return ' '.join(str(v) for v in values)
class JointState(object):
def __init__(self, self_state, human_states):
assert isinstance(self_state, FullState)
for human_state in human_states:
assert isinstance(human_state, ObservableState)
self.self_state = self_state
self.human_states = human_states
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/robot.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/robot.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Robot(Agent):
def __init__(self, config, section):
super().__init__(config, section)
def act(self, ob):
if self.policy is None:
raise AttributeError('Policy attribute has to be set!')
state = JointState(self.get_full_state(), ob)
action = self.policy.predict(state)
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/human.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/human.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Human(Agent):
def __init__(self, config, section):
super().__init__(config, section)
def act(self, ob):
"""
The state for human is its full state and all other agents' observable states
:param ob:
:return:
"""
state = JointState(self.get_full_state(), ob)
action = self.policy.predict(state)
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/action.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/action.py | from collections import namedtuple
ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
ActionRot = namedtuple('ActionRot', ['v', 'r'])
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/utils.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/utils.py | import numpy as np
def point_to_segment_dist(x1, y1, x2, y2, x3, y3):
"""
Calculate the closest distance between point(x3, y3) and a line segment with two endpoints (x1, y1), (x2, y2)
"""
px = x2 - x1
py = y2 - y1
if px == 0 and py == 0:
return np.linalg.norm((x3-x1, y3-y1))
u = ((x3 - x1) * px + (y3 - y1) * py) / (px * px + py * py)
if u > 1:
u = 1
elif u < 0:
u = 0
# (x, y) is the closest point to (x3, y3) on the line segment
x = x1 + u * px
y = y1 + u * py
return np.linalg.norm((x - x3, y-y3))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/info.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/info.py | class Timeout(object):
def __init__(self):
pass
def __str__(self):
return 'Timeout'
class ReachGoal(object):
def __init__(self):
pass
def __str__(self):
return 'Reaching goal'
class Danger(object):
def __init__(self, min_dist):
self.min_dist = min_dist
def __str__(self):
return 'Too close'
class Collision(object):
def __init__(self):
pass
def __str__(self):
return 'Collision'
class Nothing(object):
def __init__(self):
pass
def __str__(self):
return ''
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/agent.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/utils/agent.py | import numpy as np
from numpy.linalg import norm
import abc
import logging
from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.action import ActionXY, ActionRot
from crowd_sim.envs.utils.state import ObservableState, FullState
class Agent(object):
def __init__(self, config, section):
"""
Base class for robot and human. Have the physical attributes of an agent.
"""
self.visible = config.getboolean(section, 'visible')
self.v_pref = config.getfloat(section, 'v_pref')
self.radius = config.getfloat(section, 'radius')
self.policy = policy_factory[config.get(section, 'policy')]()
self.sensor = config.get(section, 'sensor')
self.kinematics = self.policy.kinematics if self.policy is not None else None
self.px = None
self.py = None
self.gx = None
self.gy = None
self.vx = None
self.vy = None
self.theta = None
self.time_step = None
def print_info(self):
logging.info('Agent is {} and has {} kinematic constraint'.format(
'visible' if self.visible else 'invisible', self.kinematics))
def set_policy(self, policy):
self.policy = policy
self.kinematics = policy.kinematics
def sample_random_attributes(self):
"""
Sample agent radius and v_pref attribute from certain distribution
:return:
"""
self.v_pref = np.random.uniform(0.5, 1.5)
self.radius = np.random.uniform(0.3, 0.5)
def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
self.px = px
self.py = py
self.gx = gx
self.gy = gy
self.vx = vx
self.vy = vy
self.theta = theta
if radius is not None:
self.radius = radius
if v_pref is not None:
self.v_pref = v_pref
def get_observable_state(self):
return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)
def get_next_observable_state(self, action):
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
next_px, next_py = pos
if self.kinematics == 'holonomic':
next_vx = action.vx
next_vy = action.vy
else:
next_theta = self.theta + action.r
next_vx = action.v * np.cos(next_theta)
next_vy = action.v * np.sin(next_theta)
return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)
def get_full_state(self):
return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)
def get_position(self):
return self.px, self.py
def set_position(self, position):
self.px = position[0]
self.py = position[1]
def get_goal_position(self):
return self.gx, self.gy
def get_velocity(self):
return self.vx, self.vy
def set_velocity(self, velocity):
self.vx = velocity[0]
self.vy = velocity[1]
@abc.abstractmethod
def act(self, ob):
"""
Compute state using received observation and pass it to policy
"""
return
def check_validity(self, action):
if self.kinematics == 'holonomic':
assert isinstance(action, ActionXY)
else:
assert isinstance(action, ActionRot)
def compute_position(self, action, delta_t):
self.check_validity(action)
if self.kinematics == 'holonomic':
px = self.px + action.vx * delta_t
py = self.py + action.vy * delta_t
else:
theta = self.theta + action.r
px = self.px + np.cos(theta) * action.v * delta_t
py = self.py + np.sin(theta) * action.v * delta_t
return px, py
def step(self, action):
"""
Perform an action and update the state
"""
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
self.px, self.py = pos
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
self.theta = (self.theta + action.r) % (2 * np.pi)
self.vx = action.v * np.cos(self.theta)
self.vy = action.v * np.sin(self.theta)
def reached_destination(self):
return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius
# || (position - goal position) ||
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/orca.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/orca.py | import numpy as np
import rvo2
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
class ORCA(Policy):
def __init__(self):
"""
timeStep The time step of the simulation.
Must be positive.
neighborDist The default maximum distance (center point
to center point) to other agents a new agent
takes into account in the navigation. The
larger this number, the longer the running
time of the simulation. If the number is too
low, the simulation will not be safe. Must be
non-negative.
maxNeighbors The default maximum number of other agents a
new agent takes into account in the
navigation. The larger this number, the
longer the running time of the simulation.
If the number is too low, the simulation
will not be safe.
timeHorizon The default minimal amount of time for which
a new agent's velocities that are computed
by the simulation are safe with respect to
other agents. The larger this number, the
sooner an agent will respond to the presence
of other agents, but the less freedom the
agent has in choosing its velocities.
Must be positive.
timeHorizonObst The default minimal amount of time for which
a new agent's velocities that are computed
by the simulation are safe with respect to
obstacles. The larger this number, the
sooner an agent will respond to the presence
of obstacles, but the less freedom the agent
has in choosing its velocities.
Must be positive.
radius The default radius of a new agent.
Must be non-negative.
maxSpeed The default maximum speed of a new agent.
Must be non-negative.
velocity The default initial two-dimensional linear
velocity of a new agent (optional).
ORCA first uses neighborDist and maxNeighbors to find neighbors that need to be taken into account.
Here set them to be large enough so that all agents will be considered as neighbors.
Time_horizon should be set that at least it's safe for one time step
In this work, obstacles are not considered. So the value of time_horizon_obst doesn't matter.
"""
super().__init__()
self.name = 'ORCA'
self.trainable = False
self.multiagent_training = None
self.kinematics = 'holonomic'
self.safety_space = 0
self.neighbor_dist = 10
self.max_neighbors = 10
self.time_horizon = 5
self.time_horizon_obst = 5
self.radius = 0.3
self.max_speed = 1
self.sim = None
def configure(self, config):
# self.time_step = config.getfloat('orca', 'time_step')
# self.neighbor_dist = config.getfloat('orca', 'neighbor_dist')
# self.max_neighbors = config.getint('orca', 'max_neighbors')
# self.time_horizon = config.getfloat('orca', 'time_horizon')
# self.time_horizon_obst = config.getfloat('orca', 'time_horizon_obst')
# self.radius = config.getfloat('orca', 'radius')
# self.max_speed = config.getfloat('orca', 'max_speed')
return
def set_phase(self, phase):
return
def predict(self, state):
"""
Create a rvo2 simulation at each time step and run one step
Python-RVO2 API: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/rvo2.pyx
How simulation is done in RVO2: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/Agent.cpp
Agent doesn't stop moving after it reaches the goal, because once it stops moving, the reciprocal rule is broken
:param state:
:return:
"""
self_state = state.self_state
params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
if self.sim is not None and self.sim.getNumAgents() != len(state.human_states) + 1:
del self.sim
self.sim = None
if self.sim is None:
self.sim = rvo2.PyRVOSimulator(self.time_step, params[0],params[1],params[2],params[3], self.radius, self.max_speed)
self.sim.addAgent(self_state.position, params[0],params[1],params[2],params[3], self_state.radius + 0.01 + self.safety_space,
self_state.v_pref, self_state.velocity)
for human_state in state.human_states:
self.sim.addAgent(human_state.position, params[0],params[1],params[2],params[3], human_state.radius + 0.01 + self.safety_space,
self.max_speed, human_state.velocity)
else:
self.sim.setAgentPosition(0, self_state.position)
self.sim.setAgentVelocity(0, self_state.velocity)
for i, human_state in enumerate(state.human_states):
self.sim.setAgentPosition(i + 1, human_state.position)
self.sim.setAgentVelocity(i + 1, human_state.velocity)
# Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
velocity = np.array((self_state.gx - self_state.px, self_state.gy - self_state.py))
speed = np.linalg.norm(velocity)
pref_vel = velocity / speed if speed > 1 else velocity
# Perturb a little to avoid deadlocks due to perfect symmetry.
# perturb_angle = np.random.random() * 2 * np.pi
# perturb_dist = np.random.random() * 0.01
# perturb_vel = np.array((np.cos(perturb_angle), np.sin(perturb_angle))) * perturb_dist
# pref_vel += perturb_vel
self.sim.setAgentPrefVelocity(0, tuple(pref_vel))
for i, human_state in enumerate(state.human_states):
# unknown goal position of other humans
self.sim.setAgentPrefVelocity(i + 1, (0, 0))
self.sim.doStep()
action = ActionXY(*self.sim.getAgentVelocity(0))
self.last_state = state
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/policy.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/policy.py | import abc
import numpy as np
class Policy(object):
def __init__(self):
"""
Base class for all policies, has an abstract method predict().
"""
self.trainable = False
self.phase = None
self.model = None
self.device = None
self.last_state = None
self.time_step = None
# if agent is assumed to know the dynamics of real world
self.env = None
@abc.abstractmethod
def configure(self, config):
return
def set_phase(self, phase):
self.phase = phase
def set_device(self, device):
self.device = device
def set_env(self, env):
self.env = env
def get_model(self):
return self.model
@abc.abstractmethod
def predict(self, state):
"""
Policy takes state as input and output an action
"""
return
@staticmethod
def reach_destination(state):
self_state = state.self_state
if np.linalg.norm((self_state.py - self_state.gy, self_state.px - self_state.gx)) < self_state.radius:
return True
else:
return False
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/linear.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/linear.py | import numpy as np
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
class Linear(Policy):
def __init__(self):
super().__init__()
self.trainable = False
self.kinematics = 'holonomic'
self.multiagent_training = True
def configure(self, config):
assert True
def predict(self, state):
self_state = state.self_state
theta = np.arctan2(self_state.gy-self_state.py, self_state.gx-self_state.px)
vx = np.cos(theta) * self_state.v_pref
vy = np.sin(theta) * self_state.v_pref
action = ActionXY(vx, vy)
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/policy_factory.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_sim/envs/policy/policy_factory.py | from crowd_sim.envs.policy.linear import Linear
from crowd_sim.envs.policy.orca import ORCA
def none_policy():
return None
policy_factory = dict()
policy_factory['linear'] = Linear
policy_factory['orca'] = ORCA
policy_factory['none'] = none_policy
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/train.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/train.py | import sys
import logging
import argparse
import configparser
import os
import shutil
import torch
import gym
import git
from crowd_sim.envs.utils.robot import Robot
from crowd_nav.utils.trainer import Trainer
from crowd_nav.utils.memory import ReplayMemory
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
def main():
parser = argparse.ArgumentParser('Parse configuration file')
parser.add_argument('--env_config', type=str, default='configs/env.config')
parser.add_argument('--policy', type=str, default='cadrl') # --policy sarl
parser.add_argument('--policy_config', type=str, default='configs/policy.config')
parser.add_argument('--train_config', type=str, default='configs/train.config')
parser.add_argument('--output_dir', type=str, default='data/output')
parser.add_argument('--weights', type=str)
parser.add_argument('--resume', default=False, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--debug', default=False, action='store_true')
args = parser.parse_args()
# configure paths
make_new_dir = True
if os.path.exists(args.output_dir):
key = input('Output directory already exists! Overwrite the folder? (y/n)')
if key == 'y' and not args.resume:
shutil.rmtree(args.output_dir)
else:
make_new_dir = False
args.env_config = os.path.join(args.output_dir, os.path.basename(args.env_config))
args.policy_config = os.path.join(args.output_dir, os.path.basename(args.policy_config))
args.train_config = os.path.join(args.output_dir, os.path.basename(args.train_config))
if make_new_dir:
os.makedirs(args.output_dir)
shutil.copy(args.env_config, args.output_dir)
shutil.copy(args.policy_config, args.output_dir)
shutil.copy(args.train_config, args.output_dir)
log_file = os.path.join(args.output_dir, 'output.log')
il_weight_file = os.path.join(args.output_dir, 'il_model.pth')
rl_weight_file = os.path.join(args.output_dir, 'rl_model.pth')
# configure logging
mode = 'a' if args.resume else 'w'
file_handler = logging.FileHandler(log_file, mode=mode)
stdout_handler = logging.StreamHandler(sys.stdout)
level = logging.INFO if not args.debug else logging.DEBUG
logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],
format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
repo = git.Repo(search_parent_directories=True)
logging.info('Current git head hash code: %s', format(repo.head.object.hexsha))
device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
logging.info('Using device: %s', device)
# configure policy
policy = policy_factory[args.policy]() # /crowd_nav/policy/policy_factory.py
if not policy.trainable:
parser.error('Policy has to be trainable')
if args.policy_config is None:
parser.error('Policy config has to be specified for a trainable network')
policy_config = configparser.RawConfigParser()
policy_config.read(args.policy_config)
policy.configure(policy_config) # SARL.configure
policy.set_device(device) # SARL.set_device
# configure environment
env_config = configparser.RawConfigParser()
env_config.read(args.env_config)
env = gym.make('CrowdSim-v0')
env.configure(env_config)
robot = Robot(env_config, 'robot')
env.set_robot(robot)
# read training parameters
if args.train_config is None:
parser.error('Train config has to be specified for a trainable network')
train_config = configparser.RawConfigParser()
train_config.read(args.train_config)
rl_learning_rate = train_config.getfloat('train', 'rl_learning_rate')
train_batches = train_config.getint('train', 'train_batches')
train_episodes = train_config.getint('train', 'train_episodes')
sample_episodes = train_config.getint('train', 'sample_episodes')
target_update_interval = train_config.getint('train', 'target_update_interval')
evaluation_interval = train_config.getint('train', 'evaluation_interval')
capacity = train_config.getint('train', 'capacity')
epsilon_start = train_config.getfloat('train', 'epsilon_start')
epsilon_end = train_config.getfloat('train', 'epsilon_end')
epsilon_decay = train_config.getfloat('train', 'epsilon_decay')
checkpoint_interval = train_config.getint('train', 'checkpoint_interval')
# configure trainer and explorer
memory = ReplayMemory(capacity)
model = policy.get_model()
batch_size = train_config.getint('trainer', 'batch_size')
trainer = Trainer(model, memory, device, batch_size)
explorer = Explorer(env, robot, device, memory, policy.gamma, target_policy=policy) # target policy: sarl
# imitation learning
if args.resume:
if not os.path.exists(rl_weight_file):
logging.error('RL weights does not exist')
model.load_state_dict(torch.load(rl_weight_file))
rl_weight_file = os.path.join(args.output_dir, 'resumed_rl_model.pth')
logging.info('Load reinforcement learning trained weights. Resume training')
elif os.path.exists(il_weight_file):
model.load_state_dict(torch.load(il_weight_file))
logging.info('Load imitation learning trained weights.')
else:
il_episodes = train_config.getint('imitation_learning', 'il_episodes')
il_policy = train_config.get('imitation_learning', 'il_policy')
il_epochs = train_config.getint('imitation_learning', 'il_epochs')
il_learning_rate = train_config.getfloat('imitation_learning', 'il_learning_rate')
trainer.set_learning_rate(il_learning_rate)
if robot.visible:
safety_space = 0
else:
safety_space = train_config.getfloat('imitation_learning', 'safety_space')
il_policy = policy_factory[il_policy]()
il_policy.multiagent_training = policy.multiagent_training
il_policy.safety_space = safety_space
robot.set_policy(il_policy) # robot.policy: orca
explorer.run_k_episodes(il_episodes, 'train', update_memory=True, imitation_learning=True)
trainer.optimize_epoch(il_epochs)
torch.save(model.state_dict(), il_weight_file)
logging.info('Finish imitation learning. Weights saved.')
logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
explorer.update_target_model(model)
# reinforcement learning
policy.set_env(env)
robot.set_policy(policy)
robot.print_info()
trainer.set_learning_rate(rl_learning_rate)
# fill the memory pool with some RL experience
if args.resume:
robot.policy.set_epsilon(epsilon_end)
explorer.run_k_episodes(100, 'train', update_memory=True, episode=0)
logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
episode = 0
while episode < train_episodes:
if args.resume:
epsilon = epsilon_end
else:
if episode < epsilon_decay:
epsilon = epsilon_start + (epsilon_end - epsilon_start) / epsilon_decay * episode
else:
epsilon = epsilon_end
robot.policy.set_epsilon(epsilon)
# evaluate the model
if episode % evaluation_interval == 0:
explorer.run_k_episodes(env.case_size['val'], 'val', episode=episode)
# sample k episodes into memory and optimize over the generated memory
explorer.run_k_episodes(sample_episodes, 'train', update_memory=True, episode=episode)
trainer.optimize_batch(train_batches)
episode += 1
if episode % target_update_interval == 0:
explorer.update_target_model(model)
if episode != 0 and episode % checkpoint_interval == 0:
torch.save(model.state_dict(), rl_weight_file)
# final test
explorer.run_k_episodes(env.case_size['test'], 'test', episode=episode)
if __name__ == '__main__':
main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/test.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/test.py | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
import logging
import argparse
import configparser
import os
import torch
import numpy as np
import gym
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.robot import Robot
from crowd_sim.envs.policy.orca import ORCA
def main():
parser = argparse.ArgumentParser('Parse configuration file')
parser.add_argument('--env_config', type=str, default='configs/env.config')
parser.add_argument('--policy_config', type=str, default='configs/policy.config')
parser.add_argument('--policy', type=str, default='sarl')
parser.add_argument('--model_dir', type=str, default='data/output')
parser.add_argument('--il', default=False, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--visualize', default=True, action='store_true')
parser.add_argument('--phase', type=str, default='test')
parser.add_argument('--test_case', type=int, default=0)
parser.add_argument('--square', default=False, action='store_true')
parser.add_argument('--circle', default=False, action='store_true')
parser.add_argument('--video_file', type=str, default=None)
parser.add_argument('--traj', default=False, action='store_true')
args = parser.parse_args()
if args.model_dir is not None:
env_config_file = os.path.join(args.model_dir, os.path.basename(args.env_config))
policy_config_file = os.path.join(args.model_dir, os.path.basename(args.policy_config))
if args.il:
model_weights = os.path.join(args.model_dir, 'il_model.pth')
else:
if os.path.exists(os.path.join(args.model_dir, 'resumed_rl_model.pth')):
model_weights = os.path.join(args.model_dir, 'resumed_rl_model.pth')
else:
model_weights = os.path.join(args.model_dir, 'rl_model.pth')
else:
env_config_file = args.env_config
policy_config_file = args.policy_config
# configure logging and device
logging.basicConfig(level=logging.INFO, format='%(asctime)s, %(levelname)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
logging.info('Using device: %s', device)
# configure policy
policy = policy_factory[args.policy]()
policy_config = configparser.RawConfigParser()
policy_config.read(policy_config_file)
policy.configure(policy_config)
if policy.trainable:
if args.model_dir is None:
parser.error('Trainable policy must be specified with a model weights directory')
policy.get_model().load_state_dict(torch.load(model_weights))
# configure environment
env_config = configparser.RawConfigParser()
env_config.read(env_config_file)
env = gym.make('CrowdSim-v0')
env.configure(env_config)
if args.square:
env.test_sim = 'square_crossing'
if args.circle:
env.test_sim = 'circle_crossing'
robot = Robot(env_config, 'robot')
robot.set_policy(policy)
env.set_robot(robot)
explorer = Explorer(env, robot, device, gamma=0.9)
policy.set_phase(args.phase)
policy.set_device(device)
# set safety space for ORCA in non-cooperative simulation
if isinstance(robot.policy, ORCA):
if robot.visible:
robot.policy.safety_space = 0
else:
robot.policy.safety_space = 0
logging.info('ORCA agent buffer: %f', robot.policy.safety_space)
policy.set_env(env)
robot.print_info()
if args.visualize:
ob = env.reset(args.phase, args.test_case)
done = False
last_pos = np.array(robot.get_position())
while not done:
action = robot.act(ob)
ob, _, done, info = env.step(action)
current_pos = np.array(robot.get_position())
logging.debug('Speed: %.2f', np.linalg.norm(current_pos - last_pos) / robot.time_step)
last_pos = current_pos
if args.traj:
env.render('traj', args.video_file)
else:
env.render('video', args.video_file)
logging.info('It takes %.2f seconds to finish. Final status is %s', env.global_time, info)
if robot.visible and info == 'reach goal':
human_times = env.get_human_times()
logging.info('Average time for humans to reach goal: %.2f', sum(human_times) / len(human_times))
else:
explorer.run_k_episodes(env.case_size[args.phase], args.phase, print_failure=True)
if __name__ == '__main__':
main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/memory.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/memory.py | from torch.utils.data import Dataset
# memory: list(state,value)
class ReplayMemory(Dataset):
def __init__(self, capacity):
self.capacity = capacity
self.memory = list()
self.position = 0
def push(self, item):
# replace old experience with new experience
if len(self.memory) < self.position + 1:
self.memory.append(item)
else:
self.memory[self.position] = item
self.position = (self.position + 1) % self.capacity
def is_full(self):
return len(self.memory) == self.capacity
def __getitem__(self, item):
return self.memory[item]
def __len__(self):
return len(self.memory)
def clear(self):
self.memory = list()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/plot.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/plot.py | import re
import argparse
import matplotlib.pyplot as plt
import numpy as np
def running_mean(x, n):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[n:] - cumsum[:-n]) / float(n)
def main():
    """Parse CrowdNav training logs and plot train/validation curves.

    Each positional argument is a log file produced by training; the
    per-episode TRAIN/VAL statistics (success rate, collision rate, nav
    time, cumulative reward) are regex-extracted and the metrics selected
    by the ``--plot_*`` flags are drawn, one line per model.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('log_files', type=str, nargs='+')
    parser.add_argument('--plot_sr', default=False, action='store_true')
    parser.add_argument('--plot_cr', default=False, action='store_true')
    parser.add_argument('--plot_time', default=False, action='store_true')
    # NOTE(review): store_true with default=True means these two flags can
    # never be switched off from the command line — confirm intent.
    parser.add_argument('--plot_reward', default=True, action='store_true')
    parser.add_argument('--plot_train', default=True, action='store_true')
    parser.add_argument('--plot_val', default=False, action='store_true')
    parser.add_argument('--window_size', type=int, default=200)
    args = parser.parse_args()
    # define the names of the models you want to plot and the longest episodes you want to show
    # NOTE(review): models[i] is indexed by log-file position, so the order of
    # log_files on the command line must match this list.
    models = ['LSTM-RL', 'SARL', 'OM-SARL']
    max_episodes = 10000
    ax1 = ax2 = ax3 = ax4 = None
    ax1_legends = []
    ax2_legends = []
    ax3_legends = []
    ax4_legends = []
    for i, log_file in enumerate(args.log_files):
        with open(log_file, 'r') as file:
            log = file.read()
        # Patterns mirror the logging format written by Explorer.run_k_episodes.
        # NOTE(review): the '.' in '[0-1].\d+' is an unescaped wildcard, not a
        # literal decimal point — works in practice but is loose.
        val_pattern = r"VAL in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                      r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                      r"total reward: (?P<reward>[-+]?\d+.\d+)"
        val_episode = []
        val_sr = []
        val_cr = []
        val_time = []
        val_reward = []
        for r in re.findall(val_pattern, log):
            val_episode.append(int(r[0]))
            val_sr.append(float(r[1]))
            val_cr.append(float(r[2]))
            val_time.append(float(r[3]))
            val_reward.append(float(r[4]))
        train_pattern = r"TRAIN in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                        r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                        r"total reward: (?P<reward>[-+]?\d+.\d+)"
        train_episode = []
        train_sr = []
        train_cr = []
        train_time = []
        train_reward = []
        for r in re.findall(train_pattern, log):
            train_episode.append(int(r[0]))
            train_sr.append(float(r[1]))
            train_cr.append(float(r[2]))
            train_time.append(float(r[3]))
            train_reward.append(float(r[4]))
        # Truncate every series to a common episode horizon so curves are comparable.
        train_episode = train_episode[:max_episodes]
        train_sr = train_sr[:max_episodes]
        train_cr = train_cr[:max_episodes]
        train_time = train_time[:max_episodes]
        train_reward = train_reward[:max_episodes]
        # smooth training plot
        train_sr_smooth = running_mean(train_sr, args.window_size)
        train_cr_smooth = running_mean(train_cr, args.window_size)
        train_time_smooth = running_mean(train_time, args.window_size)
        train_reward_smooth = running_mean(train_reward, args.window_size)
        # plot sr
        if args.plot_sr:
            if ax1 is None:
                _, ax1 = plt.subplots()
            if args.plot_train:
                ax1.plot(range(len(train_sr_smooth)), train_sr_smooth)
                ax1_legends.append(models[i])
            if args.plot_val:
                ax1.plot(val_episode, val_sr)
                ax1_legends.append(models[i])
            ax1.legend(ax1_legends)
            ax1.set_xlabel('Episodes')
            ax1.set_ylabel('Success Rate')
            ax1.set_title('Success rate')
        # plot time
        if args.plot_time:
            if ax2 is None:
                _, ax2 = plt.subplots()
            if args.plot_train:
                ax2.plot(range(len(train_time_smooth)), train_time_smooth)
                ax2_legends.append(models[i])
            if args.plot_val:
                ax2.plot(val_episode, val_time)
                ax2_legends.append(models[i])
            ax2.legend(ax2_legends)
            ax2.set_xlabel('Episodes')
            ax2.set_ylabel('Time(s)')
            ax2.set_title("Robot's Time to Reach Goal")
        # plot cr
        if args.plot_cr:
            if ax3 is None:
                _, ax3 = plt.subplots()
            if args.plot_train:
                ax3.plot(range(len(train_cr_smooth)), train_cr_smooth)
                ax3_legends.append(models[i])
            if args.plot_val:
                ax3.plot(val_episode, val_cr)
                ax3_legends.append(models[i])
            ax3.legend(ax3_legends)
            ax3.set_xlabel('Episodes')
            ax3.set_ylabel('Collision Rate')
            ax3.set_title('Collision Rate')
        # plot reward
        if args.plot_reward:
            if ax4 is None:
                _, ax4 = plt.subplots()
            if args.plot_train:
                ax4.plot(range(len(train_reward_smooth)), train_reward_smooth)
                ax4_legends.append(models[i])
            if args.plot_val:
                ax4.plot(val_episode, val_reward)
                ax4_legends.append(models[i])
            ax4.legend(ax4_legends)
            ax4.set_xlabel('Episodes')
            ax4.set_ylabel('Reward')
            ax4.set_title('Cumulative Discounted Reward')
    plt.show()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/trainer.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/trainer.py | import logging
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
class Trainer(object):
    """Supervised trainer fitting a policy's value network to the
    (state, value) pairs held in a replay memory, via MSE + SGD."""
    def __init__(self, model, memory, device, batch_size):
        """
        Train the trainable model of a policy

        :param model: the value network to optimize
        :param memory: a Dataset of (state, value) pairs
        :param device: torch device the criterion is moved to
        :param batch_size: mini-batch size for the DataLoader
        """
        self.model = model
        self.device = device
        self.criterion = nn.MSELoss().to(device) # mean square error loss
        self.memory = memory
        self.data_loader = None
        self.batch_size = batch_size
        self.optimizer = None
    def set_learning_rate(self, learning_rate):
        # Re-creates the SGD optimizer; must be called before any optimize_* call.
        logging.info('Current learning rate: %f', learning_rate)
        self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=0.9)
    def optimize_epoch(self, num_epochs):
        """Run ``num_epochs`` full passes over the memory; return the last
        epoch's summed batch loss divided by len(memory)."""
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            # randomly sample a batch of data from memory
            # NOTE(review): the loader is built once; items pushed to memory
            # afterwards may not be seen depending on DataLoader internals — confirm.
            self.data_loader = DataLoader(self.memory, self.batch_size, shuffle=True)
        average_epoch_loss = 0
        for epoch in range(num_epochs):
            epoch_loss = 0
            for data in self.data_loader: # until all the dataset is read
                inputs, values = data
                inputs = Variable(inputs) # inputs: state: batchsize * human_num * (6+7)
                values = Variable(values) # value of the state
                self.optimizer.zero_grad() # clear the gradients
                outputs = self.model(inputs) # forward function in SARL: input state, output value
                loss = self.criterion(outputs, values)
                loss.backward()
                self.optimizer.step()
                epoch_loss += loss.data.item()
            # NOTE(review): divides the sum of per-batch mean losses by the
            # number of samples, not the number of batches — confirm intended scale.
            average_epoch_loss = epoch_loss / len(self.memory)
            logging.debug('Average loss in epoch %d: %.2E', epoch, average_epoch_loss)
        return average_epoch_loss
    def optimize_batch(self, num_batches):
        """Run ``num_batches`` single-batch SGD steps; return the mean batch loss."""
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            self.data_loader = DataLoader(self.memory, self.batch_size, shuffle=True)
        losses = 0
        for _ in range(num_batches):
            # A fresh iterator per step: with shuffle=True each step draws a
            # newly shuffled first batch.
            inputs, values = next(iter(self.data_loader))
            inputs = Variable(inputs)
            values = Variable(values)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            loss = self.criterion(outputs, values)
            loss.backward()
            self.optimizer.step()
            losses += loss.data.item()
        average_loss = losses / num_batches
        logging.debug('Average loss : %.2E', average_loss)
        return average_loss
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/explorer.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/utils/explorer.py | import logging
import copy
import torch
from crowd_sim.envs.utils.info import *
class Explorer(object):
    """Runs episodes of the robot in the crowd simulation, collects
    statistics, and optionally pushes (state, value) pairs into a replay
    memory for imitation/reinforcement learning."""
    def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
        self.env = env
        self.robot = robot
        self.device = device
        self.memory = memory
        self.gamma = gamma
        self.target_policy = target_policy
        self.target_model = None
    def update_target_model(self, target_model):
        # Deep-copied so subsequent training of the live model does not
        # change the bootstrap targets.
        self.target_model = copy.deepcopy(target_model)
    # @profile
    def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None,
                       print_failure=False):
        """Roll out ``k`` episodes in ``phase`` ('train'/'val'/'test'),
        log aggregate statistics, and optionally store experience.

        :param update_memory: push episode experience into the replay memory
        :param imitation_learning: compute IL-style discounted-return targets
        :param episode: episode index used only for log formatting
        :param print_failure: additionally log collision/timeout case indices
        """
        self.robot.policy.set_phase(phase)
        success_times = []
        collision_times = []
        timeout_times = []
        success = 0
        collision = 0
        timeout = 0
        too_close = 0
        min_dist = []
        cumulative_rewards = []
        collision_cases = []
        timeout_cases = []
        for i in range(k):
            ob = self.env.reset(phase)
            done = False
            states = []
            actions = []
            rewards = []
            while not done:
                action = self.robot.act(ob)
                ob, reward, done, info = self.env.step(action)
                states.append(self.robot.policy.last_state)
                actions.append(action)
                rewards.append(reward)
                if isinstance(info, Danger):
                    too_close += 1
                    min_dist.append(info.min_dist)
            # 'info' from the final step tells how the episode ended.
            if isinstance(info, ReachGoal):
                success += 1
                success_times.append(self.env.global_time)
            elif isinstance(info, Collision):
                collision += 1
                collision_cases.append(i)
                collision_times.append(self.env.global_time)
            elif isinstance(info, Timeout):
                timeout += 1
                timeout_cases.append(i)
                timeout_times.append(self.env.time_limit)
            else:
                raise ValueError('Invalid end signal from environment')
            if update_memory:
                if isinstance(info, ReachGoal) or isinstance(info, Collision):
                    # only add positive(success) or negative(collision) experience in experience set
                    self.update_memory(states, actions, rewards, imitation_learning)
            # Discount exponent scales time by time_step * v_pref (see CADRL paper).
            cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
                                           * reward for t, reward in enumerate(rewards)]))  # enumerate from 0
        # NOTE(review): relies on true division (Python 3); under Python 2
        # without __future__ division these rates would truncate to 0 — confirm runtime.
        success_rate = success / k
        collision_rate = collision / k
        assert success + collision + timeout == k
        avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit
        extra_info = '' if episode is None else 'in episode {} '.format(episode)
        logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
                     format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
                            average(cumulative_rewards)))
        if phase in ['val', 'test']:
            total_time = sum(success_times + collision_times + timeout_times) * self.robot.time_step
            logging.info('Frequency of being in danger: %.2f and average min separate distance in danger: %.2f',
                         too_close / total_time, average(min_dist))
        if print_failure:
            logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
            logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))
    def update_memory(self, states, actions, rewards, imitation_learning=False):
        """Compute a value target for every visited state and push
        (state, value) pairs into the replay memory."""
        if self.memory is None or self.gamma is None:
            raise ValueError('Memory or gamma value is not set!')
        for i, state in enumerate(states):
            reward = rewards[i]
            # VALUE UPDATE
            if imitation_learning:
                # define the value of states in IL as cumulative discounted rewards, which is the same in RL
                state = self.target_policy.transform(state)
                # value = pow(self.gamma, (len(states) - 1 - i) * self.robot.time_step * self.robot.v_pref)
                value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
                             for t, reward in enumerate(rewards)])
            else:
                if i == len(states) - 1:
                    # terminal state
                    value = reward
                else:
                    # One-step TD target bootstrapped from the frozen target network.
                    next_state = states[i + 1]
                    gamma_bar = pow(self.gamma, self.robot.time_step * self.robot.v_pref)
                    value = reward + gamma_bar * self.target_model(next_state.unsqueeze(0)).data.item()
            value = torch.Tensor([value]).to(self.device)
            # # transform state of different human_num into fixed-size tensor
            # if len(state.size()) == 1:
            #     human_num = 1
            #     feature_size = state.size()[0]
            # else:
            #     human_num, feature_size = state.size()
            # if human_num != 5:
            #     padding = torch.zeros((5 - human_num, feature_size))
            #     state = torch.cat([state, padding])
            self.memory.push((state, value))
def average(input_list):
    """Arithmetic mean of ``input_list``; 0 when the list is empty."""
    if not input_list:
        return 0
    return sum(input_list) / len(input_list)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/configs/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/configs/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/lstm_rl.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/lstm_rl.py | import torch
import torch.nn as nn
import numpy as np
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork1(nn.Module):
    """LSTM-RL value network without a pairwise interaction module: an LSTM
    consumes the raw per-human joint-state rows; its final hidden state,
    concatenated with the robot's own state slice, is scored by an MLP."""
    def __init__(self, input_dim, self_state_dim, mlp_dims, lstm_hidden_dim):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(input_dim, lstm_hidden_dim, batch_first=True)
    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a joint state)
        :return: value tensor of shape (batch_size, mlp_dims[-1])
        """
        size = state.shape
        # The robot's own state is replicated in every row; take it from row 0.
        self_state = state[:, 0, :self.self_state_dim]
        # human_state = state[:, :, self.self_state_dim:]
        # NOTE(review): h0/c0 are allocated on the default (CPU) device; this
        # would need .to(state.device) if the model ever runs on GPU — confirm.
        h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        output, (hn, cn) = self.lstm(state, (h0, c0))
        hn = hn.squeeze(0)
        joint_state = torch.cat([self_state, hn], dim=1)
        value = self.mlp(joint_state)
        return value
class ValueNetwork2(nn.Module):
    """LSTM-RL value network WITH a pairwise interaction module: each human
    row is first embedded by mlp1, the embeddings are fed through an LSTM,
    and the final hidden state plus the robot's own state is scored by an MLP."""
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp_dims, lstm_hidden_dim):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp1 = mlp(input_dim, mlp1_dims)
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(mlp1_dims[-1], lstm_hidden_dim, batch_first=True)
    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a joint state)
        :return: value tensor of shape (batch_size, mlp_dims[-1])
        """
        size = state.shape
        self_state = state[:, 0, :self.self_state_dim]
        # Flatten (batch, humans) so mlp1 embeds each human row independently.
        state = torch.reshape(state, (-1, size[2]))
        mlp1_output = self.mlp1(state)
        mlp1_output = torch.reshape(mlp1_output, (size[0], size[1], -1))
        # NOTE(review): h0/c0 on default (CPU) device; needs .to(state.device)
        # for GPU use — confirm.
        h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        output, (hn, cn) = self.lstm(mlp1_output, (h0, c0))
        hn = hn.squeeze(0)
        joint_state = torch.cat([self_state, hn], dim=1)
        value = self.mlp(joint_state)
        return value
class LstmRL(MultiHumanRL):
    """LSTM-RL policy: humans are ordered by distance and consumed
    sequentially by an LSTM-based value network."""
    def __init__(self):
        super().__init__()
        self.name = 'LSTM-RL'
        self.with_interaction_module = None
        self.interaction_module_dims = None
    def configure(self, config):
        """Build the value network from the [lstm_rl] config section."""
        self.set_common_parameters(config)
        mlp_dims = [int(x) for x in config.get('lstm_rl', 'mlp2_dims').split(', ')]
        global_state_dim = config.getint('lstm_rl', 'global_state_dim')
        self.with_om = config.getboolean('lstm_rl', 'with_om')
        with_interaction_module = config.getboolean('lstm_rl', 'with_interaction_module')
        if with_interaction_module:
            mlp1_dims = [int(x) for x in config.get('lstm_rl', 'mlp1_dims').split(', ')]
            self.model = ValueNetwork2(self.input_dim(), self.self_state_dim, mlp1_dims, mlp_dims, global_state_dim)
        else:
            self.model = ValueNetwork1(self.input_dim(), self.self_state_dim, mlp_dims, global_state_dim)
        self.multiagent_training = config.getboolean('lstm_rl', 'multiagent_training')
        logging.info('Policy: {}LSTM-RL {} pairwise interaction module'.format(
            'OM-' if self.with_om else '', 'w/' if with_interaction_module else 'w/o'))
    def predict(self, state):
        """
        Input state is the joint state of robot concatenated with the observable state of other agents
        To predict the best action, agent samples actions and propagates one step to see how good the next state is
        thus the reward function is needed
        """
        def dist(human):
            # sort human order by decreasing distance to the robot
            return np.linalg.norm(np.array(human.position) - np.array(state.self_state.position))
        # Farthest human first, so the LSTM sees the nearest (most relevant) last.
        state.human_states = sorted(state.human_states, key=dist, reverse=True)
        return super().predict(state)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/sarl.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/sarl.py | import torch
import torch.nn as nn
from torch.nn.functional import softmax
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork(nn.Module):
    """SARL value network: per-human embeddings (mlp1/mlp2), a soft
    attention over humans (optionally conditioned on the mean 'global'
    embedding), and a final MLP scoring the robot state concatenated with
    the attention-weighted crowd feature."""
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
                 cell_size, cell_num):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:
            # Attention sees [per-human embedding ; mean embedding].
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        # Last forward pass's attention weights, kept for visualisation.
        self.attention_weights = None
    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a rotated state)
        :return: value tensor of shape (batch_size, mlp3_dims[-1])
        """
        size = state.shape
        self_state = state[:, 0, :self.self_state_dim]
        mlp1_output = self.mlp1(state.view((-1, size[2])))
        mlp2_output = self.mlp2(mlp1_output)
        if self.with_global_state:
            # compute attention scores
            global_state = torch.mean(mlp1_output.view(size[0], size[1], -1), 1, keepdim=True)
            global_state = global_state.expand((size[0], size[1], self.global_state_dim)).\
                contiguous().view(-1, self.global_state_dim)
            attention_input = torch.cat([mlp1_output, global_state], dim=1)
        else:
            attention_input = mlp1_output
        scores = self.attention(attention_input).view(size[0], size[1], 1).squeeze(dim=2) # size=[100,5]
        # masked softmax
        # weights = softmax(scores, dim=1).unsqueeze(2)
        # NOTE(review): scores exactly equal to 0 are treated as padding and
        # masked out of the softmax — presumably dummy humans; confirm upstream.
        scores_exp = torch.exp(scores) * (scores != 0).float()
        weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(2)
        self.attention_weights = weights[0, :, 0].data.cpu().numpy()
        # output feature is a linear combination of input features
        features = mlp2_output.view(size[0], size[1], -1)
        # for converting to onnx
        # expanded_weights = torch.cat([torch.zeros(weights.size()).copy_(weights) for _ in range(50)], dim=2)
        weighted_feature = torch.sum(torch.mul(weights, features), dim=1)
        # concatenate agent's state with global weighted humans' state
        joint_state = torch.cat([self_state, weighted_feature], dim=1)
        value = self.mlp3(joint_state)
        return value
class SARL(MultiHumanRL): # derived classes: Policy > CADRL > MultiHumanRL > SARL
    """Socially Attentive RL policy: builds the attention-based
    ValueNetwork from the [sarl] config section."""
    def __init__(self):
        super().__init__() # super: use the definitions in the parent class
        self.name = 'SARL'
    def configure(self, config):
        """Read [sarl] hyperparameters and instantiate the value network."""
        self.set_common_parameters(config)
        mlp1_dims = [int(x) for x in config.get('sarl', 'mlp1_dims').split(', ')]
        mlp2_dims = [int(x) for x in config.get('sarl', 'mlp2_dims').split(', ')]
        mlp3_dims = [int(x) for x in config.get('sarl', 'mlp3_dims').split(', ')]
        attention_dims = [int(x) for x in config.get('sarl', 'attention_dims').split(', ')]
        self.with_om = config.getboolean('sarl', 'with_om')
        with_global_state = config.getboolean('sarl', 'with_global_state')
        self.model = ValueNetwork(self.input_dim(), self.self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims,
                                  attention_dims, with_global_state, self.cell_size, self.cell_num)
        self.multiagent_training = config.getboolean('sarl', 'multiagent_training')
        if self.with_om:
            # Occupancy-map variant gets its own display name.
            self.name = 'OM-SARL'
        logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))
    def get_attention_weights(self):
        # Attention weights recorded during the last forward pass (for visualisation).
        return self.model.attention_weights
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/cadrl.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/cadrl.py | import torch
import torch.nn as nn
import numpy as np
import itertools
import logging
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_sim.envs.utils.state import ObservableState, FullState
def mlp(input_dim, mlp_dims, last_relu=False):
    """Build a fully-connected network with layer sizes
    ``[input_dim] + mlp_dims``.

    A ReLU follows every Linear layer except the last one, unless
    ``last_relu`` is True, in which case the final layer also gets one.
    """
    dims = [input_dim] + mlp_dims
    last = len(dims) - 2  # index of the final Linear layer
    layers = []
    for idx, (d_in, d_out) in enumerate(zip(dims[:-1], dims[1:])):
        layers.append(nn.Linear(d_in, d_out))
        if last_relu or idx != last:
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
class ValueNetwork(nn.Module):
    """Plain CADRL value network: one MLP mapping a joint-state vector
    to a scalar state value."""

    def __init__(self, input_dim, mlp_dims):
        super().__init__()
        self.value_network = mlp(input_dim, mlp_dims)

    def forward(self, state):
        # Delegate straight to the underlying MLP.
        return self.value_network(state)
class CADRL(Policy):
    """Collision-Avoidance DRL policy for a robot plus ONE other agent.

    Holds the action-space construction, one-step state propagation, the
    epsilon-greedy value-based action selection, and the rotation of the
    14-d joint state into robot-centric coordinates."""
    def __init__(self):
        super().__init__()
        self.name = 'CADRL'
        self.trainable = True
        self.multiagent_training = None
        self.kinematics = None          # 'holonomic' or 'unicycle'
        self.epsilon = None             # exploration rate (train phase only)
        self.gamma = None               # discount factor
        self.sampling = None
        self.speed_samples = None
        self.rotation_samples = None
        self.query_env = None           # whether predict() asks env for a lookahead
        self.action_space = None
        self.speeds = None
        self.rotations = None
        self.action_values = None       # per-action values from the last predict()
        self.with_om = None
        self.cell_num = None
        self.cell_size = None
        self.om_channel_size = None
        self.self_state_dim = 6
        self.human_state_dim = 7
        self.joint_state_dim = self.self_state_dim + self.human_state_dim
    def configure(self, config):
        """Read hyperparameters from ``config`` and build the value network."""
        self.set_common_parameters(config)
        mlp_dims = [int(x) for x in config.get('cadrl', 'mlp_dims').split(', ')]
        self.model = ValueNetwork(self.joint_state_dim, mlp_dims)
        self.multiagent_training = config.getboolean('cadrl', 'multiagent_training')
        logging.info('Policy: CADRL without occupancy map')
    def set_common_parameters(self, config):
        # Parameters shared by CADRL and all derived policies.
        self.gamma = config.getfloat('rl', 'gamma')
        self.kinematics = config.get('action_space', 'kinematics')
        self.sampling = config.get('action_space', 'sampling')
        self.speed_samples = config.getint('action_space', 'speed_samples')
        self.rotation_samples = config.getint('action_space', 'rotation_samples')
        self.query_env = config.getboolean('action_space', 'query_env')
        self.cell_num = config.getint('om', 'cell_num')
        self.cell_size = config.getfloat('om', 'cell_size')
        self.om_channel_size = config.getint('om', 'om_channel_size')
    def set_device(self, device):
        self.device = device
        self.model.to(device)
    def set_epsilon(self, epsilon):
        self.epsilon = epsilon
    def build_action_space(self, v_pref):
        """
        Action space consists of 25 uniformly sampled actions in permitted range and 25 randomly sampled actions.
        """
        holonomic = True if self.kinematics == 'holonomic' else False
        # Exponentially spaced speeds in (0, v_pref].
        speeds = [(np.exp((i + 1) / self.speed_samples) - 1) / (np.e - 1) * v_pref for i in range(self.speed_samples)]
        if holonomic:
            rotations = np.linspace(0, 2 * np.pi, self.rotation_samples, endpoint=False)
        else:
            rotations = np.linspace(-np.pi / 4, np.pi / 4, self.rotation_samples)
        # Always include the stop action first.
        action_space = [ActionXY(0, 0) if holonomic else ActionRot(0, 0)]
        for rotation, speed in itertools.product(rotations, speeds):
            if holonomic:
                action_space.append(ActionXY(speed * np.cos(rotation), speed * np.sin(rotation)))
            else:
                action_space.append(ActionRot(speed, rotation))
        self.speeds = speeds
        self.rotations = rotations
        self.action_space = action_space
    def propagate(self, state, action):
        """One-step constant-velocity rollout of ``state`` under ``action``."""
        if isinstance(state, ObservableState):
            # propagate state of humans
            next_px = state.px + action.vx * self.time_step
            next_py = state.py + action.vy * self.time_step
            next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # propagate state of current agent
            # perform action without rotation
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step
                next_py = state.py + action.vy * self.time_step
                next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
                                       state.gx, state.gy, state.v_pref, state.theta)
            else:
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step
                next_py = state.py + next_vy * self.time_step
                next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
                                       state.v_pref, next_theta)
        else:
            raise ValueError('Type error')
        return next_state
    def predict(self, state):
        """
        Input state is the joint state of robot concatenated by the observable state of other agents
        To predict the best action, agent samples actions and propagates one step to see how good the next state is
        thus the reward function is needed

        :raises AttributeError: if phase/device (or epsilon in training) are unset
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')
        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            # Lazily built: depends on the robot's preferred speed.
            self.build_action_space(state.self_state.v_pref)
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniform random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            self.action_values = list()
            max_min_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                ob, reward, done, info = self.env.onestep_lookahead(action)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                               for next_human_state in ob], dim=0)
                # VALUE UPDATE
                # Worst case over humans (min), best action over that (max).
                outputs = self.model(self.rotate(batch_next_states))
                min_output, min_index = torch.min(outputs, 0)
                min_value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * min_output.data.item()
                self.action_values.append(min_value)
                if min_value > max_min_value:
                    max_min_value = min_value
                    max_action = action
        if self.phase == 'train':
            # Remember the transformed state so the explorer can store it.
            self.last_state = self.transform(state)
        return max_action
    def transform(self, state):
        """
        Take the state passed from agent and transform it to tensor for batch training
        :param state:
        :return: tensor of shape (len(state), )
        """
        assert len(state.human_states) == 1
        state = torch.Tensor(state.self_state + state.human_states[0]).to(self.device)
        state = self.rotate(state.unsqueeze(0)).squeeze(dim=0)
        return state
    def rotate(self, state):
        """
        Transform the coordinate to agent-centric. x axis: position -> goal
        Input state tensor is of size (batch_size, state_length)
        """
        # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
        #  0     1      2     3      4        5     6      7         8       9      10     11     12       13
        batch = state.shape[0]
        dx = (state[:, 5] - state[:, 0]).reshape((batch, -1))  # -1 means calculated automatically
        dy = (state[:, 6] - state[:, 1]).reshape((batch, -1))
        # Angle of the robot->goal vector; the new frame's x axis.
        rot = torch.atan2(state[:, 6] - state[:, 1], state[:, 5] - state[:, 0])
        dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)
        v_pref = state[:, 7].reshape((batch, -1))
        vx = (state[:, 2] * torch.cos(rot) + state[:, 3] * torch.sin(rot)).reshape((batch, -1))
        vy = (state[:, 3] * torch.cos(rot) - state[:, 2] * torch.sin(rot)).reshape((batch, -1))
        radius = state[:, 4].reshape((batch, -1))
        if self.kinematics == 'unicycle':
            theta = (state[:, 8] - rot).reshape((batch, -1))
        else:
            # set theta to be zero since it's not used
            theta = torch.zeros_like(v_pref)
        vx1 = (state[:, 11] * torch.cos(rot) + state[:, 12] * torch.sin(rot)).reshape((batch, -1))
        vy1 = (state[:, 12] * torch.cos(rot) - state[:, 11] * torch.sin(rot)).reshape((batch, -1))
        px1 = (state[:, 9] - state[:, 0]) * torch.cos(rot) + (state[:, 10] - state[:, 1]) * torch.sin(rot)
        px1 = px1.reshape((batch, -1))
        py1 = (state[:, 10] - state[:, 1]) * torch.cos(rot) - (state[:, 9] - state[:, 0]) * torch.sin(rot)
        py1 = py1.reshape((batch, -1))
        radius1 = state[:, 13].reshape((batch, -1))
        radius_sum = radius + radius1
        da = torch.norm(torch.cat([(state[:, 0] - state[:, 9]).reshape((batch, -1)), (state[:, 1] - state[:, 10]).
                                  reshape((batch, -1))], dim=1), 2, dim=1, keepdim=True)
        new_state = torch.cat([dg, v_pref, theta, radius, vx, vy, px1, py1, vx1, vy1, radius1, da, radius_sum], dim=1)
        return new_state
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/__init__.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/multi_human_rl.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/multi_human_rl.py | import torch
import numpy as np
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_nav.policy.cadrl import CADRL
class MultiHumanRL(CADRL):
    def __init__(self):
        # All configuration/state handling is inherited from CADRL.
        super().__init__()
    def predict(self, state):
        """
        A base class for all methods that takes pairwise joint state as input to value network.
        The input to the value network is always of shape (batch_size, # humans, rotated joint state length)

        :param state: joint state (robot full state + all human observable states)
        :return: the action with the highest one-step-lookahead value
        :raises AttributeError: if phase/device (or epsilon in training) are unset
        :raises ValueError: if no action scored above -inf (untrained network)
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')
        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            # Lazily built: depends on the robot's preferred speed.
            self.build_action_space(state.self_state.v_pref)
        occupancy_maps = None
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniform random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            self.action_values = list()
            max_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                if self.query_env:
                    # Ask the environment for the exact one-step outcome.
                    next_human_states, reward, done, info = self.env.onestep_lookahead(action)
                else:
                    # Approximate: humans keep their current velocities.
                    next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy))
                                         for human_state in state.human_states]
                    reward = self.compute_reward(next_self_state, next_human_states)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                              for next_human_state in next_human_states], dim=0)
                rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)
                if self.with_om:
                    if occupancy_maps is None:
                        # Occupancy maps don't depend on the robot's action; build once.
                        occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)
                    rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)
                # VALUE UPDATE
                next_state_value = self.model(rotated_batch_input).data.item()
                value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * next_state_value
                self.action_values.append(value)
                if value > max_value:
                    max_value = value
                    max_action = action
            if max_action is None:
                raise ValueError('Value network is not well trained. ')
        if self.phase == 'train':
            # Remember the transformed state so the explorer can store it.
            self.last_state = self.transform(state)
        return max_action
def compute_reward(self, nav, humans):
# collision detection
dmin = float('inf')
collision = False
for i, human in enumerate(humans):
dist = np.linalg.norm((nav.px - human.px, nav.py - human.py)) - nav.radius - human.radius
if dist < 0:
collision = True
break
if dist < dmin:
dmin = dist
# check if reaching the goal
reaching_goal = np.linalg.norm((nav.px - nav.gx, nav.py - nav.gy)) < nav.radius
if collision:
reward = -0.25
elif reaching_goal:
reward = 1
elif dmin < 0.2:
reward = (dmin - 0.2) * 0.5 * self.time_step
else:
reward = 0
return reward
def transform(self, state):
    """
    Take the state passed from agent and transform it to the input of value network
    :param state: JointState (robot full state + humans' observable states)
    :return: tensor of shape (# of humans, len(state))
    """
    # One row per human: state.self_state + human_state yields a flat tuple
    # (robot full state followed by that human's observable state) via the
    # state classes' __add__ overloads.
    state_tensor = torch.cat([torch.Tensor([state.self_state + human_state]).to(self.device)
                              for human_state in state.human_states], dim=0)
    if self.with_om:
        # Append each human's local occupancy map as extra per-row features.
        occupancy_maps = self.build_occupancy_maps(state.human_states)
        state_tensor = torch.cat([self.rotate(state_tensor), occupancy_maps], dim=1)
    else:
        # rotate() is defined elsewhere in this class; presumably it re-expresses
        # the joint state in a robot-centric frame — confirm against its definition.
        state_tensor = self.rotate(state_tensor)
    return state_tensor
def input_dim(self):
    """Width of one value-network input row: joint state plus optional occupancy map."""
    extra = 0
    if self.with_om:
        # each of the cell_num**2 grid cells contributes om_channel_size features
        extra = self.cell_num ** 2 * self.om_channel_size
    return self.joint_state_dim + extra
def build_occupancy_maps(self, human_states):
    """
    Build a local occupancy (and optionally velocity) map around each human.

    :param human_states: list of agents exposing px, py, vx, vy
    :return: float tensor of shape (# humans, cell_num ** 2 * om_channel_size)
             (cell_num ** 2 when om_channel_size == 1)
    """
    occupancy_maps = []
    for human in human_states:
        # all other humans' (px, py, vx, vy) rows
        other_humans = np.concatenate([np.array([(other_human.px, other_human.py, other_human.vx, other_human.vy)])
                                       for other_human in human_states if other_human != human], axis=0)
        other_px = other_humans[:, 0] - human.px
        other_py = other_humans[:, 1] - human.py
        # new x-axis is in the direction of human's velocity
        human_velocity_angle = np.arctan2(human.vy, human.vx)
        other_human_orientation = np.arctan2(other_py, other_px)
        rotation = other_human_orientation - human_velocity_angle
        distance = np.linalg.norm([other_px, other_py], axis=0)
        other_px = np.cos(rotation) * distance
        other_py = np.sin(rotation) * distance

        # compute indices of humans in the grid; out-of-grid agents become -inf
        # so the membership tests below drop them
        other_x_index = np.floor(other_px / self.cell_size + self.cell_num / 2)
        other_y_index = np.floor(other_py / self.cell_size + self.cell_num / 2)
        other_x_index[other_x_index < 0] = float('-inf')
        other_x_index[other_x_index >= self.cell_num] = float('-inf')
        other_y_index[other_y_index < 0] = float('-inf')
        other_y_index[other_y_index >= self.cell_num] = float('-inf')
        grid_indices = self.cell_num * other_y_index + other_x_index
        occupancy_map = np.isin(range(self.cell_num ** 2), grid_indices)
        if self.om_channel_size == 1:
            occupancy_maps.append([occupancy_map.astype(int)])
        else:
            # calculate relative velocity for other agents
            other_human_velocity_angles = np.arctan2(other_humans[:, 3], other_humans[:, 2])
            rotation = other_human_velocity_angles - human_velocity_angle
            speed = np.linalg.norm(other_humans[:, 2:4], axis=1)
            other_vx = np.cos(rotation) * speed
            other_vy = np.sin(rotation) * speed
            dm = [list() for _ in range(self.cell_num ** 2 * self.om_channel_size)]
            for i, index in np.ndenumerate(grid_indices):
                if index in range(self.cell_num ** 2):
                    if self.om_channel_size == 2:
                        dm[2 * int(index)].append(other_vx[i])
                        dm[2 * int(index) + 1].append(other_vy[i])
                    elif self.om_channel_size == 3:
                        # BUG FIX: channels must be strided by 3 (was 2); with
                        # stride 2 neighbouring cells overwrote each other's
                        # channels and the last third of dm was never written.
                        dm[3 * int(index)].append(1)
                        dm[3 * int(index) + 1].append(other_vx[i])
                        dm[3 * int(index) + 2].append(other_vy[i])
                    else:
                        raise NotImplementedError
            # average the contributions that landed in each channel (0 if empty)
            for i, cell in enumerate(dm):
                dm[i] = sum(dm[i]) / len(dm[i]) if len(dm[i]) != 0 else 0
            occupancy_maps.append([dm])
    return torch.from_numpy(np.concatenate(occupancy_maps, axis=0)).float()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/policy_factory.py | sarl_star_ros/CrowdNav/build/lib.linux-x86_64-2.7/crowd_nav/policy/policy_factory.py | from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_nav.policy.cadrl import CADRL
from crowd_nav.policy.lstm_rl import LstmRL
from crowd_nav.policy.sarl import SARL
# Register the learnable navigation policies under their config-file names.
policy_factory['cadrl'] = CADRL
policy_factory['lstm_rl'] = LstmRL
policy_factory['sarl'] = SARL
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/__init__.py | from gym.envs.registration import register
# Register the crowd navigation environment with Gym under id 'CrowdSim-v0'.
register(
    id='CrowdSim-v0',
    entry_point='crowd_sim.envs:CrowdSim',
)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/__init__.py | from .crowd_sim import CrowdSim
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/crowd_sim.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/crowd_sim.py | import logging
import gym
import matplotlib.lines as mlines
import numpy as np
import rvo2
import torch
from matplotlib import patches
from numpy.linalg import norm
from crowd_sim.envs.utils.human import Human
from crowd_sim.envs.utils.info import *
from crowd_sim.envs.utils.utils import point_to_segment_dist
class CrowdSim(gym.Env):
    """Gym environment simulating a robot navigating among simulated humans."""
    metadata = {'render.modes': ['human']}
def __init__(self):
    """
    Movement simulation for n+1 agents.
    Agent can either be human or robot.
    Humans are controlled by an unknown and fixed policy.
    The robot is controlled by a known and learnable policy.
    """
    # episode bookkeeping (filled in by configure()/reset())
    self.time_limit = None
    self.time_step = None
    self.robot = None
    self.humans = None
    self.global_time = None
    self.human_times = None
    # reward function
    self.success_reward = None
    self.collision_penalty = None
    self.discomfort_dist = None
    self.discomfort_penalty_factor = None
    # simulation configuration
    self.config = None
    self.case_capacity = None
    self.case_size = None
    self.case_counter = None
    self.randomize_attributes = None
    self.train_val_sim = None
    self.test_sim = None
    self.square_width = None
    self.circle_radius = None
    self.human_num = None
    # for visualization
    self.states = None
    self.action_values = None
    self.attention_weights = None
def configure(self, config):
    """Load environment, reward and simulation parameters from a configparser object."""
    self.config = config
    self.time_limit = config.getint('env', 'time_limit')
    self.time_step = config.getfloat('env', 'time_step')
    self.randomize_attributes = config.getboolean('env', 'randomize_attributes')
    self.success_reward = config.getfloat('reward', 'success_reward')
    self.collision_penalty = config.getfloat('reward', 'collision_penalty')
    self.discomfort_dist = config.getfloat('reward', 'discomfort_dist')
    self.discomfort_penalty_factor = config.getfloat('reward', 'discomfort_penalty_factor')
    if self.config.get('humans', 'policy') == 'orca':
        # 'train' draws from a practically unbounded pool of random cases;
        # val/test sizes come from the config
        self.case_capacity = {'train': np.iinfo(np.uint32).max - 2000, 'val': 1000, 'test': 1000}
        self.case_size = {'train': np.iinfo(np.uint32).max - 2000, 'val': config.getint('env', 'val_size'),
                          'test': config.getint('env', 'test_size')}
        self.train_val_sim = config.get('sim', 'train_val_sim')
        self.test_sim = config.get('sim', 'test_sim')
        self.square_width = config.getfloat('sim', 'square_width')
        self.circle_radius = config.getfloat('sim', 'circle_radius')
        self.human_num = config.getint('sim', 'human_num')
    else:
        raise NotImplementedError
    self.case_counter = {'train': 0, 'test': 0, 'val': 0}
    logging.info('human number: {}'.format(self.human_num))
    if self.randomize_attributes:
        logging.info("Randomize human's radius and preferred speed")
    else:
        logging.info("Not randomize human's radius and preferred speed")
    logging.info('Training simulation: {}, test simulation: {}'.format(self.train_val_sim, self.test_sim))
    logging.info('Square width: {}, circle width: {}'.format(self.square_width, self.circle_radius))
def set_robot(self, robot):
    # The robot must be attached before reset() is called.
    self.robot = robot
def generate_random_human_position(self, human_num, rule):
    """
    Generate human position according to certain rule
    Rule square_crossing: generate start/goal position at two sides of y-axis
    Rule circle_crossing: generate start position on a circle, goal position is at the opposite side
    Rule mixed: sample a human count, then either static obstacles or a mix of
    circle- and square-crossing humans
    :param human_num:
    :param rule:
    :return:
    """
    # initial min separation distance to avoid danger penalty at beginning
    if rule == 'square_crossing':
        self.humans = []
        for i in range(human_num):
            self.humans.append(self.generate_square_crossing_human())
    elif rule == 'circle_crossing':
        self.humans = []
        for i in range(human_num):
            self.humans.append(self.generate_circle_crossing_human())
    elif rule == 'mixed':
        # mix different training simulation with certain distribution
        static_human_num = {0: 0.05, 1: 0.2, 2: 0.2, 3: 0.3, 4: 0.1, 5: 0.15}
        dynamic_human_num = {1: 0.3, 2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1}
        static = True if np.random.random() < 0.2 else False
        prob = np.random.random()
        # sample the human count from the chosen categorical distribution
        for key, value in sorted(static_human_num.items() if static else dynamic_human_num.items()):
            if prob - value <= 0:
                human_num = key
                break
            else:
                prob -= value
        self.human_num = human_num
        self.humans = []
        if static:
            # randomly initialize static objects in a square of (width, height)
            width = 4
            height = 8
            if human_num == 0:
                # place one dummy far away so downstream code still sees a human
                human = Human(self.config, 'humans')
                human.set(0, -10, 0, -10, 0, 0, 0)
                self.humans.append(human)
            for i in range(human_num):
                human = Human(self.config, 'humans')
                if np.random.random() > 0.5:
                    sign = -1
                else:
                    sign = 1
                # rejection-sample a position keeping a comfortable gap to everyone
                while True:
                    px = np.random.random() * width * 0.5 * sign
                    py = (np.random.random() - 0.5) * height
                    collide = False
                    for agent in [self.robot] + self.humans:
                        if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
                            collide = True
                            break
                    if not collide:
                        break
                # static human: goal equals start, zero velocity
                human.set(px, py, px, py, 0, 0, 0)
                self.humans.append(human)
        else:
            # the first 2 two humans will be in the circle crossing scenarios
            # the rest humans will have a random starting and end position
            for i in range(human_num):
                if i < 2:
                    human = self.generate_circle_crossing_human()
                else:
                    human = self.generate_square_crossing_human()
                self.humans.append(human)
    else:
        raise ValueError("Rule doesn't exist")
def generate_circle_crossing_human(self):
    """Spawn a human on the circle rim whose goal is the diametrically opposite point."""
    human = Human(self.config, 'humans')
    if self.randomize_attributes:
        human.sample_random_attributes()
    while True:
        angle = np.random.random() * np.pi * 2
        # add some noise to simulate all the possible cases robot could meet with human
        px_noise = (np.random.random() - 0.5) * human.v_pref
        py_noise = (np.random.random() - 0.5) * human.v_pref
        px = self.circle_radius * np.cos(angle) + px_noise
        py = self.circle_radius * np.sin(angle) + py_noise
        collide = False
        # keep clear of every existing agent's start AND goal position
        for agent in [self.robot] + self.humans:
            min_dist = human.radius + agent.radius + self.discomfort_dist
            if norm((px - agent.px, py - agent.py)) < min_dist or \
                    norm((px - agent.gx, py - agent.gy)) < min_dist:
                collide = True
                break
        if not collide:
            break
    human.set(px, py, -px, -py, 0, 0, 0)
    return human
def generate_square_crossing_human(self):
    """Spawn a human starting on one side of the y-axis with its goal on the other side."""
    human = Human(self.config, 'humans')
    if self.randomize_attributes:
        human.sample_random_attributes()
    if np.random.random() > 0.5:
        sign = -1
    else:
        sign = 1
    # rejection-sample a collision-free start position
    while True:
        px = np.random.random() * self.square_width * 0.5 * sign
        py = (np.random.random() - 0.5) * self.square_width
        collide = False
        for agent in [self.robot] + self.humans:
            if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
                collide = True
                break
        if not collide:
            break
    # rejection-sample a goal on the opposite side, clear of other goals
    while True:
        gx = np.random.random() * self.square_width * 0.5 * -sign
        gy = (np.random.random() - 0.5) * self.square_width
        collide = False
        for agent in [self.robot] + self.humans:
            if norm((gx - agent.gx, gy - agent.gy)) < human.radius + agent.radius + self.discomfort_dist:
                collide = True
                break
        if not collide:
            break
    human.set(px, py, gx, gy, 0, 0, 0)
    return human
def get_human_times(self):
    """
    Run the whole simulation to the end and compute the average time for human to reach goal.
    Once an agent reaches the goal, it stops moving and becomes an obstacle
    (doesn't need to take half responsibility to avoid collision).
    :return: list of per-human arrival times (simulation seconds)
    """
    # centralized orca simulator for all humans
    if not self.robot.reached_destination():
        raise ValueError('Episode is not done yet')
    params = (10, 10, 5, 5)
    sim = rvo2.PyRVOSimulator(self.time_step, *params, 0.3, 1)
    sim.addAgent(self.robot.get_position(), *params, self.robot.radius, self.robot.v_pref,
                 self.robot.get_velocity())
    for human in self.humans:
        sim.addAgent(human.get_position(), *params, human.radius, human.v_pref, human.get_velocity())
    max_time = 1000
    # NOTE(review): exceeding max_time only logs a warning without breaking,
    # so a human that never reaches its goal keeps this loop (and the log)
    # running forever — confirm whether a break was intended here.
    while not all(self.human_times):
        for i, agent in enumerate([self.robot] + self.humans):
            # preferred velocity: unit vector towards the agent's goal
            vel_pref = np.array(agent.get_goal_position()) - np.array(agent.get_position())
            if norm(vel_pref) > 1:
                vel_pref /= norm(vel_pref)
            sim.setAgentPrefVelocity(i, tuple(vel_pref))
        sim.doStep()
        self.global_time += self.time_step
        if self.global_time > max_time:
            logging.warning('Simulation cannot terminate!')
        for i, human in enumerate(self.humans):
            # record only each human's first arrival time
            if self.human_times[i] == 0 and human.reached_destination():
                self.human_times[i] = self.global_time
        # for visualization
        self.robot.set_position(sim.getAgentPosition(0))
        for i, human in enumerate(self.humans):
            human.set_position(sim.getAgentPosition(i + 1))
        self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
    del sim
    return self.human_times
def reset(self, phase='test', test_case=None):
    """
    Set px, py, gx, gy, vx, vy, theta for robot and humans
    :param phase: 'train', 'val' or 'test'; selects case pool and seeding
    :param test_case: optional explicit case index (-1 triggers the debug layout)
    :return: initial observation (humans' observable states)
    """
    if self.robot is None:
        raise AttributeError('robot has to be set!')
    assert phase in ['train', 'val', 'test']
    if test_case is not None:
        self.case_counter[phase] = test_case
    self.global_time = 0
    if phase == 'test':
        self.human_times = [0] * self.human_num
    else:
        self.human_times = [0] * (self.human_num if self.robot.policy.multiagent_training else 1)
    if not self.robot.policy.multiagent_training:
        self.train_val_sim = 'circle_crossing'
    if self.config.get('humans', 'policy') == 'trajnet':
        raise NotImplementedError
    else:
        # offsets keep train/val/test random seeds disjoint
        counter_offset = {'train': self.case_capacity['val'] + self.case_capacity['test'],
                          'val': 0, 'test': self.case_capacity['val']}
        self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0, np.pi / 2)
        if self.case_counter[phase] >= 0:
            # seeding per case index makes every case reproducible
            np.random.seed(counter_offset[phase] + self.case_counter[phase])
            if phase in ['train', 'val']:
                human_num = self.human_num if self.robot.policy.multiagent_training else 1
                self.generate_random_human_position(human_num=human_num, rule=self.train_val_sim)
            else:
                self.generate_random_human_position(human_num=self.human_num, rule=self.test_sim)
            # case_counter is always between 0 and case_size[phase]
            self.case_counter[phase] = (self.case_counter[phase] + 1) % self.case_size[phase]
        else:
            assert phase == 'test'
            if self.case_counter[phase] == -1:
                # for debugging purposes: fixed three-human layout
                self.human_num = 3
                self.humans = [Human(self.config, 'humans') for _ in range(self.human_num)]
                self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2)
                self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2)
                self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2)
            else:
                raise NotImplementedError
    for agent in [self.robot] + self.humans:
        agent.time_step = self.time_step
        agent.policy.time_step = self.time_step
    self.states = list()
    if hasattr(self.robot.policy, 'action_values'):
        self.action_values = list()
    if hasattr(self.robot.policy, 'get_attention_weights'):
        self.attention_weights = list()
    # get current observation
    if self.robot.sensor == 'coordinates':
        ob = [human.get_observable_state() for human in self.humans]
    elif self.robot.sensor == 'RGB':
        raise NotImplementedError
    return ob
def onestep_lookahead(self, action):
    # Evaluate an action's (ob, reward, done, info) without mutating the environment.
    return self.step(action, update=False)
def step(self, action, update=True):
    """
    Compute actions for all agents, detect collision, update environment and return (ob, reward, done, info)

    :param action: robot action (ActionXY or ActionRot depending on kinematics)
    :param update: when False, act as a pure lookahead (no state mutation)
    """
    human_actions = []
    for human in self.humans:
        # observation for humans is always coordinates
        ob = [other_human.get_observable_state() for other_human in self.humans if other_human != human]
        if self.robot.visible:
            ob += [self.robot.get_observable_state()]
        human_actions.append(human.act(ob))
    # collision detection: minimum robot-human boundary distance over this step,
    # computed in the robot-centred frame with relative velocities
    dmin = float('inf')
    collision = False
    for i, human in enumerate(self.humans):
        px = human.px - self.robot.px
        py = human.py - self.robot.py
        if self.robot.kinematics == 'holonomic':
            vx = human.vx - action.vx
            vy = human.vy - action.vy
        else:
            vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
            vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
        ex = px + vx * self.time_step
        ey = py + vy * self.time_step
        # closest distance between boundaries of two agents
        closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius
        if closest_dist < 0:
            collision = True
            # logging.debug("Collision: distance between robot and p{} is {:.2E}".format(i, closest_dist))
            break
        elif closest_dist < dmin:
            dmin = closest_dist
    # collision detection between humans
    human_num = len(self.humans)
    for i in range(human_num):
        for j in range(i + 1, human_num):
            dx = self.humans[i].px - self.humans[j].px
            dy = self.humans[i].py - self.humans[j].py
            dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius
            if dist < 0:
                # detect collision but don't take humans' collision into account
                logging.debug('Collision happens between humans in step()')
    # check if reaching the goal
    end_position = np.array(self.robot.compute_position(action, self.time_step))
    reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius
    # reward/termination priority: timeout > collision > goal > discomfort > nothing
    if self.global_time >= self.time_limit - 1:
        reward = 0
        done = True
        info = Timeout()
    elif collision:
        reward = self.collision_penalty
        done = True
        info = Collision()
    elif reaching_goal:
        reward = self.success_reward
        done = True
        info = ReachGoal()
    elif dmin < self.discomfort_dist:
        # only penalize agent for getting too close if it's visible
        # adjust the reward based on FPS
        reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step
        done = False
        info = Danger(dmin)
    else:
        reward = 0
        done = False
        info = Nothing()
    if update:
        # store state, action value and attention weights
        self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
        if hasattr(self.robot.policy, 'action_values'):
            self.action_values.append(self.robot.policy.action_values)
        if hasattr(self.robot.policy, 'get_attention_weights'):
            self.attention_weights.append(self.robot.policy.get_attention_weights())
        # update all agents
        self.robot.step(action)
        for i, human_action in enumerate(human_actions):
            self.humans[i].step(human_action)
        self.global_time += self.time_step
        for i, human in enumerate(self.humans):
            # only record the first time the human reaches the goal
            if self.human_times[i] == 0 and human.reached_destination():
                self.human_times[i] = self.global_time
        # compute the observation
        if self.robot.sensor == 'coordinates':
            ob = [human.get_observable_state() for human in self.humans]
        elif self.robot.sensor == 'RGB':
            raise NotImplementedError
    else:
        # lookahead: predict humans' next observable states without mutating anything
        if self.robot.sensor == 'coordinates':
            ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]
        elif self.robot.sensor == 'RGB':
            raise NotImplementedError
    return ob, reward, done, info
def render(self, mode='human', output_file=None):
    """
    Visualize the recorded episode.

    :param mode: 'human' (current-frame snapshot), 'traj' (static trajectory
                 plot) or 'video' (matplotlib animation)
    :param output_file: when mode == 'video', save the animation to this path
    """
    from matplotlib import animation
    import matplotlib.pyplot as plt
    plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
    # offsets used to place text labels beside agent circles
    x_offset = 0.11
    y_offset = 0.11
    cmap = plt.cm.get_cmap('hsv', 10)
    robot_color = 'yellow'
    goal_color = 'red'
    arrow_color = 'red'
    arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)
    if mode == 'human':
        # snapshot: humans as blue outlines, robot as a filled red circle
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.set_xlim(-4, 4)
        ax.set_ylim(-4, 4)
        for human in self.humans:
            human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
            ax.add_artist(human_circle)
        ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
        plt.show()
    elif mode == 'traj':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-5, 5)
        ax.set_ylim(-5, 5)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)
        robot_positions = [self.states[i][0].position for i in range(len(self.states))]
        human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
                           for i in range(len(self.states))]
        for k in range(len(self.states)):
            # draw every 4th recorded frame plus the final one
            if k % 4 == 0 or k == len(self.states) - 1:
                robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
                humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
                          for i in range(len(self.humans))]
                ax.add_artist(robot)
                for human in humans:
                    ax.add_artist(human)
            # add time annotation
            global_time = k * self.time_step
            if global_time % 4 == 0 or k == len(self.states) - 1:
                agents = humans + [robot]
                times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
                                  '{:.1f}'.format(global_time),
                                  color='black', fontsize=14) for i in range(self.human_num + 1)]
                for time in times:
                    ax.add_artist(time)
            if k != 0:
                # connect consecutive positions with line segments
                nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
                                           (self.states[k - 1][0].py, self.states[k][0].py),
                                           color=robot_color, ls='solid')
                human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
                                               (self.states[k - 1][1][i].py, self.states[k][1][i].py),
                                               color=cmap(i), ls='solid')
                                    for i in range(self.human_num)]
                ax.add_artist(nav_direction)
                for human_direction in human_directions:
                    ax.add_artist(human_direction)
        plt.legend([robot], ['Robot'], fontsize=16)
        plt.show()
    elif mode == 'video':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-6, 6)
        ax.set_ylim(-6, 6)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)
        # add robot and its goal
        robot_positions = [state[0].position for state in self.states]
        goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
        robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
        ax.add_artist(robot)
        ax.add_artist(goal)
        plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16)
        # add humans and their numbers
        human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
        humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
                  for i in range(len(self.humans))]
        human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
                                  color='black', fontsize=12) for i in range(len(self.humans))]
        for i, human in enumerate(humans):
            ax.add_artist(human)
            ax.add_artist(human_numbers[i])
        # add time annotation
        time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
        ax.add_artist(time)
        # compute attention scores
        if self.attention_weights is not None:
            attention_scores = [
                plt.text(-5.5, 5 - 0.5 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
                         fontsize=16) for i in range(len(self.humans))]
        # compute orientation in each step and use arrow to show the direction
        radius = self.robot.radius
        if self.robot.kinematics == 'unicycle':
            orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
                            state[0].py + radius * np.sin(state[0].theta))) for state
                           in self.states]
            orientations = [orientation]
        else:
            # holonomic: heading inferred from velocity, for robot and every human
            orientations = []
            for i in range(self.human_num + 1):
                orientation = []
                for state in self.states:
                    if i == 0:
                        agent_state = state[0]
                    else:
                        agent_state = state[1][i - 1]
                    theta = np.arctan2(agent_state.vy, agent_state.vx)
                    orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
                                        agent_state.py + radius * np.sin(theta))))
                orientations.append(orientation)
        arrows = [patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
                  for orientation in orientations]
        for arrow in arrows:
            ax.add_artist(arrow)
        global_step = 0

        def update(frame_num):
            # advance animation to frame_num: move circles, labels and arrows
            nonlocal global_step
            nonlocal arrows
            global_step = frame_num
            robot.center = robot_positions[frame_num]
            for i, human in enumerate(humans):
                human.center = human_positions[frame_num][i]
                human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
                for arrow in arrows:
                    arrow.remove()
                arrows = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
                                                  arrowstyle=arrow_style) for orientation in orientations]
                for arrow in arrows:
                    ax.add_artist(arrow)
                if self.attention_weights is not None:
                    human.set_color(str(self.attention_weights[frame_num][i]))
                    attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))
            time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))

        def plot_value_heatmap():
            # polar heatmap of the stored action values at the current frame (debug aid)
            assert self.robot.kinematics == 'holonomic'
            for agent in [self.states[global_step][0]] + self.states[global_step][1]:
                print(('{:.4f}, ' * 6 + '{:.4f}').format(agent.px, agent.py, agent.gx, agent.gy,
                                                         agent.vx, agent.vy, agent.theta))
            # when any key is pressed draw the action value plot
            fig, axis = plt.subplots()
            speeds = [0] + self.robot.policy.speeds
            rotations = self.robot.policy.rotations + [np.pi * 2]
            r, th = np.meshgrid(speeds, rotations)
            z = np.array(self.action_values[global_step % len(self.states)][1:])
            z = (z - np.min(z)) / (np.max(z) - np.min(z))
            z = np.reshape(z, (16, 5))
            polar = plt.subplot(projection="polar")
            polar.tick_params(labelsize=16)
            mesh = plt.pcolormesh(th, r, z, vmin=0, vmax=1)
            plt.plot(rotations, r, color='k', ls='none')
            plt.grid()
            cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
            cbar = plt.colorbar(mesh, cax=cbaxes)
            cbar.ax.tick_params(labelsize=16)
            plt.show()

        def on_click(event):
            # toggle play/pause; when pausing, optionally show the value heatmap
            anim.running ^= True
            if anim.running:
                anim.event_source.stop()
                if hasattr(self.robot.policy, 'action_values'):
                    plot_value_heatmap()
            else:
                anim.event_source.start()

        fig.canvas.mpl_connect('key_press_event', on_click)
        anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
        anim.running = True
        if output_file is not None:
            ffmpeg_writer = animation.writers['ffmpeg']
            writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
            anim.save(output_file, writer=writer)
        else:
            plt.show()
    else:
        raise NotImplementedError
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
class FullState(object):
    """Complete kinodynamic state of an agent, including goal and preferred speed."""

    def __init__(self, px, py, vx, vy, radius, gx, gy, v_pref, theta):
        self.px = px
        self.py = py
        self.vx = vx
        self.vy = vy
        self.radius = radius
        self.gx = gx
        self.gy = gy
        self.v_pref = v_pref
        self.theta = theta
        # convenience tuples derived from the scalar fields
        self.position = (px, py)
        self.goal_position = (gx, gy)
        self.velocity = (vx, vy)

    def _as_tuple(self):
        return (self.px, self.py, self.vx, self.vy, self.radius,
                self.gx, self.gy, self.v_pref, self.theta)

    def __add__(self, other):
        # concatenation-style "addition": other's tuple form followed by ours
        return other + self._as_tuple()

    def __str__(self):
        return ' '.join(str(field) for field in self._as_tuple())
class ObservableState(object):
    """The part of an agent's state that other agents can perceive."""

    def __init__(self, px, py, vx, vy, radius):
        self.px = px
        self.py = py
        self.vx = vx
        self.vy = vy
        self.radius = radius
        # derived convenience tuples
        self.position = (px, py)
        self.velocity = (vx, vy)

    def _as_tuple(self):
        return (self.px, self.py, self.vx, self.vy, self.radius)

    def __add__(self, other):
        # tuple concatenation: other first, then this state's fields
        return other + self._as_tuple()

    def __str__(self):
        return ' '.join(str(field) for field in self._as_tuple())
class JointState(object):
    """The robot's full state paired with the observable states of all humans."""

    def __init__(self, self_state, human_states):
        # enforce the expected types early so downstream code can rely on them
        assert isinstance(self_state, FullState)
        assert all(isinstance(hs, ObservableState) for hs in human_states)
        self.self_state = self_state
        self.human_states = human_states
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/robot.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/robot.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Robot(Agent):
    """Controllable agent whose attached policy maps a JointState to an action."""

    def __init__(self, config, section):
        super().__init__(config, section)

    def act(self, ob):
        """Query the policy with the robot's full state plus observation *ob*."""
        if self.policy is None:
            raise AttributeError('Policy attribute has to be set!')
        return self.policy.predict(JointState(self.get_full_state(), ob))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/human.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/human.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Human(Agent):
    """Simulated pedestrian driven by its configured policy."""

    def __init__(self, config, section):
        super().__init__(config, section)

    def act(self, ob):
        """
        The state for human is its full state and all other agents' observable states
        :param ob: observable states of the other agents
        :return: action predicted by this human's policy
        """
        joint_state = JointState(self.get_full_state(), ob)
        return self.policy.predict(joint_state)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/action.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/action.py | from collections import namedtuple
# Holonomic action: desired velocity components.
ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
# Unicycle action: forward speed and rotation relative to the current heading.
ActionRot = namedtuple('ActionRot', ['v', 'r'])
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/utils.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/utils.py | import numpy as np
def point_to_segment_dist(x1, y1, x2, y2, x3, y3):
    """
    Calculate the closest distance between point(x3, y3) and a line segment with two endpoints (x1, y1), (x2, y2)
    """
    seg_dx = x2 - x1
    seg_dy = y2 - y1
    if seg_dx == 0 and seg_dy == 0:
        # degenerate segment: plain point-to-point distance
        return np.linalg.norm((x3 - x1, y3 - y1))
    # projection parameter of the point onto the infinite line, clamped to [0, 1]
    t = ((x3 - x1) * seg_dx + (y3 - y1) * seg_dy) / (seg_dx * seg_dx + seg_dy * seg_dy)
    t = max(0, min(1, t))
    # nearest point on the segment
    nearest_x = x1 + t * seg_dx
    nearest_y = y1 + t * seg_dy
    return np.linalg.norm((nearest_x - x3, nearest_y - y3))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
class Timeout(object):
    """Episode ended because the time limit ran out."""

    def __str__(self):
        return 'Timeout'
class ReachGoal(object):
    """Episode ended because the robot arrived at its goal."""

    def __str__(self):
        return 'Reaching goal'
class Danger(object):
    """Robot came uncomfortably close to a human; records the minimum separation."""

    def __init__(self, min_dist):
        self.min_dist = min_dist

    def __str__(self):
        return 'Too close'
class Collision(object):
    """Episode ended because the robot collided with a human."""

    def __str__(self):
        return 'Collision'
class Nothing(object):
    """Uneventful step: no collision, goal or discomfort."""

    def __str__(self):
        return ''
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/agent.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/utils/agent.py | import numpy as np
from numpy.linalg import norm
import abc
import logging
from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.action import ActionXY, ActionRot
from crowd_sim.envs.utils.state import ObservableState, FullState
class Agent(object):
    def __init__(self, config, section):
        """
        Base class for robot and human. Have the physical attributes of an agent.

        :param config: configparser object holding one section per agent type
        :param section: section name to read this agent's attributes from
        """
        self.visible = config.getboolean(section, 'visible')
        self.v_pref = config.getfloat(section, 'v_pref')
        self.radius = config.getfloat(section, 'radius')
        self.policy = policy_factory[config.get(section, 'policy')]()
        self.sensor = config.get(section, 'sensor')
        # The kinematic model ('holonomic' or not) is dictated by the policy.
        self.kinematics = self.policy.kinematics if self.policy is not None else None
        # Pose, goal, velocity and heading stay unset until set() is called.
        self.px = None
        self.py = None
        self.gx = None
        self.gy = None
        self.vx = None
        self.vy = None
        self.theta = None
        self.time_step = None

    def print_info(self):
        logging.info('Agent is {} and has {} kinematic constraint'.format(
            'visible' if self.visible else 'invisible', self.kinematics))

    def set_policy(self, policy):
        # Changing the policy also changes the kinematic model the agent obeys.
        self.policy = policy
        self.kinematics = policy.kinematics

    def sample_random_attributes(self):
        """
        Sample agent radius and v_pref attribute from certain distribution
        :return:
        """
        self.v_pref = np.random.uniform(0.5, 1.5)
        self.radius = np.random.uniform(0.3, 0.5)

    def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
        # Set the full kinematic state; radius/v_pref keep their configured
        # values unless explicitly overridden.
        self.px = px
        self.py = py
        self.gx = gx
        self.gy = gy
        self.vx = vx
        self.vy = vy
        self.theta = theta
        if radius is not None:
            self.radius = radius
        if v_pref is not None:
            self.v_pref = v_pref

    def get_observable_state(self):
        # State visible to other agents (no goal or preferred speed).
        return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)

    def get_next_observable_state(self, action):
        # Predict the observable state after applying `action` for one time
        # step, without mutating the agent itself.
        self.check_validity(action)
        pos = self.compute_position(action, self.time_step)
        next_px, next_py = pos
        if self.kinematics == 'holonomic':
            next_vx = action.vx
            next_vy = action.vy
        else:
            # NOTE(review): uses the current heading, while step() rotates by
            # action.r before computing velocity — confirm this asymmetry is intended.
            next_vx = action.v * np.cos(self.theta)
            next_vy = action.v * np.sin(self.theta)
        return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)

    def get_full_state(self):
        # Full state, including goal / preferred speed, as seen by the agent itself.
        return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)

    def get_position(self):
        return self.px, self.py

    def set_position(self, position):
        self.px = position[0]
        self.py = position[1]

    def get_goal_position(self):
        return self.gx, self.gy

    def get_velocity(self):
        return self.vx, self.vy

    def set_velocity(self, velocity):
        self.vx = velocity[0]
        self.vy = velocity[1]

    @abc.abstractmethod
    def act(self, ob):
        """
        Compute state using received observation and pass it to policy
        """
        return

    def check_validity(self, action):
        # Holonomic agents take (vx, vy); unicycle agents take (v, rotation).
        if self.kinematics == 'holonomic':
            assert isinstance(action, ActionXY)
        else:
            assert isinstance(action, ActionRot)

    def compute_position(self, action, delta_t):
        # Integrate `action` over delta_t and return the resulting (px, py).
        self.check_validity(action)
        if self.kinematics == 'holonomic':
            px = self.px + action.vx * delta_t
            py = self.py + action.vy * delta_t
        else:
            # Rotate first, then translate along the new heading.
            theta = self.theta + action.r
            px = self.px + np.cos(theta) * action.v * delta_t
            py = self.py + np.sin(theta) * action.v * delta_t
        return px, py

    def step(self, action):
        """
        Perform an action and update the state
        """
        self.check_validity(action)
        pos = self.compute_position(action, self.time_step)
        self.px, self.py = pos
        if self.kinematics == 'holonomic':
            self.vx = action.vx
            self.vy = action.vy
        else:
            # Heading is wrapped to [0, 2*pi); velocity follows the new heading.
            self.theta = (self.theta + action.r) % (2 * np.pi)
            self.vx = action.v * np.cos(self.theta)
            self.vy = action.v * np.sin(self.theta)

    def reached_destination(self):
        # The goal counts as reached when its center lies within the agent's radius.
        return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/orca.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/orca.py | import numpy as np
import rvo2
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
class ORCA(Policy):
    def __init__(self):
        """
        timeStep        The time step of the simulation.
                        Must be positive.
        neighborDist    The default maximum distance (center point
                        to center point) to other agents a new agent
                        takes into account in the navigation. The
                        larger this number, the longer the running
                        time of the simulation. If the number is too
                        low, the simulation will not be safe. Must be
                        non-negative.
        maxNeighbors    The default maximum number of other agents a
                        new agent takes into account in the
                        navigation. The larger this number, the
                        longer the running time of the simulation.
                        If the number is too low, the simulation
                        will not be safe.
        timeHorizon     The default minimal amount of time for which
                        a new agent's velocities that are computed
                        by the simulation are safe with respect to
                        other agents. The larger this number, the
                        sooner an agent will respond to the presence
                        of other agents, but the less freedom the
                        agent has in choosing its velocities.
                        Must be positive.
        timeHorizonObst The default minimal amount of time for which
                        a new agent's velocities that are computed
                        by the simulation are safe with respect to
                        obstacles. The larger this number, the
                        sooner an agent will respond to the presence
                        of obstacles, but the less freedom the agent
                        has in choosing its velocities.
                        Must be positive.
        radius          The default radius of a new agent.
                        Must be non-negative.
        maxSpeed        The default maximum speed of a new agent.
                        Must be non-negative.
        velocity        The default initial two-dimensional linear
                        velocity of a new agent (optional).

        ORCA first uses neighborDist and maxNeighbors to find neighbors that need to be taken into account.
        Here set them to be large enough so that all agents will be considered as neighbors.
        Time_horizon should be set that at least it's safe for one time step

        In this work, obstacles are not considered. So the value of time_horizon_obst doesn't matter.
        """
        super().__init__()
        self.name = 'ORCA'
        # Rule-based policy: nothing to learn.
        self.trainable = False
        self.multiagent_training = None
        self.kinematics = 'holonomic'
        # Extra clearance added to every agent radius inside the simulator.
        self.safety_space = 0
        self.neighbor_dist = 10
        self.max_neighbors = 10
        self.time_horizon = 5
        self.time_horizon_obst = 5
        self.radius = 0.3
        self.max_speed = 1
        # Lazily-created rvo2.PyRVOSimulator, rebuilt when the crowd size changes.
        self.sim = None

    def configure(self, config):
        # All parameters are currently hard-coded in __init__; the config-driven
        # version is kept here for reference.
        # self.time_step = config.getfloat('orca', 'time_step')
        # self.neighbor_dist = config.getfloat('orca', 'neighbor_dist')
        # self.max_neighbors = config.getint('orca', 'max_neighbors')
        # self.time_horizon = config.getfloat('orca', 'time_horizon')
        # self.time_horizon_obst = config.getfloat('orca', 'time_horizon_obst')
        # self.radius = config.getfloat('orca', 'radius')
        # self.max_speed = config.getfloat('orca', 'max_speed')
        return

    def set_phase(self, phase):
        # ORCA behaves the same in train/val/test.
        return

    def predict(self, state):
        """
        Create a rvo2 simulation at each time step and run one step
        Python-RVO2 API: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/rvo2.pyx
        How simulation is done in RVO2: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/Agent.cpp

        Agent doesn't stop moving after it reaches the goal, because once it stops moving, the reciprocal rule is broken

        :param state:
        :return:
        """
        self_state = state.self_state
        params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
        # Rebuild the simulator if the number of humans changed since last step.
        if self.sim is not None and self.sim.getNumAgents() != len(state.human_states) + 1:
            del self.sim
            self.sim = None
        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, *params, self.radius, self.max_speed)
            # Agent 0 is the robot; humans follow in order.
            self.sim.addAgent(self_state.position, *params, self_state.radius + 0.01 + self.safety_space,
                              self_state.v_pref, self_state.velocity)
            for human_state in state.human_states:
                self.sim.addAgent(human_state.position, *params, human_state.radius + 0.01 + self.safety_space,
                                  self.max_speed, human_state.velocity)
        else:
            # Reuse the existing simulator: refresh positions and velocities only.
            self.sim.setAgentPosition(0, self_state.position)
            self.sim.setAgentVelocity(0, self_state.velocity)
            for i, human_state in enumerate(state.human_states):
                self.sim.setAgentPosition(i + 1, human_state.position)
                self.sim.setAgentVelocity(i + 1, human_state.velocity)

        # Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
        velocity = np.array((self_state.gx - self_state.px, self_state.gy - self_state.py))
        speed = np.linalg.norm(velocity)
        # NOTE(review): normalization only happens when the goal is farther than 1
        # (speed > 1); near the goal the preferred speed shrinks with distance — confirm intended.
        pref_vel = velocity / speed if speed > 1 else velocity

        # Perturb a little to avoid deadlocks due to perfect symmetry.
        # perturb_angle = np.random.random() * 2 * np.pi
        # perturb_dist = np.random.random() * 0.01
        # perturb_vel = np.array((np.cos(perturb_angle), np.sin(perturb_angle))) * perturb_dist
        # pref_vel += perturb_vel

        self.sim.setAgentPrefVelocity(0, tuple(pref_vel))
        for i, human_state in enumerate(state.human_states):
            # unknown goal position of other humans
            self.sim.setAgentPrefVelocity(i + 1, (0, 0))

        self.sim.doStep()
        action = ActionXY(*self.sim.getAgentVelocity(0))
        self.last_state = state

        return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/policy.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/policy.py | import abc
import numpy as np
class Policy(object):
    """Abstract base class for navigation policies.

    Subclasses implement configure() and predict(); the base class only holds
    the bookkeeping shared by every policy.
    """

    def __init__(self):
        self.trainable = False
        self.phase = None
        self.model = None
        self.device = None
        self.last_state = None
        self.time_step = None
        # Set when the agent is assumed to know the dynamics of the real world.
        self.env = None

    @abc.abstractmethod
    def configure(self, config):
        return

    def set_phase(self, phase):
        self.phase = phase

    def set_device(self, device):
        self.device = device

    def set_env(self, env):
        self.env = env

    def get_model(self):
        return self.model

    @abc.abstractmethod
    def predict(self, state):
        """
        Policy takes state as input and output an action
        """
        return

    @staticmethod
    def reach_destination(state):
        # The goal counts as reached once its center lies within the agent radius.
        self_state = state.self_state
        offset = (self_state.py - self_state.gy, self_state.px - self_state.gx)
        return bool(np.linalg.norm(offset) < self_state.radius)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/linear.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/linear.py | import numpy as np
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
class Linear(Policy):
    """Baseline policy: head straight toward the goal at preferred speed."""

    def __init__(self):
        super().__init__()
        self.trainable = False
        self.kinematics = 'holonomic'
        self.multiagent_training = True

    def configure(self, config):
        # Nothing to configure for this baseline.
        assert True

    def predict(self, state):
        me = state.self_state
        # Point the full preferred speed along the goal direction.
        heading = np.arctan2(me.gy - me.py, me.gx - me.px)
        return ActionXY(np.cos(heading) * me.v_pref, np.sin(heading) * me.v_pref)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/policy_factory.py | sarl_star_ros/CrowdNav/build/lib/crowd_sim/envs/policy/policy_factory.py | from crowd_sim.envs.policy.linear import Linear
from crowd_sim.envs.policy.orca import ORCA
def none_policy():
    """Factory entry representing 'no policy assigned'; always returns None."""
    return None
# Registry mapping policy names (as written in config files) to constructors.
policy_factory = dict()
policy_factory['linear'] = Linear
policy_factory['orca'] = ORCA
# 'none' maps to a factory that returns None, i.e. no policy assigned.
policy_factory['none'] = none_policy
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/train.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/train.py | import sys
import logging
import argparse
import configparser
import os
import shutil
import torch
import gym
import git
from crowd_sim.envs.utils.robot import Robot
from crowd_nav.utils.trainer import Trainer
from crowd_nav.utils.memory import ReplayMemory
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
def main():
    """Entry point for training: imitation learning warm-up followed by RL.

    Reads env/policy/train config files, sets up logging and output directory,
    then trains the policy and periodically checkpoints/evaluates it.
    """
    parser = argparse.ArgumentParser('Parse configuration file')
    parser.add_argument('--env_config', type=str, default='configs/env.config')
    parser.add_argument('--policy', type=str, default='cadrl')
    parser.add_argument('--policy_config', type=str, default='configs/policy.config')
    parser.add_argument('--train_config', type=str, default='configs/train.config')
    parser.add_argument('--output_dir', type=str, default='data/output')
    parser.add_argument('--weights', type=str)
    parser.add_argument('--resume', default=False, action='store_true')
    parser.add_argument('--gpu', default=False, action='store_true')
    parser.add_argument('--debug', default=False, action='store_true')
    args = parser.parse_args()

    # configure paths
    make_new_dir = True
    if os.path.exists(args.output_dir):
        key = input('Output directory already exists! Overwrite the folder? (y/n)')
        if key == 'y' and not args.resume:
            shutil.rmtree(args.output_dir)
        else:
            # Keep the existing directory and read the configs already copied into it.
            make_new_dir = False
            args.env_config = os.path.join(args.output_dir, os.path.basename(args.env_config))
            args.policy_config = os.path.join(args.output_dir, os.path.basename(args.policy_config))
            args.train_config = os.path.join(args.output_dir, os.path.basename(args.train_config))
    if make_new_dir:
        os.makedirs(args.output_dir)
        shutil.copy(args.env_config, args.output_dir)
        shutil.copy(args.policy_config, args.output_dir)
        shutil.copy(args.train_config, args.output_dir)
    log_file = os.path.join(args.output_dir, 'output.log')
    il_weight_file = os.path.join(args.output_dir, 'il_model.pth')
    rl_weight_file = os.path.join(args.output_dir, 'rl_model.pth')

    # configure logging
    mode = 'a' if args.resume else 'w'
    file_handler = logging.FileHandler(log_file, mode=mode)
    stdout_handler = logging.StreamHandler(sys.stdout)
    level = logging.INFO if not args.debug else logging.DEBUG
    logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],
                        format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    repo = git.Repo(search_parent_directories=True)
    # Bug fix: the original mixed '%s' with str.format(), which logged the literal
    # '%s' and dropped the hash. Use logging's lazy %-style arguments instead.
    logging.info('Current git head hash code: %s', repo.head.object.hexsha)
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
    logging.info('Using device: %s', device)

    # configure policy
    policy = policy_factory[args.policy]()
    if not policy.trainable:
        parser.error('Policy has to be trainable')
    if args.policy_config is None:
        parser.error('Policy config has to be specified for a trainable network')
    policy_config = configparser.RawConfigParser()
    policy_config.read(args.policy_config)
    policy.configure(policy_config)
    policy.set_device(device)

    # configure environment
    env_config = configparser.RawConfigParser()
    env_config.read(args.env_config)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    robot = Robot(env_config, 'robot')
    env.set_robot(robot)

    # read training parameters
    if args.train_config is None:
        parser.error('Train config has to be specified for a trainable network')
    train_config = configparser.RawConfigParser()
    train_config.read(args.train_config)
    rl_learning_rate = train_config.getfloat('train', 'rl_learning_rate')
    train_batches = train_config.getint('train', 'train_batches')
    train_episodes = train_config.getint('train', 'train_episodes')
    sample_episodes = train_config.getint('train', 'sample_episodes')
    target_update_interval = train_config.getint('train', 'target_update_interval')
    evaluation_interval = train_config.getint('train', 'evaluation_interval')
    capacity = train_config.getint('train', 'capacity')
    epsilon_start = train_config.getfloat('train', 'epsilon_start')
    epsilon_end = train_config.getfloat('train', 'epsilon_end')
    epsilon_decay = train_config.getfloat('train', 'epsilon_decay')
    checkpoint_interval = train_config.getint('train', 'checkpoint_interval')

    # configure trainer and explorer
    memory = ReplayMemory(capacity)
    model = policy.get_model()
    batch_size = train_config.getint('trainer', 'batch_size')
    trainer = Trainer(model, memory, device, batch_size)
    explorer = Explorer(env, robot, device, memory, policy.gamma, target_policy=policy)

    # imitation learning
    if args.resume:
        if not os.path.exists(rl_weight_file):
            # NOTE(review): execution continues after this error and the load below
            # will raise — consider raising explicitly here.
            logging.error('RL weights does not exist')
        model.load_state_dict(torch.load(rl_weight_file))
        rl_weight_file = os.path.join(args.output_dir, 'resumed_rl_model.pth')
        logging.info('Load reinforcement learning trained weights. Resume training')
    elif os.path.exists(il_weight_file):
        model.load_state_dict(torch.load(il_weight_file))
        logging.info('Load imitation learning trained weights.')
    else:
        il_episodes = train_config.getint('imitation_learning', 'il_episodes')
        il_policy = train_config.get('imitation_learning', 'il_policy')
        il_epochs = train_config.getint('imitation_learning', 'il_epochs')
        il_learning_rate = train_config.getfloat('imitation_learning', 'il_learning_rate')
        trainer.set_learning_rate(il_learning_rate)
        # A visible robot changes human behavior, so no extra safety buffer is needed.
        if robot.visible:
            safety_space = 0
        else:
            safety_space = train_config.getfloat('imitation_learning', 'safety_space')
        il_policy = policy_factory[il_policy]()
        il_policy.multiagent_training = policy.multiagent_training
        il_policy.safety_space = safety_space
        robot.set_policy(il_policy)
        explorer.run_k_episodes(il_episodes, 'train', update_memory=True, imitation_learning=True)
        trainer.optimize_epoch(il_epochs)
        torch.save(model.state_dict(), il_weight_file)
        logging.info('Finish imitation learning. Weights saved.')
        logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
    explorer.update_target_model(model)

    # reinforcement learning
    policy.set_env(env)
    robot.set_policy(policy)
    robot.print_info()
    trainer.set_learning_rate(rl_learning_rate)
    # fill the memory pool with some RL experience
    if args.resume:
        robot.policy.set_epsilon(epsilon_end)
        explorer.run_k_episodes(100, 'train', update_memory=True, episode=0)
        logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
    episode = 0
    while episode < train_episodes:
        if args.resume:
            epsilon = epsilon_end
        else:
            # Linearly anneal epsilon from start to end over epsilon_decay episodes.
            if episode < epsilon_decay:
                epsilon = epsilon_start + (epsilon_end - epsilon_start) / epsilon_decay * episode
            else:
                epsilon = epsilon_end
        robot.policy.set_epsilon(epsilon)

        # evaluate the model
        if episode % evaluation_interval == 0:
            explorer.run_k_episodes(env.case_size['val'], 'val', episode=episode)

        # sample k episodes into memory and optimize over the generated memory
        explorer.run_k_episodes(sample_episodes, 'train', update_memory=True, episode=episode)
        trainer.optimize_batch(train_batches)
        episode += 1

        if episode % target_update_interval == 0:
            explorer.update_target_model(model)

        if episode != 0 and episode % checkpoint_interval == 0:
            torch.save(model.state_dict(), rl_weight_file)

    # final test
    explorer.run_k_episodes(env.case_size['test'], 'test', episode=episode)


if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/test.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/test.py | import logging
import argparse
import configparser
import os
import torch
import numpy as np
import gym
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.robot import Robot
from crowd_sim.envs.policy.orca import ORCA
def main():
    """Entry point for evaluation: load a (possibly trained) policy and run it.

    Either visualizes a single test case or runs the whole test set through the
    Explorer, reporting aggregate statistics.
    """
    parser = argparse.ArgumentParser('Parse configuration file')
    parser.add_argument('--env_config', type=str, default='configs/env.config')
    parser.add_argument('--policy_config', type=str, default='configs/policy.config')
    parser.add_argument('--policy', type=str, default='orca')
    parser.add_argument('--model_dir', type=str, default=None)
    parser.add_argument('--il', default=False, action='store_true')
    parser.add_argument('--gpu', default=False, action='store_true')
    parser.add_argument('--visualize', default=False, action='store_true')
    parser.add_argument('--phase', type=str, default='test')
    parser.add_argument('--test_case', type=int, default=None)
    parser.add_argument('--square', default=False, action='store_true')
    parser.add_argument('--circle', default=False, action='store_true')
    parser.add_argument('--video_file', type=str, default=None)
    parser.add_argument('--traj', default=False, action='store_true')
    args = parser.parse_args()

    if args.model_dir is not None:
        # Prefer the configs that were copied into the model directory at train time.
        env_config_file = os.path.join(args.model_dir, os.path.basename(args.env_config))
        policy_config_file = os.path.join(args.model_dir, os.path.basename(args.policy_config))
        if args.il:
            model_weights = os.path.join(args.model_dir, 'il_model.pth')
        else:
            if os.path.exists(os.path.join(args.model_dir, 'resumed_rl_model.pth')):
                model_weights = os.path.join(args.model_dir, 'resumed_rl_model.pth')
            else:
                model_weights = os.path.join(args.model_dir, 'rl_model.pth')
    else:
        env_config_file = args.env_config
        # Bug fix: this previously assigned args.env_config, so the policy was
        # configured from the environment config file.
        policy_config_file = args.policy_config

    # configure logging and device
    logging.basicConfig(level=logging.INFO, format='%(asctime)s, %(levelname)s: %(message)s',
                        datefmt="%Y-%m-%d %H:%M:%S")
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
    logging.info('Using device: %s', device)

    # configure policy
    policy = policy_factory[args.policy]()
    policy_config = configparser.RawConfigParser()
    policy_config.read(policy_config_file)
    policy.configure(policy_config)
    if policy.trainable:
        if args.model_dir is None:
            parser.error('Trainable policy must be specified with a model weights directory')
        policy.get_model().load_state_dict(torch.load(model_weights))

    # configure environment
    env_config = configparser.RawConfigParser()
    env_config.read(env_config_file)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    if args.square:
        env.test_sim = 'square_crossing'
    if args.circle:
        env.test_sim = 'circle_crossing'
    robot = Robot(env_config, 'robot')
    robot.set_policy(policy)
    env.set_robot(robot)
    explorer = Explorer(env, robot, device, gamma=0.9)

    policy.set_phase(args.phase)
    policy.set_device(device)
    # set safety space for ORCA in non-cooperative simulation
    if isinstance(robot.policy, ORCA):
        if robot.visible:
            robot.policy.safety_space = 0
        else:
            robot.policy.safety_space = 0
        logging.info('ORCA agent buffer: %f', robot.policy.safety_space)

    policy.set_env(env)
    robot.print_info()
    if args.visualize:
        ob = env.reset(args.phase, args.test_case)
        done = False
        last_pos = np.array(robot.get_position())
        while not done:
            action = robot.act(ob)
            ob, _, done, info = env.step(action)
            current_pos = np.array(robot.get_position())
            logging.debug('Speed: %.2f', np.linalg.norm(current_pos - last_pos) / robot.time_step)
            last_pos = current_pos
        if args.traj:
            env.render('traj', args.video_file)
        else:
            env.render('video', args.video_file)

        logging.info('It takes %.2f seconds to finish. Final status is %s', env.global_time, info)
        # NOTE(review): `info` is an info object, not a string, so this comparison
        # looks like it is always False — confirm whether str(info) was intended.
        if robot.visible and info == 'reach goal':
            human_times = env.get_human_times()
            logging.info('Average time for humans to reach goal: %.2f', sum(human_times) / len(human_times))
    else:
        explorer.run_k_episodes(env.case_size[args.phase], args.phase, print_failure=True)


if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/memory.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/memory.py | from torch.utils.data import Dataset
class ReplayMemory(Dataset):
    """Fixed-capacity ring buffer of experience tuples, usable as a torch Dataset.

    `position` is the index of the next slot to write; once the buffer is full,
    new items overwrite the oldest entries in circular order.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = list()
        self.position = 0

    def push(self, item):
        # replace old experience with new experience
        if len(self.memory) < self.position + 1:
            self.memory.append(item)
        else:
            self.memory[self.position] = item
        self.position = (self.position + 1) % self.capacity

    def is_full(self):
        return len(self.memory) == self.capacity

    def __getitem__(self, item):
        return self.memory[item]

    def __len__(self):
        return len(self.memory)

    def clear(self):
        self.memory = list()
        # Bug fix: reset the write cursor as well. Previously `position` kept its
        # stale value, so pushes after clear() started mid-buffer and began
        # overwriting entries before the buffer was full.
        self.position = 0
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/plot.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/plot.py | import re
import argparse
import matplotlib.pyplot as plt
import numpy as np
def running_mean(x, n):
    """Sliding-window average of `x` with window size `n`.

    Returns an array of length len(x) - n + 1 via a cumulative-sum trick.
    """
    padded = np.insert(x, 0, 0)
    prefix = np.cumsum(padded)
    window_sums = prefix[n:] - prefix[:-n]
    return window_sums / float(n)
def main():
    """Parse training log files and plot smoothed success/collision/time/reward curves."""
    parser = argparse.ArgumentParser()
    parser.add_argument('log_files', type=str, nargs='+')
    parser.add_argument('--plot_sr', default=False, action='store_true')
    parser.add_argument('--plot_cr', default=False, action='store_true')
    parser.add_argument('--plot_time', default=False, action='store_true')
    parser.add_argument('--plot_reward', default=True, action='store_true')
    parser.add_argument('--plot_train', default=True, action='store_true')
    parser.add_argument('--plot_val', default=False, action='store_true')
    parser.add_argument('--window_size', type=int, default=200)
    args = parser.parse_args()

    # define the names of the models you want to plot and the longest episodes you want to show
    models = ['LSTM-RL', 'SARL', 'OM-SARL']
    max_episodes = 10000

    ax1 = ax2 = ax3 = ax4 = None
    ax1_legends = []
    ax2_legends = []
    ax3_legends = []
    ax4_legends = []

    for i, log_file in enumerate(args.log_files):
        with open(log_file, 'r') as file:
            log = file.read()

        # Extract per-episode validation metrics logged by the Explorer.
        val_pattern = r"VAL in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                      r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                      r"total reward: (?P<reward>[-+]?\d+.\d+)"
        val_episode = []
        val_sr = []
        val_cr = []
        val_time = []
        val_reward = []
        for r in re.findall(val_pattern, log):
            val_episode.append(int(r[0]))
            val_sr.append(float(r[1]))
            val_cr.append(float(r[2]))
            val_time.append(float(r[3]))
            val_reward.append(float(r[4]))

        # Extract per-episode training metrics, truncated to max_episodes.
        train_pattern = r"TRAIN in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                        r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                        r"total reward: (?P<reward>[-+]?\d+.\d+)"
        train_episode = []
        train_sr = []
        train_cr = []
        train_time = []
        train_reward = []
        for r in re.findall(train_pattern, log):
            train_episode.append(int(r[0]))
            train_sr.append(float(r[1]))
            train_cr.append(float(r[2]))
            train_time.append(float(r[3]))
            train_reward.append(float(r[4]))
        train_episode = train_episode[:max_episodes]
        train_sr = train_sr[:max_episodes]
        train_cr = train_cr[:max_episodes]
        train_time = train_time[:max_episodes]
        train_reward = train_reward[:max_episodes]

        # smooth training plot
        train_sr_smooth = running_mean(train_sr, args.window_size)
        train_cr_smooth = running_mean(train_cr, args.window_size)
        train_time_smooth = running_mean(train_time, args.window_size)
        train_reward_smooth = running_mean(train_reward, args.window_size)

        # plot sr
        if args.plot_sr:
            if ax1 is None:
                _, ax1 = plt.subplots()
            if args.plot_train:
                ax1.plot(range(len(train_sr_smooth)), train_sr_smooth)
                ax1_legends.append(models[i])
            if args.plot_val:
                ax1.plot(val_episode, val_sr)
                ax1_legends.append(models[i])

            ax1.legend(ax1_legends)
            ax1.set_xlabel('Episodes')
            ax1.set_ylabel('Success Rate')
            ax1.set_title('Success rate')

        # plot time
        if args.plot_time:
            if ax2 is None:
                _, ax2 = plt.subplots()
            if args.plot_train:
                ax2.plot(range(len(train_time_smooth)), train_time_smooth)
                ax2_legends.append(models[i])
            if args.plot_val:
                ax2.plot(val_episode, val_time)
                ax2_legends.append(models[i])

            ax2.legend(ax2_legends)
            ax2.set_xlabel('Episodes')
            ax2.set_ylabel('Time(s)')
            ax2.set_title("Robot's Time to Reach Goal")

        # plot cr
        if args.plot_cr:
            if ax3 is None:
                _, ax3 = plt.subplots()
            if args.plot_train:
                ax3.plot(range(len(train_cr_smooth)), train_cr_smooth)
                ax3_legends.append(models[i])
            if args.plot_val:
                ax3.plot(val_episode, val_cr)
                ax3_legends.append(models[i])

            ax3.legend(ax3_legends)
            ax3.set_xlabel('Episodes')
            ax3.set_ylabel('Collision Rate')
            ax3.set_title('Collision Rate')

        # plot reward
        if args.plot_reward:
            if ax4 is None:
                _, ax4 = plt.subplots()
            if args.plot_train:
                ax4.plot(range(len(train_reward_smooth)), train_reward_smooth)
                ax4_legends.append(models[i])
            if args.plot_val:
                ax4.plot(val_episode, val_reward)
                ax4_legends.append(models[i])

            ax4.legend(ax4_legends)
            ax4.set_xlabel('Episodes')
            ax4.set_ylabel('Reward')
            ax4.set_title('Cumulative Discounted Reward')

    plt.show()


if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/trainer.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/trainer.py | import logging
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
class Trainer(object):
    """Runs supervised gradient updates of a policy's value network."""

    def __init__(self, model, memory, device, batch_size):
        """
        Train the trainable model of a policy
        """
        self.model = model
        self.device = device
        self.criterion = nn.MSELoss().to(device)
        self.memory = memory
        self.data_loader = None
        self.batch_size = batch_size
        self.optimizer = None

    def set_learning_rate(self, learning_rate):
        logging.info('Current learning rate: %f', learning_rate)
        self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=0.9)

    def _require_loader(self):
        # Fail fast without an optimizer; lazily build the shuffled data loader.
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            self.data_loader = DataLoader(self.memory, self.batch_size, shuffle=True)

    def _step(self, inputs, values):
        # One gradient step on a single batch; returns the scalar loss value.
        inputs = Variable(inputs)
        values = Variable(values)
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        loss = self.criterion(outputs, values)
        loss.backward()
        self.optimizer.step()
        return loss.data.item()

    def optimize_epoch(self, num_epochs):
        self._require_loader()
        average_epoch_loss = 0
        for epoch in range(num_epochs):
            epoch_loss = 0
            for batch in self.data_loader:
                epoch_loss += self._step(*batch)

            average_epoch_loss = epoch_loss / len(self.memory)
            logging.debug('Average loss in epoch %d: %.2E', epoch, average_epoch_loss)

        return average_epoch_loss

    def optimize_batch(self, num_batches):
        self._require_loader()
        losses = 0
        for _ in range(num_batches):
            inputs, values = next(iter(self.data_loader))
            losses += self._step(inputs, values)

        average_loss = losses / num_batches
        logging.debug('Average loss : %.2E', average_loss)

        return average_loss
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/__init__.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/explorer.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/utils/explorer.py | import logging
import copy
import torch
from crowd_sim.envs.utils.info import *
class Explorer(object):
def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
self.env = env
self.robot = robot
self.device = device
self.memory = memory
self.gamma = gamma
self.target_policy = target_policy
self.target_model = None
def update_target_model(self, target_model):
self.target_model = copy.deepcopy(target_model)
# @profile
def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None,
print_failure=False):
self.robot.policy.set_phase(phase)
success_times = []
collision_times = []
timeout_times = []
success = 0
collision = 0
timeout = 0
too_close = 0
min_dist = []
cumulative_rewards = []
collision_cases = []
timeout_cases = []
for i in range(k):
ob = self.env.reset(phase)
done = False
states = []
actions = []
rewards = []
while not done:
action = self.robot.act(ob)
ob, reward, done, info = self.env.step(action)
states.append(self.robot.policy.last_state)
actions.append(action)
rewards.append(reward)
if isinstance(info, Danger):
too_close += 1
min_dist.append(info.min_dist)
if isinstance(info, ReachGoal):
success += 1
success_times.append(self.env.global_time)
elif isinstance(info, Collision):
collision += 1
collision_cases.append(i)
collision_times.append(self.env.global_time)
elif isinstance(info, Timeout):
timeout += 1
timeout_cases.append(i)
timeout_times.append(self.env.time_limit)
else:
raise ValueError('Invalid end signal from environment')
if update_memory:
if isinstance(info, ReachGoal) or isinstance(info, Collision):
# only add positive(success) or negative(collision) experience in experience set
self.update_memory(states, actions, rewards, imitation_learning)
cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
* reward for t, reward in enumerate(rewards)]))
success_rate = success / k
collision_rate = collision / k
assert success + collision + timeout == k
avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit
extra_info = '' if episode is None else 'in episode {} '.format(episode)
logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
average(cumulative_rewards)))
if phase in ['val', 'test']:
total_time = sum(success_times + collision_times + timeout_times) * self.robot.time_step
logging.info('Frequency of being in danger: %.2f and average min separate distance in danger: %.2f',
too_close / total_time, average(min_dist))
if print_failure:
logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))
def update_memory(self, states, actions, rewards, imitation_learning=False):
if self.memory is None or self.gamma is None:
raise ValueError('Memory or gamma value is not set!')
for i, state in enumerate(states):
reward = rewards[i]
# VALUE UPDATE
if imitation_learning:
# define the value of states in IL as cumulative discounted rewards, which is the same in RL
state = self.target_policy.transform(state)
# value = pow(self.gamma, (len(states) - 1 - i) * self.robot.time_step * self.robot.v_pref)
value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
for t, reward in enumerate(rewards)])
else:
if i == len(states) - 1:
# terminal state
value = reward
else:
next_state = states[i + 1]
gamma_bar = pow(self.gamma, self.robot.time_step * self.robot.v_pref)
value = reward + gamma_bar * self.target_model(next_state.unsqueeze(0)).data.item()
value = torch.Tensor([value]).to(self.device)
# # transform state of different human_num into fixed-size tensor
# if len(state.size()) == 1:
# human_num = 1
# feature_size = state.size()[0]
# else:
# human_num, feature_size = state.size()
# if human_num != 5:
# padding = torch.zeros((5 - human_num, feature_size))
# state = torch.cat([state, padding])
self.memory.push((state, value))
def average(input_list):
if input_list:
return sum(input_list) / len(input_list)
else:
return 0
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/lstm_rl.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/lstm_rl.py | import torch
import torch.nn as nn
import numpy as np
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork1(nn.Module):
def __init__(self, input_dim, self_state_dim, mlp_dims, lstm_hidden_dim):
super().__init__()
self.self_state_dim = self_state_dim
self.lstm_hidden_dim = lstm_hidden_dim
self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
self.lstm = nn.LSTM(input_dim, lstm_hidden_dim, batch_first=True)
def forward(self, state):
"""
First transform the world coordinates to self-centric coordinates and then do forward computation
:param state: tensor of shape (batch_size, # of humans, length of a joint state)
:return:
"""
size = state.shape
self_state = state[:, 0, :self.self_state_dim]
# human_state = state[:, :, self.self_state_dim:]
h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
output, (hn, cn) = self.lstm(state, (h0, c0))
hn = hn.squeeze(0)
joint_state = torch.cat([self_state, hn], dim=1)
value = self.mlp(joint_state)
return value
class ValueNetwork2(nn.Module):
def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp_dims, lstm_hidden_dim):
super().__init__()
self.self_state_dim = self_state_dim
self.lstm_hidden_dim = lstm_hidden_dim
self.mlp1 = mlp(input_dim, mlp1_dims)
self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
self.lstm = nn.LSTM(mlp1_dims[-1], lstm_hidden_dim, batch_first=True)
def forward(self, state):
"""
First transform the world coordinates to self-centric coordinates and then do forward computation
:param state: tensor of shape (batch_size, # of humans, length of a joint state)
:return:
"""
size = state.shape
self_state = state[:, 0, :self.self_state_dim]
state = torch.reshape(state, (-1, size[2]))
mlp1_output = self.mlp1(state)
mlp1_output = torch.reshape(mlp1_output, (size[0], size[1], -1))
h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
output, (hn, cn) = self.lstm(mlp1_output, (h0, c0))
hn = hn.squeeze(0)
joint_state = torch.cat([self_state, hn], dim=1)
value = self.mlp(joint_state)
return value
class LstmRL(MultiHumanRL):
def __init__(self):
super().__init__()
self.name = 'LSTM-RL'
self.with_interaction_module = None
self.interaction_module_dims = None
def configure(self, config):
self.set_common_parameters(config)
mlp_dims = [int(x) for x in config.get('lstm_rl', 'mlp2_dims').split(', ')]
global_state_dim = config.getint('lstm_rl', 'global_state_dim')
self.with_om = config.getboolean('lstm_rl', 'with_om')
with_interaction_module = config.getboolean('lstm_rl', 'with_interaction_module')
if with_interaction_module:
mlp1_dims = [int(x) for x in config.get('lstm_rl', 'mlp1_dims').split(', ')]
self.model = ValueNetwork2(self.input_dim(), self.self_state_dim, mlp1_dims, mlp_dims, global_state_dim)
else:
self.model = ValueNetwork1(self.input_dim(), self.self_state_dim, mlp_dims, global_state_dim)
self.multiagent_training = config.getboolean('lstm_rl', 'multiagent_training')
logging.info('Policy: {}LSTM-RL {} pairwise interaction module'.format(
'OM-' if self.with_om else '', 'w/' if with_interaction_module else 'w/o'))
def predict(self, state):
"""
Input state is the joint state of robot concatenated with the observable state of other agents
To predict the best action, agent samples actions and propagates one step to see how good the next state is
thus the reward function is needed
"""
def dist(human):
# sort human order by decreasing distance to the robot
return np.linalg.norm(np.array(human.position) - np.array(state.self_state.position))
state.human_states = sorted(state.human_states, key=dist, reverse=True)
return super().predict(state)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/sarl.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/sarl.py | import torch
import torch.nn as nn
from torch.nn.functional import softmax
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork(nn.Module):
def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
cell_size, cell_num):
super().__init__()
self.self_state_dim = self_state_dim
self.global_state_dim = mlp1_dims[-1]
self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
self.with_global_state = with_global_state
if with_global_state:
self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
else:
self.attention = mlp(mlp1_dims[-1], attention_dims)
self.cell_size = cell_size
self.cell_num = cell_num
mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
self.attention_weights = None
def forward(self, state):
"""
First transform the world coordinates to self-centric coordinates and then do forward computation
:param state: tensor of shape (batch_size, # of humans, length of a rotated state)
:return:
"""
size = state.shape
self_state = state[:, 0, :self.self_state_dim]
mlp1_output = self.mlp1(state.view((-1, size[2])))
mlp2_output = self.mlp2(mlp1_output)
if self.with_global_state:
# compute attention scores
global_state = torch.mean(mlp1_output.view(size[0], size[1], -1), 1, keepdim=True)
global_state = global_state.expand((size[0], size[1], self.global_state_dim)).\
contiguous().view(-1, self.global_state_dim)
attention_input = torch.cat([mlp1_output, global_state], dim=1)
else:
attention_input = mlp1_output
scores = self.attention(attention_input).view(size[0], size[1], 1).squeeze(dim=2)
# masked softmax
# weights = softmax(scores, dim=1).unsqueeze(2)
scores_exp = torch.exp(scores) * (scores != 0).float()
weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(2)
self.attention_weights = weights[0, :, 0].data.cpu().numpy()
# output feature is a linear combination of input features
features = mlp2_output.view(size[0], size[1], -1)
# for converting to onnx
# expanded_weights = torch.cat([torch.zeros(weights.size()).copy_(weights) for _ in range(50)], dim=2)
weighted_feature = torch.sum(torch.mul(weights, features), dim=1)
# concatenate agent's state with global weighted humans' state
joint_state = torch.cat([self_state, weighted_feature], dim=1)
value = self.mlp3(joint_state)
return value
class SARL(MultiHumanRL):
def __init__(self):
super().__init__()
self.name = 'SARL'
def configure(self, config):
self.set_common_parameters(config)
mlp1_dims = [int(x) for x in config.get('sarl', 'mlp1_dims').split(', ')]
mlp2_dims = [int(x) for x in config.get('sarl', 'mlp2_dims').split(', ')]
mlp3_dims = [int(x) for x in config.get('sarl', 'mlp3_dims').split(', ')]
attention_dims = [int(x) for x in config.get('sarl', 'attention_dims').split(', ')]
self.with_om = config.getboolean('sarl', 'with_om')
with_global_state = config.getboolean('sarl', 'with_global_state')
self.model = ValueNetwork(self.input_dim(), self.self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims,
attention_dims, with_global_state, self.cell_size, self.cell_num)
self.multiagent_training = config.getboolean('sarl', 'multiagent_training')
if self.with_om:
self.name = 'OM-SARL'
logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))
def get_attention_weights(self):
return self.model.attention_weights
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/cadrl.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/cadrl.py | import torch
import torch.nn as nn
import numpy as np
import itertools
import logging
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_sim.envs.utils.state import ObservableState, FullState
def mlp(input_dim, mlp_dims, last_relu=False):
layers = []
mlp_dims = [input_dim] + mlp_dims
for i in range(len(mlp_dims) - 1):
layers.append(nn.Linear(mlp_dims[i], mlp_dims[i + 1]))
if i != len(mlp_dims) - 2 or last_relu:
layers.append(nn.ReLU())
net = nn.Sequential(*layers)
return net
class ValueNetwork(nn.Module):
def __init__(self, input_dim, mlp_dims):
super().__init__()
self.value_network = mlp(input_dim, mlp_dims)
def forward(self, state):
value = self.value_network(state)
return value
class CADRL(Policy):
def __init__(self):
super().__init__()
self.name = 'CADRL'
self.trainable = True
self.multiagent_training = None
self.kinematics = None
self.epsilon = None
self.gamma = None
self.sampling = None
self.speed_samples = None
self.rotation_samples = None
self.query_env = None
self.action_space = None
self.speeds = None
self.rotations = None
self.action_values = None
self.with_om = None
self.cell_num = None
self.cell_size = None
self.om_channel_size = None
self.self_state_dim = 6
self.human_state_dim = 7
self.joint_state_dim = self.self_state_dim + self.human_state_dim
def configure(self, config):
self.set_common_parameters(config)
mlp_dims = [int(x) for x in config.get('cadrl', 'mlp_dims').split(', ')]
self.model = ValueNetwork(self.joint_state_dim, mlp_dims)
self.multiagent_training = config.getboolean('cadrl', 'multiagent_training')
logging.info('Policy: CADRL without occupancy map')
def set_common_parameters(self, config):
self.gamma = config.getfloat('rl', 'gamma')
self.kinematics = config.get('action_space', 'kinematics')
self.sampling = config.get('action_space', 'sampling')
self.speed_samples = config.getint('action_space', 'speed_samples')
self.rotation_samples = config.getint('action_space', 'rotation_samples')
self.query_env = config.getboolean('action_space', 'query_env')
self.cell_num = config.getint('om', 'cell_num')
self.cell_size = config.getfloat('om', 'cell_size')
self.om_channel_size = config.getint('om', 'om_channel_size')
def set_device(self, device):
self.device = device
self.model.to(device)
def set_epsilon(self, epsilon):
self.epsilon = epsilon
def build_action_space(self, v_pref):
"""
Action space consists of 25 uniformly sampled actions in permitted range and 25 randomly sampled actions.
"""
holonomic = True if self.kinematics == 'holonomic' else False
speeds = [(np.exp((i + 1) / self.speed_samples) - 1) / (np.e - 1) * v_pref for i in range(self.speed_samples)]
if holonomic:
rotations = np.linspace(0, 2 * np.pi, self.rotation_samples, endpoint=False)
else:
rotations = np.linspace(-np.pi / 4, np.pi / 4, self.rotation_samples)
action_space = [ActionXY(0, 0) if holonomic else ActionRot(0, 0)]
for rotation, speed in itertools.product(rotations, speeds):
if holonomic:
action_space.append(ActionXY(speed * np.cos(rotation), speed * np.sin(rotation)))
else:
action_space.append(ActionRot(speed, rotation))
self.speeds = speeds
self.rotations = rotations
self.action_space = action_space
def propagate(self, state, action):
if isinstance(state, ObservableState):
# propagate state of humans
next_px = state.px + action.vx * self.time_step
next_py = state.py + action.vy * self.time_step
next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
elif isinstance(state, FullState):
# propagate state of current agent
# perform action without rotation
if self.kinematics == 'holonomic':
next_px = state.px + action.vx * self.time_step
next_py = state.py + action.vy * self.time_step
next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
state.gx, state.gy, state.v_pref, state.theta)
else:
next_theta = state.theta + action.r
next_vx = action.v * np.cos(next_theta)
next_vy = action.v * np.sin(next_theta)
next_px = state.px + next_vx * self.time_step
next_py = state.py + next_vy * self.time_step
next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
state.v_pref, next_theta)
else:
raise ValueError('Type error')
return next_state
def predict(self, state):
"""
Input state is the joint state of robot concatenated by the observable state of other agents
To predict the best action, agent samples actions and propagates one step to see how good the next state is
thus the reward function is needed
"""
if self.phase is None or self.device is None:
raise AttributeError('Phase, device attributes have to be set!')
if self.phase == 'train' and self.epsilon is None:
raise AttributeError('Epsilon attribute has to be set in training phase')
if self.reach_destination(state):
return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
if self.action_space is None:
self.build_action_space(state.self_state.v_pref)
probability = np.random.random()
if self.phase == 'train' and probability < self.epsilon:
max_action = self.action_space[np.random.choice(len(self.action_space))]
else:
self.action_values = list()
max_min_value = float('-inf')
max_action = None
for action in self.action_space:
next_self_state = self.propagate(state.self_state, action)
ob, reward, done, info = self.env.onestep_lookahead(action)
batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
for next_human_state in ob], dim=0)
# VALUE UPDATE
outputs = self.model(self.rotate(batch_next_states))
min_output, min_index = torch.min(outputs, 0)
min_value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * min_output.data.item()
self.action_values.append(min_value)
if min_value > max_min_value:
max_min_value = min_value
max_action = action
if self.phase == 'train':
self.last_state = self.transform(state)
return max_action
def transform(self, state):
"""
Take the state passed from agent and transform it to tensor for batch training
:param state:
:return: tensor of shape (len(state), )
"""
assert len(state.human_states) == 1
state = torch.Tensor(state.self_state + state.human_states[0]).to(self.device)
state = self.rotate(state.unsqueeze(0)).squeeze(dim=0)
return state
def rotate(self, state):
"""
Transform the coordinate to agent-centric.
Input state tensor is of size (batch_size, state_length)
"""
# 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
batch = state.shape[0]
dx = (state[:, 5] - state[:, 0]).reshape((batch, -1))
dy = (state[:, 6] - state[:, 1]).reshape((batch, -1))
rot = torch.atan2(state[:, 6] - state[:, 1], state[:, 5] - state[:, 0])
dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)
v_pref = state[:, 7].reshape((batch, -1))
vx = (state[:, 2] * torch.cos(rot) + state[:, 3] * torch.sin(rot)).reshape((batch, -1))
vy = (state[:, 3] * torch.cos(rot) - state[:, 2] * torch.sin(rot)).reshape((batch, -1))
radius = state[:, 4].reshape((batch, -1))
if self.kinematics == 'unicycle':
theta = (state[:, 8] - rot).reshape((batch, -1))
else:
# set theta to be zero since it's not used
theta = torch.zeros_like(v_pref)
vx1 = (state[:, 11] * torch.cos(rot) + state[:, 12] * torch.sin(rot)).reshape((batch, -1))
vy1 = (state[:, 12] * torch.cos(rot) - state[:, 11] * torch.sin(rot)).reshape((batch, -1))
px1 = (state[:, 9] - state[:, 0]) * torch.cos(rot) + (state[:, 10] - state[:, 1]) * torch.sin(rot)
px1 = px1.reshape((batch, -1))
py1 = (state[:, 10] - state[:, 1]) * torch.cos(rot) - (state[:, 9] - state[:, 0]) * torch.sin(rot)
py1 = py1.reshape((batch, -1))
radius1 = state[:, 13].reshape((batch, -1))
radius_sum = radius + radius1
da = torch.norm(torch.cat([(state[:, 0] - state[:, 9]).reshape((batch, -1)), (state[:, 1] - state[:, 10]).
reshape((batch, -1))], dim=1), 2, dim=1, keepdim=True)
new_state = torch.cat([dg, v_pref, theta, radius, vx, vy, px1, py1, vx1, vy1, radius1, da, radius_sum], dim=1)
return new_state
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/multi_human_rl.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/multi_human_rl.py | import torch
import numpy as np
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_nav.policy.cadrl import CADRL
class MultiHumanRL(CADRL):
def __init__(self):
super().__init__()
def predict(self, state):
"""
A base class for all methods that takes pairwise joint state as input to value network.
The input to the value network is always of shape (batch_size, # humans, rotated joint state length)
"""
if self.phase is None or self.device is None:
raise AttributeError('Phase, device attributes have to be set!')
if self.phase == 'train' and self.epsilon is None:
raise AttributeError('Epsilon attribute has to be set in training phase')
if self.reach_destination(state):
return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
if self.action_space is None:
self.build_action_space(state.self_state.v_pref)
occupancy_maps = None
probability = np.random.random()
if self.phase == 'train' and probability < self.epsilon:
max_action = self.action_space[np.random.choice(len(self.action_space))]
else:
self.action_values = list()
max_value = float('-inf')
max_action = None
for action in self.action_space:
next_self_state = self.propagate(state.self_state, action)
if self.query_env:
next_human_states, reward, done, info = self.env.onestep_lookahead(action)
else:
next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy))
for human_state in state.human_states]
reward = self.compute_reward(next_self_state, next_human_states)
batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
for next_human_state in next_human_states], dim=0)
rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)
if self.with_om:
if occupancy_maps is None:
occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)
rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)
# VALUE UPDATE
next_state_value = self.model(rotated_batch_input).data.item()
value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * next_state_value
self.action_values.append(value)
if value > max_value:
max_value = value
max_action = action
if max_action is None:
raise ValueError('Value network is not well trained. ')
if self.phase == 'train':
self.last_state = self.transform(state)
return max_action
def compute_reward(self, nav, humans):
# collision detection
dmin = float('inf')
collision = False
for i, human in enumerate(humans):
dist = np.linalg.norm((nav.px - human.px, nav.py - human.py)) - nav.radius - human.radius
if dist < 0:
collision = True
break
if dist < dmin:
dmin = dist
# check if reaching the goal
reaching_goal = np.linalg.norm((nav.px - nav.gx, nav.py - nav.gy)) < nav.radius
if collision:
reward = -0.25
elif reaching_goal:
reward = 1
elif dmin < 0.2:
reward = (dmin - 0.2) * 0.5 * self.time_step
else:
reward = 0
return reward
def transform(self, state):
"""
Take the state passed from agent and transform it to the input of value network
:param state:
:return: tensor of shape (# of humans, len(state))
"""
state_tensor = torch.cat([torch.Tensor([state.self_state + human_state]).to(self.device)
for human_state in state.human_states], dim=0)
if self.with_om:
occupancy_maps = self.build_occupancy_maps(state.human_states)
state_tensor = torch.cat([self.rotate(state_tensor), occupancy_maps], dim=1)
else:
state_tensor = self.rotate(state_tensor)
return state_tensor
def input_dim(self):
return self.joint_state_dim + (self.cell_num ** 2 * self.om_channel_size if self.with_om else 0)
def build_occupancy_maps(self, human_states):
"""
:param human_states:
:return: tensor of shape (# human - 1, self.cell_num ** 2)
"""
occupancy_maps = []
for human in human_states:
other_humans = np.concatenate([np.array([(other_human.px, other_human.py, other_human.vx, other_human.vy)])
for other_human in human_states if other_human != human], axis=0)
other_px = other_humans[:, 0] - human.px
other_py = other_humans[:, 1] - human.py
# new x-axis is in the direction of human's velocity
human_velocity_angle = np.arctan2(human.vy, human.vx)
other_human_orientation = np.arctan2(other_py, other_px)
rotation = other_human_orientation - human_velocity_angle
distance = np.linalg.norm([other_px, other_py], axis=0)
other_px = np.cos(rotation) * distance
other_py = np.sin(rotation) * distance
# compute indices of humans in the grid
other_x_index = np.floor(other_px / self.cell_size + self.cell_num / 2)
other_y_index = np.floor(other_py / self.cell_size + self.cell_num / 2)
other_x_index[other_x_index < 0] = float('-inf')
other_x_index[other_x_index >= self.cell_num] = float('-inf')
other_y_index[other_y_index < 0] = float('-inf')
other_y_index[other_y_index >= self.cell_num] = float('-inf')
grid_indices = self.cell_num * other_y_index + other_x_index
occupancy_map = np.isin(range(self.cell_num ** 2), grid_indices)
if self.om_channel_size == 1:
occupancy_maps.append([occupancy_map.astype(int)])
else:
# calculate relative velocity for other agents
other_human_velocity_angles = np.arctan2(other_humans[:, 3], other_humans[:, 2])
rotation = other_human_velocity_angles - human_velocity_angle
speed = np.linalg.norm(other_humans[:, 2:4], axis=1)
other_vx = np.cos(rotation) * speed
other_vy = np.sin(rotation) * speed
dm = [list() for _ in range(self.cell_num ** 2 * self.om_channel_size)]
for i, index in np.ndenumerate(grid_indices):
if index in range(self.cell_num ** 2):
if self.om_channel_size == 2:
dm[2 * int(index)].append(other_vx[i])
dm[2 * int(index) + 1].append(other_vy[i])
elif self.om_channel_size == 3:
dm[2 * int(index)].append(1)
dm[2 * int(index) + 1].append(other_vx[i])
dm[2 * int(index) + 2].append(other_vy[i])
else:
raise NotImplementedError
for i, cell in enumerate(dm):
dm[i] = sum(dm[i]) / len(dm[i]) if len(dm[i]) != 0 else 0
occupancy_maps.append([dm])
return torch.from_numpy(np.concatenate(occupancy_maps, axis=0)).float()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/policy_factory.py | sarl_star_ros/CrowdNav/build/lib/crowd_nav/policy/policy_factory.py | from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_nav.policy.cadrl import CADRL
from crowd_nav.policy.lstm_rl import LstmRL
from crowd_nav.policy.sarl import SARL
policy_factory['cadrl'] = CADRL
policy_factory['lstm_rl'] = LstmRL
policy_factory['sarl'] = SARL
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/__init__.py | sarl_star_ros/CrowdNav/crowd_sim/__init__.py | from gym.envs.registration import register
register(
id='CrowdSim-v0',
entry_point='crowd_sim.envs:CrowdSim',
)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/__init__.py | sarl_star_ros/CrowdNav/crowd_sim/envs/__init__.py | from .crowd_sim import CrowdSim
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/crowd_sim.py | sarl_star_ros/CrowdNav/crowd_sim/envs/crowd_sim.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import absolute_import
import logging
import gym
import matplotlib.lines as mlines
import numpy as np
import rvo2
import torch
from matplotlib import patches
from numpy.linalg import norm
from crowd_sim.envs.utils.human import Human
from crowd_sim.envs.utils.info import *
from crowd_sim.envs.utils.utils import point_to_segment_dist
class CrowdSim(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
"""
Movement simulation for n+1 agents
Agent can either be human or robot.
humans are controlled by a unknown and fixed policy.
robot is controlled by a known and learnable policy.
"""
self.time_limit = None
self.time_step = None
self.robot = None
self.robot_path_length = 0 # @lky
self.humans = None
self.global_time = None
self.human_times = None
# reward function
self.success_reward = None
self.collision_penalty = None
self.discomfort_dist = None
self.discomfort_penalty_factor = None
# simulation configuration
self.config = None
self.case_capacity = None
self.case_size = None
self.case_counter = None
self.randomize_attributes = None
self.train_val_sim = None
self.test_sim = None
self.square_width = None
self.circle_radius = None
self.human_num = None
# for visualization
self.states = None
self.action_values = None
self.attention_weights = None
def set_human_num(self, human_num):
self.human_num = human_num
def set_humans(self, humans):
self.humans = humans
def configure(self, config):
self.config = config
self.time_limit = config.getint('env', 'time_limit')
self.time_step = config.getfloat('env', 'time_step')
self.randomize_attributes = config.getboolean('env', 'randomize_attributes')
self.success_reward = config.getfloat('reward', 'success_reward')
self.collision_penalty = config.getfloat('reward', 'collision_penalty')
self.discomfort_dist = config.getfloat('reward', 'discomfort_dist')
self.discomfort_penalty_factor = config.getfloat('reward', 'discomfort_penalty_factor')
if self.config.get('humans', 'policy') == 'orca':
self.case_capacity = {'train': np.iinfo(np.uint32).max - 2000, 'val': 1000, 'test': 1000}
self.case_size = {'train': np.iinfo(np.uint32).max - 2000, 'val': config.getint('env', 'val_size'),
'test': config.getint('env', 'test_size')}
self.train_val_sim = config.get('sim', 'train_val_sim')
self.test_sim = config.get('sim', 'test_sim')
self.square_width = config.getfloat('sim', 'square_width')
self.circle_radius = config.getfloat('sim', 'circle_radius')
self.human_num = config.getint('sim', 'human_num')
else:
raise NotImplementedError
self.case_counter = {'train': 0, 'test': 0, 'val': 0}
logging.info('human number: {}'.format(self.human_num))
if self.randomize_attributes:
logging.info("Randomize human's radius and preferred speed")
else:
logging.info("Not randomize human's radius and preferred speed")
logging.info('Training simulation: {}, test simulation: {}'.format(self.train_val_sim, self.test_sim))
logging.info('Square width: {}, circle width: {}'.format(self.square_width, self.circle_radius))
def set_robot(self, robot):
self.robot = robot
def generate_random_human_position(self, human_num, rule):
"""
Generate human position according to certain rule
Rule square_crossing: generate start/goal position at two sides of y-axis
Rule circle_crossing: generate start position on a circle, goal position is at the opposite side
:param human_num:
:param rule:
:return:
"""
# initial min separation distance to avoid danger penalty at beginning
if rule == 'square_crossing':
self.humans = []
for i in range(human_num):
self.humans.append(self.generate_square_crossing_human())
elif rule == 'circle_crossing':
self.humans = []
for i in range(human_num):
self.humans.append(self.generate_circle_crossing_human())
elif rule == 'mixed':
# mix different raining simulation with certain distribution
static_human_num = {0: 0.05, 1: 0.2, 2: 0.2, 3: 0.3, 4: 0.1, 5: 0.15}
dynamic_human_num = {1: 0.3, 2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1}
static = True if np.random.random() < 0.2 else False
prob = np.random.random()
for key, value in sorted(static_human_num.items() if static else dynamic_human_num.items()):
if prob - value <= 0:
human_num = key
break
else:
prob -= value
self.humans = []
if static:
print("mode: static")
# randomly initialize static objects in a square of (width, height)
width = 4
height = 8
if human_num == 0:
print("human num: 0, set fake human:(0, -10, 0, -10, 0, 0, 0)")
human = Human(self.config, 'humans')
human.set(0, -10, 0, -10, 0, 0, 0)
self.humans.append(human)
for i in range(human_num):
human = Human(self.config, 'humans')
if np.random.random() > 0.5:
sign = -1
else:
sign = 1
while True:
px = np.random.random() * width * 0.5 * sign
py = (np.random.random() - 0.5) * height
collide = False
for agent in [self.robot] + self.humans:
if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break
if not collide:
break
human.set(px, py, px, py, 0, 0, 0)
self.humans.append(human)
else:
# the first 2 two humans will be in the circle crossing scenarios
# the rest humans will have a random starting and end position
print("mode: dynamic")
for i in range(human_num):
if i < 2:
human = self.generate_circle_crossing_human()
else:
human = self.generate_square_crossing_human()
self.humans.append(human)
self.human_num = len(self.humans)
self.human_times = [0] * self.human_num
print("human number:", self.human_num)
else:
raise ValueError("Rule doesn't exist")
def generate_circle_crossing_human(self):
human = Human(self.config, 'humans')
if self.randomize_attributes:
human.sample_random_attributes()
while True:
angle = np.random.random() * np.pi * 2
# add some noise to simulate all the possible cases robot could meet with human
px_noise = (np.random.random() - 0.5) * human.v_pref
py_noise = (np.random.random() - 0.5) * human.v_pref
px = self.circle_radius * np.cos(angle) + px_noise
py = self.circle_radius * np.sin(angle) + py_noise
collide = False
for agent in [self.robot] + self.humans:
min_dist = human.radius + agent.radius + self.discomfort_dist
if norm((px - agent.px, py - agent.py)) < min_dist or \
norm((px - agent.gx, py - agent.gy)) < min_dist:
collide = True
break # jump out of 'for' loop
if not collide:
break # jump out of 'while' loop
human.set(px, py, -px, -py, 0, 0, 0)
return human
def generate_square_crossing_human(self):
human = Human(self.config, 'humans')
if self.randomize_attributes:
human.sample_random_attributes()
if np.random.random() > 0.5:
sign = -1
else:
sign = 1
while True:
px = np.random.random() * self.square_width * 0.5 * sign
py = (np.random.random() - 0.5) * self.square_width
collide = False
for agent in [self.robot] + self.humans:
if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break # jump out of 'for' loop
if not collide:
break # jump out of 'while' loop
while True:
gx = np.random.random() * self.square_width * 0.5 * -sign
gy = (np.random.random() - 0.5) * self.square_width
collide = False
for agent in [self.robot] + self.humans:
if norm((gx - agent.gx, gy - agent.gy)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break
if not collide:
break
human.set(px, py, gx, gy, 0, 0, 0)
return human
def get_human_times(self):
"""
Run the whole simulation to the end and compute the average time for human to reach goal.
Once an agent reaches the goal, it stops moving and becomes an obstacle
(doesn't need to take half responsibility to avoid collision).
:return:
"""
# centralized orca simulator for all humans
if not self.robot.reached_destination():
raise ValueError('Episode is not done yet')
params = (10, 10, 5, 5)
sim = rvo2.PyRVOSimulator(self.time_step, params[0],params[1],params[2],params[3], 0.3, 1)
sim.addAgent(self.robot.get_position(), params[0],params[1],params[2],params[3], self.robot.radius, self.robot.v_pref,
self.robot.get_velocity())
for human in self.humans:
sim.addAgent(human.get_position(), params[0],params[1],params[2],params[3], human.radius, human.v_pref, human.get_velocity())
max_time = 1000
while not all(self.human_times):
for i, agent in enumerate([self.robot] + self.humans):
vel_pref = np.array(agent.get_goal_position()) - np.array(agent.get_position())
if norm(vel_pref) > 1:
vel_pref /= norm(vel_pref)
sim.setAgentPrefVelocity(i, tuple(vel_pref))
sim.doStep()
self.global_time += self.time_step
if self.global_time > max_time:
logging.warning('Simulation cannot terminate!')
for i, human in enumerate(self.humans):
if self.human_times[i] == 0 and human.reached_destination():
self.human_times[i] = self.global_time
# for visualization
self.robot.set_position(sim.getAgentPosition(0))
for i, human in enumerate(self.humans):
human.set_position(sim.getAgentPosition(i + 1))
self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
del sim
return self.human_times
def reset(self, phase='test', test_case=None):
"""
Set px, py, gx, gy, vx, vy, theta for robot and humans
:return:
"""
if self.robot is None:
raise AttributeError('robot has to be set!')
assert phase in ['train', 'val', 'test']
if test_case is not None:
self.case_counter[phase] = test_case
self.global_time = 0
if phase == 'test':
self.human_times = [0] * self.human_num
else:
self.human_times = [0] * (self.human_num if self.robot.policy.multiagent_training else 1)
if not self.robot.policy.multiagent_training:
self.train_val_sim = 'circle_crossing'
if self.config.get('humans', 'policy') == 'trajnet':
raise NotImplementedError
else:
counter_offset = {'train': self.case_capacity['val'] + self.case_capacity['test'],
'val': 0, 'test': self.case_capacity['val']}
self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0, np.pi / 2)
if self.case_counter[phase] >= 0:
np.random.seed(counter_offset[phase] + self.case_counter[phase])
if phase in ['train', 'val']:
human_num = self.human_num if self.robot.policy.multiagent_training else 1
self.generate_random_human_position(human_num=human_num, rule=self.train_val_sim)
else:
self.generate_random_human_position(human_num=self.human_num, rule=self.test_sim)
# case_counter is always between 0 and case_size[phase]
self.case_counter[phase] = (self.case_counter[phase] + 1) % self.case_size[phase]
else:
assert phase == 'test'
if self.case_counter[phase] == -1:
# for debugging purposes
self.human_num = 3
self.humans = [Human(self.config, 'humans') for _ in range(self.human_num)]
self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2)
self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2)
self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2)
else:
raise NotImplementedError
for agent in [self.robot] + self.humans:
agent.time_step = self.time_step
agent.policy.time_step = self.time_step
self.states = list()
if hasattr(self.robot.policy, 'action_values'):
self.action_values = list()
if hasattr(self.robot.policy, 'get_attention_weights'):
self.attention_weights = list()
# get current observation
if self.robot.sensor == 'coordinates':
ob = [human.get_observable_state() for human in self.humans]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
return ob
def onestep_lookahead(self, action):
return self.step(action, update=False)
def step(self, action, update=True):
"""
Compute actions for all agents, detect collision, update environment and return (ob, reward, done, info)
"""
human_actions = []
for human in self.humans:
# observation for humans is always coordinates
ob = [other_human.get_observable_state() for other_human in self.humans if other_human != human]
if self.robot.visible:
ob += [self.robot.get_observable_state()]
human_actions.append(human.act(ob))
# collision detection
dmin = float('inf')
collision = False
for i, human in enumerate(self.humans):
px = human.px - self.robot.px
py = human.py - self.robot.py
if self.robot.kinematics == 'holonomic':
vx = human.vx - action.vx
vy = human.vy - action.vy
else:
vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
ex = px + vx * self.time_step
ey = py + vy * self.time_step
# closest distance between boundaries of two agents
closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius
if closest_dist < 0:
collision = True
# logging.debug("Collision: distance between robot and p{} is {:.2E}".format(i, closest_dist))
break
elif closest_dist < dmin:
dmin = closest_dist
# collision detection between humans
human_num = len(self.humans)
for i in range(human_num):
for j in range(i + 1, human_num):
dx = self.humans[i].px - self.humans[j].px
dy = self.humans[i].py - self.humans[j].py
dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius
if dist < 0:
# detect collision but don't take humans' collision into account
logging.debug('Collision happens between humans in step()')
# check if reaching the goal
end_position = np.array(self.robot.compute_position(action, self.time_step))
reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius
if self.global_time >= self.time_limit - 1:
reward = 0
done = True
info = Timeout()
elif collision:
reward = self.collision_penalty
done = True
info = Collision()
elif reaching_goal:
reward = self.success_reward
done = True
info = ReachGoal()
elif dmin < self.discomfort_dist:
# only penalize agent for getting too close if it's visible
# adjust the reward based on FPS
reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step
done = False
info = Danger(dmin)
else:
reward = 0
done = False
info = Nothing()
if update:
# store state, action value and attention weights
self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
if hasattr(self.robot.policy, 'action_values'):
self.action_values.append(self.robot.policy.action_values)
if hasattr(self.robot.policy, 'get_attention_weights'):
self.attention_weights.append(self.robot.policy.get_attention_weights())
# update all agents
self.robot.step(action)
for i, human_action in enumerate(human_actions):
self.humans[i].step(human_action)
self.global_time += self.time_step
for i, human in enumerate(self.humans):
# only record the first time the human reaches the goal
if self.human_times[i] == 0 and human.reached_destination():
self.human_times[i] = self.global_time
# compute the observation
if self.robot.sensor == 'coordinates':
ob = [human.get_observable_state() for human in self.humans]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
else:
if self.robot.sensor == 'coordinates':
ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
return ob, reward, done, info
def render(self, mode='human', output_file=None):
from matplotlib import animation
import matplotlib.pyplot as plt
plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
x_offset = 0.11
y_offset = 0.11
cmap = plt.cm.get_cmap('hsv', 10)
robot_color = 'yellow'
goal_color = 'red'
arrow_color = 'red'
arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)
if mode == 'human':
fig, ax = plt.subplots(figsize=(7, 7))
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
for human in self.humans:
human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
ax.add_artist(human_circle)
ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
plt.show()
elif mode == 'traj':
fig, ax = plt.subplots(figsize=(7, 7))
ax.tick_params(labelsize=16)
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel('x(m)', fontsize=16)
ax.set_ylabel('y(m)', fontsize=16)
robot_positions = [self.states[i][0].position for i in range(len(self.states))]
human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
for i in range(len(self.states))]
for k in range(len(self.states)):
if k % 4 == 0 or k == len(self.states) - 1:
robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
for i in range(len(self.humans))]
ax.add_artist(robot)
for human in humans:
ax.add_artist(human)
# add time annotation
global_time = k * self.time_step
if global_time % 4 == 0 or k == len(self.states) - 1:
agents = humans + [robot]
times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
'{:.1f}'.format(global_time),
color='black', fontsize=14) for i in range(self.human_num + 1)]
for time in times:
ax.add_artist(time)
if k != 0:
nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
(self.states[k - 1][0].py, self.states[k][0].py),
color=robot_color, ls='solid')
human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
(self.states[k - 1][1][i].py, self.states[k][1][i].py),
color=cmap(i), ls='solid')
for i in range(self.human_num)]
ax.add_artist(nav_direction)
for human_direction in human_directions:
ax.add_artist(human_direction)
plt.legend([robot], ['Robot'], fontsize=16)
plt.show()
elif mode == 'video':
fig, ax = plt.subplots(figsize=(7, 7))
ax.tick_params(labelsize=16)
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.set_xlabel('x(m)', fontsize=16)
ax.set_ylabel('y(m)', fontsize=16)
# add robot and its goal
robot_positions = [state[0].position for state in self.states]
# draw a star at the goal position (0,4)
goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
ax.add_artist(robot)
ax.add_artist(goal)
plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16, numpoints=1) # numpoints=1: only 1 star in the legend
# add humans and their numbers
human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
for i in range(len(self.humans))]
human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
color='black', fontsize=12) for i in range(len(self.humans))]
for i, human in enumerate(humans):
ax.add_artist(human)
ax.add_artist(human_numbers[i])
# add time annotation
time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
ax.add_artist(time)
# compute attention scores
if self.attention_weights is not None:
attention_scores = [
plt.text(-5.5, 5 - 0.5 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
fontsize=16) for i in range(len(self.humans))]
# compute orientation in each step and use arrow to show the direction
radius = self.robot.radius
if self.robot.kinematics == 'unicycle':
orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
state[0].py + radius * np.sin(state[0].theta))) for state
in self.states]
orientations = [orientation]
else:
orientations = []
for i in range(self.human_num + 1):
orientation = []
for state in self.states:
if i == 0:
agent_state = state[0]
else:
agent_state = state[1][i - 1]
theta = np.arctan2(agent_state.vy, agent_state.vx)
orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
agent_state.py + radius * np.sin(theta))))
orientations.append(orientation)
arrows = {}
arrows[0] = [patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
for orientation in orientations]
for arrow in arrows[0]:
ax.add_artist(arrow)
global_step = {}
global_step[0] = 0
def update(frame_num):
# nonlocal global_step
# nonlocal arrows
global_step[0] = frame_num
robot.center = robot_positions[frame_num]
for i, human in enumerate(humans):
human.center = human_positions[frame_num][i]
human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
for arrow in arrows[0]:
arrow.remove()
arrows[0] = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
arrowstyle=arrow_style) for orientation in orientations]
for arrow in arrows[0]:
ax.add_artist(arrow)
if self.attention_weights is not None:
human.set_color(str(self.attention_weights[frame_num][i]))
attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))
time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))
def plot_value_heatmap():
assert self.robot.kinematics == 'holonomic'
# when any key is pressed draw the action value plot
fig, axis = plt.subplots()
speeds = self.robot.policy.speeds
rotations = self.robot.policy.rotations + [np.pi * 2]
r, th = np.meshgrid(speeds, rotations)
z = np.array(self.action_values[global_step[0] % len(self.states)][1:])
z = (z - np.min(z)) / (np.max(z) - np.min(z)) # z: normalized action values
z = np.append(z, z[:6])
z = z.reshape(16, 6) # rotations: 16 speeds:6
polar = plt.subplot(projection="polar")
polar.tick_params(labelsize=16)
mesh = plt.pcolormesh(th, r, z, cmap=plt.cm.viridis)
plt.plot(rotations, r, color='k', ls='none')
plt.grid()
cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
cbar = plt.colorbar(mesh, cax=cbaxes)
cbar.ax.tick_params(labelsize=16)
plt.show()
def on_click(event):
anim.running ^= True
if anim.running:
anim.event_source.stop()
if hasattr(self.robot.policy, 'action_values'):
plot_value_heatmap()
else:
anim.event_source.start()
fig.canvas.mpl_connect('key_press_event', on_click)
anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
anim.running = True
anim.save('testcase_animation.gif', writer='imagemagick')
if output_file is not None:
ffmpeg_writer = animation.writers['ffmpeg']
writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save(output_file, writer=writer)
else:
plt.show()
else:
raise NotImplementedError
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/state.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/state.py | class FullState(object):
def __init__(self, px, py, vx, vy, radius, gx, gy, v_pref, theta):
self.px = px
self.py = py
self.vx = vx
self.vy = vy
self.radius = radius
self.gx = gx
self.gy = gy
self.v_pref = v_pref
self.theta = theta
self.position = (self.px, self.py)
self.goal_position = (self.gx, self.gy)
self.velocity = (self.vx, self.vy)
def __add__(self, other):
return other + (self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)
def __str__(self):
return ' '.join([str(x) for x in [self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy,
self.v_pref, self.theta]])
class ObservableState(object):
def __init__(self, px, py, vx, vy, radius):
self.px = px
self.py = py
self.vx = vx
self.vy = vy
self.radius = radius
self.position = (self.px, self.py)
self.velocity = (self.vx, self.vy)
# ObservableState(...) + ObservableState(...)
def __add__(self, other):
return other + (self.px, self.py, self.vx, self.vy, self.radius)
def __str__(self):
return ' '.join([str(x) for x in [self.px, self.py, self.vx, self.vy, self.radius]])
class JointState(object):
def __init__(self, self_state, human_states):
assert isinstance(self_state, FullState)
for human_state in human_states:
assert isinstance(human_state, ObservableState)
self.self_state = self_state
self.human_states = human_states
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/robot.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/robot.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Robot(Agent):
def __init__(self, config, section):
super(Robot, self).__init__(config, section)
def act(self, ob):
if self.policy is None:
raise AttributeError('Policy attribute has to be set!')
state = JointState(self.get_full_state(), ob)
action = self.policy.predict(state)
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/human.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/human.py | from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Human(Agent):
def __init__(self, config, section):
super(Human,self).__init__(config, section)
def act(self, ob):
"""
The state for human is its full state and all other agents' observable states
:param ob:
:return:
"""
state = JointState(self.get_full_state(), ob)
action = self.policy.predict(state)
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/action.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/action.py | from collections import namedtuple
ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
ActionRot = namedtuple('ActionRot', ['v', 'r'])
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/utils.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/utils.py | import numpy as np
def point_to_segment_dist(x1, y1, x2, y2, x3, y3):
"""
Calculate the closest distance between point(x3, y3) and a line segment with two endpoints (x1, y1), (x2, y2)
"""
px = x2 - x1
py = y2 - y1
if px == 0 and py == 0:
return np.linalg.norm((x3-x1, y3-y1))
u = ((x3 - x1) * px + (y3 - y1) * py) / (px * px + py * py)
if u > 1:
u = 1
elif u < 0:
u = 0
# (x, y) is the closest point to (x3, y3) on the line segment
x = x1 + u * px
y = y1 + u * py
return np.linalg.norm((x - x3, y-y3))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/__init__.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/info.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/info.py | class Timeout(object):
def __init__(self):
pass
def __str__(self):
return 'Timeout'
class ReachGoal(object):
def __init__(self):
pass
def __str__(self):
return 'Reaching goal'
class Danger(object):
def __init__(self, min_dist):
self.min_dist = min_dist
def __str__(self):
return 'Too close'
class Collision(object):
def __init__(self):
pass
def __str__(self):
return 'Collision'
class Nothing(object):
def __init__(self):
pass
def __str__(self):
return ''
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/utils/agent.py | sarl_star_ros/CrowdNav/crowd_sim/envs/utils/agent.py | from __future__ import division
import numpy as np
from numpy.linalg import norm
import abc
import logging
from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.action import ActionXY, ActionRot
from crowd_sim.envs.utils.state import ObservableState, FullState
class Agent(object):
def __init__(self, config, section):
"""
Base class for robot and human. Have the physical attributes of an agent.
"""
self.visible = config.getboolean(section, 'visible')
self.v_pref = config.getfloat(section, 'v_pref')
self.radius = config.getfloat(section, 'radius')
self.policy = policy_factory[config.get(section, 'policy')]()
self.sensor = config.get(section, 'sensor')
self.kinematics = self.policy.kinematics if self.policy is not None else None
self.px = None
self.py = None
self.gx = None
self.gy = None
self.vx = None
self.vy = None
self.theta = None
self.time_step = None
def print_info(self):
logging.info('Agent is {} and has {} kinematic constraint'.format(
'visible' if self.visible else 'invisible', self.kinematics))
def set_policy(self, policy):
self.policy = policy
self.kinematics = policy.kinematics
def sample_random_attributes(self):
"""
Sample agent radius and v_pref attribute from certain distribution
:return:
"""
self.v_pref = np.random.uniform(0.5, 1.5)
self.radius = np.random.uniform(0.3, 0.5)
def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
self.px = px
self.py = py
self.gx = gx
self.gy = gy
self.vx = vx
self.vy = vy
self.theta = theta
if radius is not None:
self.radius = radius
if v_pref is not None:
self.v_pref = v_pref
def get_observable_state(self):
return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)
def get_next_observable_state(self, action):
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
next_px, next_py = pos
if self.kinematics == 'holonomic':
next_vx = action.vx
next_vy = action.vy
else:
next_theta = self.theta + action.r
next_vx = action.v * np.cos(next_theta)
next_vy = action.v * np.sin(next_theta)
return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)
def get_full_state(self):
return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)
def get_position(self):
return self.px, self.py
def set_position(self, position):
self.px = position[0]
self.py = position[1]
def get_goal_position(self):
return self.gx, self.gy
def get_velocity(self):
return self.vx, self.vy
def set_velocity(self, velocity):
self.vx = velocity[0]
self.vy = velocity[1]
@abc.abstractmethod
def act(self, ob):
"""
Compute state using received observation and pass it to policy
"""
return
def check_validity(self, action):
if self.kinematics == 'holonomic':
assert isinstance(action, ActionXY)
else:
assert isinstance(action, ActionRot)
def compute_position(self, action, delta_t):
self.check_validity(action)
if self.kinematics == 'holonomic':
px = self.px + action.vx * delta_t
py = self.py + action.vy * delta_t
else:
theta = self.theta + action.r
px = self.px + np.cos(theta) * action.v * delta_t
py = self.py + np.sin(theta) * action.v * delta_t
return px, py
def step(self, action):
"""
Perform an action and update the state
"""
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
self.px, self.py = pos
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
self.theta = (self.theta + action.r) % (2 * np.pi)
self.vx = action.v * np.cos(self.theta)
self.vy = action.v * np.sin(self.theta)
def reached_destination(self):
return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius
# || (position - goal position) ||
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/policy/orca.py | sarl_star_ros/CrowdNav/crowd_sim/envs/policy/orca.py | from __future__ import division
import numpy as np
import rvo2
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
import logging
class ORCA(Policy):
def __init__(self):
"""
timeStep The time step of the simulation.
Must be positive.
neighborDist The default maximum distance (center point
to center point) to other agents a new agent
takes into account in the navigation. The
larger this number, the longer the running
time of the simulation. If the number is too
low, the simulation will not be safe. Must be
non-negative.
maxNeighbors The default maximum number of other agents a
new agent takes into account in the
navigation. The larger this number, the
longer the running time of the simulation.
If the number is too low, the simulation
will not be safe.
timeHorizon The default minimal amount of time for which
a new agent's velocities that are computed
by the simulation are safe with respect to
other agents. The larger this number, the
sooner an agent will respond to the presence
of other agents, but the less freedom the
agent has in choosing its velocities.
Must be positive.
timeHorizonObst The default minimal amount of time for which
a new agent's velocities that are computed
by the simulation are safe with respect to
obstacles. The larger this number, the
sooner an agent will respond to the presence
of obstacles, but the less freedom the agent
has in choosing its velocities.
Must be positive.
radius The default radius of a new agent.
Must be non-negative.
maxSpeed The default maximum speed of a new agent.
Must be non-negative.
velocity The default initial two-dimensional linear
velocity of a new agent (optional).
ORCA first uses neighborDist and maxNeighbors to find neighbors that need to be taken into account.
Here set them to be large enough so that all agents will be considered as neighbors.
Time_horizon should be set that at least it's safe for one time step
In this work, obstacles are not considered. So the value of time_horizon_obst doesn't matter.
"""
super(ORCA, self).__init__()
self.name = 'ORCA'
self.trainable = False
self.multiagent_training = None
self.kinematics = 'holonomic'
self.safety_space = 0
self.neighbor_dist = 10
self.max_neighbors = 10
self.time_horizon = 5
self.time_horizon_obst = 5
self.radius = 0.3
self.max_speed = 1
self.sim = None
def configure(self, config):
# self.time_step = config.getfloat('orca', 'time_step')
# self.neighbor_dist = config.getfloat('orca', 'neighbor_dist')
# self.max_neighbors = config.getint('orca', 'max_neighbors')
# self.time_horizon = config.getfloat('orca', 'time_horizon')
# self.time_horizon_obst = config.getfloat('orca', 'time_horizon_obst')
# self.radius = config.getfloat('orca', 'radius')
# self.max_speed = config.getfloat('orca', 'max_speed')
return
def set_phase(self, phase):
return
def predict(self, state):
"""
Create a rvo2 simulation at each time step and run one step
Python-RVO2 API: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/rvo2.pyx
How simulation is done in RVO2: https://github.com/sybrenstuvel/Python-RVO2/blob/master/src/Agent.cpp
Agent doesn't stop moving after it reaches the goal, because once it stops moving, the reciprocal rule is broken
:param state:
:return:
"""
self_state = state.self_state
params = self.neighbor_dist, self.max_neighbors, self.time_horizon, self.time_horizon_obst
if self.sim is not None and self.sim.getNumAgents() != len(state.human_states) + 1:
del self.sim
self.sim = None
if self.sim is None:
self.sim = rvo2.PyRVOSimulator(self.time_step, params[0],params[1],params[2],params[3], self.radius, self.max_speed)
self.sim.addAgent(self_state.position, params[0],params[1],params[2],params[3], self_state.radius + 0.01 + self.safety_space,
self_state.v_pref, self_state.velocity)
for human_state in state.human_states:
self.sim.addAgent(human_state.position, params[0],params[1],params[2],params[3], human_state.radius + 0.01 + self.safety_space,
self.max_speed, human_state.velocity)
# @lky add the obstacles
# self.sim.addObstacle([(0, 0), (2, 0), (2, 2), (0, 2)])
# self.sim.processObstacles()
else:
self.sim.setAgentPosition(0, self_state.position)
self.sim.setAgentVelocity(0, self_state.velocity)
for i, human_state in enumerate(state.human_states):
self.sim.setAgentPosition(i + 1, human_state.position)
self.sim.setAgentVelocity(i + 1, human_state.velocity)
# Set the preferred velocity to be a vector of unit magnitude (speed) in the direction of the goal.
velocity = np.array((self_state.gx - self_state.px, self_state.gy - self_state.py))
speed = np.linalg.norm(velocity)
human_vmax = 1
# human_vmax = np.random.uniform(0, 1.5)
pref_vel = human_vmax * (velocity / speed)
# Perturb a little to avoid deadlocks due to perfect symmetry.
# perturb_angle = np.random.random() * 2 * np.pi
# perturb_dist = np.random.random() * 0.01
# perturb_vel = np.array((np.cos(perturb_angle), np.sin(perturb_angle))) * perturb_dist
# pref_vel += perturb_vel
self.sim.setAgentPrefVelocity(0, tuple(pref_vel))
for i, human_state in enumerate(state.human_states):
# unknown goal position of other humans
self.sim.setAgentPrefVelocity(i + 1, (0, 0))
self.sim.doStep()
action = ActionXY(*self.sim.getAgentVelocity(0))
self.last_state = state
return action
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/policy/policy.py | sarl_star_ros/CrowdNav/crowd_sim/envs/policy/policy.py | import abc
import numpy as np
class Policy(object):
def __init__(self):
"""
Base class for all policies, has an abstract method predict().
"""
self.trainable = False
self.phase = None
self.model = None
self.device = None
self.last_state = None
self.time_step = None
# if agent is assumed to know the dynamics of real world
self.env = None
@abc.abstractmethod
def configure(self, config):
return
def set_phase(self, phase):
self.phase = phase
def set_device(self, device):
self.device = device
def set_env(self, env):
self.env = env
def get_model(self):
return self.model
@abc.abstractmethod
def predict(self, state):
"""
Policy takes state as input and output an action
"""
return
@staticmethod
def reach_destination(state):
self_state = state.self_state
if np.linalg.norm((self_state.py - self_state.gy, self_state.px - self_state.gx)) < self_state.radius:
return True
else:
return False
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/policy/__init__.py | sarl_star_ros/CrowdNav/crowd_sim/envs/policy/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.