repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Models/__init__.py | Models/__init__.py | from Models.BackboneCNN import CNN as Backbone_CNN
from Models.TFN import TFN_STTF, TFN_Chirplet, TFN_Morlet, Random_CNN
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
kpdemetriou/fuuid | https://github.com/kpdemetriou/fuuid/blob/caa236449b45eb14fe1a1d3499d3dfedb307c2a3/fuuid/__init__.py | fuuid/__init__.py | import os
import uuid
from base64 import b64encode
from time import time, time_ns
from base58 import b58encode
__all__ = ["raw_fuuid", "raw_fuuid_ns", "b58_fuuid", "b58_fuuid_ns", "b64_fuuid", "b64_fuuid_ns", "fuuid", "fuuid_ns"]
def raw_fuuid():
    """Return 16 raw FUUID bytes.

    Layout: a 4-byte big-endian offset Unix timestamp (whole seconds,
    minus 1.6e9) followed by 12 cryptographically random bytes.
    """
    stamp = int(time() - 16 * 10 ** 8)
    return stamp.to_bytes(4, "big") + os.urandom(12)
def raw_fuuid_ns():
    """Return 16 raw FUUID bytes with nanosecond precision.

    Layout: an 8-byte big-endian offset timestamp (nanoseconds, minus
    1.6e18) followed by 8 cryptographically random bytes.
    """
    stamp = int(time_ns() - 16 * 10 ** 17)
    return stamp.to_bytes(8, "big") + os.urandom(8)
def b58_fuuid():
    """Return a fresh FUUID encoded as a Base58 string."""
    raw = b58encode(raw_fuuid())
    return raw.decode()
def b58_fuuid_ns():
    """Return a fresh nanosecond-precision FUUID encoded as a Base58 string."""
    raw = b58encode(raw_fuuid_ns())
    return raw.decode()
def b64_fuuid():
    """Return a fresh FUUID encoded as a Base64 string."""
    raw = b64encode(raw_fuuid())
    return raw.decode()
def b64_fuuid_ns():
    """Return a fresh nanosecond-precision FUUID encoded as a Base64 string."""
    raw = b64encode(raw_fuuid_ns())
    return raw.decode()
def fuuid():
    """Return a fresh FUUID as a standard ``uuid.UUID`` instance."""
    raw = raw_fuuid()
    return uuid.UUID(bytes=raw)
def fuuid_ns():
    """Return a fresh nanosecond-precision FUUID as a ``uuid.UUID`` instance."""
    raw = raw_fuuid_ns()
    return uuid.UUID(bytes=raw)
| python | BSD-3-Clause | caa236449b45eb14fe1a1d3499d3dfedb307c2a3 | 2026-01-05T07:14:45.385867Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/setup.py | setup.py | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from sys import platform as _platform
import sys
import glob
import os
from distutils.core import setup
from distutils.extension import Extension
from distutils.util import get_platform
from glob import glob
# monkey-patch for parallel compilation
import multiprocessing
import multiprocessing.pool
def parallelCCompile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """Drop-in replacement for ``distutils.ccompiler.CCompiler.compile``
    that compiles translation units on a thread pool.

    Signature and return value (the list of object files) match the
    distutils original. Fixed: the ThreadPool was previously never
    closed/joined, leaking worker threads on every call.
    """
    # These lines are copied from distutils.ccompiler.CCompiler directly.
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # Number of parallel compilations: twice the core count, capped by
    # GiB of RAM (to avoid killing smaller platforms like the Pi).
    N = 2 * multiprocessing.cpu_count()
    try:
        # On Unix-like platforms attempt to obtain the total memory.
        mem = os.sysconf('SC_PHYS_PAGES') * os.sysconf('SC_PAGE_SIZE')  # bytes
    except (AttributeError, ValueError):
        # Couldn't query RAM; don't limit parallelism (it's probably a well
        # equipped Windows / Mac OS X box).
        pass
    else:
        mem = max(1, int(round(mem / 1024 ** 3)))  # convert to Gb
        N = min(mem, N)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            # Object already up to date / not part of this build map.
            return
        newcc_args = cc_args
        if _platform == "darwin":
            # C++ sources on macOS need an explicit deployment target,
            # standard and stdlib selection.
            if src.endswith('.cpp') or src.endswith('.cc'):
                newcc_args = cc_args + ["-mmacosx-version-min=10.7", "-std=c++17", "-stdlib=libc++"]
        self._compile(obj, src, ext, newcc_args, extra_postargs, pp_opts)

    # imap is evaluated on demand, so drain it with list(); close and join
    # the pool so worker threads don't leak across repeated compiles.
    pool = multiprocessing.pool.ThreadPool(N)
    try:
        list(pool.imap(_single_compile, objects))
    finally:
        pool.close()
        pool.join()
    return objects
import distutils.ccompiler
distutils.ccompiler.CCompiler.compile = parallelCCompile
# see http://stackoverflow.com/a/8719066/295157
import os
platform = get_platform()
print(platform)
CXX_FLAGS = ''
# libraries += [current_python]
libraries = []
include_dirs = [
'.',
'third_party/unitree_legged_sdk/pybind11/include',
'third_party/eigen3/include',
'third_party/osqp/include',
'third_party',
'third_party/osqp/lin_sys/direct/qdldl',
'third_party/osqp/lin_sys/direct/qdldl/qdldl_sources/include',
'third_party/osqp/lin_sys/direct/qdldl/amd/include',
]
try:
import numpy
NP_DIRS = [numpy.get_include()]
except:
print("numpy is disabled. getCameraImage maybe slower.")
else:
print("numpy is enabled.")
CXX_FLAGS += '-DPYBULLET_USE_NUMPY '
for d in NP_DIRS:
print("numpy_include_dirs = %s" % d)
include_dirs += NP_DIRS
sources = [
"mpc_controller/mpc_osqp.cc",
"third_party/osqp/src/auxil.c",
"third_party/osqp/src/cs.c",
"third_party/osqp/src/ctrlc.c",
"third_party/osqp/src/error.c",
"third_party/osqp/src/kkt.c",
"third_party/osqp/src/lin_alg.c",
"third_party/osqp/src/lin_sys.c",
"third_party/osqp/src/osqp.c",
"third_party/osqp/src/polish.c",
"third_party/osqp/src/proj.c",
"third_party/osqp/src/scaling.c",
"third_party/osqp/src/util.c",
"third_party/osqp/lin_sys/direct/qdldl/qdldl_interface.c",
"third_party/osqp/lin_sys/direct/qdldl/qdldl_sources/src/qdldl.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_1.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_2.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_aat.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_control.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_defaults.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_info.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_order.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_post_tree.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_postorder.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_preprocess.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/amd_valid.c",
"third_party/osqp/lin_sys/direct/qdldl/amd/src/SuiteSparse_config.c",
]
if _platform == "linux" or _platform == "linux2":
print("linux")
include_dirs += ['third_party/osqp/include/linux']
CXX_FLAGS += '-fpermissive '
libraries = ['dl', 'pthread']
CXX_FLAGS += '-D_LINUX '
CXX_FLAGS += '-DGLEW_STATIC '
CXX_FLAGS += '-DGLEW_INIT_OPENGL11_FUNCTIONS=1 '
CXX_FLAGS += '-DGLEW_DYNAMIC_LOAD_ALL_GLX_FUNCTIONS=1 '
CXX_FLAGS += '-DDYNAMIC_LOAD_X11_FUNCTIONS '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-fno-inline-functions-called-once '
CXX_FLAGS += '-fvisibility=hidden '
CXX_FLAGS += '-fvisibility-inlines-hidden '
CXX_FLAGS += '-std=c++1z '
CXX_FLAGS += '-Wno-sign-compare '
CXX_FLAGS += '-Wno-reorder '
CXX_FLAGS += '-Wno-unused-local-typedefs '
CXX_FLAGS += '-Wno-unused-variable '
CXX_FLAGS += '-Wno-unused-but-set-variable '
elif _platform == "win32":
print("win32!")
include_dirs += ['third_party/osqp/include/windows']
print(include_dirs)
libraries = ['User32', 'kernel32']
#CXX_FLAGS += '-DIS_WINDOWS '
CXX_FLAGS += '-DWIN32 '
CXX_FLAGS += '-DGLEW_STATIC '
CXX_FLAGS += '/std:c++17 '
elif _platform == "darwin":
print("darwin!")
CXX_FLAGS += '-fpermissive '
include_dirs += ['third_party/osqp/include/macosx']
os.environ['LDFLAGS'] = '-framework Cocoa -mmacosx-version-min=10.7 -stdlib=libc++ -framework OpenGL'
CXX_FLAGS += '-DB3_NO_PYTHON_FRAMEWORK '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-D_DARWIN '
CXX_FLAGS += '-stdlib=libc++ '
CXX_FLAGS += '-mmacosx-version-min=10.7 '
# CXX_FLAGS += '-framework Cocoa '
else:
print("bsd!")
include_dirs += ['third_party/osqp/include/linux']
libraries = ['GL', 'GLEW', 'pthread']
os.environ['LDFLAGS'] = '-L/usr/X11R6/lib'
CXX_FLAGS += '-D_BSD '
CXX_FLAGS += '-I/usr/X11R6/include '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-fno-inline-functions-called-once'
setup_py_dir = os.path.dirname(os.path.realpath(__file__))
extensions = []
mpc_osqp_ext = Extension(
"mpc_osqp",
sources=sources,
libraries=libraries,
extra_compile_args=CXX_FLAGS.split(),
include_dirs=include_dirs + ["."])
extensions.append(mpc_osqp_ext)
print(find_packages('.'))
setup(
name='motion_imitation',
version='0.0.4',
description=
'Motion Imitation and MPC with python bindings for MPC using the osqp solver',
long_description=
'Motion Imitation and MPC with python bindings for MPC using the PyBullet and the OSQP solver',
url='https://github.com/google-research/motion_imitation',
author='Erwin Coumans',
author_email='erwincoumans@google.com',
license='mixed',
platforms='any',
keywords=[
'robotics','control', 'physics simulation'
],
install_requires=[
'numpy',
# 'pybullet',
],
ext_modules=extensions,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: zlib/libpng License',
'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux',
'Operating System :: MacOS', 'Intended Audience :: Science/Research',
"Programming Language :: Python", 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Topic :: Games/Entertainment :: Simulation',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Framework :: Robot Framework'
],
packages=[x for x in find_packages('.')],
)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/setup.py | third_party/unitree_legged_sdk/pybind11/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Setup script for PyPI; use CMakeFile.txt to build extension modules
import contextlib
import os
import re
import shutil
import string
import subprocess
import sys
import tempfile
import setuptools.command.sdist
DIR = os.path.abspath(os.path.dirname(__file__))
VERSION_REGEX = re.compile(
r"^\s*#\s*define\s+PYBIND11_VERSION_([A-Z]+)\s+(.*)$", re.MULTILINE
)
# PYBIND11_GLOBAL_SDIST will build a different sdist, with the python-headers
# files, and the sys.prefix files (CMake and headers).
global_sdist = os.environ.get("PYBIND11_GLOBAL_SDIST", False)
setup_py = "tools/setup_global.py.in" if global_sdist else "tools/setup_main.py.in"
extra_cmd = 'cmdclass["sdist"] = SDist\n'
to_src = (
("pyproject.toml", "tools/pyproject.toml"),
("setup.py", setup_py),
)
# Read the listed version
with open("pybind11/_version.py") as f:
code = compile(f.read(), "pybind11/_version.py", "exec")
loc = {}
exec(code, loc)
version = loc["__version__"]
# Verify that the version matches the one in C++
with open("include/pybind11/detail/common.h") as f:
matches = dict(VERSION_REGEX.findall(f.read()))
cpp_version = "{MAJOR}.{MINOR}.{PATCH}".format(**matches)
if version != cpp_version:
msg = "Python version {} does not match C++ version {}!".format(
version, cpp_version
)
raise RuntimeError(msg)
def get_and_replace(filename, binary=False, **opts):
    """Read *filename* and substitute ``$var`` placeholders from *opts*.

    Returns ``bytes`` when *binary* is true, otherwise ``str``.
    """
    mode = "rb" if binary else "r"
    with open(filename, mode) as handle:
        raw = handle.read()
    if not binary:
        return string.Template(raw).substitute(opts)
    # Substitution has to be done on text (Python 3), so round-trip
    # through decode/encode for binary reads.
    return string.Template(raw.decode()).substitute(opts).encode()
# Use our input files instead when making the SDist (and anything that depends
# on it, like a wheel)
class SDist(setuptools.command.sdist.sdist):
    """sdist command that rewrites templated files into the release tree.

    After the standard release tree is laid out, each ``(destination,
    template)`` pair from the module-level ``to_src`` tuple is rendered via
    ``get_and_replace`` with the detected ``version`` and written over the
    destination file.
    """

    def make_release_tree(self, base_dir, files):
        setuptools.command.sdist.sdist.make_release_tree(self, base_dir, files)
        for to, src in to_src:
            # extra_cmd is rendered empty here (the cmdclass hook text is only
            # injected by the top-level build at module scope).
            txt = get_and_replace(src, binary=True, version=version, extra_cmd="")
            dest = os.path.join(base_dir, to)
            # This is normally linked, so unlink before writing!
            os.unlink(dest)
            with open(dest, "wb") as f:
                f.write(txt)
# Backport from Python 3
@contextlib.contextmanager
def TemporaryDirectory():  # noqa: N802
    """Prepare a temporary directory, cleanup when done.

    Fixed: the directory is now created *before* entering the try block;
    previously a failure inside ``mkdtemp()`` made the ``finally`` clause
    raise ``NameError`` (``tmpdir`` unbound), masking the real error.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        shutil.rmtree(tmpdir)
# Remove the CMake install directory when done
@contextlib.contextmanager
def remove_output(*sources):
    """Yield control, then delete every directory tree in *sources*."""
    try:
        yield
    finally:
        for tree in sources:
            shutil.rmtree(tree)
with remove_output("pybind11/include", "pybind11/share"):
# Generate the files if they are not present.
with TemporaryDirectory() as tmpdir:
cmd = ["cmake", "-S", ".", "-B", tmpdir] + [
"-DCMAKE_INSTALL_PREFIX=pybind11",
"-DBUILD_TESTING=OFF",
"-DPYBIND11_NOPYTHON=ON",
]
cmake_opts = dict(cwd=DIR, stdout=sys.stdout, stderr=sys.stderr)
subprocess.check_call(cmd, **cmake_opts)
subprocess.check_call(["cmake", "--install", tmpdir], **cmake_opts)
txt = get_and_replace(setup_py, version=version, extra_cmd=extra_cmd)
code = compile(txt, setup_py, "exec")
exec(code, {"SDist": SDist})
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tools/libsize.py | third_party/unitree_legged_sdk/pybind11/tools/libsize.py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import sys
# Internal build script for generating debugging test .so size.
# Usage:
# python libsize.py file.so save.txt -- displays the size of file.so and, if save.txt exists, compares it to the
# size in it, then overwrites save.txt with the new size for future runs.
# Validate the CLI: exactly two arguments (library path, size-record path).
if len(sys.argv) != 3:
    sys.exit("Invalid arguments: usage: python libsize.py file.so save.txt")
lib = sys.argv[1]
save = sys.argv[2]
if not os.path.exists(lib):
    sys.exit("Error: requested file ({}) does not exist".format(lib))
# Current on-disk size of the library, in bytes.
libsize = os.path.getsize(lib)
print("------", os.path.basename(lib), "file size:", libsize, end='')
# If a previous size was recorded, report the absolute and relative delta;
# with no record, just terminate the line.
if os.path.exists(save):
    with open(save) as sf:
        oldsize = int(sf.readline())
    if oldsize > 0:
        change = libsize - oldsize
        if change == 0:
            print(" (no change)")
        else:
            print(" (change of {:+} bytes = {:+.2%})".format(change, change / oldsize))
else:
    print()
# Record the new size for the next run's comparison.
with open(save, 'w') as sf:
    sf.write(str(libsize))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_smart_ptr.py | third_party/unitree_legged_sdk/pybind11/tests/test_smart_ptr.py | # -*- coding: utf-8 -*-
import pytest
m = pytest.importorskip("pybind11_tests.smart_ptr")
from pybind11_tests import ConstructorStats # noqa: E402
def test_smart_ptr(capture):
# Object1
for i, o in enumerate([m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1):
assert o.getRefCount() == 1
with capture:
m.print_object_1(o)
m.print_object_2(o)
m.print_object_3(o)
m.print_object_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * 4
for i, o in enumerate([m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7],
start=4):
print(o)
with capture:
if not isinstance(o, int):
m.print_object_1(o)
m.print_object_2(o)
m.print_object_3(o)
m.print_object_4(o)
m.print_myobject1_1(o)
m.print_myobject1_2(o)
m.print_myobject1_3(o)
m.print_myobject1_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8)
cstats = ConstructorStats.get(m.MyObject1)
assert cstats.alive() == 0
expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4
assert cstats.values() == expected_values
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object2
for i, o in zip([8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]):
print(o)
with capture:
m.print_myobject2_1(o)
m.print_myobject2_2(o)
m.print_myobject2_3(o)
m.print_myobject2_4(o)
assert capture == "MyObject2[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(m.MyObject2)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object3
for i, o in zip([9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]):
print(o)
with capture:
m.print_myobject3_1(o)
m.print_myobject3_2(o)
m.print_myobject3_3(o)
m.print_myobject3_4(o)
assert capture == "MyObject3[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(m.MyObject3)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object
cstats = ConstructorStats.get(m.Object)
assert cstats.alive() == 0
assert cstats.values() == []
assert cstats.default_constructions == 10
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# ref<>
cstats = m.cstats_ref()
assert cstats.alive() == 0
assert cstats.values() == ['from pointer'] * 10
assert cstats.default_constructions == 30
assert cstats.copy_constructions == 12
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 30
assert cstats.move_assignments == 0
def test_smart_ptr_refcounting():
    # The whole refcount-balance check lives in the C++ helper; it returns
    # truthy on success.
    assert m.test_object1_refcounting()
def test_unique_nodelete():
    # Dropping the Python reference must NOT destroy the C++ MyObject4:
    # the alive count stays at 1 (deliberate leak, per the holder's deleter).
    o = m.MyObject4(23)
    assert o.value == 23
    cstats = ConstructorStats.get(m.MyObject4)
    assert cstats.alive() == 1
    del o
    assert cstats.alive() == 1  # Leak, but that's intentional
def test_unique_nodelete4a():
    # Same intentional-leak behavior as test_unique_nodelete, for MyObject4a.
    o = m.MyObject4a(23)
    assert o.value == 23
    cstats = ConstructorStats.get(m.MyObject4a)
    assert cstats.alive() == 1
    del o
    assert cstats.alive() == 1  # Leak, but that's intentional
def test_unique_deleter():
    # MyObject4b must be destroyed when its Python wrapper goes away, while
    # the MyObject4a leaked by the previous test lingers (ConstructorStats
    # state is shared across tests, so counts here depend on test order).
    o = m.MyObject4b(23)
    assert o.value == 23
    cstats4a = ConstructorStats.get(m.MyObject4a)
    assert cstats4a.alive() == 2  # Two because of previous test
    cstats4b = ConstructorStats.get(m.MyObject4b)
    assert cstats4b.alive() == 1
    del o
    assert cstats4a.alive() == 1  # Should now only be one leftover from previous test
    assert cstats4b.alive() == 0  # Should be deleted
def test_large_holder():
    # Lifetime of MyObject5 (bound with a custom holder type — see the C++
    # side) must exactly track its Python reference.
    o = m.MyObject5(5)
    assert o.value == 5
    cstats = ConstructorStats.get(m.MyObject5)
    assert cstats.alive() == 1
    del o
    assert cstats.alive() == 0
def test_shared_ptr_and_references():
s = m.SharedPtrRef()
stats = ConstructorStats.get(m.A)
assert stats.alive() == 2
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false)
assert stats.alive() == 2
assert s.set_ref(ref)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(ref)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
def test_shared_ptr_from_this_and_references():
s = m.SharedFromThisRef()
stats = ConstructorStats.get(m.B)
assert stats.alive() == 2
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)
assert stats.alive() == 2
assert s.set_ref(ref)
assert s.set_holder(ref) # std::enable_shared_from_this can create a holder from a reference
bad_wp = s.bad_wp # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)
assert stats.alive() == 2
assert s.set_ref(bad_wp)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(bad_wp)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, bad_wp, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
z = m.SharedFromThisVirt.get()
y = m.SharedFromThisVirt.get()
assert y is z
def test_move_only_holder():
    # Instances held by a move-only holder must be destroyed when their
    # Python wrappers are deleted, whichever factory created them.
    a = m.TypeWithMoveOnlyHolder.make()
    b = m.TypeWithMoveOnlyHolder.make_as_object()
    stats = ConstructorStats.get(m.TypeWithMoveOnlyHolder)
    assert stats.alive() == 2
    del b
    assert stats.alive() == 1
    del a
    assert stats.alive() == 0
def test_holder_with_addressof_operator():
# this test must not throw exception from c++
a = m.TypeForHolderWithAddressOf.make()
a.print_object_1()
a.print_object_2()
a.print_object_3()
a.print_object_4()
stats = ConstructorStats.get(m.TypeForHolderWithAddressOf)
assert stats.alive() == 1
np = m.TypeForHolderWithAddressOf.make()
assert stats.alive() == 2
del a
assert stats.alive() == 1
del np
assert stats.alive() == 0
b = m.TypeForHolderWithAddressOf.make()
c = b
assert b.get() is c.get()
assert stats.alive() == 1
del b
assert stats.alive() == 1
del c
assert stats.alive() == 0
def test_move_only_holder_with_addressof_operator():
    # Attribute reads/writes and destruction must behave normally for a type
    # whose holder overloads operator& (see the C++ binding).
    a = m.TypeForMoveOnlyHolderWithAddressOf.make()
    a.print_object()
    stats = ConstructorStats.get(m.TypeForMoveOnlyHolderWithAddressOf)
    assert stats.alive() == 1
    a.value = 42
    assert a.value == 42
    del a
    assert stats.alive() == 0
def test_smart_ptr_from_default():
    # Requesting a custom-holder (shared_ptr) view of an instance created
    # under the default holder must raise, not fabricate ownership.
    instance = m.HeldByDefaultHolder()
    with pytest.raises(RuntimeError) as excinfo:
        m.HeldByDefaultHolder.load_shared_ptr(instance)
    assert "Unable to load a custom holder type from a " \
        "default-holder instance" in str(excinfo.value)
def test_shared_ptr_gc():
    """#187: issue involving std::shared_ptr<> return value policy & garbage collection"""
    el = m.ElementList()
    for i in range(10):
        el.add(m.ElementA(i))
    # Force a GC pass: the elements must survive because the list holds them.
    pytest.gc_collect()
    for i, v in enumerate(el.get()):
        assert i == v.value()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_custom_type_casters.py | third_party/unitree_legged_sdk/pybind11/tests/test_custom_type_casters.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import custom_type_casters as m
def test_noconvert_args(msg):
a = m.ArgInspector()
assert msg(a.f("hi")) == """
loading ArgInspector1 argument WITH conversion allowed. Argument value = hi
"""
assert msg(a.g("this is a", "this is b")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
13
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42)) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42, "this is d")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d
"""
assert (a.h("arg 1") ==
"loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1")
assert msg(m.arg_inspect_func("A1", "A2")) == """
loading ArgInspector2 argument WITH conversion allowed. Argument value = A1
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2
"""
assert m.floats_preferred(4) == 2.0
assert m.floats_only(4.0) == 2.0
with pytest.raises(TypeError) as excinfo:
m.floats_only(4)
assert msg(excinfo.value) == """
floats_only(): incompatible function arguments. The following argument types are supported:
1. (f: float) -> float
Invoked with: 4
"""
assert m.ints_preferred(4) == 2
assert m.ints_preferred(True) == 0
with pytest.raises(TypeError) as excinfo:
m.ints_preferred(4.0)
assert msg(excinfo.value) == """
ints_preferred(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
""" # noqa: E501 line too long
assert m.ints_only(4) == 2
with pytest.raises(TypeError) as excinfo:
m.ints_only(4.0)
assert msg(excinfo.value) == """
ints_only(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
"""
def test_custom_caster_destruction():
"""Tests that returning a pointer to a type that gets converted with a custom type caster gets
destroyed when the function has py::return_value_policy::take_ownership policy applied."""
cstats = m.destruction_tester_cstats()
# This one *doesn't* have take_ownership: the pointer should be used but not destroyed:
z = m.custom_caster_no_destroy()
assert cstats.alive() == 1 and cstats.default_constructions == 1
assert z
# take_ownership applied: this constructs a new object, casts it, then destroys it:
z = m.custom_caster_destroy()
assert z
assert cstats.default_constructions == 2
# Same, but with a const pointer return (which should *not* inhibit destruction):
z = m.custom_caster_destroy_const()
assert z
assert cstats.default_constructions == 3
# Make sure we still only have the original object (from ..._no_destroy()) alive:
assert cstats.alive() == 1
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_builtin_casters.py | third_party/unitree_legged_sdk/pybind11/tests/test_builtin_casters.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import builtin_casters as m
from pybind11_tests import UserType, IncType
def test_simple_string():
    # A const char* must round-trip through the binding layer unchanged.
    assert m.string_roundtrip("const char *") == "const char *"
def test_unicode_conversion():
"""Tests unicode conversion and error reporting."""
assert m.good_utf8_string() == u"Say utf8β½ π π"
assert m.good_utf16_string() == u"bβ½ππz"
assert m.good_utf32_string() == u"aππβ½z"
assert m.good_wchar_string() == u"aβΈπz"
if hasattr(m, "has_u8string"):
assert m.good_utf8_u8string() == u"Say utf8β½ π π"
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_string()
with pytest.raises(UnicodeDecodeError):
m.bad_utf16_string()
# These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)
if hasattr(m, "bad_utf32_string"):
with pytest.raises(UnicodeDecodeError):
m.bad_utf32_string()
if hasattr(m, "bad_wchar_string"):
with pytest.raises(UnicodeDecodeError):
m.bad_wchar_string()
if hasattr(m, "has_u8string"):
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_u8string()
assert m.u8_Z() == 'Z'
assert m.u8_eacute() == u'Γ©'
assert m.u16_ibang() == u'β½'
assert m.u32_mathbfA() == u'π'
assert m.wchar_heart() == u'β₯'
if hasattr(m, "has_u8string"):
assert m.u8_char8_Z() == 'Z'
def test_single_char_arguments():
"""Tests failures for passing invalid inputs to char-accepting functions"""
def toobig_message(r):
return "Character code point not in range({0:#x})".format(r)
toolong_message = "Expected a character, but multi-character string found"
assert m.ord_char(u'a') == 0x61 # simple ASCII
assert m.ord_char_lv(u'b') == 0x62
assert m.ord_char(u'Γ©') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
assert m.ord_char(u'Δ') == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char(u'ab')
assert str(excinfo.value) == toolong_message
assert m.ord_char16(u'a') == 0x61
assert m.ord_char16(u'Γ©') == 0xE9
assert m.ord_char16_lv(u'Γͺ') == 0xEA
assert m.ord_char16(u'Δ') == 0x100
assert m.ord_char16(u'β½') == 0x203d
assert m.ord_char16(u'β₯') == 0x2665
assert m.ord_char16_lv(u'β‘') == 0x2661
with pytest.raises(ValueError) as excinfo:
assert m.ord_char16(u'π') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char16(u'aa')
assert str(excinfo.value) == toolong_message
assert m.ord_char32(u'a') == 0x61
assert m.ord_char32(u'Γ©') == 0xE9
assert m.ord_char32(u'Δ') == 0x100
assert m.ord_char32(u'β½') == 0x203d
assert m.ord_char32(u'β₯') == 0x2665
assert m.ord_char32(u'π') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert m.ord_char32(u'aa')
assert str(excinfo.value) == toolong_message
assert m.ord_wchar(u'a') == 0x61
assert m.ord_wchar(u'Γ©') == 0xE9
assert m.ord_wchar(u'Δ') == 0x100
assert m.ord_wchar(u'β½') == 0x203d
assert m.ord_wchar(u'β₯') == 0x2665
if m.wchar_size == 2:
with pytest.raises(ValueError) as excinfo:
assert m.ord_wchar(u'π') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
else:
assert m.ord_wchar(u'π') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert m.ord_wchar(u'aa')
assert str(excinfo.value) == toolong_message
if hasattr(m, "has_u8string"):
assert m.ord_char8(u'a') == 0x61 # simple ASCII
assert m.ord_char8_lv(u'b') == 0x62
assert m.ord_char8(u'Γ©') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
assert m.ord_char8(u'Δ') == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char8(u'ab')
assert str(excinfo.value) == toolong_message
def test_bytes_to_string():
"""Tests the ability to pass bytes to C++ string-accepting functions. Note that this is
one-way: the only way to return bytes to Python is via the pybind11::bytes class."""
# Issue #816
def to_bytes(s):
b = s if env.PY2 else s.encode("utf8")
assert isinstance(b, bytes)
return b
assert m.strlen(to_bytes("hi")) == 2
assert m.string_length(to_bytes("world")) == 5
assert m.string_length(to_bytes("a\x00b")) == 3
assert m.strlen(to_bytes("a\x00b")) == 1 # C-string limitation
# passing in a utf8 encoded string should work
assert m.string_length(u'π©'.encode("utf8")) == 4
@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no <string_view>")
def test_string_view(capture):
    """Tests support for C++17 string_view arguments and return values"""
    # Character values of the same text through utf-8 / utf-16 / utf-32 views:
    assert m.string_view_chars("Hi") == [72, 105]
    assert m.string_view_chars("Hi π") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
    assert m.string_view16_chars(u"Hi π") == [72, 105, 32, 0xd83c, 0xdf82]
    assert m.string_view32_chars(u"Hi π") == [72, 105, 32, 127874]
    if hasattr(m, "has_u8string"):
        assert m.string_view8_chars("Hi") == [72, 105]
        assert m.string_view8_chars(u"Hi π") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
    # Returning string_views of each width round-trips to Python str:
    assert m.string_view_return() == u"utf8 secret π"
    assert m.string_view16_return() == u"utf16 secret π"
    assert m.string_view32_return() == u"utf32 secret π"
    if hasattr(m, "has_u8string"):
        assert m.string_view8_return() == u"utf8 secret π"
    # The printed length after each string is the code-unit count on the C++ side.
    with capture:
        m.string_view_print("Hi")
        m.string_view_print("utf8 π")
        m.string_view16_print(u"utf16 π")
        m.string_view32_print(u"utf32 π")
    assert capture == u"""
        Hi 2
        utf8 π 9
        utf16 π 8
        utf32 π 7
    """
    if hasattr(m, "has_u8string"):
        with capture:
            m.string_view8_print("Hi")
            m.string_view8_print(u"utf8 π")
        assert capture == u"""
            Hi 2
            utf8 π 9
        """
    with capture:
        m.string_view_print("Hi, ascii")
        m.string_view_print("Hi, utf8 π")
        m.string_view16_print(u"Hi, utf16 π")
        m.string_view32_print(u"Hi, utf32 π")
    assert capture == u"""
        Hi, ascii 9
        Hi, utf8 π 13
        Hi, utf16 π 12
        Hi, utf32 π 11
    """
    if hasattr(m, "has_u8string"):
        with capture:
            m.string_view8_print("Hi, ascii")
            m.string_view8_print(u"Hi, utf8 π")
        assert capture == u"""
            Hi, ascii 9
            Hi, utf8 π 13
        """
def test_integer_casting():
    """Issue #929 - out-of-range integer values shouldn't be accepted"""
    # In-range values convert and round-trip through str():
    assert m.i32_str(-1) == "-1"
    assert m.i64_str(-1) == "-1"
    assert m.i32_str(2000000000) == "2000000000"
    assert m.u32_str(2000000000) == "2000000000"
    if env.PY2:
        # Python 2 distinguishes int from long; both must be accepted.
        assert m.i32_str(long(-1)) == "-1"  # noqa: F821 undefined name 'long'
        assert m.i64_str(long(-1)) == "-1"  # noqa: F821 undefined name 'long'
        assert m.i64_str(long(-999999999999)) == "-999999999999"  # noqa: F821 undefined name
        assert m.u64_str(long(999999999999)) == "999999999999"  # noqa: F821 undefined name 'long'
    else:
        assert m.i64_str(-999999999999) == "-999999999999"
        assert m.u64_str(999999999999) == "999999999999"
    # Negative values must not cast to unsigned, and values outside the 32-bit
    # range must not cast to a 32-bit int:
    with pytest.raises(TypeError) as excinfo:
        m.u32_str(-1)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.u64_str(-1)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.i32_str(-3000000000)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.i32_str(3000000000)
    assert "incompatible function arguments" in str(excinfo.value)
    if env.PY2:
        with pytest.raises(TypeError) as excinfo:
            m.u32_str(long(-1))  # noqa: F821 undefined name 'long'
        assert "incompatible function arguments" in str(excinfo.value)
        with pytest.raises(TypeError) as excinfo:
            m.u64_str(long(-1))  # noqa: F821 undefined name 'long'
        assert "incompatible function arguments" in str(excinfo.value)
def test_tuple(doc):
    """std::pair <-> tuple & std::tuple <-> tuple"""
    assert m.pair_passthrough((True, "test")) == ("test", True)
    assert m.tuple_passthrough((True, "test", 5)) == (5, "test", True)
    # Any sequence can be cast to a std::pair or std::tuple
    assert m.pair_passthrough([True, "test"]) == ("test", True)
    assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True)
    assert m.empty_tuple() == ()
    # Generated signatures mention the element types:
    assert doc(m.pair_passthrough) == """
        pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]
        Return a pair in reversed order
    """
    assert doc(m.tuple_passthrough) == """
        tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
        Return a triple in reversed order
    """
    # rvalue/lvalue and nested pair/tuple returns:
    assert m.rvalue_pair() == ("rvalue", "rvalue")
    assert m.lvalue_pair() == ("lvalue", "lvalue")
    assert m.rvalue_tuple() == ("rvalue", "rvalue", "rvalue")
    assert m.lvalue_tuple() == ("lvalue", "lvalue", "lvalue")
    assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue")))
    assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue")))
    assert m.int_string_pair() == (2, "items")
def test_builtins_cast_return_none():
    """Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
    assert m.return_none_string() is None
    assert m.return_none_char() is None
    assert m.return_none_bool() is None
    assert m.return_none_int() is None
    assert m.return_none_float() is None
    assert m.return_none_pair() is None
def test_none_deferred():
    """None passed as various argument types should defer to other overloads"""
    # Each *defer* function returns falsy for a real value and truthy for None.
    assert not m.defer_none_cstring("abc")
    assert m.defer_none_cstring(None)
    assert not m.defer_none_custom(UserType())
    assert m.defer_none_custom(None)
    assert m.nodefer_none_void(None)
def test_void_caster():
    """nullptr_t loads from and casts back to None."""
    assert m.load_nullptr_t(None) is None
    assert m.cast_nullptr_t() is None
def test_reference_wrapper():
    """std::reference_wrapper for builtin and user types"""
    assert m.refwrap_builtin(42) == 420
    assert m.refwrap_usertype(UserType(42)) == 42
    # None is not accepted where a reference_wrapper is expected:
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_builtin(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_usertype(None)
    assert "incompatible function arguments" in str(excinfo.value)
    # copy=True returns distinct objects each call ...
    a1 = m.refwrap_list(copy=True)
    a2 = m.refwrap_list(copy=True)
    assert [x.value for x in a1] == [2, 3]
    assert [x.value for x in a2] == [2, 3]
    assert not a1[0] is a2[0] and not a1[1] is a2[1]
    # ... while copy=False returns the same underlying objects both times.
    b1 = m.refwrap_list(copy=False)
    b2 = m.refwrap_list(copy=False)
    assert [x.value for x in b1] == [1, 2]
    assert [x.value for x in b2] == [1, 2]
    assert b1[0] is b2[0] and b1[1] is b2[1]
    assert m.refwrap_iiw(IncType(5)) == 5
    assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10]
def test_complex_cast():
    """std::complex casts"""
    assert m.complex_cast(1) == "1.0"
    assert m.complex_cast(2j) == "(0.0, 2.0)"
def test_bool_caster():
    """Test bool caster implicit conversions."""
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    def require_implicit(v):
        # v must be rejected by the no-conversion overload (i.e. only an
        # implicit conversion can make it a bool).
        pytest.raises(TypeError, noconvert, v)
    def cant_convert(v):
        # v must be rejected even with conversion enabled.
        pytest.raises(TypeError, convert, v)
    # straight up bool
    assert convert(True) is True
    assert convert(False) is False
    assert noconvert(True) is True
    assert noconvert(False) is False
    # None requires implicit conversion
    require_implicit(None)
    assert convert(None) is False
    class A(object):
        def __init__(self, x):
            self.x = x
        def __nonzero__(self):
            # Python 2 truthiness hook
            return self.x
        def __bool__(self):
            # Python 3 truthiness hook
            return self.x
    class B(object):
        pass
    # Arbitrary objects are not accepted
    cant_convert(object())
    cant_convert(B())
    # Objects with __nonzero__ / __bool__ defined can be converted
    require_implicit(A(True))
    assert convert(A(True)) is True
    assert convert(A(False)) is False
def test_numpy_bool():
    """np.bool_ converts to bool with or without implicit conversion enabled."""
    np = pytest.importorskip("numpy")
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    def cant_convert(v):
        # v must be rejected even with conversion enabled.
        pytest.raises(TypeError, convert, v)
    # np.bool_ is not considered implicit
    assert convert(np.bool_(True)) is True
    assert convert(np.bool_(False)) is False
    assert noconvert(np.bool_(True)) is True
    assert noconvert(np.bool_(False)) is False
    cant_convert(np.zeros(2, dtype='int'))
def test_int_long():
    """In Python 2, a C++ int should return a Python int rather than long
    if possible: longs are not always accepted where ints are used (such
    as the argument to sys.exit()). A C++ long long is always a Python
    long."""
    import sys
    # On Python 2, sys.maxint exists and maxint+1 is a long; on Python 3 the
    # getattr default makes this plain int.
    must_be_long = type(getattr(sys, 'maxint', 1) + 1)
    assert isinstance(m.int_cast(), int)
    assert isinstance(m.long_cast(), int)
    assert isinstance(m.longlong_cast(), must_be_long)
def test_void_caster_2():
    # The actual checks happen on the C++ side; the binding returns truthy on success.
    assert m.test_void_caster()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_local_bindings.py | third_party/unitree_legged_sdk/pybind11/tests/test_local_bindings.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import local_bindings as m
def test_load_external():
    """Load a `py::module_local` type that's only registered in an external module"""
    import pybind11_cross_module_tests as cm
    assert m.load_external1(cm.ExternalType1(11)) == 11
    assert m.load_external2(cm.ExternalType2(22)) == 22
    # The two external types are not interchangeable:
    with pytest.raises(TypeError) as excinfo:
        assert m.load_external2(cm.ExternalType1(21)) == 21
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        assert m.load_external1(cm.ExternalType2(12)) == 12
    assert "incompatible function arguments" in str(excinfo.value)
def test_local_bindings():
    """Tests that duplicate `py::module_local` class bindings work across modules"""
    # Make sure we can load the second module with the conflicting (but local) definition:
    import pybind11_cross_module_tests as cm
    # Each module's LocalType has its own behavior and its own extra method:
    i1 = m.LocalType(5)
    assert i1.get() == 4
    assert i1.get3() == 8
    i2 = cm.LocalType(10)
    assert i2.get() == 11
    assert i2.get2() == 12
    assert not hasattr(i1, 'get2')
    assert not hasattr(i2, 'get3')
    # Loading within the local module
    assert m.local_value(i1) == 5
    assert cm.local_value(i2) == 10
    # Cross-module loading works as well (on failure, the type loader looks for
    # external module-local converters):
    assert m.local_value(i2) == 10
    assert cm.local_value(i1) == 5
def test_nonlocal_failure():
    """Tests that attempting to register a non-local type in multiple modules fails"""
    import pybind11_cross_module_tests as cm
    with pytest.raises(RuntimeError) as excinfo:
        cm.register_nonlocal()
    assert str(excinfo.value) == 'generic_type: type "NonLocalType" is already registered!'
def test_duplicate_local():
    """Tests expected failure when registering a class twice with py::local in the same module"""
    with pytest.raises(RuntimeError) as excinfo:
        m.register_local_external()
    import pybind11_tests
    # Message differs when the class_ test section was compiled out:
    assert str(excinfo.value) == (
        'generic_type: type "LocalExternal" is already registered!'
        if hasattr(pybind11_tests, 'class_') else 'test_class not enabled')
def test_stl_bind_local():
    """Module-local stl-bound containers accept each other's element types."""
    import pybind11_cross_module_tests as cm
    v1, v2 = m.LocalVec(), cm.LocalVec()
    v1.append(m.LocalType(1))
    v1.append(m.LocalType(2))
    v2.append(cm.LocalType(1))
    v2.append(cm.LocalType(2))
    # Cross module value loading:
    v1.append(cm.LocalType(3))
    v2.append(m.LocalType(3))
    # The stored values reflect each module's own LocalType get() behavior.
    assert [i.get() for i in v1] == [0, 1, 2]
    assert [i.get() for i in v2] == [2, 3, 4]
    v3, v4 = m.NonLocalVec(), cm.NonLocalVec2()
    v3.append(m.NonLocalType(1))
    v3.append(m.NonLocalType(2))
    v4.append(m.NonLocal2(3))
    v4.append(m.NonLocal2(4))
    assert [i.get() for i in v3] == [1, 2]
    assert [i.get() for i in v4] == [13, 14]
    d1, d2 = m.LocalMap(), cm.LocalMap()
    d1["a"] = v1[0]
    d1["b"] = v1[1]
    d2["c"] = v2[0]
    d2["d"] = v2[1]
    assert {i: d1[i].get() for i in d1} == {'a': 0, 'b': 1}
    assert {i: d2[i].get() for i in d2} == {'c': 2, 'd': 3}
def test_stl_bind_global():
    """Re-registering globally stl-bound containers from another module fails."""
    import pybind11_cross_module_tests as cm
    with pytest.raises(RuntimeError) as excinfo:
        cm.register_nonlocal_map()
    assert str(excinfo.value) == 'generic_type: type "NonLocalMap" is already registered!'
    with pytest.raises(RuntimeError) as excinfo:
        cm.register_nonlocal_vec()
    assert str(excinfo.value) == 'generic_type: type "NonLocalVec" is already registered!'
    with pytest.raises(RuntimeError) as excinfo:
        cm.register_nonlocal_map2()
    assert str(excinfo.value) == 'generic_type: type "NonLocalMap2" is already registered!'
def test_mixed_local_global():
    """Local types take precedence over globally registered types: a module with a `module_local`
    type can be registered even if the type is already registered globally. With the module,
    casting will go to the local type; outside the module casting goes to the global type."""
    import pybind11_cross_module_tests as cm
    m.register_mixed_global()
    m.register_mixed_local()
    # Before cm registers its own versions, everything resolves to m's bindings:
    a = []
    a.append(m.MixedGlobalLocal(1))
    a.append(m.MixedLocalGlobal(2))
    a.append(m.get_mixed_gl(3))
    a.append(m.get_mixed_lg(4))
    assert [x.get() for x in a] == [101, 1002, 103, 1004]
    cm.register_mixed_global_local()
    cm.register_mixed_local_global()
    # After cm registers, each module's constructors/getters use its own binding:
    a.append(m.MixedGlobalLocal(5))
    a.append(m.MixedLocalGlobal(6))
    a.append(cm.MixedGlobalLocal(7))
    a.append(cm.MixedLocalGlobal(8))
    a.append(m.get_mixed_gl(9))
    a.append(m.get_mixed_lg(10))
    a.append(cm.get_mixed_gl(11))
    a.append(cm.get_mixed_lg(12))
    assert [x.get() for x in a] == \
        [101, 1002, 103, 1004, 105, 1006, 207, 2008, 109, 1010, 211, 2012]
def test_internal_locals_differ():
    """Makes sure the internal local type map differs across the two modules"""
    import pybind11_cross_module_tests as cm
    assert m.local_cpp_types_addr() != cm.local_cpp_types_addr()
@pytest.mark.xfail("env.PYPY and sys.pypy_version_info < (7, 3, 2)")
def test_stl_caster_vs_stl_bind(msg):
    """One module uses a generic vector caster from `<pybind11/stl.h>` while the other
    exports `std::vector<int>` via `py:bind_vector` and `py::module_local`"""
    import pybind11_cross_module_tests as cm
    v1 = cm.VectorInt([1, 2, 3])
    # The bound vector is accepted by both the caster and the binding:
    assert m.load_vector_via_caster(v1) == 6
    assert cm.load_vector_via_binding(v1) == 6
    # A plain list works with the generic caster ...
    v2 = [1, 2, 3]
    assert m.load_vector_via_caster(v2) == 6
    # ... but not with the bound type. The call itself raises, so there is
    # nothing to compare against (the original `== 6` here was a dead,
    # never-evaluated comparison).
    with pytest.raises(TypeError) as excinfo:
        cm.load_vector_via_binding(v2)
    assert msg(excinfo.value) == """
        load_vector_via_binding(): incompatible function arguments. The following argument types are supported:
            1. (arg0: pybind11_cross_module_tests.VectorInt) -> int
        Invoked with: [1, 2, 3]
    """  # noqa: E501 line too long
def test_cross_module_calls():
    """Passing module-local instances between the two modules' functions."""
    import pybind11_cross_module_tests as cm
    v1 = m.LocalVec()
    v1.append(m.LocalType(1))
    v2 = cm.LocalVec()
    v2.append(cm.LocalType(2))
    # Returning the self pointer should get picked up as returning an existing
    # instance (even when that instance is of a foreign, non-local type).
    assert m.return_self(v1) is v1
    assert cm.return_self(v2) is v2
    assert m.return_self(v2) is v2
    assert cm.return_self(v1) is v1
    assert m.LocalVec is not cm.LocalVec
    # Returning a copy, on the other hand, always goes to the local type,
    # regardless of where the source type came from.
    assert type(m.return_copy(v1)) is m.LocalVec
    assert type(m.return_copy(v2)) is m.LocalVec
    assert type(cm.return_copy(v1)) is cm.LocalVec
    assert type(cm.return_copy(v2)) is cm.LocalVec
    # Test the example given in the documentation (which also tests inheritance casting):
    mycat = m.Cat("Fluffy")
    mydog = cm.Dog("Rover")
    assert mycat.get_name() == "Fluffy"
    assert mydog.name() == "Rover"
    assert m.Cat.__base__.__name__ == "Pet"
    assert cm.Dog.__base__.__name__ == "Pet"
    # Each module has its own (module-local) Pet base class:
    assert m.Cat.__base__ is not cm.Dog.__base__
    assert m.pet_name(mycat) == "Fluffy"
    assert m.pet_name(mydog) == "Rover"
    assert cm.pet_name(mycat) == "Fluffy"
    assert cm.pet_name(mydog) == "Rover"
    assert m.MixGL is not cm.MixGL
    a = m.MixGL(1)
    b = cm.MixGL(2)
    assert m.get_gl_value(a) == 11
    assert m.get_gl_value(b) == 12
    assert cm.get_gl_value(a) == 101
    assert cm.get_gl_value(b) == 102
    # MixGL2 instances are not accepted by the MixGL getters:
    c, d = m.MixGL2(3), cm.MixGL2(4)
    with pytest.raises(TypeError) as excinfo:
        m.get_gl_value(c)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.get_gl_value(d)
    assert "incompatible function arguments" in str(excinfo.value)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_docstring_options.py | third_party/unitree_legged_sdk/pybind11/tests/test_docstring_options.py | # -*- coding: utf-8 -*-
from pybind11_tests import docstring_options as m
def test_docstring_options():
    """Checks the effect of py::options docstring switches on generated __doc__ strings."""
    # options.disable_function_signatures()
    assert not m.test_function1.__doc__
    assert m.test_function2.__doc__ == "A custom docstring"
    # docstring specified on just the first overload definition:
    assert m.test_overloaded1.__doc__ == "Overload docstring"
    # docstring on both overloads:
    assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2"
    # docstring on only second overload:
    assert m.test_overloaded3.__doc__ == "Overload docstr"
    # options.enable_function_signatures()
    assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None")
    assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None")
    assert m.test_function4.__doc__ .endswith("A custom docstring\n")
    # options.disable_function_signatures()
    # options.disable_user_defined_docstrings()
    assert not m.test_function5.__doc__
    # nested options.enable_user_defined_docstrings()
    assert m.test_function6.__doc__ == "A custom docstring"
    # RAII destructor
    assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None")
    assert m.test_function7.__doc__ .endswith("A custom docstring\n")
    # Suppression of user-defined docstrings for non-function objects
    assert not m.DocstringTestFoo.__doc__
    assert not m.DocstringTestFoo.value_prop.__doc__
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_buffers.py | third_party/unitree_legged_sdk/pybind11/tests/test_buffers.py | # -*- coding: utf-8 -*-
import io
import struct
import ctypes
import pytest
import env # noqa: F401
from pybind11_tests import buffers as m
from pybind11_tests import ConstructorStats
np = pytest.importorskip("numpy")
def test_from_python():
    """Constructing a C++ Matrix from a NumPy array via the buffer protocol."""
    with pytest.raises(RuntimeError) as excinfo:
        m.Matrix(np.array([1, 2, 3]))  # trying to assign a 1D array
    assert str(excinfo.value) == "Incompatible buffer format!"
    m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
    m4 = m.Matrix(m3)
    # Element-wise equality of the source array and the constructed matrix:
    for i in range(m4.rows()):
        for j in range(m4.cols()):
            assert m3[i, j] == m4[i, j]
    # Constructor-statistics bookkeeping: exactly one Matrix alive, freed on del.
    cstats = ConstructorStats.get(m.Matrix)
    assert cstats.alive() == 1
    del m3, m4
    assert cstats.alive() == 0
    assert cstats.values() == ["2x3 matrix"]
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0  # Don't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
# https://foss.heptapod.net/pypy/pypy/-/issues/2444
def test_to_python():
    """Exposing a C++ Matrix to Python through memoryview/struct/NumPy views."""
    mat = m.Matrix(5, 4)
    assert memoryview(mat).shape == (5, 4)
    assert mat[2, 3] == 0
    mat[2, 3] = 4.0
    mat[3, 2] = 7.0
    assert mat[2, 3] == 4
    assert mat[3, 2] == 7
    # Raw buffer access: row-major float32 layout, 4 bytes per element.
    assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )
    assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )
    # A zero-copy NumPy view shares storage with the matrix:
    mat2 = np.array(mat, copy=False)
    assert mat2.shape == (5, 4)
    assert abs(mat2).sum() == 11
    assert mat2[2, 3] == 4 and mat2[3, 2] == 7
    mat2[2, 3] = 5
    assert mat2[2, 3] == 5
    # The view keeps the matrix alive until both references are gone.
    cstats = ConstructorStats.get(m.Matrix)
    assert cstats.alive() == 1
    del mat
    pytest.gc_collect()
    assert cstats.alive() == 1
    del mat2  # holds a mat reference
    pytest.gc_collect()
    assert cstats.alive() == 0
    assert cstats.values() == ["5x4 matrix"]
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0  # Don't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
def test_inherited_protocol():
    """SquareMatrix is derived from Matrix and inherits the buffer protocol"""
    matrix = m.SquareMatrix(5)
    assert memoryview(matrix).shape == (5, 5)
    assert np.asarray(matrix).shape == (5, 5)
def test_pointer_to_member_fn():
    """Buffer definitions via pointer-to-member-function expose the stored value."""
    for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
        buf = cls()
        buf.value = 0x12345678
        # Reading the raw bytes back must yield the same (native-endian) int.
        value = struct.unpack('i', bytearray(buf))[0]
        assert value == 0x12345678
def test_readonly_buffer():
    """A read-only buffer exposes its byte and reports memoryview.readonly."""
    buf = m.BufferReadOnly(0x64)
    view = memoryview(buf)
    # Parenthesize the conditional: the original
    #   assert view[0] == b'd' if env.PY2 else 0x64
    # parsed as `(view[0] == b'd') if env.PY2 else 0x64`, so on Python 3 it
    # asserted the truthy constant 0x64 and tested nothing.
    assert view[0] == (b'd' if env.PY2 else 0x64)
    assert view.readonly
def test_selective_readonly_buffer():
    """A buffer that can toggle read-only: writable until .readonly is set."""
    buf = m.BufferReadOnlySelect()
    # Writable initially: both direct memoryview writes and readinto work.
    memoryview(buf)[0] = b'd' if env.PY2 else 0x64
    assert buf.value == 0x64
    io.BytesIO(b'A').readinto(buf)
    assert buf.value == ord(b'A')
    # After flipping readonly, any write path raises TypeError.
    buf.readonly = True
    with pytest.raises(TypeError):
        memoryview(buf)[0] = b'\0' if env.PY2 else 0
    with pytest.raises(TypeError):
        io.BytesIO(b'1').readinto(buf)
def test_ctypes_array_1d():
    """buffer_info from 1-D ctypes arrays reports size/shape/strides correctly."""
    char1d = (ctypes.c_char * 10)()
    int1d = (ctypes.c_int * 15)()
    long1d = (ctypes.c_long * 7)()
    for carray in (char1d, int1d, long1d):
        info = m.get_buffer_info(carray)
        assert info.itemsize == ctypes.sizeof(carray._type_)
        assert info.size == len(carray)
        assert info.ndim == 1
        assert info.shape == [info.size]
        assert info.strides == [info.itemsize]
        assert not info.readonly
def test_ctypes_array_2d():
    """buffer_info from 2-D ctypes arrays: C-contiguous shape and strides."""
    char2d = ((ctypes.c_char * 10) * 4)()
    int2d = ((ctypes.c_int * 15) * 3)()
    long2d = ((ctypes.c_long * 7) * 2)()
    for carray in (char2d, int2d, long2d):
        info = m.get_buffer_info(carray)
        assert info.itemsize == ctypes.sizeof(carray[0]._type_)
        assert info.size == len(carray) * len(carray[0])
        assert info.ndim == 2
        assert info.shape == [len(carray), len(carray[0])]
        # Row-major: outer stride spans one inner row.
        assert info.strides == [info.itemsize * len(carray[0]), info.itemsize]
        assert not info.readonly
@pytest.mark.skipif(
    "env.PYPY and env.PY2", reason="PyPy2 bytes buffer not reported as readonly"
)
def test_ctypes_from_buffer():
    """ctypes arrays built over Python buffers mirror the source buffer's geometry."""
    test_pystr = b"0123456789"
    for pyarray in (test_pystr, bytearray(test_pystr)):
        pyinfo = m.get_buffer_info(pyarray)
        if pyinfo.readonly:
            # bytes is read-only, so a writable ctypes view requires a copy.
            cbytes = (ctypes.c_char * len(pyarray)).from_buffer_copy(pyarray)
            cinfo = m.get_buffer_info(cbytes)
        else:
            cbytes = (ctypes.c_char * len(pyarray)).from_buffer(pyarray)
            cinfo = m.get_buffer_info(cbytes)
        assert cinfo.size == pyinfo.size
        assert cinfo.ndim == pyinfo.ndim
        assert cinfo.shape == pyinfo.shape
        assert cinfo.strides == pyinfo.strides
        assert not cinfo.readonly
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_numpy_array.py | third_party/unitree_legged_sdk/pybind11/tests/test_numpy_array.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import numpy_array as m
np = pytest.importorskip("numpy")
def test_dtypes():
    """C++ dtype descriptors must agree with NumPy's, for platform and fixed sizes."""
    # See issue #1328.
    # - Platform-dependent sizes.
    for size_check in m.get_platform_dtype_size_checks():
        print(size_check)
        assert size_check.size_cpp == size_check.size_numpy, size_check
    # - Concrete sizes.
    for check in m.get_concrete_dtype_checks():
        print(check)
        assert check.numpy == check.pybind11, check
        if check.numpy.num != check.pybind11.num:
            # Informational only: equal dtypes can still carry different typenums.
            print("NOTE: typenum mismatch for {}: {} != {}".format(
                check, check.numpy.num, check.pybind11.num))
@pytest.fixture(scope='function')
def arr():
    # Fresh 2x3 native-endian uint16 array for each test that requests it.
    return np.array([[1, 2, 3], [4, 5, 6]], '=u2')
def test_array_attributes():
    """py::array attribute accessors for 0-D and 2-D arrays, including error cases."""
    # 0-D (scalar) array: empty shape/strides, any axis index is invalid.
    a = np.array(0, 'f8')
    assert m.ndim(a) == 0
    assert all(m.shape(a) == [])
    assert all(m.strides(a) == [])
    with pytest.raises(IndexError) as excinfo:
        m.shape(a, 0)
    assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
    with pytest.raises(IndexError) as excinfo:
        m.strides(a, 0)
    assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
    assert m.writeable(a)
    assert m.size(a) == 1
    assert m.itemsize(a) == 8
    assert m.nbytes(a) == 8
    assert m.owndata(a)
    # Read-only 2x3 uint16 view: per-axis shape/strides, not writeable, no owndata.
    a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()
    a.flags.writeable = False
    assert m.ndim(a) == 2
    assert all(m.shape(a) == [2, 3])
    assert m.shape(a, 0) == 2
    assert m.shape(a, 1) == 3
    assert all(m.strides(a) == [6, 2])
    assert m.strides(a, 0) == 6
    assert m.strides(a, 1) == 2
    with pytest.raises(IndexError) as excinfo:
        m.shape(a, 2)
    assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
    with pytest.raises(IndexError) as excinfo:
        m.strides(a, 2)
    assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
    assert not m.writeable(a)
    assert m.size(a) == 6
    assert m.itemsize(a) == 2
    assert m.nbytes(a) == 12
    assert not m.owndata(a)
@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])
def test_index_offset(arr, args, ret):
    # index_at returns a flat element index; offset_at scales it by itemsize.
    assert m.index_at(arr, *args) == ret
    assert m.index_at_t(arr, *args) == ret
    assert m.offset_at(arr, *args) == ret * arr.dtype.itemsize
    assert m.offset_at_t(arr, *args) == ret * arr.dtype.itemsize
def test_dim_check_fail(arr):
    # Passing 3 indices into a 2-D array must raise for every accessor.
    for func in (m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t,
                 m.mutate_data, m.mutate_data_t):
        with pytest.raises(IndexError) as excinfo:
            func(arr, 1, 2, 3)
        assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'
@pytest.mark.parametrize('args, ret',
                         [([], [1, 2, 3, 4, 5, 6]),
                          ([1], [4, 5, 6]),
                          ([0, 1], [2, 3, 4, 5, 6]),
                          ([1, 2], [6])])
def test_data(arr, args, ret):
    from sys import byteorder
    assert all(m.data_t(arr, *args) == ret)
    # The untyped data() view is raw bytes of uint16 values: on little-endian
    # the even bytes carry the values and the odd bytes are zero (and vice versa).
    assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret)
    assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0)
@pytest.mark.parametrize('dim', [0, 1, 3])
def test_at_fail(arr, dim):
    # at/mutate_at require exactly ndim indices; 0, 1 or 3 must all fail on 2-D.
    for func in m.at_t, m.mutate_at_t:
        with pytest.raises(IndexError) as excinfo:
            func(arr, *([0] * dim))
        assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)
def test_at(arr):
    # at_t reads a single element; mutate_at_t increments it in place.
    assert m.at_t(arr, 0, 2) == 3
    assert m.at_t(arr, 1, 0) == 4
    assert all(m.mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6])
    assert all(m.mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6])
def test_mutate_readonly(arr):
    # Mutating accessors must refuse a non-writeable array.
    arr.flags.writeable = False
    for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)):
        with pytest.raises(ValueError) as excinfo:
            func(arr, *args)
        assert str(excinfo.value) == 'array is not writeable'
def test_mutate_data(arr):
    # mutate_data doubles elements from the given flat position onward;
    # each call operates on the result of the previous one.
    assert all(m.mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12])
    assert all(m.mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24])
    assert all(m.mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48])
    assert all(m.mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96])
    assert all(m.mutate_data(arr, 1, 2).ravel() == [4, 16, 24, 64, 80, 192])
    # mutate_data_t increments elements from the given position onward.
    assert all(m.mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193])
    assert all(m.mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194])
    assert all(m.mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195])
    assert all(m.mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196])
    assert all(m.mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197])
def test_bounds_check(arr):
    # Out-of-range indices on either axis raise IndexError with axis/size info.
    for func in (m.index_at, m.index_at_t, m.data, m.data_t,
                 m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t):
        with pytest.raises(IndexError) as excinfo:
            func(arr, 2, 0)
        assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'
        with pytest.raises(IndexError) as excinfo:
            func(arr, 0, 4)
        assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'
def test_make_c_f_array():
    # Arrays constructed on the C++ side carry the requested memory order.
    assert m.make_c_array().flags.c_contiguous
    assert not m.make_c_array().flags.f_contiguous
    assert m.make_f_array().flags.f_contiguous
    assert not m.make_f_array().flags.c_contiguous
def test_make_empty_shaped_array():
    m.make_empty_shaped_array()
    # empty shape means numpy scalar, PEP 3118
    assert m.scalar_int().ndim == 0
    assert m.scalar_int().shape == ()
    assert m.scalar_int() == 42
def test_wrap():
    """m.wrap must return a non-owning view sharing data, layout and flags."""
    def assert_references(a, b, base=None):
        # b must alias a's memory with identical geometry and flags; its .base
        # must be `base` (defaults to a itself), and writes through a appear in b.
        from distutils.version import LooseVersion
        if base is None:
            base = a
        assert a is not b
        assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]
        assert a.shape == b.shape
        assert a.strides == b.strides
        assert a.flags.c_contiguous == b.flags.c_contiguous
        assert a.flags.f_contiguous == b.flags.f_contiguous
        assert a.flags.writeable == b.flags.writeable
        assert a.flags.aligned == b.flags.aligned
        # Flag renamed in NumPy 1.14 (updateifcopy -> writebackifcopy).
        if LooseVersion(np.__version__) >= LooseVersion("1.14.0"):
            assert a.flags.writebackifcopy == b.flags.writebackifcopy
        else:
            assert a.flags.updateifcopy == b.flags.updateifcopy
        assert np.all(a == b)
        assert not b.flags.owndata
        assert b.base is base
        if a.flags.writeable and a.ndim == 2:
            a[0, 0] = 1234
            assert b[0, 0] == 1234
    a1 = np.array([1, 2], dtype=np.int16)
    assert a1.flags.owndata and a1.base is None
    a2 = m.wrap(a1)
    assert_references(a1, a2)
    a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')
    assert a1.flags.owndata and a1.base is None
    a2 = m.wrap(a1)
    assert_references(a1, a2)
    a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')
    a1.flags.writeable = False
    a2 = m.wrap(a1)
    assert_references(a1, a2)
    a1 = np.random.random((4, 4, 4))
    a2 = m.wrap(a1)
    assert_references(a1, a2)
    # Views of views: the wrapped array's base is the original owner a1.
    a1t = a1.transpose()
    a2 = m.wrap(a1t)
    assert_references(a1t, a2, a1)
    a1d = a1.diagonal()
    a2 = m.wrap(a1d)
    assert_references(a1d, a2, a1)
    a1m = a1[::-1, ::-1, ::-1]
    a2 = m.wrap(a1m)
    assert_references(a1m, a2, a1)
def test_numpy_view(capture):
    """numpy_view() shares storage across views and keeps the owner alive."""
    with capture:
        ac = m.ArrayClass()
        ac_view_1 = ac.numpy_view()
        ac_view_2 = ac.numpy_view()
        assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))
        # Deleting the owner must not destroy it while views are alive.
        del ac
        pytest.gc_collect()
    assert capture == """
        ArrayClass()
        ArrayClass::numpy_view()
        ArrayClass::numpy_view()
    """
    # Both views alias the same buffer.
    ac_view_1[0] = 4
    ac_view_1[1] = 3
    assert ac_view_2[0] == 4
    assert ac_view_2[1] == 3
    with capture:
        # Destruction happens once the last view is released.
        del ac_view_1
        del ac_view_2
        pytest.gc_collect()
        pytest.gc_collect()
    assert capture == """
        ~ArrayClass()
    """
def test_cast_numpy_int64_to_uint64():
    # Both a plain int and an np.uint64 must be accepted; no return value checked.
    m.function_taking_uint64(123)
    m.function_taking_uint64(np.uint64(123))
def test_isinstance():
    # isinstance checks against py::array / py::array_t on the C++ side.
    assert m.isinstance_untyped(np.array([1, 2, 3]), "not an array")
    assert m.isinstance_typed(np.array([1.0, 2.0, 3.0]))
def test_constructors():
    """Default and converting constructors of py::array / py::array_t."""
    # Default-constructed arrays are empty, with the expected dtypes.
    defaults = m.default_constructors()
    for a in defaults.values():
        assert a.size == 0
    assert defaults["array"].dtype == np.array([]).dtype
    assert defaults["array_t<int32>"].dtype == np.int32
    assert defaults["array_t<double>"].dtype == np.float64
    # Converting constructors preserve the values while fixing the dtype.
    results = m.converting_constructors([1, 2, 3])
    for a in results.values():
        np.testing.assert_array_equal(a, [1, 2, 3])
    assert results["array"].dtype == np.int_
    assert results["array_t<int32>"].dtype == np.int32
    assert results["array_t<double>"].dtype == np.float64
def test_overload_resolution(msg):
    """Overload dispatch across array_t dtypes, with and without conversion."""
    # Exact overload matches:
    assert m.overloaded(np.array([1], dtype='float64')) == 'double'
    assert m.overloaded(np.array([1], dtype='float32')) == 'float'
    assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short'
    assert m.overloaded(np.array([1], dtype='intc')) == 'int'
    assert m.overloaded(np.array([1], dtype='longlong')) == 'long long'
    assert m.overloaded(np.array([1], dtype='complex')) == 'double complex'
    assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex'
    # No exact match, should call first convertible version:
    assert m.overloaded(np.array([1], dtype='uint8')) == 'double'
    with pytest.raises(TypeError) as excinfo:
        m.overloaded("not an array")
    assert msg(excinfo.value) == """
        overloaded(): incompatible function arguments. The following argument types are supported:
            1. (arg0: numpy.ndarray[numpy.float64]) -> str
            2. (arg0: numpy.ndarray[numpy.float32]) -> str
            3. (arg0: numpy.ndarray[numpy.int32]) -> str
            4. (arg0: numpy.ndarray[numpy.uint16]) -> str
            5. (arg0: numpy.ndarray[numpy.int64]) -> str
            6. (arg0: numpy.ndarray[numpy.complex128]) -> str
            7. (arg0: numpy.ndarray[numpy.complex64]) -> str
        Invoked with: 'not an array'
    """
    assert m.overloaded2(np.array([1], dtype='float64')) == 'double'
    assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
    assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex'
    assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex'
    assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
    # overloaded3 forbids conversion, so only exact dtype matches succeed:
    assert m.overloaded3(np.array([1], dtype='float64')) == 'double'
    assert m.overloaded3(np.array([1], dtype='intc')) == 'int'
    expected_exc = """
        overloaded3(): incompatible function arguments. The following argument types are supported:
            1. (arg0: numpy.ndarray[numpy.int32]) -> str
            2. (arg0: numpy.ndarray[numpy.float64]) -> str
        Invoked with: """
    with pytest.raises(TypeError) as excinfo:
        m.overloaded3(np.array([1], dtype='uintc'))
    assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype='uint32'))
    with pytest.raises(TypeError) as excinfo:
        m.overloaded3(np.array([1], dtype='float32'))
    assert msg(excinfo.value) == expected_exc + repr(np.array([1.], dtype='float32'))
    with pytest.raises(TypeError) as excinfo:
        m.overloaded3(np.array([1], dtype='complex'))
    assert msg(excinfo.value) == expected_exc + repr(np.array([1. + 0.j]))
    # Exact matches:
    assert m.overloaded4(np.array([1], dtype='double')) == 'double'
    assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long'
    # Non-exact matches requiring conversion. Since float to integer isn't a
    # safe conversion, it should go to the double overload, but short can go to
    # either (and so should end up on the first-registered, the long long).
    assert m.overloaded4(np.array([1], dtype='float32')) == 'double'
    assert m.overloaded4(np.array([1], dtype='short')) == 'long long'
    assert m.overloaded5(np.array([1], dtype='double')) == 'double'
    assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int'
    assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int'
def test_greedy_string_overload():
    """Tests fix for #685 - ndarray shouldn't go to std::string overload"""
    cases = [
        ("abc", "string"),
        (np.array([97, 98, 99], dtype='b'), "array"),
        (123, "other"),
    ]
    for value, expected in cases:
        assert m.issue685(value) == expected
def test_array_unchecked_fixed_dims(msg):
    """Exercise the fixed-dimension unchecked proxy accessors exposed by the
    C++ test module (mutation, init, reductions, auxiliaries)."""
    z1 = np.array([[1, 2], [3, 4]], dtype='float64')
    m.proxy_add2(z1, 10)  # mutates z1 in place through the proxy
    assert np.all(z1 == [[11, 12], [13, 14]])
    # Wrong rank must be rejected with a descriptive error.
    with pytest.raises(ValueError) as excinfo:
        m.proxy_add2(np.array([1., 2, 3]), 5.0)
    assert msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2"
    expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
    assert np.all(m.proxy_init3(3.0) == expect_c)
    expect_f = np.transpose(expect_c)  # F variant fills in column-major order
    assert np.all(m.proxy_init3F(3.0) == expect_f)
    assert m.proxy_squared_L2_norm(np.array(range(6))) == 55
    assert m.proxy_squared_L2_norm(np.array(range(6), dtype="float64")) == 55
    assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
    assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1)
    assert m.proxy_auxiliaries1_const_ref(z1[0, :])
    assert m.proxy_auxiliaries2_const_ref(z1)
def test_array_unchecked_dyn_dims(msg):
    """Same checks as the fixed-dims test, but via the dynamic-rank proxies."""
    mat = np.array([[1, 2], [3, 4]], dtype='float64')
    m.proxy_add2_dyn(mat, 10)
    assert np.all(mat == [[11, 12], [13, 14]])
    expected = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
    assert np.all(m.proxy_init3_dyn(3.0) == expected)
    assert m.proxy_auxiliaries2_dyn(mat) == [11, 11, True, 2, 8, 2, 2, 4, 32]
    assert m.proxy_auxiliaries2_dyn(mat) == m.array_auxiliaries2(mat)
def test_array_failure():
    """Invalid array construction on the C++ side raises ValueError."""
    failing_calls = [
        (m.array_fail_test, 'cannot create a pybind11::array from a nullptr'),
        (m.array_t_fail_test, 'cannot create a pybind11::array_t from a nullptr'),
        (m.array_fail_test_negative_size, 'negative dimensions are not allowed'),
    ]
    for func, expected_message in failing_calls:
        with pytest.raises(ValueError) as excinfo:
            func()
        assert str(excinfo.value) == expected_message
def test_initializer_list():
    """Arrays built from C++ initializer lists get the expected shapes."""
    expected_shapes = {
        m.array_initializer_list1: (1,),
        m.array_initializer_list2: (1, 2),
        m.array_initializer_list3: (1, 2, 3),
        m.array_initializer_list4: (1, 2, 3, 4),
    }
    for factory, shape in expected_shapes.items():
        assert factory().shape == shape
def test_array_resize(msg):
    """In-place reshape/resize through the C++ bindings.

    Bug fix: the original used ``try/except ValueError`` with the assertion
    inside the ``except`` block, so if the expected exception was *not*
    raised the check was silently skipped.  ``pytest.raises`` fails the test
    when the ValueError does not occur.
    """
    a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64')
    m.array_reshape2(a)
    assert a.size == 9
    assert np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # total size change should succeed with refcheck off
    m.array_resize3(a, 4, False)
    assert a.size == 64
    # ... and fail with refcheck on
    with pytest.raises(ValueError, match="cannot resize an array"):
        m.array_resize3(a, 3, True)
    # transposed array doesn't own data
    b = a.transpose()
    with pytest.raises(ValueError,
                       match="cannot resize this array: it does not own its data"):
        m.array_resize3(b, 3, False)
    # ... but reshape should be fine
    m.array_reshape2(b)
    assert b.shape == (8, 8)
@pytest.mark.xfail("env.PYPY")
def test_array_create_and_resize(msg):
a = m.create_and_resize(2)
assert(a.size == 4)
assert(np.all(a == 42.))
def test_index_using_ellipsis():
    """Ellipsis indexing on the C++ side collapses a 3-D array to its middle axis."""
    result = m.index_using_ellipsis(np.zeros((5, 6, 7)))
    assert result.shape == (6,)
@pytest.mark.parametrize("forcecast", [False, True])
@pytest.mark.parametrize("contiguity", [None, 'C', 'F'])
@pytest.mark.parametrize("noconvert", [False, True])
@pytest.mark.filterwarnings(
"ignore:Casting complex values to real discards the imaginary part:numpy.ComplexWarning"
)
def test_argument_conversions(forcecast, contiguity, noconvert):
function_name = "accept_double"
if contiguity == 'C':
function_name += "_c_style"
elif contiguity == 'F':
function_name += "_f_style"
if forcecast:
function_name += "_forcecast"
if noconvert:
function_name += "_noconvert"
function = getattr(m, function_name)
for dtype in [np.dtype('float32'), np.dtype('float64'), np.dtype('complex128')]:
for order in ['C', 'F']:
for shape in [(2, 2), (1, 3, 1, 1), (1, 1, 1), (0,)]:
if not noconvert:
# If noconvert is not passed, only complex128 needs to be truncated and
# "cannot be safely obtained". So without `forcecast`, the argument shouldn't
# be accepted.
should_raise = dtype.name == 'complex128' and not forcecast
else:
# If noconvert is passed, only float64 and the matching order is accepted.
# If at most one dimension has a size greater than 1, the array is also
# trivially contiguous.
trivially_contiguous = sum(1 for d in shape if d > 1) <= 1
should_raise = (
dtype.name != 'float64' or
(contiguity is not None and
contiguity != order and
not trivially_contiguous)
)
array = np.zeros(shape, dtype=dtype, order=order)
if not should_raise:
function(array)
else:
with pytest.raises(TypeError, match="incompatible function arguments"):
function(array)
@pytest.mark.xfail("env.PYPY")
def test_dtype_refcount_leak():
from sys import getrefcount
dtype = np.dtype(np.float_)
a = np.array([1], dtype=dtype)
before = getrefcount(dtype)
m.ndim(a)
after = getrefcount(dtype)
assert after == before
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_operator_overloading.py | third_party/unitree_legged_sdk/pybind11/tests/test_operator_overloading.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import operators as m
from pybind11_tests import ConstructorStats
def test_operator_overloading():
    """Exercise Vector2's arithmetic/in-place/comparison operators and verify
    the exact construction counts recorded by ConstructorStats.

    NOTE: the assertion sequence matters — cstats.values() below records every
    Vector2 constructed, in order, so do not add/remove/reorder operations.
    """
    v1 = m.Vector2(1, 2)
    v2 = m.Vector(3, -1)
    v3 = m.Vector2(1, 2)  # Same value as v1, but different instance.
    assert v1 is not v3
    assert str(v1) == "[1.000000, 2.000000]"
    assert str(v2) == "[3.000000, -1.000000]"
    assert str(-v2) == "[-3.000000, 1.000000]"
    assert str(v1 + v2) == "[4.000000, 1.000000]"
    assert str(v1 - v2) == "[-2.000000, 3.000000]"
    assert str(v1 - 8) == "[-7.000000, -6.000000]"
    assert str(v1 + 8) == "[9.000000, 10.000000]"
    assert str(v1 * 8) == "[8.000000, 16.000000]"
    assert str(v1 / 8) == "[0.125000, 0.250000]"
    assert str(8 - v1) == "[7.000000, 6.000000]"
    assert str(8 + v1) == "[9.000000, 10.000000]"
    assert str(8 * v1) == "[8.000000, 16.000000]"
    assert str(8 / v1) == "[8.000000, 4.000000]"
    assert str(v1 * v2) == "[3.000000, -2.000000]"
    assert str(v2 / v1) == "[3.000000, -0.500000]"
    assert v1 == v3
    assert v1 != v2
    assert hash(v1) == 4
    # TODO(eric.cousineau): Make this work.
    # assert abs(v1) == "abs(Vector2)"
    # In-place operators mutate v1/v2 directly.
    v1 += 2 * v2
    assert str(v1) == "[7.000000, 0.000000]"
    v1 -= v2
    assert str(v1) == "[4.000000, 1.000000]"
    v1 *= 2
    assert str(v1) == "[8.000000, 2.000000]"
    v1 /= 16
    assert str(v1) == "[0.500000, 0.125000]"
    v1 *= v2
    assert str(v1) == "[1.500000, -0.125000]"
    v2 /= v1
    assert str(v2) == "[2.000000, 8.000000]"
    # Lifetime bookkeeping: deleting each instance drops the live count.
    cstats = ConstructorStats.get(m.Vector2)
    assert cstats.alive() == 3
    del v1
    assert cstats.alive() == 2
    del v2
    assert cstats.alive() == 1
    del v3
    assert cstats.alive() == 0
    assert cstats.values() == [
        '[1.000000, 2.000000]',
        '[3.000000, -1.000000]',
        '[1.000000, 2.000000]',
        '[-3.000000, 1.000000]',
        '[4.000000, 1.000000]',
        '[-2.000000, 3.000000]',
        '[-7.000000, -6.000000]',
        '[9.000000, 10.000000]',
        '[8.000000, 16.000000]',
        '[0.125000, 0.250000]',
        '[7.000000, 6.000000]',
        '[9.000000, 10.000000]',
        '[8.000000, 16.000000]',
        '[8.000000, 4.000000]',
        '[3.000000, -2.000000]',
        '[3.000000, -0.500000]',
        '[6.000000, -2.000000]',
    ]
    assert cstats.default_constructions == 0
    assert cstats.copy_constructions == 0
    assert cstats.move_constructions >= 10
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
def test_operators_notimplemented():
    """#393: need to return NotSupported to ensure correct arithmetic operator behavior"""
    c1, c2 = m.C1(), m.C2()
    for lhs, rhs, expected in [(c1, c1, 11), (c2, c2, 22), (c2, c1, 21), (c1, c2, 12)]:
        assert lhs + rhs == expected
def test_nested():
    """#328: first member in a class can't be used in operators"""
    a = m.NestA()
    b = m.NestB()
    c = m.NestC()
    # In-place operators on nested members must hit the right object.
    a += 10
    assert m.get_NestA(a) == 13
    b.a += 100
    assert m.get_NestA(b.a) == 103
    c.b.a += 1000
    assert m.get_NestA(c.b.a) == 1003
    b -= 1
    assert m.get_NestB(b) == 3
    c.b -= 3
    assert m.get_NestB(c.b) == 1
    c *= 7
    assert m.get_NestC(c) == 35
    # as_base() returns a view on the base subobject; mutations are shared.
    abase = a.as_base()
    assert abase.value == -2
    a.as_base().value += 44
    assert abase.value == 42
    assert c.b.a.as_base().value == -2
    c.b.a.as_base().value += 44
    assert c.b.a.as_base().value == 42
    # Lifetime: the base view must keep its owner alive across gc passes.
    del c
    pytest.gc_collect()
    del a  # Shouldn't delete while abase is still alive
    pytest.gc_collect()
    assert abase.value == 42
    del abase, b
    pytest.gc_collect()
def test_overriding_eq_reset_hash():
    """Binding __eq__ without __hash__ makes the type unhashable; the Hashable
    variants restore hashing based on the wrapped value."""
    assert m.Comparable(15) is not m.Comparable(15)
    assert m.Comparable(15) == m.Comparable(15)
    with pytest.raises(TypeError):
        hash(m.Comparable(15))  # TypeError: unhashable type: 'm.Comparable'
    for cls in (m.Hashable, m.Hashable2):
        assert cls(15) is not cls(15)
        assert cls(15) == cls(15)
        assert hash(cls(15)) == 15
        assert hash(cls(15)) == hash(cls(15))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_methods_and_attributes.py | third_party/unitree_legged_sdk/pybind11/tests/test_methods_and_attributes.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import methods_and_attributes as m
from pybind11_tests import ConstructorStats
def test_methods_and_attributes():
    """Call every binding style of ExampleMandA's add*/self*/internal*/overloaded
    methods and verify the ConstructorStats bookkeeping afterwards."""
    instance1 = m.ExampleMandA()
    instance2 = m.ExampleMandA(32)
    # Ten add variants (by value/ref/pointer, free function, lambda, ...):
    instance1.add1(instance2)
    instance1.add2(instance2)
    instance1.add3(instance2)
    instance1.add4(instance2)
    instance1.add5(instance2)
    instance1.add6(32)
    instance1.add7(32)
    instance1.add8(32)
    instance1.add9(32)
    instance1.add10(32)
    assert str(instance1) == "ExampleMandA[value=320]"
    assert str(instance2) == "ExampleMandA[value=32]"
    assert str(instance1.self1()) == "ExampleMandA[value=320]"
    assert str(instance1.self2()) == "ExampleMandA[value=320]"
    assert str(instance1.self3()) == "ExampleMandA[value=320]"
    assert str(instance1.self4()) == "ExampleMandA[value=320]"
    assert str(instance1.self5()) == "ExampleMandA[value=320]"
    assert instance1.internal1() == 320
    assert instance1.internal2() == 320
    assert instance1.internal3() == 320
    assert instance1.internal4() == 320
    assert instance1.internal5() == 320
    # Overload resolution picks the first compatible signature.
    assert instance1.overloaded() == "()"
    assert instance1.overloaded(0) == "(int)"
    assert instance1.overloaded(1, 1.0) == "(int, float)"
    assert instance1.overloaded(2.0, 2) == "(float, int)"
    assert instance1.overloaded(3, 3) == "(int, int)"
    assert instance1.overloaded(4., 4.) == "(float, float)"
    assert instance1.overloaded_const(-3) == "(int) const"
    assert instance1.overloaded_const(5, 5.0) == "(int, float) const"
    assert instance1.overloaded_const(6.0, 6) == "(float, int) const"
    assert instance1.overloaded_const(7, 7) == "(int, int) const"
    assert instance1.overloaded_const(8., 8.) == "(float, float) const"
    assert instance1.overloaded_float(1, 1) == "(float, float)"
    assert instance1.overloaded_float(1, 1.) == "(float, float)"
    assert instance1.overloaded_float(1., 1) == "(float, float)"
    assert instance1.overloaded_float(1., 1.) == "(float, float)"
    assert instance1.value == 320
    instance1.value = 100
    assert str(instance1) == "ExampleMandA[value=100]"
    cstats = ConstructorStats.get(m.ExampleMandA)
    assert cstats.alive() == 2
    del instance1, instance2
    assert cstats.alive() == 0
    assert cstats.values() == ["32"]
    assert cstats.default_constructions == 1
    assert cstats.copy_constructions == 2
    assert cstats.move_constructions >= 2
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
def test_copy_method():
    """Issue #443: calling copied methods fails in Python 3"""
    m.ExampleMandA.add2c = m.ExampleMandA.add2
    m.ExampleMandA.add2d = m.ExampleMandA.add2b
    instance = m.ExampleMandA(123)
    assert instance.value == 123
    # Original and copied methods behave identically; check the running total.
    steps = [
        ("add2", -100, 23),
        ("add2b", 20, 43),
        ("add2c", 6, 49),
        ("add2d", -7, 42),
    ]
    for method_name, delta, expected in steps:
        getattr(instance, method_name)(m.ExampleMandA(delta))
        assert instance.value == expected
def test_properties():
    """Read/write behaviour of def_readonly / def_readwrite / def_property*
    bindings accessed through an instance."""
    instance = m.TestProperties()
    assert instance.def_readonly == 1
    with pytest.raises(AttributeError):
        instance.def_readonly = 2
    instance.def_readwrite = 2
    assert instance.def_readwrite == 2
    assert instance.def_property_readonly == 2
    with pytest.raises(AttributeError):
        instance.def_property_readonly = 3
    instance.def_property = 3
    assert instance.def_property == 3
    # Write-only property: reading raises, writing is observable via the
    # read-only property backed by the same field.
    with pytest.raises(AttributeError) as excinfo:
        dummy = instance.def_property_writeonly  # noqa: F841 unused var
    assert "unreadable attribute" in str(excinfo.value)
    instance.def_property_writeonly = 4
    assert instance.def_property_readonly == 4
    # Property with neither getter nor setter: both directions raise.
    with pytest.raises(AttributeError) as excinfo:
        dummy = instance.def_property_impossible  # noqa: F841 unused var
    assert "unreadable attribute" in str(excinfo.value)
    with pytest.raises(AttributeError) as excinfo:
        instance.def_property_impossible = 5
    assert "can't set attribute" in str(excinfo.value)
def test_static_properties():
    """Static property bindings: access/mutation via the class and via an
    instance, plus overriding in derived classes.  Mutations persist on the
    class, so assertion order matters."""
    assert m.TestProperties.def_readonly_static == 1
    with pytest.raises(AttributeError) as excinfo:
        m.TestProperties.def_readonly_static = 2
    assert "can't set attribute" in str(excinfo.value)
    m.TestProperties.def_readwrite_static = 2
    assert m.TestProperties.def_readwrite_static == 2
    with pytest.raises(AttributeError) as excinfo:
        dummy = m.TestProperties.def_writeonly_static  # noqa: F841 unused var
    assert "unreadable attribute" in str(excinfo.value)
    m.TestProperties.def_writeonly_static = 3
    assert m.TestProperties.def_readonly_static == 3
    assert m.TestProperties.def_property_readonly_static == 3
    with pytest.raises(AttributeError) as excinfo:
        m.TestProperties.def_property_readonly_static = 99
    assert "can't set attribute" in str(excinfo.value)
    m.TestProperties.def_property_static = 4
    assert m.TestProperties.def_property_static == 4
    with pytest.raises(AttributeError) as excinfo:
        dummy = m.TestProperties.def_property_writeonly_static
    assert "unreadable attribute" in str(excinfo.value)
    m.TestProperties.def_property_writeonly_static = 5
    assert m.TestProperties.def_property_static == 5
    # Static property read and write via instance
    instance = m.TestProperties()
    m.TestProperties.def_readwrite_static = 0
    assert m.TestProperties.def_readwrite_static == 0
    assert instance.def_readwrite_static == 0
    instance.def_readwrite_static = 2
    assert m.TestProperties.def_readwrite_static == 2
    assert instance.def_readwrite_static == 2
    with pytest.raises(AttributeError) as excinfo:
        dummy = instance.def_property_writeonly_static  # noqa: F841 unused var
    assert "unreadable attribute" in str(excinfo.value)
    instance.def_property_writeonly_static = 4
    assert instance.def_property_static == 4
    # It should be possible to override properties in derived classes
    assert m.TestPropertiesOverride().def_readonly == 99
    assert m.TestPropertiesOverride.def_readonly_static == 99
def test_static_cls():
    """Static property getters and setters expect the type object as their only argument."""
    instance = m.TestProperties()
    assert m.TestProperties.static_cls is m.TestProperties
    assert instance.static_cls is m.TestProperties
    def ensure_cls_arg(cls):
        # Called by the static-property setter; must receive the class itself.
        assert cls is m.TestProperties
    m.TestProperties.static_cls = ensure_cls_arg
    instance.static_cls = ensure_cls_arg
def test_metaclass_override():
    """Overriding pybind11's default metaclass changes the behavior of `static_property`"""
    # Default metaclass exposes static properties as pybind11_static_property.
    assert type(m.ExampleMandA).__name__ == "pybind11_type"
    assert type(m.MetaclassOverride).__name__ == "type"
    assert m.MetaclassOverride.readonly == 1
    assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property"
    # Regular `type` replaces the property instead of calling `__set__()`
    m.MetaclassOverride.readonly = 2
    assert m.MetaclassOverride.readonly == 2
    assert isinstance(m.MetaclassOverride.__dict__["readonly"], int)
def test_no_mixed_overloads():
    """Mixing static and instance methods in one overload set is rejected at
    bind time; the error message is more detailed in debug builds."""
    from pybind11_tests import debug_enabled
    with pytest.raises(RuntimeError) as excinfo:
        m.ExampleMandA.add_mixed_overloads1()
    assert (str(excinfo.value) ==
            "overloading a method with both static and instance methods is not supported; " +
            ("compile in debug mode for more details" if not debug_enabled else
             "error while attempting to bind static method ExampleMandA.overload_mixed1"
             "(arg0: float) -> str")
            )
    with pytest.raises(RuntimeError) as excinfo:
        m.ExampleMandA.add_mixed_overloads2()
    assert (str(excinfo.value) ==
            "overloading a method with both static and instance methods is not supported; " +
            ("compile in debug mode for more details" if not debug_enabled else
             "error while attempting to bind instance method ExampleMandA.overload_mixed2"
             "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
             " -> str")
            )
@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
def test_property_return_value_policies(access):
if not access.startswith("static"):
obj = m.TestPropRVP()
else:
obj = m.TestPropRVP
ref = getattr(obj, access + "_ref")
assert ref.value == 1
ref.value = 2
assert getattr(obj, access + "_ref").value == 2
ref.value = 1 # restore original value for static properties
copy = getattr(obj, access + "_copy")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_copy").value == 1
copy = getattr(obj, access + "_func")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_func").value == 1
def test_property_rvalue_policy():
    """When returning an rvalue, the return value policy is automatically changed from
    `reference(_internal)` to `move`. The following would not work otherwise."""
    instance = m.TestPropRVP()
    member_result = instance.rvalue
    assert member_result.value == 1
    static_result = m.TestPropRVP.static_rvalue
    assert static_result.value == 1
# https://foss.heptapod.net/pypy/pypy/-/issues/2447
@pytest.mark.xfail("env.PYPY")
def test_dynamic_attributes():
instance = m.DynamicClass()
assert not hasattr(instance, "foo")
assert "foo" not in dir(instance)
# Dynamically add attribute
instance.foo = 42
assert hasattr(instance, "foo")
assert instance.foo == 42
assert "foo" in dir(instance)
# __dict__ should be accessible and replaceable
assert "foo" in instance.__dict__
instance.__dict__ = {"bar": True}
assert not hasattr(instance, "foo")
assert hasattr(instance, "bar")
with pytest.raises(TypeError) as excinfo:
instance.__dict__ = []
assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'"
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# Derived classes should work as well
class PythonDerivedDynamicClass(m.DynamicClass):
pass
for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass:
derived = cls()
derived.foobar = 100
assert derived.foobar == 100
assert cstats.alive() == 1
del derived
assert cstats.alive() == 0
# https://foss.heptapod.net/pypy/pypy/-/issues/2447
@pytest.mark.xfail("env.PYPY")
def test_cyclic_gc():
# One object references itself
instance = m.DynamicClass()
instance.circular_reference = instance
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# Two object reference each other
i1 = m.DynamicClass()
i2 = m.DynamicClass()
i1.cycle = i2
i2.cycle = i1
assert cstats.alive() == 2
del i1, i2
assert cstats.alive() == 0
def test_bad_arg_default(msg):
    """A default argument of an unregistered type fails at bind time; debug
    builds include the argument name/type in the message."""
    from pybind11_tests import debug_enabled
    with pytest.raises(RuntimeError) as excinfo:
        m.bad_arg_def_named()
    assert msg(excinfo.value) == (
        "arg(): could not convert default argument 'a: UnregisteredType' in function "
        "'should_fail' into a Python object (type not registered yet?)"
        if debug_enabled else
        "arg(): could not convert default argument into a Python object (type not registered "
        "yet?). Compile in debug mode for more information."
    )
    with pytest.raises(RuntimeError) as excinfo:
        m.bad_arg_def_unnamed()
    assert msg(excinfo.value) == (
        "arg(): could not convert default argument 'UnregisteredType' in function "
        "'should_fail' into a Python object (type not registered yet?)"
        if debug_enabled else
        "arg(): could not convert default argument into a Python object (type not registered "
        "yet?). Compile in debug mode for more information."
    )
def test_accepts_none(msg):
    """no_none* bindings reject None; ok_none* accept it (returning -1),
    except ok_none1 which takes an lvalue reference and still rejects None."""
    a = m.NoneTester()
    assert m.no_none1(a) == 42
    assert m.no_none2(a) == 42
    assert m.no_none3(a) == 42
    assert m.no_none4(a) == 42
    assert m.no_none5(a) == 42
    assert m.ok_none1(a) == 42
    assert m.ok_none2(a) == 42
    assert m.ok_none3(a) == 42
    assert m.ok_none4(a) == 42
    assert m.ok_none5(a) == 42
    with pytest.raises(TypeError) as excinfo:
        m.no_none1(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.no_none2(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.no_none3(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.no_none4(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.no_none5(None)
    assert "incompatible function arguments" in str(excinfo.value)
    # The first one still raises because you can't pass None as a lvalue reference arg:
    with pytest.raises(TypeError) as excinfo:
        assert m.ok_none1(None) == -1
    assert msg(excinfo.value) == """
        ok_none1(): incompatible function arguments. The following argument types are supported:
            1. (arg0: m.methods_and_attributes.NoneTester) -> int

        Invoked with: None
    """
    # The rest take the argument as pointer or holder, and accept None:
    assert m.ok_none2(None) == -1
    assert m.ok_none3(None) == -1
    assert m.ok_none4(None) == -1
    assert m.ok_none5(None) == -1
def test_str_issue(msg):
    """#283: __str__ called on uninitialized instance when constructor arguments invalid"""
    assert str(m.StrIssue(3)) == "StrIssue[3]"
    # Bad constructor args must raise TypeError, not crash in __str__.
    with pytest.raises(TypeError) as excinfo:
        str(m.StrIssue("no", "such", "constructor"))
    assert msg(excinfo.value) == """
        __init__(): incompatible constructor arguments. The following argument types are supported:
            1. m.methods_and_attributes.StrIssue(arg0: int)
            2. m.methods_and_attributes.StrIssue()

        Invoked with: 'no', 'such', 'constructor'
    """
def test_unregistered_base_implementations():
    """Methods/fields/properties inherited from an unregistered C++ base class
    still work on the registered derived class."""
    a = m.RegisteredDerived()
    a.do_nothing()
    assert a.rw_value == 42
    assert a.ro_value == 1.25
    a.rw_value += 5
    assert a.sum() == 48.25
    a.increase_value()
    assert a.rw_value == 48
    assert a.ro_value == 1.5
    assert a.sum() == 49.5
    assert a.rw_value_prop == 48
    a.rw_value_prop += 1
    assert a.rw_value_prop == 49
    a.increase_value()
    assert a.ro_value_prop == 1.75
def test_ref_qualified():
    """Tests that explicit lvalue ref-qualified methods can be called just like their
    non ref-qualified counterparts."""
    obj = m.RefQualified()
    assert obj.value == 0
    obj.refQualified(17)
    assert obj.value == 17
    assert obj.constRefQualified(23) == 40
def test_overload_ordering():
    """Check that the normal overload order (first defined) and the prepend
    overload order both work, in dispatch and in the generated docstring."""
    assert m.overload_order("string") == 1
    assert m.overload_order(0) == 4
    # Different for Python 2 vs. 3
    uni_name = type(u"").__name__
    assert "1. overload_order(arg0: int) -> int" in m.overload_order.__doc__
    assert "2. overload_order(arg0: {}) -> int".format(uni_name) in m.overload_order.__doc__
    assert "3. overload_order(arg0: {}) -> int".format(uni_name) in m.overload_order.__doc__
    assert "4. overload_order(arg0: int) -> int" in m.overload_order.__doc__
    with pytest.raises(TypeError) as err:
        m.overload_order(1.1)
    # The failure message lists the overloads in the same order.
    assert "1. (arg0: int) -> int" in str(err.value)
    assert "2. (arg0: {}) -> int".format(uni_name) in str(err.value)
    assert "3. (arg0: {}) -> int".format(uni_name) in str(err.value)
    assert "4. (arg0: int) -> int" in str(err.value)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_eigen.py | third_party/unitree_legged_sdk/pybind11/tests/test_eigen.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import ConstructorStats
np = pytest.importorskip("numpy")
m = pytest.importorskip("pybind11_tests.eigen")
# Reference 5x6 matrix shared by the round-trip tests below; contains zeros,
# duplicated values and a constant column to exercise both sparse and dense paths.
ref = np.array([[ 0., 3, 0, 0, 0, 11],
               [22, 0, 0, 0, 17, 11],
               [ 7, 5, 0, 1, 0, 11],
               [ 0, 0, 0, 0, 0, 11],
               [ 0, 0, 14, 0, 8, 11]])
def assert_equal_ref(mat):
    """Assert that *mat* is element-wise equal to the module-level ``ref`` matrix."""
    np.testing.assert_array_equal(mat, ref)
def assert_sparse_equal_ref(sparse_mat):
    """Densify *sparse_mat* and compare it against the ``ref`` matrix."""
    dense = sparse_mat.toarray()
    assert_equal_ref(dense)
def test_fixed():
    """Fixed-size matrices round-trip through every row/column-major copy combination."""
    assert_equal_ref(m.fixed_c())
    assert_equal_ref(m.fixed_r())
    for copier in (m.fixed_copy_r, m.fixed_copy_c):
        for source in (m.fixed_r, m.fixed_c):
            assert_equal_ref(copier(source()))
def test_dense():
    """Dense dynamic matrices round-trip through every row/column-major copy combination."""
    assert_equal_ref(m.dense_r())
    assert_equal_ref(m.dense_c())
    for copier in (m.dense_copy_r, m.dense_copy_c):
        for source in (m.dense_r, m.dense_c):
            assert_equal_ref(copier(source()))
def test_partially_fixed():
    """Matrices with one fixed dimension (four rows/cols) accept full, single
    row/column, and fancy-indexed inputs, and reject wrong shapes."""
    ref2 = np.array([[0., 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2), ref2)
    np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2), ref2)
    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, 1]), ref2[:, [1]])
    np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2[0, :]), ref2[[0], :])
    np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
    np.testing.assert_array_equal(
        m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2), ref2)
    np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2), ref2)
    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, 1]), ref2[:, [1]])
    np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2[0, :]), ref2[[0], :])
    np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
    np.testing.assert_array_equal(
        m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
    # TypeError should be raise for a shape mismatch
    functions = [m.partial_copy_four_rm_r, m.partial_copy_four_rm_c,
                 m.partial_copy_four_cm_r, m.partial_copy_four_cm_c]
    matrix_with_wrong_shape = [[1, 2],
                               [3, 4]]
    for f in functions:
        with pytest.raises(TypeError) as excinfo:
            f(matrix_with_wrong_shape)
        assert "incompatible function arguments" in str(excinfo.value)
def test_mutator_descriptors():
    """Mutating (Eigen::Ref) bindings require a writeable array with the
    matching memory layout; the error message names the required flags."""
    zr = np.arange(30, dtype='float32').reshape(5, 6)  # row-major
    zc = zr.reshape(6, 5).transpose()  # column-major
    m.fixed_mutator_r(zr)
    m.fixed_mutator_c(zc)
    m.fixed_mutator_a(zr)
    m.fixed_mutator_a(zc)
    # Wrong layout for the layout-specific mutators:
    with pytest.raises(TypeError) as excinfo:
        m.fixed_mutator_r(zc)
    assert ('(arg0: numpy.ndarray[numpy.float32[5, 6],'
            ' flags.writeable, flags.c_contiguous]) -> None'
            in str(excinfo.value))
    with pytest.raises(TypeError) as excinfo:
        m.fixed_mutator_c(zr)
    assert ('(arg0: numpy.ndarray[numpy.float32[5, 6],'
            ' flags.writeable, flags.f_contiguous]) -> None'
            in str(excinfo.value))
    with pytest.raises(TypeError) as excinfo:
        m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype='float32'))
    assert ('(arg0: numpy.ndarray[numpy.float32[5, 6], flags.writeable]) -> None'
            in str(excinfo.value))
    # Read-only arrays are rejected by all mutators.
    zr.flags.writeable = False
    with pytest.raises(TypeError):
        m.fixed_mutator_r(zr)
    with pytest.raises(TypeError):
        m.fixed_mutator_a(zr)
def test_cpp_casting():
    """C++-side casting of matrices passed from Python: by-copy always works,
    by-reference requires an exact dtype match (float vs double)."""
    assert m.cpp_copy(m.fixed_r()) == 22.
    assert m.cpp_copy(m.fixed_c()) == 22.
    z = np.array([[5., 6], [7, 8]])
    assert m.cpp_copy(z) == 7.
    assert m.cpp_copy(m.get_cm_ref()) == 21.
    assert m.cpp_copy(m.get_rm_ref()) == 21.
    assert m.cpp_ref_c(m.get_cm_ref()) == 21.
    assert m.cpp_ref_r(m.get_rm_ref()) == 21.
    with pytest.raises(RuntimeError) as excinfo:
        # Can't reference m.fixed_c: it contains floats, m.cpp_ref_any wants doubles
        m.cpp_ref_any(m.fixed_c())
    assert 'Unable to cast Python instance' in str(excinfo.value)
    with pytest.raises(RuntimeError) as excinfo:
        # Can't reference m.fixed_r: it contains floats, m.cpp_ref_any wants doubles
        m.cpp_ref_any(m.fixed_r())
    assert 'Unable to cast Python instance' in str(excinfo.value)
    assert m.cpp_ref_any(m.ReturnTester.create()) == 1.
    assert m.cpp_ref_any(m.get_cm_ref()) == 21.
    assert m.cpp_ref_any(m.get_cm_ref()) == 21.
def test_pass_readonly_array():
    """Read-only numpy arrays can be passed to by-copy bindings, and const
    returns come back as non-writeable arrays."""
    readonly = np.full((5, 6), 42.0)
    readonly.flags.writeable = False
    np.testing.assert_array_equal(readonly, m.fixed_copy_r(readonly))
    np.testing.assert_array_equal(m.fixed_r_const(), m.fixed_r())
    assert not m.fixed_r_const().flags.writeable
    np.testing.assert_array_equal(m.fixed_copy_r(m.fixed_r_const()), m.fixed_r_const())
def test_nonunit_stride_from_python():
    """Strided (non-contiguous) numpy slices are accepted by the doubling and
    mutating bindings, and in-place mutation only touches the sliced elements."""
    counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))
    second_row = counting_mat[1, :]
    second_col = counting_mat[:, 1]
    np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)
    np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)
    np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)
    counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
    slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
    for ref_mat in slices:
        np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
        np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
    # Mutator:
    m.double_threer(second_row)
    m.double_threec(second_col)
    np.testing.assert_array_equal(counting_mat, [[0., 2, 2], [6, 16, 10], [6, 14, 8]])
def test_negative_stride_from_python(msg):
    """Eigen doesn't support (as of yet) negative strides. When a function takes an Eigen matrix by
    copy or const reference, we can pass a numpy array that has negative strides. Otherwise, an
    exception will be thrown as Eigen will not be able to map the numpy array."""
    counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))
    counting_mat = counting_mat[::-1, ::-1]  # reversed view => negative strides
    second_row = counting_mat[1, :]
    second_col = counting_mat[:, 1]
    np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)
    np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)
    np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)
    np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)
    counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
    counting_3d = counting_3d[::-1, ::-1, ::-1]
    slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
    for ref_mat in slices:
        np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
        np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
    # Mutator: a writeable Eigen::Ref cannot map negative strides, so the call
    # is rejected as an incompatible argument.
    with pytest.raises(TypeError) as excinfo:
        m.double_threer(second_row)
    assert msg(excinfo.value) == """
        double_threer(): incompatible function arguments. The following argument types are supported:
            1. (arg0: numpy.ndarray[numpy.float32[1, 3], flags.writeable]) -> None

        Invoked with: """ + repr(np.array([ 5.,  4.,  3.], dtype='float32'))  # noqa: E501 line too long
    with pytest.raises(TypeError) as excinfo:
        m.double_threec(second_col)
    assert msg(excinfo.value) == """
        double_threec(): incompatible function arguments. The following argument types are supported:
            1. (arg0: numpy.ndarray[numpy.float32[3, 1], flags.writeable]) -> None

        Invoked with: """ + repr(np.array([ 7.,  4.,  1.], dtype='float32'))  # noqa: E501 line too long
def test_nonunit_stride_to_python():
    """Diagonals and blocks returned from C++ match numpy's own slicing."""
    assert np.all(m.diagonal(ref) == ref.diagonal())
    assert np.all(m.diagonal_1(ref) == ref.diagonal(1))
    for offset in range(-5, 7):
        assert np.all(m.diagonal_n(ref, offset) == ref.diagonal(offset)), \
            "m.diagonal_n({})".format(offset)
    for row, col, nrows, ncols in [(2, 1, 3, 3), (1, 4, 4, 2), (1, 4, 3, 2)]:
        expected = ref[row:row + nrows, col:col + ncols]
        assert np.all(m.block(ref, row, col, nrows, ncols) == expected)
def test_eigen_ref_to_python():
    """Every bound cholesky variant must yield the same lower-triangular factor."""
    expected = np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])
    factorizers = [m.cholesky1, m.cholesky2, m.cholesky3, m.cholesky4]
    for idx, factorize in enumerate(factorizers, start=1):
        result = factorize(np.array([[1., 2, 4], [2, 13, 23], [4, 23, 77]]))
        assert np.all(result == expected), "cholesky{}".format(idx)
def assign_both(a1, a2, r, c, v):
    """Write *v* into cell (r, c) of both arrays, keeping them in lockstep."""
    for target in (a1, a2):
        target[r, c] = v
def array_copy_but_one(a, r, c, v):
    """Return a copy of *a* that differs only at cell (r, c), set to *v*."""
    modified = np.copy(a)
    modified[r, c] = v
    return modified
def test_eigen_return_references():
    """Tests various ways of returning references and non-referencing copies"""
    master = np.ones((10, 10))
    a = m.ReturnTester()
    # Reference/pointer returns are writeable, non-owning views; every mutation
    # is mirrored into `master` so aliasing can be verified in bulk at the end.
    a_get1 = a.get()
    assert not a_get1.flags.owndata and a_get1.flags.writeable
    assign_both(a_get1, master, 3, 3, 5)
    a_get2 = a.get_ptr()
    assert not a_get2.flags.owndata and a_get2.flags.writeable
    assign_both(a_get1, master, 2, 3, 6)
    # Const views: non-owning and read-only.
    a_view1 = a.view()
    assert not a_view1.flags.owndata and not a_view1.flags.writeable
    with pytest.raises(ValueError):
        a_view1[2, 3] = 4
    a_view2 = a.view_ptr()
    assert not a_view2.flags.owndata and not a_view2.flags.writeable
    with pytest.raises(ValueError):
        a_view2[2, 3] = 4
    # Copy-returning methods give independent, owning arrays; writes to them
    # must not leak back (checked against the c*want snapshots at the end).
    a_copy1 = a.copy_get()
    assert a_copy1.flags.owndata and a_copy1.flags.writeable
    np.testing.assert_array_equal(a_copy1, master)
    a_copy1[7, 7] = -44  # Shouldn't affect anything else
    c1want = array_copy_but_one(master, 7, 7, -44)
    a_copy2 = a.copy_view()
    assert a_copy2.flags.owndata and a_copy2.flags.writeable
    np.testing.assert_array_equal(a_copy2, master)
    a_copy2[4, 4] = -22  # Shouldn't affect anything else
    c2want = array_copy_but_one(master, 4, 4, -22)
    # Eigen::Ref returns: non-owning; writeability follows constness.
    a_ref1 = a.ref()
    assert not a_ref1.flags.owndata and a_ref1.flags.writeable
    assign_both(a_ref1, master, 1, 1, 15)
    a_ref2 = a.ref_const()
    assert not a_ref2.flags.owndata and not a_ref2.flags.writeable
    with pytest.raises(ValueError):
        a_ref2[5, 5] = 33
    a_ref3 = a.ref_safe()
    assert not a_ref3.flags.owndata and a_ref3.flags.writeable
    assign_both(a_ref3, master, 0, 7, 99)
    a_ref4 = a.ref_const_safe()
    assert not a_ref4.flags.owndata and not a_ref4.flags.writeable
    with pytest.raises(ValueError):
        a_ref4[7, 0] = 987654321
    a_copy3 = a.copy_ref()
    assert a_copy3.flags.owndata and a_copy3.flags.writeable
    np.testing.assert_array_equal(a_copy3, master)
    a_copy3[8, 1] = 11
    c3want = array_copy_but_one(master, 8, 1, 11)
    a_copy4 = a.copy_ref_const()
    assert a_copy4.flags.owndata and a_copy4.flags.writeable
    np.testing.assert_array_equal(a_copy4, master)
    a_copy4[8, 4] = 88
    c4want = array_copy_but_one(master, 8, 4, 88)
    # Block returns alias a sub-matrix; each write is mirrored into `master`
    # at the corresponding absolute position.
    a_block1 = a.block(3, 3, 2, 2)
    assert not a_block1.flags.owndata and a_block1.flags.writeable
    a_block1[0, 0] = 55
    master[3, 3] = 55
    a_block2 = a.block_safe(2, 2, 3, 2)
    assert not a_block2.flags.owndata and a_block2.flags.writeable
    a_block2[2, 1] = -123
    master[4, 3] = -123
    a_block3 = a.block_const(6, 7, 4, 3)
    assert not a_block3.flags.owndata and not a_block3.flags.writeable
    with pytest.raises(ValueError):
        a_block3[2, 2] = -44444
    a_copy5 = a.copy_block(2, 2, 2, 3)
    assert a_copy5.flags.owndata and a_copy5.flags.writeable
    np.testing.assert_array_equal(a_copy5, master[2:4, 2:5])
    a_copy5[1, 1] = 777
    c5want = array_copy_but_one(master[2:4, 2:5], 1, 1, 777)
    # corners()/corners_const() are non-contiguous views of the four corners.
    a_corn1 = a.corners()
    assert not a_corn1.flags.owndata and a_corn1.flags.writeable
    a_corn1 *= 50
    a_corn1[1, 1] = 999
    master[0, 0] = 50
    master[0, 9] = 50
    master[9, 0] = 50
    master[9, 9] = 999
    a_corn2 = a.corners_const()
    assert not a_corn2.flags.owndata and not a_corn2.flags.writeable
    with pytest.raises(ValueError):
        a_corn2[1, 0] = 51
    # All of the changes made all the way along should be visible everywhere
    # now (except for the copies, of course)
    np.testing.assert_array_equal(a_get1, master)
    np.testing.assert_array_equal(a_get2, master)
    np.testing.assert_array_equal(a_view1, master)
    np.testing.assert_array_equal(a_view2, master)
    np.testing.assert_array_equal(a_ref1, master)
    np.testing.assert_array_equal(a_ref2, master)
    np.testing.assert_array_equal(a_ref3, master)
    np.testing.assert_array_equal(a_ref4, master)
    np.testing.assert_array_equal(a_block1, master[3:5, 3:5])
    np.testing.assert_array_equal(a_block2, master[2:5, 2:4])
    np.testing.assert_array_equal(a_block3, master[6:10, 7:10])
    np.testing.assert_array_equal(a_corn1, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
    np.testing.assert_array_equal(a_corn2, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
    np.testing.assert_array_equal(a_copy1, c1want)
    np.testing.assert_array_equal(a_copy2, c2want)
    np.testing.assert_array_equal(a_copy3, c3want)
    np.testing.assert_array_equal(a_copy4, c4want)
    np.testing.assert_array_equal(a_copy5, c5want)
def assert_keeps_alive(cl, method, *args):
    """Verify *method*'s return value keeps a fresh *cl* instance alive.

    Uses ConstructorStats to observe that the instance is only destroyed
    once both the local reference and the method's result are dropped.
    """
    cstats = ConstructorStats.get(cl)
    start_with = cstats.alive()
    a = cl()
    assert cstats.alive() == start_with + 1
    z = method(a, *args)
    assert cstats.alive() == start_with + 1
    del a
    # Here's the keep alive in action:
    assert cstats.alive() == start_with + 1
    del z
    # Keep alive should have expired:
    assert cstats.alive() == start_with
def test_eigen_keepalive():
    """Which ReturnTester accessors tie the owner's lifetime to their result."""
    a = m.ReturnTester()
    cstats = ConstructorStats.get(m.ReturnTester)
    assert cstats.alive() == 1
    # These views and copies do NOT keep the owner alive: deleting `a`
    # destroys the C++ object even while they still exist.
    unsafe = [a.ref(), a.ref_const(), a.block(1, 2, 3, 4)]
    copies = [a.copy_get(), a.copy_view(), a.copy_ref(), a.copy_ref_const(),
              a.copy_block(4, 3, 2, 1)]
    del a
    assert cstats.alive() == 0
    del unsafe
    del copies
    # These accessors must keep the owner alive while their result exists.
    for meth in [m.ReturnTester.get, m.ReturnTester.get_ptr, m.ReturnTester.view,
                 m.ReturnTester.view_ptr, m.ReturnTester.ref_safe, m.ReturnTester.ref_const_safe,
                 m.ReturnTester.corners, m.ReturnTester.corners_const]:
        assert_keeps_alive(m.ReturnTester, meth)
    for meth in [m.ReturnTester.block_safe, m.ReturnTester.block_const]:
        assert_keeps_alive(m.ReturnTester, meth, 4, 3, 2, 1)
def test_eigen_ref_mutators():
    """Tests Eigen's ability to mutate numpy values"""
    orig = np.array([[1., 2, 3], [4, 5, 6], [7, 8, 9]])
    zr = np.array(orig)             # row-major (C order) working copy
    zc = np.array(orig, order='F')  # column-major (Fortran order) working copy
    m.add_rm(zr, 1, 0, 100)
    assert np.all(zr == np.array([[1., 2, 3], [104, 5, 6], [7, 8, 9]]))
    m.add_cm(zc, 1, 0, 200)
    assert np.all(zc == np.array([[1., 2, 3], [204, 5, 6], [7, 8, 9]]))
    # add_any accepts either storage order.
    m.add_any(zr, 1, 0, 20)
    assert np.all(zr == np.array([[1., 2, 3], [124, 5, 6], [7, 8, 9]]))
    m.add_any(zc, 1, 0, 10)
    assert np.all(zc == np.array([[1., 2, 3], [214, 5, 6], [7, 8, 9]]))
    # Can't reference a col-major array with a row-major Ref, and vice versa:
    with pytest.raises(TypeError):
        m.add_rm(zc, 1, 0, 1)
    with pytest.raises(TypeError):
        m.add_cm(zr, 1, 0, 1)
    # Overloads: the increments below undo everything above, so both arrays
    # must match `orig` again afterwards.
    m.add1(zr, 1, 0, -100)
    m.add2(zr, 1, 0, -20)
    assert np.all(zr == orig)
    m.add1(zc, 1, 0, -200)
    m.add2(zc, 1, 0, -10)
    assert np.all(zc == orig)
    # a non-contiguous slice (this won't work on either the row- or
    # column-contiguous refs, but should work for the any)
    cornersr = zr[0::2, 0::2]
    cornersc = zc[0::2, 0::2]
    assert np.all(cornersr == np.array([[1., 3], [7, 9]]))
    assert np.all(cornersc == np.array([[1., 3], [7, 9]]))
    with pytest.raises(TypeError):
        m.add_rm(cornersr, 0, 1, 25)
    with pytest.raises(TypeError):
        m.add_cm(cornersr, 0, 1, 25)
    with pytest.raises(TypeError):
        m.add_rm(cornersc, 0, 1, 25)
    with pytest.raises(TypeError):
        m.add_cm(cornersc, 0, 1, 25)
    m.add_any(cornersr, 0, 1, 25)
    m.add_any(cornersc, 0, 1, 44)
    # Slice element [0, 1] aliases position [0, 2] of the parent arrays.
    assert np.all(zr == np.array([[1., 2, 28], [4, 5, 6], [7, 8, 9]]))
    assert np.all(zc == np.array([[1., 2, 47], [4, 5, 6], [7, 8, 9]]))
    # You shouldn't be allowed to pass a non-writeable array to a mutating Eigen method:
    zro = zr[0:4, 0:4]
    zro.flags.writeable = False
    with pytest.raises(TypeError):
        m.add_rm(zro, 0, 0, 0)
    with pytest.raises(TypeError):
        m.add_any(zro, 0, 0, 0)
    with pytest.raises(TypeError):
        m.add1(zro, 0, 0, 0)
    with pytest.raises(TypeError):
        m.add2(zro, 0, 0, 0)
    # integer array shouldn't be passable to a double-matrix-accepting mutating func:
    # NOTE(review): this call also omits the index/value arguments, so the
    # TypeError could stem from the arity mismatch rather than the dtype;
    # `m.add_rm(zi, 1, 0, 1)` would pin the dtype rejection — confirm intent.
    zi = np.array([[1, 2], [3, 4]])
    with pytest.raises(TypeError):
        m.add_rm(zi)
def test_numpy_ref_mutators():
    """Tests numpy mutating Eigen matrices (for returned Eigen::Ref<...>s)"""
    m.reset_refs()  # In case another test already changed it
    # Writeable and const refs to the same C++ matrices, both storage orders.
    zc = m.get_cm_ref()
    zcro = m.get_cm_const_ref()
    zr = m.get_rm_ref()
    zrro = m.get_rm_const_ref()
    assert [zc[1, 2], zcro[1, 2], zr[1, 2], zrro[1, 2]] == [23] * 4
    assert not zc.flags.owndata and zc.flags.writeable
    assert not zr.flags.owndata and zr.flags.writeable
    assert not zcro.flags.owndata and not zcro.flags.writeable
    assert not zrro.flags.owndata and not zrro.flags.writeable
    zc[1, 2] = 99
    expect = np.array([[11., 12, 13], [21, 22, 99], [31, 32, 33]])
    # We should have just changed zc, of course, but also zcro and the original eigen matrix
    assert np.all(zc == expect)
    assert np.all(zcro == expect)
    assert np.all(m.get_cm_ref() == expect)
    zr[1, 2] = 99
    assert np.all(zr == expect)
    assert np.all(zrro == expect)
    assert np.all(m.get_rm_ref() == expect)
    # Make sure the readonly ones are numpy-readonly:
    with pytest.raises(ValueError):
        zcro[1, 2] = 6
    with pytest.raises(ValueError):
        zrro[1, 2] = 6
    # We should be able to explicitly copy like this (and since we're copying,
    # the const should drop away)
    y1 = np.array(m.get_cm_const_ref())
    assert y1.flags.owndata and y1.flags.writeable
    # We should get copies of the eigen data, which was modified above:
    assert y1[1, 2] == 99
    y1[1, 2] += 12
    assert y1[1, 2] == 111
    assert zc[1, 2] == 99  # Make sure we aren't referencing the original
def test_both_ref_mutators():
    """Tests a complex chain of nested eigen/numpy references"""
    m.reset_refs()  # In case another test already changed it
    # Each incr_matrix call returns a reference aliasing the same underlying
    # matrix, so z..z5 must all stay in sync however deep the chain nests.
    z = m.get_cm_ref()  # numpy -> eigen
    z[0, 2] -= 3
    z2 = m.incr_matrix(z, 1)  # numpy -> eigen -> numpy -> eigen
    z2[1, 1] += 6
    z3 = m.incr_matrix(z, 2)  # (numpy -> eigen)^3
    z3[2, 2] += -5
    z4 = m.incr_matrix(z, 3)  # (numpy -> eigen)^4
    z4[1, 1] -= 1
    z5 = m.incr_matrix(z, 4)  # (numpy -> eigen)^5
    z5[0, 0] = 0
    assert np.all(z == z2)
    assert np.all(z == z3)
    assert np.all(z == z4)
    assert np.all(z == z5)
    expect = np.array([[0., 22, 20], [31, 37, 33], [41, 42, 38]])
    assert np.all(z == expect)
    # Mix numpy slicing with Eigen-side slicing helpers, then confirm all
    # intermediate views reflect the mutations applied further down the chain.
    y = np.array(range(100), dtype='float64').reshape(10, 10)
    y2 = m.incr_matrix_any(y, 10)  # np -> eigen -> np
    y3 = m.incr_matrix_any(y2[0::2, 0::2], -33)  # np -> eigen -> np slice -> np -> eigen -> np
    y4 = m.even_rows(y3)  # numpy -> eigen slice -> (... y3)
    y5 = m.even_cols(y4)  # numpy -> eigen slice -> (... y4)
    y6 = m.incr_matrix_any(y5, 1000)  # numpy -> eigen -> (... y5)
    # Apply same mutations using just numpy:
    yexpect = np.array(range(100), dtype='float64').reshape(10, 10)
    yexpect += 10
    yexpect[0::2, 0::2] -= 33
    yexpect[0::4, 0::4] += 1000
    assert np.all(y6 == yexpect[0::4, 0::4])
    assert np.all(y5 == yexpect[0::4, 0::4])
    assert np.all(y4 == yexpect[0::4, 0::2])
    assert np.all(y3 == yexpect[0::2, 0::2])
    assert np.all(y2 == yexpect)
    assert np.all(y == yexpect)
def _assert_nocopy_rejection(func, func_name, matrix, flag):
    # Helper: *func* must reject *matrix* with a TypeError whose message names
    # the function and the contiguity flag the argument failed to satisfy.
    with pytest.raises(TypeError) as excinfo:
        func(matrix)
    message = str(excinfo.value)
    assert '{}(): incompatible function arguments.'.format(func_name) in message
    assert ', flags.{}'.format(flag) in message


def test_nocopy_wrapper():
    """No-copy bindings must reject any matrix that would require conversion.

    get_elem requires a column-contiguous matrix reference, but should be
    callable with other types of matrix (via copying); the *_nocopy variants
    must instead raise for every layout/dtype that is not an exact match.
    """
    int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order='F')
    dbl_matrix_colmajor = np.array(int_matrix_colmajor, dtype='double', order='F', copy=True)
    int_matrix_rowmajor = np.array(int_matrix_colmajor, order='C', copy=True)
    dbl_matrix_rowmajor = np.array(int_matrix_rowmajor, dtype='double', order='C', copy=True)

    # All should be callable via get_elem:
    assert m.get_elem(int_matrix_colmajor) == 8
    assert m.get_elem(dbl_matrix_colmajor) == 8
    assert m.get_elem(int_matrix_rowmajor) == 8
    assert m.get_elem(dbl_matrix_rowmajor) == 8

    # All but the double/column-major matrix should fail with m.get_elem_nocopy:
    _assert_nocopy_rejection(m.get_elem_nocopy, 'get_elem_nocopy',
                             int_matrix_colmajor, 'f_contiguous')
    assert m.get_elem_nocopy(dbl_matrix_colmajor) == 8
    _assert_nocopy_rejection(m.get_elem_nocopy, 'get_elem_nocopy',
                             int_matrix_rowmajor, 'f_contiguous')
    _assert_nocopy_rejection(m.get_elem_nocopy, 'get_elem_nocopy',
                             dbl_matrix_rowmajor, 'f_contiguous')

    # For the row-major test, we take a long matrix in row-major, so only the third is allowed:
    _assert_nocopy_rejection(m.get_elem_rm_nocopy, 'get_elem_rm_nocopy',
                             int_matrix_colmajor, 'c_contiguous')
    _assert_nocopy_rejection(m.get_elem_rm_nocopy, 'get_elem_rm_nocopy',
                             dbl_matrix_colmajor, 'c_contiguous')
    assert m.get_elem_rm_nocopy(int_matrix_rowmajor) == 8
    _assert_nocopy_rejection(m.get_elem_rm_nocopy, 'get_elem_rm_nocopy',
                             dbl_matrix_rowmajor, 'c_contiguous')
def test_eigen_ref_life_support():
    """Temporary copies made by the `Ref` caster must outlive the call.

    Checks both a direct cast (the array itself) and an indirect one
    (the array wrapped in a list).
    """
    values = np.full(shape=10, fill_value=8, dtype=np.int8)
    assert m.get_elem_direct(values) == 8
    assert m.get_elem_indirect([values]) == 8
def test_special_matrix_objects():
    """Diagonal and symmetric-view return values convert like plain matrices."""
    assert np.all(m.incr_diag(7) == np.diag([1., 2, 3, 4, 5, 6, 7]))
    asymm = np.arange(1., 17.).reshape(4, 4)
    # Expected results: mirror the lower (resp. upper) triangle across
    # the main diagonal.
    symm_lower = np.tril(asymm) + np.tril(asymm, -1).T
    symm_upper = np.triu(asymm) + np.triu(asymm, 1).T
    assert np.all(m.symmetric_lower(asymm) == symm_lower)
    assert np.all(m.symmetric_upper(asymm) == symm_upper)
def test_dense_signature(doc):
    """Generated signatures render dense Eigen types as numpy.ndarray with
    their scalar dtype and compile-time shape placeholders (m, n)."""
    assert doc(m.double_col) == """
        double_col(arg0: numpy.ndarray[numpy.float32[m, 1]]) -> numpy.ndarray[numpy.float32[m, 1]]
    """
    assert doc(m.double_row) == """
        double_row(arg0: numpy.ndarray[numpy.float32[1, n]]) -> numpy.ndarray[numpy.float32[1, n]]
    """
    assert doc(m.double_complex) == ("""
        double_complex(arg0: numpy.ndarray[numpy.complex64[m, 1]])"""
                                     """ -> numpy.ndarray[numpy.complex64[m, 1]]
    """)
    assert doc(m.double_mat_rm) == ("""
        double_mat_rm(arg0: numpy.ndarray[numpy.float32[m, n]])"""
                                    """ -> numpy.ndarray[numpy.float32[m, n]]
    """)
def test_named_arguments():
    """Matrix arguments may be given positionally or by keyword in any order."""
    a = np.array([[1.0, 2], [3, 4], [5, 6]])
    b = np.ones((2, 1))
    product = np.array([[3.], [7], [11]])
    assert np.all(m.matrix_multiply(a, b) == product)
    assert np.all(m.matrix_multiply(A=a, B=b) == product)
    assert np.all(m.matrix_multiply(B=b, A=a) == product)
    # Swapped operands are dimensionally incompatible however they are passed.
    for bad_call in (lambda: m.matrix_multiply(b, a),
                     lambda: m.matrix_multiply(A=b, B=a),
                     lambda: m.matrix_multiply(B=a, A=b)):
        with pytest.raises(ValueError) as excinfo:
            bad_call()
        assert str(excinfo.value) == 'Nonconformable matrices!'
def test_sparse():
    """Sparse Eigen matrices (both storage orders) round-trip through scipy."""
    pytest.importorskip("scipy")
    assert_sparse_equal_ref(m.sparse_r())
    assert_sparse_equal_ref(m.sparse_c())
    # Each copy function must accept matrices of either storage order.
    for convert in (m.sparse_copy_r, m.sparse_copy_c):
        for source in (m.sparse_r, m.sparse_c):
            assert_sparse_equal_ref(convert(source()))
def test_sparse_signature(doc):
    """Generated signatures describe sparse Eigen arguments as scipy.sparse
    csr/csc matrices with the scalar dtype."""
    pytest.importorskip("scipy")
    assert doc(m.sparse_copy_r) == """
        sparse_copy_r(arg0: scipy.sparse.csr_matrix[numpy.float32]) -> scipy.sparse.csr_matrix[numpy.float32]
    """  # noqa: E501 line too long
    assert doc(m.sparse_copy_c) == """
        sparse_copy_c(arg0: scipy.sparse.csc_matrix[numpy.float32]) -> scipy.sparse.csc_matrix[numpy.float32]
    """  # noqa: E501 line too long
def test_issue738():
    """Strides on a length-1 dimension are ignored, even when they would be
    incompatible for a longer dimension (issue #738)."""
    for func in (m.iss738_f1, m.iss738_f2):
        row_result = func(np.array([[1., 2, 3]]))
        assert np.all(row_result == np.array([[1., 102, 203]]))
        col_result = func(np.array([[1.], [2], [3]]))
        assert np.all(col_result == np.array([[1.], [12], [23]]))
def test_issue1105():
    """1xN / Nx1 arrays must be accepted for compile-time row/column vectors
    (issue #1105), while the transposed shapes are still rejected."""
    assert m.iss1105_row(np.ones((1, 7)))
    assert m.iss1105_col(np.ones((7, 1)))
    # Mismatched orientations must fail as incompatible dimensions.
    for func, bad_shape in ((m.iss1105_row, (7, 1)), (m.iss1105_col, (1, 7))):
        with pytest.raises(TypeError) as excinfo:
            func(np.ones(bad_shape))
        assert "incompatible function arguments" in str(excinfo.value)
def test_custom_operator_new():
    """A class holding Eigen members needs a class-specific, properly aligned
    operator new; its fields must come back correctly initialized."""
    obj = m.CustomOperatorNew()
    np.testing.assert_allclose(obj.a, 0.0)
    diag = obj.b.diagonal()
    np.testing.assert_allclose(diag, 1.0)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_eval_call.py | third_party/unitree_legged_sdk/pybind11/tests/test_eval_call.py | # -*- coding: utf-8 -*-
# This file is called from 'test_eval.py'
# `call_test2` and `y` are expected to be pre-populated in the namespace this
# file is evaluated in (presumably by the C++ eval tests — see test_eval.py);
# only invoke the callback when it is actually present.
if 'call_test2' in locals():
    call_test2(y)  # noqa: F821 undefined name
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_numpy_dtypes.py | third_party/unitree_legged_sdk/pybind11/tests/test_numpy_dtypes.py | # -*- coding: utf-8 -*-
import re
import pytest
import env # noqa: F401
from pybind11_tests import numpy_dtypes as m
np = pytest.importorskip("numpy")
@pytest.fixture(scope='module')
def simple_dtype():
    # Aligned struct dtype (bool / uint32 / float32 / long double) with
    # explicit offsets; the long double's offset depends on whether the
    # platform aligns it to more than 4 bytes.
    ld = np.dtype('longdouble')
    return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
                     'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],
                     'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})
@pytest.fixture(scope='module')
def packed_dtype():
    # Packed layout of the same fields: adjacent, with no alignment padding.
    return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])
def dt_fmt():
    """Template for str() of the aligned struct dtype.

    Leaves three `{}` placeholders — long double item size, its offset, and
    the total itemsize — to be filled in later via str.format().
    """
    from sys import byteorder
    e = '<' if byteorder == 'little' else '>'
    # `{{}}` renders a literal `{}` so the long double size stays a placeholder.
    formats = "'?','{e}u4','{e}f4','{e}f{{}}'".format(e=e)
    return ("{{'names':['bool_','uint_','float_','ldbl_'],"
            " 'formats':[" + formats + "],"
            " 'offsets':[0,4,8,{}], 'itemsize':{}}}")
def simple_dtype_fmt():
    # Expected str() of the aligned simple dtype: the long double offset
    # (and hence the total itemsize) shifts by 4 when its alignment
    # exceeds 4 bytes.
    ld = np.dtype('longdouble')
    simple_ld_off = 12 + 4 * (ld.alignment > 4)
    return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
def packed_dtype_fmt():
    """Expected str() of the packed (padding-free) struct dtype."""
    from sys import byteorder
    endian = '<' if byteorder == 'little' else '>'
    ld_size = np.dtype('longdouble').itemsize
    return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
        ld_size, e=endian)
def partial_ld_offset():
    """Offset of the long double field in the 'partial' struct layout.

    Mirrors compiler padding rules: 12 bytes of leading fields, optional
    padding before an 8-byte member, the member itself, then optional
    padding up to the long double's alignment.
    """
    offset = 12
    if np.dtype('uint64').alignment > 4:
        offset += 4
    offset += 8
    if np.dtype('longdouble').alignment > 8:
        offset += 8
    return offset
def partial_dtype_fmt():
    # Expected str() of the partial struct dtype: same field template as the
    # simple dtype, but with the long double at partial_ld_offset().
    ld = np.dtype('longdouble')
    partial_ld_off = partial_ld_offset()
    return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
def partial_nested_fmt():
    # Expected str() of a struct embedding the partial struct as field 'a',
    # with leading/trailing padding derived from the long double alignment.
    ld = np.dtype('longdouble')
    partial_nested_off = 8 + 8 * (ld.alignment > 8)
    partial_ld_off = partial_ld_offset()
    # Total size = padding on both sides plus the inner struct's extent.
    partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
    return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
        partial_dtype_fmt(), partial_nested_off, partial_nested_size)
def assert_equal(actual, expected_data, expected_dtype):
    """Check *actual* elementwise against *expected_data* cast to *expected_dtype*."""
    expected = np.array(expected_data, dtype=expected_dtype)
    np.testing.assert_equal(actual, expected)
def test_format_descriptors():
    """Buffer-protocol format strings generated for each registered struct."""
    # Querying a struct type that was never registered must fail cleanly.
    with pytest.raises(RuntimeError) as excinfo:
        m.get_format_unbound()
    assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
    # The expected padding ('Nx') depends on the platform's long double and
    # double alignment, so the reference strings are assembled dynamically.
    ld = np.dtype('longdouble')
    ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char
    ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
    dbl = np.dtype('double')
    partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" +
                   str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +
                   "xg:ldbl_:}")
    nested_extra = str(max(8, ld.alignment))
    assert m.print_format_descriptors() == [
        ss_fmt,
        "^T{?:bool_:I:uint_:f:float_:g:ldbl_:}",
        "^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}",
        partial_fmt,
        "^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
        "^T{3s:a:3s:b:}",
        "^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
        '^T{q:e1:B:e2:}',
        '^T{Zf:cflt:Zd:cdbl:}'
    ]
def test_dtype(simple_dtype):
    """str() of each registered dtype, plus the C++ py::dtype ctor/method bindings."""
    from sys import byteorder
    e = '<' if byteorder == 'little' else '>'
    assert m.print_dtypes() == [
        simple_dtype_fmt(),
        packed_dtype_fmt(),
        "[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()),
        partial_dtype_fmt(),
        partial_nested_fmt(),
        "[('a', 'S3'), ('b', 'S3')]",
        ("{{'names':['a','b','c','d'], " +
         "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('" + e + "f4', (4, 2))], " +
         "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e),
        "[('e1', '" + e + "i8'), ('e2', 'u1')]",
        "[('x', 'i1'), ('y', '" + e + "u8')]",
        "[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]"
    ]
    # dtypes built on the C++ side must compare equal to the numpy originals.
    d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],
                   'offsets': [1, 10], 'itemsize': 20})
    d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
    assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),
                                    np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]
    assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,
                                      np.dtype('int32').itemsize, simple_dtype.itemsize]
    assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))
def test_recarray(simple_dtype, packed_dtype):
    """Record arrays created in C++ hold the expected values in every layout."""
    elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
    # Aligned and packed layouts must contain the same logical records.
    for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
        arr = func(0)
        assert arr.dtype == dtype
        assert_equal(arr, [], simple_dtype)
        assert_equal(arr, [], packed_dtype)
        arr = func(3)
        assert arr.dtype == dtype
        assert_equal(arr, elements, simple_dtype)
        assert_equal(arr, elements, packed_dtype)
        # Show what recarray's look like in NumPy.
        assert type(arr[0]) == np.void
        assert type(arr[0].item()) == tuple
        if dtype == simple_dtype:
            assert m.print_rec_simple(arr) == [
                "s:0,0,0,-0",
                "s:1,1,1.5,-2.5",
                "s:0,2,3,-5"
            ]
        else:
            assert m.print_rec_packed(arr) == [
                "p:0,0,0,-0",
                "p:1,1,1.5,-2.5",
                "p:0,2,3,-5"
            ]
    # Nested records combine an aligned member with a packed member.
    nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
    arr = m.create_rec_nested(0)
    assert arr.dtype == nested_dtype
    assert_equal(arr, [], nested_dtype)
    arr = m.create_rec_nested(3)
    assert arr.dtype == nested_dtype
    assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
                       ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
                       ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)
    assert m.print_rec_nested(arr) == [
        "n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
        "n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
        "n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5"
    ]
    # Partial structs carry extra padding; field tables must contain no
    # empty (padding) names.
    arr = m.create_rec_partial(3)
    assert str(arr.dtype) == partial_dtype_fmt()
    partial_dtype = arr.dtype
    assert '' not in arr.dtype.fields
    assert partial_dtype.itemsize > simple_dtype.itemsize
    assert_equal(arr, elements, simple_dtype)
    assert_equal(arr, elements, packed_dtype)
    arr = m.create_rec_partial_nested(3)
    assert str(arr.dtype) == partial_nested_fmt()
    assert '' not in arr.dtype.fields
    assert '' not in arr.dtype.fields['a'][0].fields
    assert arr.dtype.itemsize > partial_dtype.itemsize
    np.testing.assert_equal(arr['a'], m.create_rec_partial(3))
def test_array_constructors():
    """Every C++-side array-construction code path must produce the same data.

    Per the original assertions: codes 10-17 and 20-27 yield the 3x2 matrix,
    codes 30-34 and 40-44 yield the flat 6-element vector.
    """
    flat = np.arange(1, 7, dtype='int32')
    shaped = flat.reshape((3, 2))
    for base, expected, count in ((10, shaped, 8), (20, shaped, 8),
                                  (30, flat, 5), (40, flat, 5)):
        for offset in range(count):
            np.testing.assert_array_equal(m.test_array_ctors(base + offset), expected)
def test_string_array():
    """Fixed-size char-array struct fields round-trip as numpy 'S3' strings."""
    arr = m.create_string_array(True)
    assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]"
    assert m.print_string_array(arr) == [
        "a='',b=''",
        "a='a',b='a'",
        "a='ab',b='ab'",
        "a='abc',b='abc'"
    ]
    dtype = arr.dtype
    assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']
    assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']
    # An empty array must still carry the same dtype.
    arr = m.create_string_array(False)
    assert dtype == arr.dtype
def test_array_array():
    """Struct fields that are themselves arrays (char, int, uint8, 2-D float)
    are exposed with sub-array dtypes and round-trip their contents."""
    from sys import byteorder
    e = '<' if byteorder == 'little' else '>'
    arr = m.create_array_array(3)
    assert str(arr.dtype) == (
        "{{'names':['a','b','c','d'], " +
        "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " +
        "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e)
    assert m.print_array_array(arr) == [
        "a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," +
        "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
        "a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," +
        "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
        "a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," +
        "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
    ]
    assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],
                                 [b'WXYZ', b'GHIJ', b'QRST'],
                                 [b'STUV', b'CDEF', b'MNOP']]
    assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
    # An empty array must still carry the same dtype.
    assert m.create_array_array(0).dtype == arr.dtype
def test_enum_array():
    """Enum struct fields are stored with their underlying integer types."""
    from sys import byteorder
    e = '<' if byteorder == 'little' else '>'
    arr = m.create_enum_array(3)
    dtype = arr.dtype
    assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])
    assert m.print_enum_array(arr) == [
        "e1=A,e2=X",
        "e1=B,e2=Y",
        "e1=A,e2=X"
    ]
    # Raw integer values behind the enum names.
    assert arr['e1'].tolist() == [-1, 1, -1]
    assert arr['e2'].tolist() == [1, 2, 1]
    assert m.create_enum_array(0).dtype == dtype
def test_complex_array():
    """Complex struct fields map to numpy c8/c16 and round-trip their values."""
    from sys import byteorder
    e = '<' if byteorder == 'little' else '>'
    arr = m.create_complex_array(3)
    dtype = arr.dtype
    assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])
    assert m.print_complex_array(arr) == [
        "c:(0,0.25),(0.5,0.75)",
        "c:(1,1.25),(1.5,1.75)",
        "c:(2,2.25),(2.5,2.75)"
    ]
    assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
    assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
    assert m.create_complex_array(0).dtype == dtype
def test_signature(doc):
    """Registered struct dtypes appear by name in generated signatures."""
    expected = "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
    assert doc(m.create_rec_nested) == expected
def test_scalar_conversion():
    """np.void record scalars convert only to their own struct type."""
    n = 3
    arrays = [m.create_rec_simple(n), m.create_rec_packed(n),
              m.create_rec_nested(n), m.create_enum_array(n)]
    funcs = [m.f_simple, m.f_packed, m.f_nested]
    for i, func in enumerate(funcs):
        for j, arr in enumerate(arrays):
            if i == j and i < 2:
                # Only simple (i=0) and packed (i=1) support scalar conversion;
                # each record k maps to k * 10.
                assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
            else:
                # Every other pairing (including nested with itself) must fail.
                with pytest.raises(TypeError) as excinfo:
                    func(arr[0])
                assert 'incompatible function arguments' in str(excinfo.value)
def test_vectorize():
    """Vectorized functions operate elementwise on struct record arrays."""
    count = 3
    records = m.create_rec_simple(count)
    np.testing.assert_array_equal(m.f_simple_vectorized(records), [0, 10, 20])
    round_tripped = m.f_simple_pass_thru_vectorized(records)
    np.testing.assert_array_equal(records, round_tripped)
def test_cls_and_dtype_conversion(simple_dtype):
    """Conversion rules between the bound SimpleStruct class, record arrays,
    and the various numpy scalar flavours."""
    s = m.SimpleStruct()
    assert s.astuple() == (False, 0, 0., 0.)
    assert m.SimpleStruct.fromtuple(s.astuple()).astuple() == s.astuple()
    s.uint_ = 2
    assert m.f_simple(s) == 20
    # Try as recarray of shape==(1,).
    s_recarray = np.array([(False, 2, 0., 0.)], dtype=simple_dtype)
    # Show that this will work for vectorized case.
    np.testing.assert_array_equal(m.f_simple_vectorized(s_recarray), [20])
    # Show as a scalar that inherits from np.generic.
    s_scalar = s_recarray[0]
    assert isinstance(s_scalar, np.void)
    assert m.f_simple(s_scalar) == 20
    # Show that an *array* scalar (np.ndarray.shape == ()) does not convert.
    # More specifically, conversion to SimpleStruct is not implicit.
    s_recarray_scalar = s_recarray.reshape(())
    assert isinstance(s_recarray_scalar, np.ndarray)
    assert s_recarray_scalar.dtype == simple_dtype
    with pytest.raises(TypeError) as excinfo:
        m.f_simple(s_recarray_scalar)
    assert 'incompatible function arguments' in str(excinfo.value)
    # Explicitly convert to m.SimpleStruct.
    assert m.f_simple(
        m.SimpleStruct.fromtuple(s_recarray_scalar.item())) == 20
    # Show that an array of dtype=object does *not* convert.
    s_array_object = np.array([s])
    assert s_array_object.dtype == object
    with pytest.raises(TypeError) as excinfo:
        m.f_simple_vectorized(s_array_object)
    assert 'incompatible function arguments' in str(excinfo.value)
    # Explicitly convert to `np.array(..., dtype=simple_dtype)`
    s_array = np.array([s.astuple()], dtype=simple_dtype)
    np.testing.assert_array_equal(m.f_simple_vectorized(s_array), [20])
def test_register_dtype():
    """Registering the same struct dtype twice must fail loudly."""
    with pytest.raises(RuntimeError) as excinfo:
        m.register_dtype()
    message = str(excinfo.value)
    assert 'dtype is already registered' in message
@pytest.mark.xfail("env.PYPY")
def test_str_leak():
    """Constructing a dtype from a str must not leak a reference to the str."""
    from sys import getrefcount
    fmt = "f4"
    pytest.gc_collect()
    # Snapshot the refcount before handing the string to the C++ side.
    start = getrefcount(fmt)
    d = m.dtype_wrapper(fmt)
    assert d is np.dtype("f4")
    del d
    pytest.gc_collect()
    # The refcount must be back to the baseline — no reference retained.
    assert getrefcount(fmt) == start
def test_compare_buffer_info():
    """Every C++-side buffer-info comparison must report success."""
    results = m.compare_buffer_info()
    assert all(results)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_chrono.py | third_party/unitree_legged_sdk/pybind11/tests/test_chrono.py | # -*- coding: utf-8 -*-
from pybind11_tests import chrono as m
import datetime
import pytest
import env # noqa: F401
def test_chrono_system_clock():
    """The C++ clock (test_chrono1) must agree with datetime.today() to
    within the measured call window."""
    # Get the time from both c++ and datetime
    date0 = datetime.datetime.today()
    date1 = m.test_chrono1()
    date2 = datetime.datetime.today()
    # The returned value should be a datetime
    assert isinstance(date1, datetime.datetime)
    # The numbers should vary by a very small amount (time it took to execute)
    diff_python = abs(date2 - date0)
    diff = abs(date1 - date2)
    # There should never be a days difference
    assert diff.days == 0
    # Since datetime.datetime.today() calls time.time(), and on some platforms
    # that has 1 second accuracy, we compare this way
    assert diff.seconds <= diff_python.seconds
def test_chrono_system_clock_roundtrip():
    """A datetime passed through C++ must come back without any loss."""
    original = datetime.datetime.today()
    returned = m.test_chrono2(original)
    assert isinstance(returned, datetime.datetime)
    # A zero difference in every component means nothing was truncated.
    delta = abs(original - returned)
    assert (delta.days, delta.seconds, delta.microseconds) == (0, 0, 0)
def test_chrono_system_clock_roundtrip_date():
    """A bare date round-trips through C++ as midnight of that same day."""
    date1 = datetime.date.today()
    # Roundtrip the time
    datetime2 = m.test_chrono2(date1)
    date2 = datetime2.date()
    time2 = datetime2.time()
    # The returned value should be a datetime
    assert isinstance(datetime2, datetime.datetime)
    assert isinstance(date2, datetime.date)
    assert isinstance(time2, datetime.time)
    # They should be identical (no information lost on roundtrip)
    diff = abs(date1 - date2)
    assert diff.days == 0
    assert diff.seconds == 0
    assert diff.microseconds == 0
    # Year, Month & Day should be the same after the round trip
    assert date1.year == date2.year
    assert date1.month == date2.month
    assert date1.day == date2.day
    # There should be no time information
    assert time2.hour == 0
    assert time2.minute == 0
    assert time2.second == 0
    assert time2.microsecond == 0
# Changing local time through the TZ environment variable is a POSIX
# mechanism, so timezone-parametrized cases are skipped on Windows.
SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif(
    "env.WIN", reason="TZ environment variable only supported on POSIX"
)
@pytest.mark.parametrize("time1", [
    datetime.datetime.today().time(),
    datetime.time(0, 0, 0),
    datetime.time(0, 0, 0, 1),
    datetime.time(0, 28, 45, 109827),
    datetime.time(0, 59, 59, 999999),
    datetime.time(1, 0, 0),
    datetime.time(5, 59, 59, 0),
    datetime.time(5, 59, 59, 1),
])
@pytest.mark.parametrize("tz", [
    None,
    pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN),
    pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN),
    pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN),
])
def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch):
    """A bare time round-trips through C++ with the date pinned to 1970-01-01,
    regardless of the active timezone."""
    if tz is not None:
        monkeypatch.setenv("TZ", "/usr/share/zoneinfo/{}".format(tz))
    # Roundtrip the time
    datetime2 = m.test_chrono2(time1)
    date2 = datetime2.date()
    time2 = datetime2.time()
    # The returned value should be a datetime
    assert isinstance(datetime2, datetime.datetime)
    assert isinstance(date2, datetime.date)
    assert isinstance(time2, datetime.time)
    # Hour, Minute, Second & Microsecond should be the same after the round trip
    assert time1.hour == time2.hour
    assert time1.minute == time2.minute
    assert time1.second == time2.second
    assert time1.microsecond == time2.microsecond
    # There should be no date information (i.e. date = python base date)
    assert date2.year == 1970
    assert date2.month == 1
    assert date2.day == 1
def test_chrono_duration_roundtrip():
    """A timedelta passed through C++ must come back component-for-component."""
    start = datetime.datetime.today()
    stop = datetime.datetime.today()
    delta = stop - start
    # Subtracting datetimes yields a timedelta on the Python side.
    assert isinstance(delta, datetime.timedelta)
    returned = m.test_chrono3(delta)
    assert (returned.days, returned.seconds, returned.microseconds) == \
        (delta.days, delta.seconds, delta.microseconds)
def test_chrono_duration_subtraction_equivalence():
    """Subtracting two datetimes in C++ must match Python's own subtraction."""
    earlier = datetime.datetime.today()
    later = datetime.datetime.today()
    py_diff = later - earlier
    cpp_diff = m.test_chrono4(later, earlier)
    assert (cpp_diff.days, cpp_diff.seconds, cpp_diff.microseconds) == \
        (py_diff.days, py_diff.seconds, py_diff.microseconds)
def test_chrono_duration_subtraction_equivalence_date():
    """Subtracting two dates in C++ must match Python's own subtraction."""
    earlier = datetime.date.today()
    later = datetime.date.today()
    py_diff = later - earlier
    cpp_diff = m.test_chrono4(later, earlier)
    assert (cpp_diff.days, cpp_diff.seconds, cpp_diff.microseconds) == \
        (py_diff.days, py_diff.seconds, py_diff.microseconds)
def test_chrono_steady_clock():
    """A steady-clock time point is exposed to Python as a timedelta."""
    assert isinstance(m.test_chrono5(), datetime.timedelta)
def test_chrono_steady_clock_roundtrip():
    """A steady-clock duration must survive the C++ round trip intact."""
    sent = datetime.timedelta(days=10, seconds=10, microseconds=100)
    received = m.test_chrono6(sent)
    assert isinstance(received, datetime.timedelta)
    # No component may change on the round trip.
    assert (received.days, received.seconds, received.microseconds) == \
        (sent.days, sent.seconds, sent.microseconds)
def test_floating_point_duration():
    """Durations given as float seconds convert with at most one microsecond of rounding."""
    span = m.test_chrono7(35.525123)
    assert isinstance(span, datetime.timedelta)
    assert span.seconds == 35
    # Floating-point conversion may land on either adjacent microsecond.
    assert 525122 <= span.microseconds <= 525123
    gap = m.test_chrono_float_diff(43.789012, 1.123456)
    assert gap.seconds == 42
    assert 665556 <= gap.microseconds <= 665557
def test_nano_timepoint():
    """Adding a duration to a nanosecond time point matches Python datetime arithmetic."""
    now = datetime.datetime.now()
    one_minute = datetime.timedelta(seconds=60)
    assert m.test_nano_timepoint(now, one_minute) == now + one_minute
def test_chrono_different_resolutions():
    """Timestamp fields of every supported resolution accept a Python datetime."""
    holder = m.different_resolutions()
    now = datetime.datetime.now()
    # Assign in the same order as the original: hours down to microseconds.
    for field in ("timestamp_h", "timestamp_m", "timestamp_s",
                  "timestamp_ms", "timestamp_us"):
        setattr(holder, field, now)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_tagbased_polymorphic.py | third_party/unitree_legged_sdk/pybind11/tests/test_tagbased_polymorphic.py | # -*- coding: utf-8 -*-
from pybind11_tests import tagbased_polymorphic as m
def test_downcast():
    """Animals returned from C++ are downcast to their most-derived bound type."""
    zoo = m.create_zoo()
    expected_types = [m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther]
    assert [type(animal) for animal in zoo] == expected_types
    expected_names = ["Fido", "Ginger", "Hertzl", "Tiger", "Leo"]
    assert [animal.name for animal in zoo] == expected_names
    # An attribute set from Python is visible through the bound bark() method.
    zoo[1].sound = "woooooo"
    barks = [dog.bark() for dog in zoo[:3]]
    assert barks == [
        "Labrador Fido goes WOOF!",
        "Dog Ginger goes woooooo",
        "Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles",
    ]
    purrs = [cat.purr() for cat in zoo[3:]]
    assert purrs == ["mrowr", "mrrrRRRRRR"]
    # In-place mutation of a numeric attribute round-trips too.
    zoo[0].excitement -= 1000
    assert zoo[0].excitement == 14000
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_copy_move.py | third_party/unitree_legged_sdk/pybind11/tests/test_copy_move.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import copy_move_policies as m
def test_lacking_copy_ctor():
    """Returning a non-copyable type by value raises RuntimeError."""
    with pytest.raises(RuntimeError) as err:
        m.lacking_copy_ctor.get_one()
    assert "is non-copyable!" in str(err.value)
def test_lacking_move_ctor():
    """Returning a type that is neither movable nor copyable raises RuntimeError."""
    with pytest.raises(RuntimeError) as err:
        m.lacking_move_ctor.get_one()
    assert "is neither movable nor copyable!" in str(err.value)
def test_move_and_copy_casts():
    """Cast some values in C++ via custom type casters and count the number of moves/copies."""
    cstats = m.move_and_copy_cstats()
    # c_m / c_mc / c_c: stats for the move-only, move-or-copy, and copy-only helper types.
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    # The type move constructions/assignments below each get incremented: the move assignment comes
    # from the type_caster load; the move construction happens when extracting that via a cast or
    # loading into an argument.
    assert m.move_and_copy_casts(3) == 18
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 2
    assert c_m.move_constructions >= 2
    assert c_mc.alive() == 0
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 2
    assert c_mc.move_constructions >= 2
    assert c_c.alive() == 0
    assert c_c.copy_assignments == 2
    assert c_c.copy_constructions >= 2
    # Nothing may outlive the call.
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_move_and_copy_loads():
    """Call some functions that load arguments via custom type casters and count the number of
    moves/copies."""
    cstats = m.move_and_copy_cstats()
    # c_m / c_mc / c_c: stats for the move-only, move-or-copy, and copy-only helper types.
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    # Expected per-call moves/copies are noted on each line; the totals are asserted below.
    assert m.move_only(10) == 10 # 1 move, c_m
    assert m.move_or_copy(11) == 11 # 1 move, c_mc
    assert m.copy_only(12) == 12 # 1 copy, c_c
    assert m.move_pair((13, 14)) == 27 # 1 c_m move, 1 c_mc move
    assert m.move_tuple((15, 16, 17)) == 48 # 2 c_m moves, 1 c_mc move
    assert m.copy_tuple((18, 19)) == 37 # 2 c_c copies
    # Direct constructions: 2 c_m moves, 2 c_mc moves, 1 c_c copy
    # Extra moves/copies when moving pairs/tuples: 3 c_m, 3 c_mc, 2 c_c
    assert m.move_copy_nested((1, ((2, 3, (4,)), 5))) == 15
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 6
    assert c_m.move_constructions == 9
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 5
    assert c_mc.move_constructions == 8
    assert c_c.copy_assignments == 4
    assert c_c.copy_constructions == 6
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
@pytest.mark.skipif(not m.has_optional, reason='no <optional>')
def test_move_and_copy_load_optional():
    """Tests move/copy loads of std::optional arguments"""
    cstats = m.move_and_copy_cstats()
    # c_m / c_mc / c_c: stats for the move-only, move-or-copy, and copy-only helper types.
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    # The extra move/copy constructions below come from the std::optional move (which has to move
    # its arguments):
    assert m.move_optional(10) == 10 # c_m: 1 move assign, 2 move construct
    assert m.move_or_copy_optional(11) == 11 # c_mc: 1 move assign, 2 move construct
    assert m.copy_optional(12) == 12 # c_c: 1 copy assign, 2 copy construct
    # 1 move assign + move construct moves each of c_m, c_mc, 1 c_c copy
    # +1 move/copy construct each from moving the tuple
    # +1 move/copy construct each from moving the optional (which moves the tuple again)
    assert m.move_optional_tuple((3, 4, 5)) == 12
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 2
    assert c_m.move_constructions == 5
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 2
    assert c_mc.move_constructions == 5
    assert c_c.copy_assignments == 2
    assert c_c.copy_constructions == 5
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_private_op_new():
    """An object with a private `operator new` cannot be returned by value"""
    with pytest.raises(RuntimeError) as err:
        m.private_op_new_value()
    assert "is neither movable nor copyable" in str(err.value)
    # Returning by reference avoids the copy/move entirely and succeeds.
    assert m.private_op_new_reference().value == 1
def test_move_fallback():
    """#389: rvp::move should fall-through to copy on non-movable objects"""
    copied = m.get_moveissue2(2)
    assert copied.value == 2
    moved = m.get_moveissue1(1)
    assert moved.value == 1
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_opaque_types.py | third_party/unitree_legged_sdk/pybind11/tests/test_opaque_types.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import opaque_types as m
from pybind11_tests import ConstructorStats, UserType
def test_string_list():
    """Exercise an opaque std::list<std::string> binding end to end."""
    strings = m.StringList()
    strings.push_back("Element 1")
    strings.push_back("Element 2")
    assert m.print_opaque_list(strings) == "Opaque list: [Element 1, Element 2]"
    assert strings.back() == "Element 2"
    # The opaque list is iterable from Python.
    for index, element in enumerate(strings, start=1):
        assert element == "Element {}".format(index)
    strings.pop_back()
    assert m.print_opaque_list(strings) == "Opaque list: [Element 1]"
    # A class property holding the same opaque type shares the binding.
    holder = m.ClassWithSTLVecProperty()
    assert m.print_opaque_list(holder.stringList) == "Opaque list: []"
    holder.stringList = strings
    holder.stringList.push_back("Element 3")
    assert m.print_opaque_list(holder.stringList) == "Opaque list: [Element 1, Element 3]"
def test_pointers(msg):
    """void* values round trip as capsules; a null char* round trips as None."""
    living_before = ConstructorStats.get(UserType).alive()
    assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234
    assert m.get_void_ptr_value(UserType()) # Should also work for other C++ types
    # Passing a temporary through as void* must not retain (or leak) it.
    assert ConstructorStats.get(UserType).alive() == living_before
    with pytest.raises(TypeError) as excinfo:
        m.get_void_ptr_value([1, 2, 3]) # This should not work
    assert msg(excinfo.value) == """
        get_void_ptr_value(): incompatible function arguments. The following argument types are supported:
            1. (arg0: capsule) -> int
        Invoked with: [1, 2, 3]
    """ # noqa: E501 line too long
    assert m.return_null_str() is None
    assert m.get_null_str_value(m.return_null_str()) is not None
    ptr = m.return_unique_ptr()
    assert "StringList" in repr(ptr)
    assert m.print_opaque_list(ptr) == "Opaque list: [some value]"
def test_unions():
    """Reads and writes on a bound C++ union go through the same storage."""
    value = m.IntFloat()
    value.i = 42
    assert value.i == 42
    value.f = 3.0
    assert value.f == 3.0
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_factory_constructors.py | third_party/unitree_legged_sdk/pybind11/tests/test_factory_constructors.py | # -*- coding: utf-8 -*-
import pytest
import re
import env # noqa: F401
from pybind11_tests import factory_constructors as m
from pybind11_tests.factory_constructors import tag
from pybind11_tests import ConstructorStats
def test_init_factory_basic():
    """Tests py::init_factory() wrapper around various ways of returning the object"""
    cstats = [ConstructorStats.get(c) for c in [m.TestFactory1, m.TestFactory2, m.TestFactory3]]
    cstats[0].alive() # force gc
    n_inst = ConstructorStats.detail_reg_inst()
    # TestFactory1: unique_ptr factory, raw-pointer factory, and by-value factory.
    x1 = m.TestFactory1(tag.unique_ptr, 3)
    assert x1.value == "3"
    y1 = m.TestFactory1(tag.pointer)
    assert y1.value == "(empty)"
    z1 = m.TestFactory1("hi!")
    assert z1.value == "hi!"
    assert ConstructorStats.detail_reg_inst() == n_inst + 3
    x2 = m.TestFactory2(tag.move)
    assert x2.value == "(empty2)"
    y2 = m.TestFactory2(tag.pointer, 7)
    assert y2.value == "7"
    z2 = m.TestFactory2(tag.unique_ptr, "hi again")
    assert z2.value == "hi again"
    assert ConstructorStats.detail_reg_inst() == n_inst + 6
    x3 = m.TestFactory3(tag.shared_ptr)
    assert x3.value == "(empty3)"
    y3 = m.TestFactory3(tag.pointer, 42)
    assert y3.value == "42"
    z3 = m.TestFactory3("bye")
    assert z3.value == "bye"
    # A factory returning a null pointer (of any flavor) must raise TypeError.
    for null_ptr_kind in [tag.null_ptr,
                          tag.null_unique_ptr,
                          tag.null_shared_ptr]:
        with pytest.raises(TypeError) as excinfo:
            m.TestFactory3(null_ptr_kind)
        assert str(excinfo.value) == "pybind11::init(): factory function returned nullptr"
    assert [i.alive() for i in cstats] == [3, 3, 3]
    assert ConstructorStats.detail_reg_inst() == n_inst + 9
    # Deleting references must decrement the live counts accordingly.
    del x1, y2, y3, z3
    assert [i.alive() for i in cstats] == [2, 2, 1]
    assert ConstructorStats.detail_reg_inst() == n_inst + 5
    del x2, x3, y1, z1, z2
    assert [i.alive() for i in cstats] == [0, 0, 0]
    assert ConstructorStats.detail_reg_inst() == n_inst
    assert [i.values() for i in cstats] == [
        ["3", "hi!"],
        ["7", "hi again"],
        ["42", "bye"]
    ]
    assert [i.default_constructions for i in cstats] == [1, 1, 1]
def test_init_factory_signature(msg):
    """Error messages and the generated __init__ docstring list every factory overload."""
    with pytest.raises(TypeError) as excinfo:
        m.TestFactory1("invalid", "constructor", "arguments")
    assert msg(excinfo.value) == """
        __init__(): incompatible constructor arguments. The following argument types are supported:
            1. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int)
            2. m.factory_constructors.TestFactory1(arg0: str)
            3. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.pointer_tag)
            4. m.factory_constructors.TestFactory1(arg0: handle, arg1: int, arg2: handle)
        Invoked with: 'invalid', 'constructor', 'arguments'
    """ # noqa: E501 line too long
    assert msg(m.TestFactory1.__init__.__doc__) == """
        __init__(*args, **kwargs)
        Overloaded function.
        1. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int) -> None
        2. __init__(self: m.factory_constructors.TestFactory1, arg0: str) -> None
        3. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.pointer_tag) -> None
        4. __init__(self: m.factory_constructors.TestFactory1, arg0: handle, arg1: int, arg2: handle) -> None
    """ # noqa: E501 line too long
def test_init_factory_casting():
    """Tests py::init_factory() wrapper with various upcasting and downcasting returns"""
    cstats = [ConstructorStats.get(c) for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]]
    cstats[0].alive() # force gc
    n_inst = ConstructorStats.detail_reg_inst()
    # Construction from derived references:
    a = m.TestFactory3(tag.pointer, tag.TF4, 4)
    assert a.value == "4"
    b = m.TestFactory3(tag.shared_ptr, tag.TF4, 5)
    assert b.value == "5"
    c = m.TestFactory3(tag.pointer, tag.TF5, 6)
    assert c.value == "6"
    d = m.TestFactory3(tag.shared_ptr, tag.TF5, 7)
    assert d.value == "7"
    assert ConstructorStats.detail_reg_inst() == n_inst + 4
    # Shared a lambda with TF3:
    e = m.TestFactory4(tag.pointer, tag.TF4, 8)
    assert e.value == "8"
    assert ConstructorStats.detail_reg_inst() == n_inst + 5
    assert [i.alive() for i in cstats] == [5, 3, 2]
    # Deleting objects must decrement the live counts of every involved type.
    del a
    assert [i.alive() for i in cstats] == [4, 2, 2]
    assert ConstructorStats.detail_reg_inst() == n_inst + 4
    del b, c, e
    assert [i.alive() for i in cstats] == [1, 0, 1]
    assert ConstructorStats.detail_reg_inst() == n_inst + 1
    del d
    assert [i.alive() for i in cstats] == [0, 0, 0]
    assert ConstructorStats.detail_reg_inst() == n_inst
    assert [i.values() for i in cstats] == [
        ["4", "5", "6", "7", "8"],
        ["4", "5", "8"],
        ["6", "7"]
    ]
def test_init_factory_alias():
    """Tests py::init_factory() wrapper with value conversions and alias types"""
    cstats = [m.TestFactory6.get_cstats(), m.TestFactory6.get_alias_cstats()]
    cstats[0].alive() # force gc
    n_inst = ConstructorStats.detail_reg_inst()
    a = m.TestFactory6(tag.base, 1)
    assert a.get() == 1
    assert not a.has_alias()
    b = m.TestFactory6(tag.alias, "hi there")
    assert b.get() == 8
    assert b.has_alias()
    c = m.TestFactory6(tag.alias, 3)
    assert c.get() == 3
    assert c.has_alias()
    d = m.TestFactory6(tag.alias, tag.pointer, 4)
    assert d.get() == 4
    assert d.has_alias()
    e = m.TestFactory6(tag.base, tag.pointer, 5)
    assert e.get() == 5
    assert not e.has_alias()
    f = m.TestFactory6(tag.base, tag.alias, tag.pointer, 6)
    assert f.get() == 6
    assert f.has_alias()
    assert ConstructorStats.detail_reg_inst() == n_inst + 6
    assert [i.alive() for i in cstats] == [6, 4]
    del a, b, e
    assert [i.alive() for i in cstats] == [3, 3]
    assert ConstructorStats.detail_reg_inst() == n_inst + 3
    del f, c, d
    assert [i.alive() for i in cstats] == [0, 0]
    assert ConstructorStats.detail_reg_inst() == n_inst
    # Instances of a Python subclass use the alias type (see has_alias() asserts below).
    class MyTest(m.TestFactory6):
        def __init__(self, *args):
            m.TestFactory6.__init__(self, *args)
        def get(self):
            return -5 + m.TestFactory6.get(self)
    # Return Class by value, moved into new alias:
    z = MyTest(tag.base, 123)
    assert z.get() == 118
    assert z.has_alias()
    # Return alias by value, moved into new alias:
    y = MyTest(tag.alias, "why hello!")
    assert y.get() == 5
    assert y.has_alias()
    # Return Class by pointer, moved into new alias then original destroyed:
    x = MyTest(tag.base, tag.pointer, 47)
    assert x.get() == 42
    assert x.has_alias()
    assert ConstructorStats.detail_reg_inst() == n_inst + 3
    assert [i.alive() for i in cstats] == [3, 3]
    del x, y, z
    assert [i.alive() for i in cstats] == [0, 0]
    assert ConstructorStats.detail_reg_inst() == n_inst
    assert [i.values() for i in cstats] == [
        ["1", "8", "3", "4", "5", "6", "123", "10", "47"],
        ["hi there", "3", "4", "6", "move", "123", "why hello!", "move", "47"]
    ]
def test_init_factory_dual():
    """Tests init factory functions with dual main/alias factory functions"""
    from pybind11_tests.factory_constructors import TestFactory7
    cstats = [TestFactory7.get_cstats(), TestFactory7.get_alias_cstats()]
    cstats[0].alive() # force gc
    n_inst = ConstructorStats.detail_reg_inst()
    # Python subclass: its instances report has_alias() below, plain ones do not.
    class PythFactory7(TestFactory7):
        def get(self):
            return 100 + TestFactory7.get(self)
    a1 = TestFactory7(1)
    a2 = PythFactory7(2)
    assert a1.get() == 1
    assert a2.get() == 102
    assert not a1.has_alias()
    assert a2.has_alias()
    b1 = TestFactory7(tag.pointer, 3)
    b2 = PythFactory7(tag.pointer, 4)
    assert b1.get() == 3
    assert b2.get() == 104
    assert not b1.has_alias()
    assert b2.has_alias()
    c1 = TestFactory7(tag.mixed, 5)
    c2 = PythFactory7(tag.mixed, 6)
    assert c1.get() == 5
    assert c2.get() == 106
    assert not c1.has_alias()
    assert c2.has_alias()
    d1 = TestFactory7(tag.base, tag.pointer, 7)
    d2 = PythFactory7(tag.base, tag.pointer, 8)
    assert d1.get() == 7
    assert d2.get() == 108
    assert not d1.has_alias()
    assert d2.has_alias()
    # Both return an alias; the second multiplies the value by 10:
    e1 = TestFactory7(tag.alias, tag.pointer, 9)
    e2 = PythFactory7(tag.alias, tag.pointer, 10)
    assert e1.get() == 9
    assert e2.get() == 200
    assert e1.has_alias()
    assert e2.has_alias()
    f1 = TestFactory7(tag.shared_ptr, tag.base, 11)
    f2 = PythFactory7(tag.shared_ptr, tag.base, 12)
    assert f1.get() == 11
    assert f2.get() == 112
    assert not f1.has_alias()
    assert f2.has_alias()
    g1 = TestFactory7(tag.shared_ptr, tag.invalid_base, 13)
    assert g1.get() == 13
    assert not g1.has_alias()
    # A subclass requiring the alias must reject a holder that isn't an alias instance.
    with pytest.raises(TypeError) as excinfo:
        PythFactory7(tag.shared_ptr, tag.invalid_base, 14)
    assert (str(excinfo.value) ==
            "pybind11::init(): construction failed: returned holder-wrapped instance is not an "
            "alias instance")
    assert [i.alive() for i in cstats] == [13, 7]
    assert ConstructorStats.detail_reg_inst() == n_inst + 13
    del a1, a2, b1, d1, e1, e2
    assert [i.alive() for i in cstats] == [7, 4]
    assert ConstructorStats.detail_reg_inst() == n_inst + 7
    del b2, c1, c2, d2, f1, f2, g1
    assert [i.alive() for i in cstats] == [0, 0]
    assert ConstructorStats.detail_reg_inst() == n_inst
    assert [i.values() for i in cstats] == [
        ["1", "2", "3", "4", "5", "6", "7", "8", "9", "100", "11", "12", "13", "14"],
        ["2", "4", "6", "8", "9", "100", "12"]
    ]
def test_no_placement_new(capture):
    """Prior to 2.2, `py::init<...>` relied on the type supporting placement
    new; this tests a class without placement new support."""
    with capture:
        a = m.NoPlacementNew(123)
    # Grab the pointer value printed by the class-specific operator new.
    found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
    assert found
    assert a.i == 123
    with capture:
        del a
        pytest.gc_collect()
    # The matching operator delete must report the same pointer.
    assert capture == "operator delete called on " + found.group(1)
    # The default-constructor path goes through the same operator new/delete.
    with capture:
        b = m.NoPlacementNew()
    found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
    assert found
    assert b.i == 100
    with capture:
        del b
        pytest.gc_collect()
    assert capture == "operator delete called on " + found.group(1)
def test_multiple_inheritance():
    """A Python class may derive from two factory-constructed C++ bases."""
    class Combined(m.TestFactory1, m.TestFactory2):
        def __init__(self):
            m.TestFactory1.__init__(self, tag.unique_ptr, 33)
            m.TestFactory2.__init__(self, tag.move)
    instance = Combined()
    # Read each base's `value` property explicitly, since both bases define one.
    assert m.TestFactory1.value.fget(instance) == "33"
    assert m.TestFactory2.value.fget(instance) == "(empty2)"
def create_and_destroy(*args):
    """Construct a NoisyAlloc from *args, print a separator, then destroy it.

    Used by the test_reallocation_* tests below to bracket the allocation
    output ("---" separates construction from destruction messages).
    """
    a = m.NoisyAlloc(*args)
    print("---")
    del a
    pytest.gc_collect()
def strip_comments(s):
    """Remove trailing ``# ...`` comments (and the whitespace before them) from every line."""
    without_comments = re.sub(r'\s+#.*', '', s)
    return without_comments
def test_reallocation_a(capture, msg):
    """When the constructor is overloaded, previous overloads can require a preallocated value.
    This test makes sure that such preallocated values only happen when they might be necessary,
    and that they are deallocated properly."""
    # Collect any pending garbage so earlier objects aren't finalized inside the capture.
    pytest.gc_collect()
    with capture:
        create_and_destroy(1)
    assert msg(capture) == """
        noisy new
        noisy placement new
        NoisyAlloc(int 1)
        ---
        ~NoisyAlloc()
        noisy delete
    """
def test_reallocation_b(capture, msg):
    """A float argument discards the first overload's preallocation, then uses the pointer factory."""
    with capture:
        create_and_destroy(1.5)
    assert msg(capture) == strip_comments("""
        noisy new # allocation required to attempt first overload
        noisy delete # have to dealloc before considering factory init overload
        noisy new # pointer factory calling "new", part 1: allocation
        NoisyAlloc(double 1.5) # ... part two, invoking constructor
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
def test_reallocation_c(capture, msg):
    """A two-int call goes straight to the pointer factory with a single allocation."""
    with capture:
        create_and_destroy(2, 3)
    assert msg(capture) == strip_comments("""
        noisy new # pointer factory calling "new", allocation
        NoisyAlloc(int 2) # constructor
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
def test_reallocation_d(capture, msg):
    """A (double, int) call uses the by-value factory: construct locally, then move into new storage."""
    with capture:
        create_and_destroy(2.5, 3)
    assert msg(capture) == strip_comments("""
        NoisyAlloc(double 2.5) # construction (local func variable: operator_new not called)
        noisy new # return-by-value "new" part 1: allocation
        ~NoisyAlloc() # moved-away local func variable destruction
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
def test_reallocation_e(capture, msg):
    """A (double, double) call resolves to the placement-new constructor after preallocation."""
    with capture:
        create_and_destroy(3.5, 4.5)
    assert msg(capture) == strip_comments("""
        noisy new # preallocation needed before invoking placement-new overload
        noisy placement new # Placement new
        NoisyAlloc(double 3.5) # construction
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
def test_reallocation_f(capture, msg):
    """An (int, double) call discards the placement-new preallocation and uses the pointer factory."""
    with capture:
        create_and_destroy(4, 0.5)
    assert msg(capture) == strip_comments("""
        noisy new # preallocation needed before invoking placement-new overload
        noisy delete # deallocation of preallocated storage
        noisy new # Factory pointer allocation
        NoisyAlloc(int 4) # factory pointer construction
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
def test_reallocation_g(capture, msg):
    """An (int, str) call tries one placement-new overload, then another, before constructing."""
    with capture:
        create_and_destroy(5, "hi")
    assert msg(capture) == strip_comments("""
        noisy new # preallocation needed before invoking first placement new
        noisy delete # delete before considering new-style constructor
        noisy new # preallocation for second placement new
        noisy placement new # Placement new in the second placement new overload
        NoisyAlloc(int 5) # construction
        ---
        ~NoisyAlloc() # Destructor
        noisy delete # operator delete
    """)
@pytest.mark.skipif("env.PY2")
def test_invalid_self():
    """Tests invocation of the pybind-registered base class with an invalid `self` argument. You
    can only actually do this on Python 3: Python 2 raises an exception itself if you try."""
    class NotPybindDerived(object):
        pass
    # Attempts to initialize with an invalid type passed as `self`:
    class BrokenTF1(m.TestFactory1):
        def __init__(self, bad):
            if bad == 1:
                a = m.TestFactory2(tag.pointer, 1)
                m.TestFactory1.__init__(a, tag.pointer)
            elif bad == 2:
                a = NotPybindDerived()
                m.TestFactory1.__init__(a, tag.pointer)
    # Same as above, but for a class with an alias:
    class BrokenTF6(m.TestFactory6):
        def __init__(self, bad):
            if bad == 1:
                a = m.TestFactory2(tag.pointer, 1)
                m.TestFactory6.__init__(a, tag.base, 1)
            elif bad == 2:
                a = m.TestFactory2(tag.pointer, 1)
                m.TestFactory6.__init__(a, tag.alias, 1)
            elif bad == 3:
                m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.base, 1)
            elif bad == 4:
                m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1)
    # Every misuse above must raise the same TypeError.
    for arg in (1, 2):
        with pytest.raises(TypeError) as excinfo:
            BrokenTF1(arg)
        assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
    for arg in (1, 2, 3, 4):
        with pytest.raises(TypeError) as excinfo:
            BrokenTF6(arg)
        assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_eval.py | third_party/unitree_legged_sdk/pybind11/tests/test_eval.py | # -*- coding: utf-8 -*-
import os
import pytest
import env # noqa: F401
from pybind11_tests import eval_ as m
def test_evals(capture):
    """Run the C++-side py::eval / py::exec tests; the exec case prints to stdout."""
    with capture:
        assert m.test_eval_statements()
    assert capture == "Hello World!"
    assert m.test_eval()
    assert m.test_eval_single_statement()
    assert m.test_eval_failure()
@pytest.mark.xfail("env.PYPY and not env.PY2", raises=RuntimeError)
def test_eval_file():
    """py::eval_file runs a script from disk; a missing file must report failure."""
    script_path = os.path.join(os.path.dirname(__file__), "test_eval_call.py")
    assert m.test_eval_file(script_path)
    assert m.test_eval_file_failure()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_class.py | third_party/unitree_legged_sdk/pybind11/tests/test_class.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import class_ as m
from pybind11_tests import UserType, ConstructorStats
def test_repr():
    """repr() of bound types shows the pybind metaclass and the class name."""
    # In Python 3.3+, repr() accesses __qualname__.
    metaclass_repr = repr(type(UserType))
    assert "pybind11_type" in metaclass_repr
    assert "UserType" in repr(UserType)
def test_instance(msg):
    """Types bound without a constructor cannot be instantiated from Python."""
    with pytest.raises(TypeError) as err:
        m.NoConstructor()
    assert msg(err.value) == "m.class_.NoConstructor: No constructor defined!"
    # A C++-side factory can still create instances.
    obj = m.NoConstructor.new_instance()
    stats = ConstructorStats.get(m.NoConstructor)
    assert stats.alive() == 1
    del obj
    assert stats.alive() == 0
def test_type():
    """check_type() maps a registered C++ type to its Python class and rejects unknown ones."""
    assert m.check_type(1) == m.DerivedClass1
    with pytest.raises(RuntimeError) as err:
        m.check_type(0)
    message = str(err.value)
    assert 'pybind11::detail::get_type_info: unable to find type info' in message
    assert 'Invalid' in message
    # Currently not supported
    # See https://github.com/pybind/pybind11/issues/2486
    # assert m.check_type(2) == int
def test_type_of_py():
    """get_type_of() mirrors Python's type() for plain objects, bound instances, and classes."""
    for obj, expected in ((1, int), (m.DerivedClass1(), m.DerivedClass1), (int, type)):
        assert m.get_type_of(obj) == expected
def test_type_of_classic():
    """get_type_classic() mirrors Python's type() for plain objects, bound instances, and classes."""
    for obj, expected in ((1, int), (m.DerivedClass1(), m.DerivedClass1), (int, type)):
        assert m.get_type_classic(obj) == expected
def test_type_of_py_nodelete():
    """Regression check: the preceding test must not have deleted the bound class."""
    # If the above test deleted the class, this will segfault
    assert m.get_type_of(m.DerivedClass1()) == m.DerivedClass1
def test_as_type_py():
    """as_type() accepts a type object and raises TypeError for non-type arguments."""
    assert m.as_type(int) == int
    with pytest.raises(TypeError):
        assert m.as_type(1) == int
    with pytest.raises(TypeError):
        assert m.as_type(m.DerivedClass1()) == m.DerivedClass1
def test_docstrings(doc):
    """Bound classes, methods, and properties carry generated names, modules, and docstrings."""
    assert doc(UserType) == "A `py::class_` type for testing"
    assert UserType.__name__ == "UserType"
    assert UserType.__module__ == "pybind11_tests"
    assert UserType.get_value.__name__ == "get_value"
    assert UserType.get_value.__module__ == "pybind11_tests"
    assert doc(UserType.get_value) == """
        get_value(self: m.UserType) -> int
        Get value using a method
    """
    assert doc(UserType.value) == "Get/set value using a property"
    assert doc(m.NoConstructor.new_instance) == """
        new_instance() -> m.class_.NoConstructor
        Return an instance
    """
def test_qualname(doc):
    """Tests that a properly qualified name is set in __qualname__ (even in pre-3.3, where we
    backport the attribute) and that generated docstrings properly use it and the module name"""
    assert m.NestBase.__qualname__ == "NestBase"
    assert m.NestBase.Nested.__qualname__ == "NestBase.Nested"
    # Generated signatures embed the fully qualified (dotted) nested names.
    assert doc(m.NestBase.__init__) == """
        __init__(self: m.class_.NestBase) -> None
    """
    assert doc(m.NestBase.g) == """
        g(self: m.class_.NestBase, arg0: m.class_.NestBase.Nested) -> None
    """
    assert doc(m.NestBase.Nested.__init__) == """
        __init__(self: m.class_.NestBase.Nested) -> None
    """
    assert doc(m.NestBase.Nested.fn) == """
        fn(self: m.class_.NestBase.Nested, arg0: int, arg1: m.class_.NestBase, arg2: m.class_.NestBase.Nested) -> None
    """ # noqa: E501 line too long
    assert doc(m.NestBase.Nested.fa) == """
        fa(self: m.class_.NestBase.Nested, a: int, b: m.class_.NestBase, c: m.class_.NestBase.Nested) -> None
    """ # noqa: E501 line too long
    assert m.NestBase.__module__ == "pybind11_tests.class_"
    assert m.NestBase.Nested.__module__ == "pybind11_tests.class_"
def test_inheritance(msg):
    """Derived pets report derived species; derived-only functions reject base instances."""
    roger = m.Rabbit('Rabbit')
    assert roger.name() + " is a " + roger.species() == "Rabbit is a parrot"
    assert m.pet_name_species(roger) == "Rabbit is a parrot"
    polly = m.Pet('Polly', 'parrot')
    assert polly.name() + " is a " + polly.species() == "Polly is a parrot"
    assert m.pet_name_species(polly) == "Polly is a parrot"
    molly = m.Dog('Molly')
    assert molly.name() + " is a " + molly.species() == "Molly is a dog"
    assert m.pet_name_species(molly) == "Molly is a dog"
    fred = m.Hamster('Fred')
    assert fred.name() + " is a " + fred.species() == "Fred is a rodent"
    assert m.dog_bark(molly) == "Woof!"
    # A Pet is not a Dog, so dog_bark must reject it with a clear message.
    with pytest.raises(TypeError) as excinfo:
        m.dog_bark(polly)
    assert msg(excinfo.value) == """
        dog_bark(): incompatible function arguments. The following argument types are supported:
            1. (arg0: m.class_.Dog) -> str
        Invoked with: <m.class_.Pet object at 0>
    """
    with pytest.raises(TypeError) as excinfo:
        m.Chimera("lion", "goat")
    assert "No constructor defined!" in str(excinfo.value)
def test_inheritance_init(msg):
    """Overriding __init__ without calling every C++ base __init__ raises TypeError."""
    # Single base
    class Python(m.Pet):
        def __init__(self):
            pass
    with pytest.raises(TypeError) as exc_info:
        Python()
    expected = "m.class_.Pet.__init__() must be called when overriding __init__"
    assert msg(exc_info.value) == expected
    # Multiple bases
    class RabbitHamster(m.Rabbit, m.Hamster):
        def __init__(self):
            m.Rabbit.__init__(self, "RabbitHamster")
    # Only one of the two bases is initialized, so the other one complains.
    with pytest.raises(TypeError) as exc_info:
        RabbitHamster()
    expected = "m.class_.Hamster.__init__() must be called when overriding __init__"
    assert msg(exc_info.value) == expected
def test_automatic_upcasting():
    """Returned base pointers are automatically downcast to the most-derived bound type."""
    assert type(m.return_class_1()).__name__ == "DerivedClass1"
    assert type(m.return_class_2()).__name__ == "DerivedClass2"
    assert type(m.return_none()).__name__ == "NoneType"
    # Repeat in a shuffled order to ensure no invalid caching of the result type.
    for index, expected in [(1, "DerivedClass1"), (2, "DerivedClass2"),
                            (0, "BaseClass"), (2, "DerivedClass2"),
                            (2, "DerivedClass2"), (0, "BaseClass"),
                            (1, "DerivedClass1")]:
        assert type(m.return_class_n(index)).__name__ == expected
def test_isinstance():
    """check_instances() runs C++-side py::isinstance checks over a mixed batch."""
    # Two non-Pet objects, one Pet, then four Dogs.
    objects = [(), {}, m.Pet("Polly", "parrot")] + [m.Dog("Molly")] * 4
    assert m.check_instances(objects) == (True, True, True, True, True, False, False)
def test_mismatched_holder():
    """Registering a derived type whose holder differs from its base's must fail."""
    import re

    with pytest.raises(RuntimeError) as excinfo:
        m.mismatched_holder_1()
    pattern = ('generic_type: type ".*MismatchDerived1" does not have a non-default '
               'holder type while its base ".*MismatchBase1" does')
    assert re.match(pattern, str(excinfo.value))

    with pytest.raises(RuntimeError) as excinfo:
        m.mismatched_holder_2()
    pattern = ('generic_type: type ".*MismatchDerived2" has a non-default holder type '
               'while its base ".*MismatchBase2" does not')
    assert re.match(pattern, str(excinfo.value))
def test_override_static():
    """#511: problem with inheritance + overwritten def_static"""
    base_obj = m.MyBase.make()
    derived_from_override = m.MyDerived.make2()
    derived_from_inherited = m.MyDerived.make()

    # The inherited factory must still produce the derived type.
    checks = ((base_obj, m.MyBase),
              (derived_from_override, m.MyDerived),
              (derived_from_inherited, m.MyDerived))
    for obj, expected_cls in checks:
        assert isinstance(obj, expected_cls)
def test_implicit_conversion_life_support():
    """Ensure the lifetime of temporary objects created for implicit conversions"""
    assert m.implicitly_convert_argument(UserType(5)) == 5
    assert m.implicitly_convert_variable(UserType(5)) == 5
    # Outside a bound-function call the temporary is not kept alive, which the
    # binding reports in its returned message.
    assert "outside a bound function" in m.implicitly_convert_variable_fail(UserType(5))
def test_operator_new_delete(capture):
    """Tests that class-specific operator new/delete functions are invoked"""

    class SubAliased(m.AliasedHasOpNewDelSize):
        pass

    # Construction should report the size passed to each custom operator new.
    with capture:
        a = m.HasOpNewDel()
        b = m.HasOpNewDelSize()
        d = m.HasOpNewDelBoth()
    assert capture == """
        A new 8
        B new 4
        D new 32
    """
    sz_alias = str(m.AliasedHasOpNewDelSize.size_alias)
    sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias)
    with capture:
        c = m.AliasedHasOpNewDelSize()
        c2 = SubAliased()
    # The Python subclass allocates the (larger) alias type.
    assert capture == (
        "C new " + sz_noalias + "\n" +
        "C new " + sz_alias + "\n"
    )

    # Destruction should route through the matching custom operator delete.
    with capture:
        del a
        pytest.gc_collect()
        del b
        pytest.gc_collect()
        del d
        pytest.gc_collect()
    assert capture == """
        A delete
        B delete 4
        D delete
    """

    with capture:
        del c
        pytest.gc_collect()
        del c2
        pytest.gc_collect()
    assert capture == (
        "C delete " + sz_noalias + "\n" +
        "C delete " + sz_alias + "\n"
    )
def test_bind_protected_functions():
    """Expose protected member functions to Python using a helper class"""
    a = m.ProtectedA()
    assert a.foo() == 42

    b = m.ProtectedB()
    assert b.foo() == 42

    # A Python subclass can override the exposed protected virtual.
    class C(m.ProtectedB):
        def __init__(self):
            m.ProtectedB.__init__(self)

        def foo(self):
            return 0

    c = C()
    assert c.foo() == 0
def test_brace_initialization():
    """ Tests that simple POD classes can be constructed using C++11 brace initialization """
    pod = m.BraceInitialization(123, "test")
    assert pod.field1 == 123
    assert pod.field2 == "test"

    # Tests that a non-simple class doesn't get brace initialization (if the
    # class defines an initializer_list constructor, in particular, it would
    # win over the expected constructor).
    non_pod = m.NoBraceInitialization([123, 456])
    assert non_pod.vec == [123, 456]
@pytest.mark.xfail("env.PYPY")
def test_class_refcount():
    """Instances must correctly increase/decrease the reference count of their types (#1029)"""
    from sys import getrefcount

    class PyDog(m.Dog):
        pass

    # Check both the bound type and a Python subclass of it.
    for cls in m.Dog, PyDog:
        refcount_1 = getrefcount(cls)
        molly = [cls("Molly") for _ in range(10)]
        refcount_2 = getrefcount(cls)

        del molly
        pytest.gc_collect()
        refcount_3 = getrefcount(cls)

        # Creating instances raises the type's refcount; destroying them
        # restores it exactly.
        assert refcount_1 == refcount_3
        assert refcount_2 > refcount_1
def test_reentrant_implicit_conversion_failure(msg):
    # ensure that there is no runaway reentrant implicit conversion (#1035)
    with pytest.raises(TypeError) as excinfo:
        m.BogusImplicitConversion(0)
    assert msg(excinfo.value) == '''
        __init__(): incompatible constructor arguments. The following argument types are supported:
            1. m.class_.BogusImplicitConversion(arg0: m.class_.BogusImplicitConversion)

        Invoked with: 0
    '''
def test_error_after_conversions():
    """A failed return-value conversion surfaces as a TypeError with a clear prefix."""
    expected_prefix = "Unable to convert function return value to a Python type!"
    with pytest.raises(TypeError) as exc_info:
        m.test_error_after_conversions("hello")
    assert str(exc_info.value).startswith(expected_prefix)
def test_aligned():
    """Over-aligned types must be allocated on their declared boundary
    (only compiled in on platforms that support aligned new)."""
    if not hasattr(m, "Aligned"):
        return
    address = m.Aligned().ptr()
    assert address % 1024 == 0
# https://foss.heptapod.net/pypy/pypy/-/issues/2742
@pytest.mark.xfail("env.PYPY")
def test_final():
    """A type bound with py::is_final() cannot be subclassed in Python."""
    with pytest.raises(TypeError) as exc_info:
        class PyFinalChild(m.IsFinal):
            pass
    assert str(exc_info.value).endswith("is not an acceptable base type")
# https://foss.heptapod.net/pypy/pypy/-/issues/2742
@pytest.mark.xfail("env.PYPY")
def test_non_final_final():
    """py::is_final() works even when the C++ class itself is not `final`."""
    with pytest.raises(TypeError) as exc_info:
        class PyNonFinalFinalChild(m.IsNonFinalFinal):
            pass
    assert str(exc_info.value).endswith("is not an acceptable base type")
# https://github.com/pybind/pybind11/issues/1878
def test_exception_rvalue_abort():
    """Throwing from a method on a temporary must propagate, not abort."""
    with pytest.raises(RuntimeError):
        m.PyPrintDestructor().throw_something()
# https://github.com/pybind/pybind11/issues/1568
def test_multiple_instances_with_same_pointer(capture):
    """Recycled memory addresses must not confuse instance deregistration."""
    n = 100
    instances = [m.SamePointer() for _ in range(n)]
    for i in range(n):
        # We need to reuse the same allocated memory for with a different type,
        # to ensure the bug in `deregister_instance_impl` is detected. Otherwise
        # `Py_TYPE(self) == Py_TYPE(it->second)` will still succeed, even though
        # the `instance` is already deleted.
        instances[i] = m.Empty()
    # No assert: if this does not trigger the error
    #   pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
    # and just completes without crashing, we're good.
# https://github.com/pybind/pybind11/issues/1624
def test_base_and_derived_nested_scope():
    """A nested class defined on the derived type shadows the base's."""
    base, derived = m.BaseWithNested, m.DerivedWithNested
    assert issubclass(derived, base)
    assert base.Nested != derived.Nested
    assert base.Nested.get_name() == "BaseWithNested::Nested"
    assert derived.Nested.get_name() == "DerivedWithNested::Nested"
@pytest.mark.skip("See https://github.com/pybind/pybind11/pull/2564")
def test_register_duplicate_class():
    """Registering two classes under one name, or one C++ type twice, must fail."""
    import types
    module_scope = types.ModuleType("module_scope")

    # Same Python name twice in a module scope.
    with pytest.raises(RuntimeError) as exc_info:
        m.register_duplicate_class_name(module_scope)
    expected = ('generic_type: cannot initialize type "Duplicate": '
                'an object with that name is already defined')
    assert str(exc_info.value) == expected
    # Same C++ type registered twice.
    with pytest.raises(RuntimeError) as exc_info:
        m.register_duplicate_class_type(module_scope)
    expected = 'generic_type: type "YetAnotherDuplicate" is already registered!'
    assert str(exc_info.value) == expected

    class ClassScope:
        pass

    # The same two failure modes, but nested inside a class scope.
    with pytest.raises(RuntimeError) as exc_info:
        m.register_duplicate_nested_class_name(ClassScope)
    expected = ('generic_type: cannot initialize type "DuplicateNested": '
                'an object with that name is already defined')
    assert str(exc_info.value) == expected
    with pytest.raises(RuntimeError) as exc_info:
        m.register_duplicate_nested_class_type(ClassScope)
    expected = 'generic_type: type "YetAnotherDuplicateNested" is already registered!'
    assert str(exc_info.value) == expected
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_pickling.py | third_party/unitree_legged_sdk/pybind11/tests/test_pickling.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import pickling as m
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
@pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"])
def test_roundtrip(cls_name):
    """An instance survives a pickle round trip with all attributes intact."""
    pickleable_cls = getattr(m, cls_name)
    original = pickleable_cls("test_value")
    original.setExtra1(15)
    original.setExtra2(48)

    # The __getstate__/__setstate__ bindings require pickle protocol >= 2.
    restored = pickle.loads(pickle.dumps(original, 2))
    assert (restored.value(), restored.extra1(), restored.extra2()) == \
        (original.value(), original.extra1(), original.extra2())
@pytest.mark.xfail("env.PYPY")
@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"])
def test_roundtrip_with_dict(cls_name):
    """Dynamic (__dict__) attributes also survive a pickle round trip."""
    pickleable_cls = getattr(m, cls_name)
    original = pickleable_cls("test_value")
    original.extra = 15
    original.dynamic = "Attribute"

    restored = pickle.loads(pickle.dumps(original, pickle.HIGHEST_PROTOCOL))
    assert (restored.value, restored.extra, restored.dynamic) == \
        (original.value, original.extra, original.dynamic)
def test_enum_pickle():
    """Bound enum values round-trip through pickle protocol 2."""
    from pybind11_tests import enums as e
    assert pickle.loads(pickle.dumps(e.EOne, 2)) == e.EOne
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_kwargs_and_defaults.py | third_party/unitree_legged_sdk/pybind11/tests/test_kwargs_and_defaults.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import kwargs_and_defaults as m
def test_function_signatures(doc):
    """Named arguments and default values must render in generated docstrings."""
    assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str"
    assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str"
    assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str"
    assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None"
    assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str"
    assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
    assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
    assert doc(m.args_function) == "args_function(*args) -> tuple"
    assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
    # Bound methods include an explicit `self` parameter in the signature.
    assert doc(m.KWClass.foo0) == \
        "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
    assert doc(m.KWClass.foo1) == \
        "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
def test_named_arguments(msg):
    """Calls may mix positional, keyword, and defaulted arguments freely."""
    assert m.kw_func0(5, 10) == "x=5, y=10"

    assert m.kw_func1(5, 10) == "x=5, y=10"
    assert m.kw_func1(5, y=10) == "x=5, y=10"
    assert m.kw_func1(y=10, x=5) == "x=5, y=10"

    assert m.kw_func2() == "x=100, y=200"
    assert m.kw_func2(5) == "x=5, y=200"
    assert m.kw_func2(x=5) == "x=5, y=200"
    assert m.kw_func2(y=10) == "x=100, y=10"
    assert m.kw_func2(5, 10) == "x=5, y=10"
    assert m.kw_func2(x=5, y=10) == "x=5, y=10"

    # An unknown keyword argument must be rejected.
    with pytest.raises(TypeError) as excinfo:
        # noinspection PyArgumentList
        m.kw_func2(x=5, y=10, z=12)
    assert excinfo.match(
        r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')

    assert m.kw_func4() == "{13 17}"
    assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"

    assert m.kw_func_udl(x=5, y=10) == "x=5, y=10"
    assert m.kw_func_udl_z(x=5) == "x=5, y=0"
def test_arg_and_kwargs():
    """py::args and py::kwargs receive exactly what the caller unpacked."""
    positional = ('arg1_value', 'arg2_value', 3)
    assert m.args_function(*positional) == positional

    positional = ('a1', 'a2')
    keyword = {'arg3': 'a3', 'arg4': 4}
    assert m.args_kwargs_function(*positional, **keyword) == (positional, keyword)
def test_mixed_args_and_kwargs(msg):
    """Regular arguments may be combined with py::args / py::kwargs."""
    mpa = m.mixed_plus_args
    mpk = m.mixed_plus_kwargs
    mpak = m.mixed_plus_args_kwargs
    mpakd = m.mixed_plus_args_kwargs_defaults

    assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))
    assert mpa(1, 2.5) == (1, 2.5, ())
    # Missing required positional args are still an error.
    with pytest.raises(TypeError) as excinfo:
        assert mpa(1)
    assert msg(excinfo.value) == """
        mixed_plus_args(): incompatible function arguments. The following argument types are supported:
            1. (arg0: int, arg1: float, *args) -> tuple

        Invoked with: 1
    """  # noqa: E501 line too long
    with pytest.raises(TypeError) as excinfo:
        assert mpa()
    assert msg(excinfo.value) == """
        mixed_plus_args(): incompatible function arguments. The following argument types are supported:
            1. (arg0: int, arg1: float, *args) -> tuple

        Invoked with:
    """  # noqa: E501 line too long

    assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})
    assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
        7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})
    assert mpakd() == (1, 3.14159, (), {})
    assert mpakd(3) == (3, 3.14159, (), {})
    assert mpakd(j=2.71828) == (1, 2.71828, (), {})
    assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})
    assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
        1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})
    # Arguments specified both positionally and via kwargs should fail:
    with pytest.raises(TypeError) as excinfo:
        assert mpakd(1, i=1)
    assert msg(excinfo.value) == """
        mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
            1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple

        Invoked with: 1; kwargs: i=1
    """  # noqa: E501 line too long
    with pytest.raises(TypeError) as excinfo:
        assert mpakd(1, 2, j=1)
    assert msg(excinfo.value) == """
        mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
            1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple

        Invoked with: 1, 2; kwargs: j=1
    """  # noqa: E501 line too long
def test_keyword_only_args(msg):
    """py::kw_only() makes trailing parameters keyword-only."""
    assert m.kw_only_all(i=1, j=2) == (1, 2)
    assert m.kw_only_all(j=1, i=2) == (2, 1)

    with pytest.raises(TypeError) as excinfo:
        assert m.kw_only_all(i=1) == (1,)
    assert "incompatible function arguments" in str(excinfo.value)

    # Keyword-only parameters cannot be given positionally.
    with pytest.raises(TypeError) as excinfo:
        assert m.kw_only_all(1, 2) == (1, 2)
    assert "incompatible function arguments" in str(excinfo.value)

    assert m.kw_only_some(1, k=3, j=2) == (1, 2, 3)

    assert m.kw_only_with_defaults(z=8) == (3, 4, 5, 8)
    assert m.kw_only_with_defaults(2, z=8) == (2, 4, 5, 8)
    assert m.kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
    assert m.kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)

    assert m.kw_only_mixed(1, j=2) == (1, 2)
    assert m.kw_only_mixed(j=2, i=3) == (3, 2)
    assert m.kw_only_mixed(i=2, j=3) == (2, 3)

    assert m.kw_only_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {'extra': 7})
    assert m.kw_only_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {'extra': 6})
    assert m.kw_only_plus_more(2, k=3, extra=4) == (2, -1, 3, {'extra': 4})

    with pytest.raises(TypeError) as excinfo:
        assert m.kw_only_mixed(i=1) == (1,)
    assert "incompatible function arguments" in str(excinfo.value)

    # Binding an unnamed argument after kw_only() is a registration error.
    with pytest.raises(RuntimeError) as excinfo:
        m.register_invalid_kw_only(m)
    assert msg(excinfo.value) == """
        arg(): cannot specify an unnamed argument after an kw_only() annotation
    """
def test_positional_only_args(msg):
    """py::pos_only() makes leading parameters positional-only."""
    assert m.pos_only_all(1, 2) == (1, 2)
    assert m.pos_only_all(2, 1) == (2, 1)

    # Positional-only parameters cannot be passed by keyword.
    with pytest.raises(TypeError) as excinfo:
        m.pos_only_all(i=1, j=2)
    assert "incompatible function arguments" in str(excinfo.value)

    assert m.pos_only_mix(1, 2) == (1, 2)
    assert m.pos_only_mix(2, j=1) == (2, 1)

    with pytest.raises(TypeError) as excinfo:
        m.pos_only_mix(i=1, j=2)
    assert "incompatible function arguments" in str(excinfo.value)

    # pos_only and kw_only can be combined in a single signature.
    assert m.pos_kw_only_mix(1, 2, k=3) == (1, 2, 3)
    assert m.pos_kw_only_mix(1, j=2, k=3) == (1, 2, 3)

    with pytest.raises(TypeError) as excinfo:
        m.pos_kw_only_mix(i=1, j=2, k=3)
    assert "incompatible function arguments" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        m.pos_kw_only_mix(1, 2, 3)
    assert "incompatible function arguments" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        m.pos_only_def_mix()
    assert "incompatible function arguments" in str(excinfo.value)

    assert m.pos_only_def_mix(1) == (1, 2, 3)
    assert m.pos_only_def_mix(1, 4) == (1, 4, 3)
    assert m.pos_only_def_mix(1, 4, 7) == (1, 4, 7)
    assert m.pos_only_def_mix(1, 4, k=7) == (1, 4, 7)

    with pytest.raises(TypeError) as excinfo:
        m.pos_only_def_mix(1, j=4)
    assert "incompatible function arguments" in str(excinfo.value)
def test_signatures():
    """Generated docstrings must show the `*` (kw-only) and `/` (pos-only) markers."""
    expected = [
        (m.kw_only_all, "kw_only_all(*, i: int, j: int) -> tuple\n"),
        (m.kw_only_mixed, "kw_only_mixed(i: int, *, j: int) -> tuple\n"),
        (m.pos_only_all, "pos_only_all(i: int, j: int, /) -> tuple\n"),
        (m.pos_only_mix, "pos_only_mix(i: int, /, j: int) -> tuple\n"),
        (m.pos_kw_only_mix, "pos_kw_only_mix(i: int, /, j: int, *, k: int) -> tuple\n"),
    ]
    for func, docstring in expected:
        assert func.__doc__ == docstring
@pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count")
def test_args_refcount():
    """Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular
    arguments"""
    refcount = m.arg_refcount_h

    myval = 54321
    expected = refcount(myval)
    # Each call below must leave myval's refcount unchanged afterwards.
    assert m.arg_refcount_h(myval) == expected
    assert m.arg_refcount_o(myval) == expected + 1
    assert m.arg_refcount_h(myval) == expected
    assert refcount(myval) == expected

    assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval))
    assert refcount(myval) == expected

    assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {"a": 1, "b": myval})
    assert refcount(myval) == expected

    assert m.args_function(-1, myval) == (-1, myval)
    assert refcount(myval) == expected

    assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval})
    assert refcount(myval) == expected

    assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \
        ((7, 8, myval), {"a": 1, "b": myval})
    assert refcount(myval) == expected

    exp3 = refcount(myval, myval, myval)
    assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3)
    assert refcount(myval) == expected

    # This function takes the first arg as a `py::object` and the rest as a `py::args`. Unlike the
    # previous case, when we have both positional and `py::args` we need to construct a new tuple
    # for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input
    # tuple without having to inc_ref the individual elements, but here we can't, hence the extra
    # refs.
    assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)

    assert m.class_default_argument() == "<class 'decimal.Decimal'>"
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/conftest.py | third_party/unitree_legged_sdk/pybind11/tests/conftest.py | # -*- coding: utf-8 -*-
"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
"""
import contextlib
import difflib
import gc
import re
import textwrap
import pytest
import env
# Early diagnostic for failed imports
import pybind11_tests # noqa: F401
# Patterns for normalizing Python 2 artifacts in reprs/messages:
# u'...' unicode literal prefixes, trailing L on longs, and raw hex addresses.
_unicode_marker = re.compile(r'u(\'[^\']*\')')
_long_marker = re.compile(r'([0-9])L')
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')

# Avoid collecting Python3 only files
collect_ignore = []
if env.PY2:
    collect_ignore.append("test_async.py")
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip())
def _split_and_sort(s):
    """Normalize, then return the sorted list of lines (for output whose
    line order is not significant)."""
    normalized = _strip_and_dedent(s)
    return sorted(normalized.splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
class Output(object):
    """Basic output post-processing and comparison"""

    def __init__(self, string):
        self.string = string
        self.explanation = []

    def __str__(self):
        return self.string

    def __eq__(self, other):
        # Ignore constructor/destructor output which is prefixed with "###"
        actual = [line for line in self.string.strip().splitlines()
                  if not line.startswith("###")]
        expected = _strip_and_dedent(other).splitlines()
        if actual == expected:
            return True
        self.explanation = _make_explanation(actual, expected)
        return False
class Unordered(Output):
    """Custom comparison for output without strict line ordering"""

    def __eq__(self, other):
        actual = _split_and_sort(self.string)
        expected = _split_and_sort(other)
        if actual == expected:
            return True
        self.explanation = _make_explanation(actual, expected)
        return False
class Capture(object):
    """Context manager around a pytest capture fixture; captured stdout is
    compared with `==` via the Output/Unordered helpers."""

    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""

    def __enter__(self):
        # Drain anything already captured so only output produced inside
        # the `with` block is recorded.
        self.capfd.readouterr()
        return self

    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()

    def __eq__(self, other):
        a = Output(self.out)
        b = other
        if a == b:
            return True
        else:
            # Borrow the diff explanation computed by Output.__eq__.
            self.explanation = a.explanation
            return False

    def __str__(self):
        return self.out

    def __contains__(self, item):
        return item in self.out

    @property
    def unordered(self):
        # Comparison view that ignores line order.
        return Unordered(self.out)

    @property
    def stderr(self):
        return Output(self.err)
@pytest.fixture
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    # Wrap the built-in capsys fixture; see the Capture class above.
    return Capture(capsys)
class SanitizedString(object):
    """Callable wrapper: sanitize a value, then compare it against dedented
    expected text with `==`, recording a diff explanation on mismatch."""

    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []

    def __call__(self, thing):
        self.string = self.sanitizer(thing)
        return self

    def __eq__(self, other):
        actual = self.string
        expected = _strip_and_dedent(other)
        if actual == expected:
            return True
        self.explanation = _make_explanation(actual.splitlines(),
                                             expected.splitlines())
        return False
def _sanitize_general(s):
    """Normalize a message so Python 2 and Python 3 runs compare equal."""
    s = s.strip()
    s = s.replace("pybind11_tests.", "m.")
    s = s.replace("unicode", "str")
    # Strip Python 2 long suffixes (1L) and unicode literal markers (u'...').
    s = _long_marker.sub(r"\1", s)
    return _unicode_marker.sub(r"\1", s)
def _sanitize_docstring(thing):
    """Return the sanitized __doc__ of *thing*."""
    return _sanitize_general(thing.__doc__)
@pytest.fixture
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    # Usage in tests: assert doc(m.some_func) == "expected signature"
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    """Stringify *thing*, sanitize it, and zero out hexadecimal addresses."""
    sanitized = _sanitize_general(str(thing))
    return _hexadecimal.sub("0", sanitized)
@pytest.fixture
def msg():
    """Sanitize messages and add custom failure explanation"""
    # Usage in tests: assert msg(excinfo.value) == "expected message"
    return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
    """Hook to insert custom failure explanation"""
    explanation = getattr(left, 'explanation', None)
    if explanation is not None:
        return explanation
@contextlib.contextmanager
def suppress(exception):
    """Suppress the desired exception"""
    # Hand-rolled (instead of contextlib.suppress) to stay Python 2 compatible.
    try:
        yield
    except exception:
        pass
def gc_collect():
    """Run the garbage collector twice (needed when running
    reference counting tests with PyPy)."""
    for _ in range(2):
        gc.collect()
def pytest_configure():
    # Expose the helpers on the pytest namespace so tests can call
    # pytest.suppress(...) and pytest.gc_collect() without importing conftest.
    pytest.suppress = suppress
    pytest.gc_collect = gc_collect
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_union.py | third_party/unitree_legged_sdk/pybind11/tests/test_union.py | # -*- coding: utf-8 -*-
from pybind11_tests import union_ as m
def test_union():
    """Writing one union member is observable through the other member."""
    union_instance = m.TestUnion()
    union_instance.as_uint = 10
    assert union_instance.as_int == 10
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_call_policies.py | third_party/unitree_legged_sdk/pybind11/tests/test_call_policies.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import call_policies as m
from pybind11_tests import ConstructorStats
@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False)
def test_keep_alive_argument(capture):
    """py::keep_alive<1, 2> ties an argument's lifetime to the parent object."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # Without keep_alive the temporary child dies immediately.
    with capture:
        p.addChild(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == """
        Allocating child.
        Releasing child.
    """
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."

    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # With keep_alive the child outlives the call and dies with the parent.
    with capture:
        p.addChildKeepAlive(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == "Allocating child."
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
    """
def test_keep_alive_return_value(capture):
    """py::keep_alive<0, 1> ties a returned object's lifetime to the parent."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # Without keep_alive the returned (unreferenced) child dies immediately.
    with capture:
        p.returnChild()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == """
        Allocating child.
        Releasing child.
    """
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."

    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # With keep_alive the returned child survives until the parent is freed.
    with capture:
        p.returnChildKeepAlive()
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == "Allocating child."
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
    """
# https://foss.heptapod.net/pypy/pypy/-/issues/2447
@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented")
def test_alive_gc(capture):
    """keep_alive patients must be released when a GC-tracked parent is
    collected through a reference cycle."""
    n_inst = ConstructorStats.detail_reg_inst()
    p = m.ParentGC()
    p.addChildKeepAlive(m.Child())
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    lst = [p]
    lst.append(lst)   # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
    """
def test_alive_gc_derived(capture):
    """Same as test_alive_gc, but the parent is a Python subclass."""
    class Derived(m.Parent):
        pass

    n_inst = ConstructorStats.detail_reg_inst()
    p = Derived()
    p.addChildKeepAlive(m.Child())
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    lst = [p]
    lst.append(lst)   # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
    """
def test_alive_gc_multi_derived(capture):
    """Same as test_alive_gc, with a Python class deriving from two bound bases."""
    class Derived(m.Parent, m.Child):
        def __init__(self):
            m.Parent.__init__(self)
            m.Child.__init__(self)

    n_inst = ConstructorStats.detail_reg_inst()
    p = Derived()
    p.addChildKeepAlive(m.Child())
    # +3 rather than +2 because Derived corresponds to two registered instances
    assert ConstructorStats.detail_reg_inst() == n_inst + 3
    lst = [p]
    lst.append(lst)   # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
        Releasing child.
    """
def test_return_none(capture):
    """keep_alive policies must be a no-op when the patient/nurse is None."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # Returning nullptr with keep_alive on the child side: nothing is kept.
    with capture:
        p.returnNullChildKeepAliveChild()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == ""
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."

    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    # Returning nullptr with keep_alive on the parent side: likewise a no-op.
    with capture:
        p.returnNullChildKeepAliveParent()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == ""
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."
def test_keep_alive_constructor(capture):
    """keep_alive attached to a constructor ties the argument to the new object."""
    n_inst = ConstructorStats.detail_reg_inst()

    with capture:
        p = m.Parent(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == """
        Allocating child.
        Allocating parent.
    """
    # The child is released only when the parent goes away.
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == """
        Releasing parent.
        Releasing child.
    """
def test_call_guard():
    """py::call_guard<> wraps the call in the declared guard object(s);
    guard order matters for the reported state."""
    expectations = [
        (m.unguarded_call, "unguarded"),
        (m.guarded_call, "guarded"),
        (m.multiple_guards_correct_order, "guarded & guarded"),
        (m.multiple_guards_wrong_order, "unguarded & guarded"),
    ]
    # The GIL variants are only compiled in with threading support.
    if hasattr(m, "with_gil"):
        expectations.append((m.with_gil, "GIL held"))
        expectations.append((m.without_gil, "GIL released"))
    for bound_func, expected in expectations:
        assert bound_func() == expected
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_modules.py | third_party/unitree_legged_sdk/pybind11/tests/test_modules.py | # -*- coding: utf-8 -*-
from pybind11_tests import modules as m
from pybind11_tests.modules import subsubmodule as ms
from pybind11_tests import ConstructorStats
def test_nested_modules():
    """Submodules created with def_submodule get fully-qualified __name__s."""
    import pybind11_tests
    assert pybind11_tests.__name__ == "pybind11_tests"
    assert pybind11_tests.modules.__name__ == "pybind11_tests.modules"
    assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule"
    assert m.__name__ == "pybind11_tests.modules"
    assert ms.__name__ == "pybind11_tests.modules.subsubmodule"

    assert ms.submodule_func() == "submodule_func()"
def test_reference_internal():
    """return_value_policy::reference_internal getters expose members without
    copying, and assignment copy-assigns into the owner."""
    b = ms.B()
    assert str(b.get_a1()) == "A[1]"
    assert str(b.a1) == "A[1]"
    assert str(b.get_a2()) == "A[2]"
    assert str(b.a2) == "A[2]"

    b.a1 = ms.A(42)
    b.a2 = ms.A(43)
    assert str(b.get_a1()) == "A[42]"
    assert str(b.a1) == "A[42]"
    assert str(b.get_a2()) == "A[43]"
    assert str(b.a2) == "A[43]"

    astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B)
    # Only the two members held inside b are alive; the temporaries are gone.
    assert astats.alive() == 2
    assert bstats.alive() == 1
    del b
    assert astats.alive() == 0
    assert bstats.alive() == 0
    assert astats.values() == ['1', '2', '42', '43']
    assert bstats.values() == []
    assert astats.default_constructions == 0
    assert bstats.default_constructions == 1
    assert astats.copy_constructions == 0
    assert bstats.copy_constructions == 0
    # assert astats.move_constructions >= 0 # Don't invoke any
    # assert bstats.move_constructions >= 0 # Don't invoke any
    assert astats.copy_assignments == 2
    assert bstats.copy_assignments == 0
    assert astats.move_assignments == 0
    assert bstats.move_assignments == 0
def test_importing():
    """A Python object re-exported from a bound module is the identical object."""
    from collections import OrderedDict
    from pybind11_tests.modules import OD

    assert OD is OrderedDict
    assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
def test_pydoc():
    """Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
    import pybind11_tests
    import pydoc

    assert pybind11_tests.__name__ == "pybind11_tests"
    assert pybind11_tests.__doc__ == "pybind11 test module"
    # docmodule() walking the whole module must not raise and must be non-empty.
    assert pydoc.text.docmodule(pybind11_tests)
def test_duplicate_registration():
    """Registering two things with the same name"""
    # An empty list means the C++ side detected no registration errors.
    assert m.duplicate_registration() == []
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_callbacks.py | third_party/unitree_legged_sdk/pybind11/tests/test_callbacks.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import callbacks as m
from threading import Thread
def test_callbacks():
    """Plain functions, partials, and lambdas can all be passed as callbacks,
    and C++ can hand callables back to Python."""
    from functools import partial

    def func1():
        return "func1"

    def func2(a, b, c, d):
        return "func2", a, b, c, d

    def func3(a):
        return "func3({})".format(a)

    assert m.test_callback1(func1) == "func1"
    assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
    assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
    assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
    assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"

    # Callables returned from C++ are invocable, positionally and by keyword.
    f = m.test_callback4()
    assert f(43) == 44
    f = m.test_callback5()
    assert f(number=43) == 44
def test_bound_method_callback():
    """Bound methods (Python- and C++-defined) work as callbacks."""
    # Bound Python method:
    class MyClass:
        def double(self, val):
            return 2 * val

    z = MyClass()
    assert m.test_callback3(z.double) == "func(43) = 86"

    # Bound method of a C++-bound class:
    z = m.CppBoundMethodTest()
    assert m.test_callback3(z.triple) == "func(43) = 129"
def test_keyword_args_and_generalized_unpacking():
    """C++-side f(*args, **kwargs)-style calls forward arguments correctly
    and report unpacking/conversion errors."""
    def f(*args, **kwargs):
        return args, kwargs

    assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
    assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2})
    assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
    assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
    assert m.test_unpacking_and_keywords2(f) == (
        ("positional", 1, 2, 3, 4, 5),
        {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
    )

    # Duplicated keyword arguments across unpacked dicts must be rejected.
    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error1(f)
    assert "Got multiple values for keyword argument" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error2(f)
    assert "Got multiple values for keyword argument" in str(excinfo.value)

    # Unconvertible C++ arguments surface as RuntimeError.
    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error1(f)
    assert "Unable to convert call argument" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error2(f)
    assert "Unable to convert call argument" in str(excinfo.value)
def test_lambda_closure_cleanup():
    """Payload objects captured in a C++ lambda are destroyed when the bound
    function is released (no leaked captures)."""
    m.test_cleanup()
    cstats = m.payload_cstats()
    assert cstats.alive() == 0
    assert cstats.copy_constructions == 1
    assert cstats.move_constructions >= 1
def test_cpp_function_roundtrip():
    """Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
    assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
    assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==
            "matches dummy_function: eval(1) = 2")
    assert m.roundtrip(None, expect_none=True) is None
    # A plain Python lambda cannot be converted back into a raw function pointer:
    assert (m.test_dummy_function(lambda x: x + 2) ==
            "can't convert to function pointer: eval(1) = 3")
    with pytest.raises(TypeError) as excinfo:
        m.test_dummy_function(m.dummy_function2)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.test_dummy_function(lambda x, y: x + y)
    # Error text differs between Python 2 and Python 3:
    assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument",
                                                 "takes exactly 2 arguments"))
def test_function_signatures(doc):
    """Generated docstrings spell out Callable[...] for function args/returns."""
    assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
    assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
def test_movable_object():
    """A callback taking a move-only C++ argument type still accepts a Python callable."""
    assert m.callback_with_movable(lambda _: None) is True
def test_async_callbacks():
    """Callbacks fired from a C++ worker thread still run correctly
    (the GIL must be re-acquired on the C++ side)."""
    # serves as state for async callback
    class Item:
        def __init__(self, value):
            self.value = value
    res = []
    # generate stateful lambda that will store result in `res`
    def gen_f():
        s = Item(3)
        return lambda j: res.append(s.value + j)
    # do some work async
    work = [1, 2, 3, 4]
    m.test_async_callback(gen_f(), work)
    # wait until work is done
    from time import sleep
    sleep(0.5)  # NOTE(review): fixed sleep assumes the C++ side finishes within 0.5s
    assert sum(res) == sum([x + 3 for x in work])
def test_async_async_callbacks():
    """Re-run the async-callback test from a secondary Python thread."""
    worker = Thread(target=test_async_callbacks)
    worker.start()
    worker.join()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_stl.py | third_party/unitree_legged_sdk/pybind11/tests/test_stl.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import stl as m
from pybind11_tests import UserType
from pybind11_tests import ConstructorStats
def test_vector(doc):
    """std::vector <-> list"""
    lst = m.cast_vector()
    assert lst == [1]
    lst.append(2)
    # Any Python sequence (list or tuple) loads into a std::vector:
    assert m.load_vector(lst)
    assert m.load_vector(tuple(lst))
    assert m.cast_bool_vector() == [True, False]
    assert m.load_bool_vector([True, False])
    assert doc(m.cast_vector) == "cast_vector() -> List[int]"
    assert doc(m.load_vector) == "load_vector(arg0: List[int]) -> bool"
    # Test regression caused by 936: pointers to stl containers weren't castable
    assert m.cast_ptr_vector() == ["lvalue", "lvalue"]
def test_deque(doc):
    """std::deque <-> list"""
    lst = m.cast_deque()
    assert lst == [1]
    lst.append(2)
    # Both lists and tuples load into a std::deque:
    assert m.load_deque(lst)
    assert m.load_deque(tuple(lst))
def test_array(doc):
    """std::array <-> list"""
    lst = m.cast_array()
    assert lst == [1, 2]
    assert m.load_array(lst)
    # The fixed size is reflected in the generated signature (int[2]):
    assert doc(m.cast_array) == "cast_array() -> List[int[2]]"
    assert doc(m.load_array) == "load_array(arg0: List[int[2]]) -> bool"
def test_valarray(doc):
    """std::valarray <-> list"""
    lst = m.cast_valarray()
    assert lst == [1, 4, 9]
    assert m.load_valarray(lst)
    # valarray keeps the plain List[int] signature (no fixed size):
    assert doc(m.cast_valarray) == "cast_valarray() -> List[int]"
    assert doc(m.load_valarray) == "load_valarray(arg0: List[int]) -> bool"
def test_map(doc):
    """std::map <-> dict"""
    d = m.cast_map()
    assert d == {"key": "value"}
    assert "key" in d
    d["key2"] = "value2"
    assert "key2" in d
    # The mutated dict still loads into a std::map:
    assert m.load_map(d)
    assert doc(m.cast_map) == "cast_map() -> Dict[str, str]"
    assert doc(m.load_map) == "load_map(arg0: Dict[str, str]) -> bool"
def test_set(doc):
    """std::set <-> set"""
    s = m.cast_set()
    assert s == {"key1", "key2"}
    s.add("key3")
    # The mutated Python set still loads into a std::set:
    assert m.load_set(s)
    assert doc(m.cast_set) == "cast_set() -> Set[str]"
    assert doc(m.load_set) == "load_set(arg0: Set[str]) -> bool"
def test_recursive_casting():
    """Tests that stl casters preserve lvalue/rvalue context for container values"""
    assert m.cast_rv_vector() == ["rvalue", "rvalue"]
    assert m.cast_lv_vector() == ["lvalue", "lvalue"]
    assert m.cast_rv_array() == ["rvalue", "rvalue", "rvalue"]
    assert m.cast_lv_array() == ["lvalue", "lvalue"]
    assert m.cast_rv_map() == {"a": "rvalue"}
    assert m.cast_lv_map() == {"a": "lvalue", "b": "lvalue"}
    # The context is preserved recursively through nested containers:
    assert m.cast_rv_nested() == [[[{"b": "rvalue", "c": "rvalue"}], [{"a": "rvalue"}]]]
    assert m.cast_lv_nested() == {
        "a": [[["lvalue", "lvalue"]], [["lvalue", "lvalue"]]],
        "b": [[["lvalue", "lvalue"], ["lvalue", "lvalue"]]]
    }
    # Issue #853 test case:
    z = m.cast_unique_ptr_vector()
    assert z[0].value == 7 and z[1].value == 42
def test_move_out_container():
    """Properties use the `reference_internal` policy by default. If the underlying function
    returns an rvalue, the policy is automatically changed to `move` to avoid referencing
    a temporary. In case the return value is a container of user-defined types, the policy
    also needs to be applied to the elements, not just the container."""
    c = m.MoveOutContainer()
    moved_out_list = c.move_list
    # Elements were moved, not referenced from the (destroyed) temporary:
    assert [x.value for x in moved_out_list] == [0, 1, 2]
@pytest.mark.skipif(not hasattr(m, "has_optional"), reason='no <optional>')
def test_optional():
    """std::optional <-> value-or-None conversions."""
    assert m.double_or_zero(None) == 0
    assert m.double_or_zero(42) == 84
    pytest.raises(TypeError, m.double_or_zero, 'foo')
    assert m.half_or_none(0) is None
    assert m.half_or_none(42) == 21
    pytest.raises(TypeError, m.half_or_none, 'foo')
    # Default argument of std::nullopt:
    assert m.test_nullopt() == 42
    assert m.test_nullopt(None) == 42
    assert m.test_nullopt(42) == 42
    assert m.test_nullopt(43) == 43
    # Works for a non-assignable contained type, too:
    assert m.test_no_assign() == 42
    assert m.test_no_assign(None) == 42
    assert m.test_no_assign(m.NoAssign(43)) == 43
    pytest.raises(TypeError, m.test_no_assign, 43)
    assert m.nodefer_none_optional(None)
    holder = m.OptionalHolder()
    mvalue = holder.member
    assert mvalue.initialized
    assert holder.member_initialized()
@pytest.mark.skipif(not hasattr(m, "has_exp_optional"), reason='no <experimental/optional>')
def test_exp_optional():
    """std::experimental::optional <-> value-or-None conversions."""
    assert m.double_or_zero_exp(None) == 0
    assert m.double_or_zero_exp(42) == 84
    pytest.raises(TypeError, m.double_or_zero_exp, 'foo')
    assert m.half_or_none_exp(0) is None
    assert m.half_or_none_exp(42) == 21
    pytest.raises(TypeError, m.half_or_none_exp, 'foo')
    # Default argument of nullopt:
    assert m.test_nullopt_exp() == 42
    assert m.test_nullopt_exp(None) == 42
    assert m.test_nullopt_exp(42) == 42
    assert m.test_nullopt_exp(43) == 43
    assert m.test_no_assign_exp() == 42
    assert m.test_no_assign_exp(None) == 42
    assert m.test_no_assign_exp(m.NoAssign(43)) == 43
    pytest.raises(TypeError, m.test_no_assign_exp, 43)
    holder = m.OptionalExpHolder()
    mvalue = holder.member
    assert mvalue.initialized
    assert holder.member_initialized()
@pytest.mark.skipif(not hasattr(m, "load_variant"), reason='no <variant>')
def test_variant(doc):
    """std::variant <-> Union; alternatives are tried in declaration order."""
    assert m.load_variant(1) == "int"
    assert m.load_variant("1") == "std::string"
    assert m.load_variant(1.0) == "double"
    assert m.load_variant(None) == "std::nullptr_t"
    # Two-pass loading: an exact int match wins, but 1.0 still matches double:
    assert m.load_variant_2pass(1) == "int"
    assert m.load_variant_2pass(1.0) == "double"
    assert m.cast_variant() == (5, "Hello")
    assert doc(m.load_variant) == "load_variant(arg0: Union[int, str, float, None]) -> str"
def test_vec_of_reference_wrapper():
    """#171: Can't return reference wrappers (or STL structures containing them)"""
    # The argument is appended to three internally-held elements:
    assert str(m.return_vec_of_reference_wrapper(UserType(4))) == \
        "[UserType(1), UserType(2), UserType(3), UserType(4)]"
def test_stl_pass_by_pointer(msg):
    """Passing nullptr or None to an STL container pointer is not expected to work"""
    with pytest.raises(TypeError) as excinfo:
        m.stl_pass_by_pointer()  # default value is `nullptr`
    assert msg(excinfo.value) == """
        stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
            1. (v: List[int] = None) -> List[int]

        Invoked with:
    """  # noqa: E501 line too long
    with pytest.raises(TypeError) as excinfo:
        m.stl_pass_by_pointer(None)
    assert msg(excinfo.value) == """
        stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
            1. (v: List[int] = None) -> List[int]

        Invoked with: None
    """  # noqa: E501 line too long
    # A real list, however, is accepted and round-trips:
    assert m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]
def test_missing_header_message():
    """Trying convert `list` to a `std::vector`, or vice versa, without including
    <pybind11/stl.h> should result in a helpful suggestion in the error message"""
    import pybind11_cross_module_tests as cm
    expected_message = ("Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
                        "<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
                        "conversions are optional and require extra headers to be included\n"
                        "when compiling your pybind11 module.")
    # Both directions (argument conversion and return conversion) get the hint:
    with pytest.raises(TypeError) as excinfo:
        cm.missing_header_arg([1.0, 2.0, 3.0])
    assert expected_message in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        cm.missing_header_return()
    assert expected_message in str(excinfo.value)
def test_function_with_string_and_vector_string_arg():
    """Check if a string is NOT implicitly converted to a list, which was the
    behavior before fix of issue #1258"""
    # tuple/list select the vector overload (2); a plain str selects the string one (3):
    assert m.func_with_string_or_vector_string_arg_overload(('A', 'B', )) == 2
    assert m.func_with_string_or_vector_string_arg_overload(['A', 'B']) == 2
    assert m.func_with_string_or_vector_string_arg_overload('A') == 3
def test_stl_ownership():
    """Objects returned inside an STL container are owned (and freed) by Python."""
    cstats = ConstructorStats.get(m.Placeholder)
    assert cstats.alive() == 0
    r = m.test_stl_ownership()
    assert len(r) == 1
    # Dropping the list must destroy the contained instance:
    del r
    assert cstats.alive() == 0
def test_array_cast_sequence():
    """A generic sequence (here a tuple) loads into a std::array."""
    assert m.array_cast_sequence((1, 2, 3)) == [1, 2, 3]
def test_issue_1561():
    """ check fix for issue #1561 """
    bar = m.Issue1561Outer()
    bar.list = [m.Issue1561Inner('bar')]
    # The bare attribute read itself exercised the crash fixed in #1561:
    bar.list
    assert bar.list[0].data == 'bar'
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_multiple_inheritance.py | third_party/unitree_legged_sdk/pybind11/tests/test_multiple_inheritance.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
def test_multiple_inheritance_cpp():
    """A C++ type with two C++ bases exposes both bases' methods."""
    mt = m.MIType(3, 4)
    assert mt.foo() == 3
    assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_mix1():
    """Python class inheriting from a Python base first, then a C++ base."""
    class Base1:
        def __init__(self, i):
            self.i = i
        def foo(self):
            return self.i
    class MITypePy(Base1, m.Base2):
        def __init__(self, i, j):
            Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    mt = MITypePy(3, 4)
    assert mt.foo() == 3
    assert mt.bar() == 4
def test_multiple_inheritance_mix2():
    """Python class inheriting from a C++ base first, then a Python base."""
    class Base2:
        def __init__(self, i):
            self.i = i
        def bar(self):
            return self.i
    class MITypePy(m.Base1, Base2):
        def __init__(self, i, j):
            m.Base1.__init__(self, i)
            Base2.__init__(self, j)
    mt = MITypePy(3, 4)
    assert mt.foo() == 3
    assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_python():
    """Various pure-Python MI hierarchies layered over the C++ Base1/Base2 types."""
    class MI1(m.Base1, m.Base2):
        def __init__(self, i, j):
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    class B1(object):
        def v(self):
            return 1
    class MI2(B1, m.Base1, m.Base2):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    class MI3(MI2):
        def __init__(self, i, j):
            MI2.__init__(self, i, j)
    class MI4(MI3, m.Base2):
        def __init__(self, i, j):
            MI3.__init__(self, i, j)
            # This should be ignored (Base2 is already initialized via MI2):
            m.Base2.__init__(self, i + 100)
    class MI5(m.Base2, B1, m.Base1):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    class MI6(m.Base2, B1):
        def __init__(self, i):
            m.Base2.__init__(self, i)
            B1.__init__(self)
    class B2(B1):
        def v(self):
            return 2
    class B3(object):
        def v(self):
            return 3
    class B4(B3, B2):
        def v(self):
            return 4
    class MI7(B4, MI6):
        def __init__(self, i):
            B4.__init__(self)
            MI6.__init__(self, i)
    class MI8(MI6, B3):
        def __init__(self, i):
            MI6.__init__(self, i)
            B3.__init__(self)
    class MI8b(B3, MI6):
        def __init__(self, i):
            B3.__init__(self)
            MI6.__init__(self, i)
    mi1 = MI1(1, 2)
    assert mi1.foo() == 1
    assert mi1.bar() == 2
    mi2 = MI2(3, 4)
    assert mi2.v() == 1
    assert mi2.foo() == 3
    assert mi2.bar() == 4
    mi3 = MI3(5, 6)
    assert mi3.v() == 1
    assert mi3.foo() == 5
    assert mi3.bar() == 6
    mi4 = MI4(7, 8)
    assert mi4.v() == 1
    assert mi4.foo() == 7
    assert mi4.bar() == 8
    mi5 = MI5(10, 11)
    assert mi5.v() == 1
    assert mi5.foo() == 10
    assert mi5.bar() == 11
    mi6 = MI6(12)
    assert mi6.v() == 1
    assert mi6.bar() == 12
    mi7 = MI7(13)
    assert mi7.v() == 4
    assert mi7.bar() == 13
    mi8 = MI8(14)
    assert mi8.v() == 1
    assert mi8.bar() == 14
    mi8b = MI8b(15)
    assert mi8b.v() == 3
    assert mi8b.bar() == 15
def test_multiple_inheritance_python_many_bases():
    """Instances inheriting 4/8/9/17 registered C++ bases (1, 2 and 3+ pointer-widths
    worth of holder flags)."""
    class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
        def __init__(self):
            m.BaseN1.__init__(self, 1)
            m.BaseN2.__init__(self, 2)
            m.BaseN3.__init__(self, 3)
            m.BaseN4.__init__(self, 4)
    class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
        def __init__(self):
            m.BaseN5.__init__(self, 5)
            m.BaseN6.__init__(self, 6)
            m.BaseN7.__init__(self, 7)
            m.BaseN8.__init__(self, 8)
    class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15,
                    m.BaseN16):
        def __init__(self):
            m.BaseN9.__init__(self, 9)
            m.BaseN10.__init__(self, 10)
            m.BaseN11.__init__(self, 11)
            m.BaseN12.__init__(self, 12)
            m.BaseN13.__init__(self, 13)
            m.BaseN14.__init__(self, 14)
            m.BaseN15.__init__(self, 15)
            m.BaseN16.__init__(self, 16)
    class MIMany19(MIMany14, MIMany58, m.BaseN9):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            m.BaseN9.__init__(self, 9)
    class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            MIMany916.__init__(self)
            m.BaseN17.__init__(self, 17)
    # Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:
    a = MIMany14()
    for i in range(1, 4):
        assert getattr(a, "f" + str(i))() == 2 * i
    # Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:
    b = MIMany916()
    for i in range(9, 16):
        assert getattr(b, "f" + str(i))() == 2 * i
    # Inherits from 9: requires >= 2 pointers worth of holder flags
    c = MIMany19()
    for i in range(1, 9):
        assert getattr(c, "f" + str(i))() == 2 * i
    # Inherits from 17: requires >= 3 pointers worth of holder flags
    d = MIMany117()
    for i in range(1, 17):
        assert getattr(d, "f" + str(i))() == 2 * i
def test_multiple_inheritance_virtbase():
    """Python subclass of a C++ type that itself uses virtual base classes."""
    class MITypePy(m.Base12a):
        def __init__(self, i, j):
            m.Base12a.__init__(self, i, j)
    mt = MITypePy(3, 4)
    assert mt.bar() == 4
    # The instance also converts to the (virtual) base, by value and shared_ptr:
    assert m.bar_base2a(mt) == 4
    assert m.bar_base2a_sharedptr(mt) == 4
def test_mi_static_properties():
    """Mixing bases with and without static properties should be possible
    and the result should be independent of base definition order"""
    for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
        assert d.vanilla() == "Vanilla"
        assert d.static_func1() == "WithStatic1"
        assert d.static_func2() == "WithStatic2"
        assert d.static_func() == d.__class__.__name__
        # Static values set on the base classes are visible on the mixed class:
        m.WithStatic1.static_value1 = 1
        m.WithStatic2.static_value2 = 2
        assert d.static_value1 == 1
        assert d.static_value2 == 2
        assert d.static_value == 12
        # ...and assigning through an instance updates the static property:
        d.static_value1 = 0
        assert d.static_value1 == 0
        d.static_value2 = 0
        assert d.static_value2 == 0
        d.static_value = 0
        assert d.static_value == 0
# Requires PyPy 6+
def test_mi_dynamic_attributes():
    """Mixing bases with and without dynamic attribute support"""
    for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):
        # Either mix order must still allow setting arbitrary attributes:
        d.dynamic = 1
        assert d.dynamic == 1
def test_mi_unaligned_base():
    """Returning an offset (non-first MI) base class pointer should recognize the instance"""
    n_inst = ConstructorStats.detail_reg_inst()
    c = m.I801C()
    d = m.I801D()
    # + 4 below because we have the two instances, and each instance has offset base I801B2
    assert ConstructorStats.detail_reg_inst() == n_inst + 4
    b1c = m.i801b1_c(c)
    assert b1c is c
    b2c = m.i801b2_c(c)
    assert b2c is c
    b1d = m.i801b1_d(d)
    assert b1d is d
    b2d = m.i801b2_d(d)
    assert b2d is d
    assert ConstructorStats.detail_reg_inst() == n_inst + 4  # no extra instances
    # Dropping every reference to an instance also unregisters its offset base:
    del c, b1c, b2c
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    del d, b1d, b2d
    assert ConstructorStats.detail_reg_inst() == n_inst
def test_mi_base_return():
    """Tests returning an offset (non-first MI) base class pointer to a derived instance"""
    n_inst = ConstructorStats.detail_reg_inst()
    c1 = m.i801c_b1()
    assert type(c1) is m.I801C
    assert c1.a == 1
    assert c1.b == 2
    d1 = m.i801d_b1()
    assert type(d1) is m.I801D
    assert d1.a == 1
    assert d1.b == 2
    assert ConstructorStats.detail_reg_inst() == n_inst + 4
    # Same again, but returned through the offset B2 base pointer:
    c2 = m.i801c_b2()
    assert type(c2) is m.I801C
    assert c2.a == 1
    assert c2.b == 2
    d2 = m.i801d_b2()
    assert type(d2) is m.I801D
    assert d2.a == 1
    assert d2.b == 2
    assert ConstructorStats.detail_reg_inst() == n_inst + 8
    del c2
    assert ConstructorStats.detail_reg_inst() == n_inst + 6
    del c1, d1, d2
    assert ConstructorStats.detail_reg_inst() == n_inst
    # Returning an unregistered derived type with a registered base; we won't
    # pick up the derived type, obviously, but should still work (as an object
    # of whatever type was returned).
    e1 = m.i801e_c()
    assert type(e1) is m.I801C
    assert e1.a == 1
    assert e1.b == 2
    e2 = m.i801e_b2()
    assert type(e2) is m.I801B2
    assert e2.b == 2
def test_diamond_inheritance():
    """Tests that diamond inheritance works as expected (issue #959)"""
    # Issue #959: this shouldn't segfault:
    d = m.D()
    # Make sure all the various distinct pointers are all recognized as registered instances:
    assert d is d.c0()
    assert d is d.c1()
    assert d is d.b()
    assert d is d.c0().b()
    assert d is d.c1().b()
    # Arbitrary chains through the diamond resolve to the same Python object:
    assert d is d.c0().c1().b().c0().b()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_iostream.py | third_party/unitree_legged_sdk/pybind11/tests/test_iostream.py | # -*- coding: utf-8 -*-
from pybind11_tests import iostream as m
import sys
from contextlib import contextmanager
try:
    # Python 3: io.StringIO is the standard in-memory text stream
    from io import StringIO
except ImportError:
    # Python 2: prefer the faster C implementation, fall back to pure Python
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
try:
    # Python 3.4+ provides redirect_stdout directly
    from contextlib import redirect_stdout
except ImportError:
    # Fallback for older Pythons. Restore the original stream even if the
    # body raises (the previous version leaked the swapped-in stream on error).
    @contextmanager
    def redirect_stdout(target):
        original = sys.stdout
        sys.stdout = target
        try:
            yield
        finally:
            sys.stdout = original
try:
    # Python 3.5+ provides redirect_stderr directly
    from contextlib import redirect_stderr
except ImportError:
    # Fallback for older Pythons. Restore the original stream even if the
    # body raises (the previous version leaked the swapped-in stream on error).
    @contextmanager
    def redirect_stderr(target):
        original = sys.stderr
        sys.stderr = target
        try:
            yield
        finally:
            sys.stderr = original
def test_captured(capsys):
    """C++ output routed through the redirect helpers lands on Python's streams."""
    msg = "I've been redirected to Python, I hope!"
    m.captured_output(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''
    m.captured_output_default(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''
    # The error-stream variant must not touch stdout:
    m.captured_err(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == ''
    assert stderr == msg
def test_captured_large_string(capsys):
    """Output larger than the redirect's internal buffer is still captured intact."""
    # Make this bigger than the buffer used on the C++ side: 1024 chars
    msg = "I've been redirected to Python, I hope!"
    msg = msg * (1024 // len(msg) + 1)
    m.captured_output_default(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''
def test_guard_capture(capsys):
    """The guard (RAII-style) variant captures output like the context manager."""
    msg = "I've been redirected to Python, I hope!"
    m.guard_output(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''
def test_series_captured(capture):
    """Consecutive captured calls append to the same capture."""
    with capture:
        m.captured_output("a")
        m.captured_output("b")
    assert capture == "ab"
def test_flush(capfd):
    """Unflushed C++ output stays buffered until an explicit flush or redirect teardown."""
    msg = "(not flushed)"
    msg2 = "(flushed)"
    with m.ostream_redirect():
        m.noisy_function(msg, flush=False)
        stdout, stderr = capfd.readouterr()
        assert stdout == ''
        m.noisy_function(msg2, flush=True)
        stdout, stderr = capfd.readouterr()
        assert stdout == msg + msg2
        m.noisy_function(msg, flush=False)
    # Exiting the redirect context flushes the remaining buffered text:
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
def test_not_captured(capfd):
    """raw_output bypasses a Python-level redirect; captured_output honors it."""
    msg = "Something that should not show up in log"
    stream = StringIO()
    with redirect_stdout(stream):
        m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    # Raw C++ output goes to the real file descriptor, not the StringIO:
    assert stdout == msg
    assert stderr == ''
    assert stream.getvalue() == ''
    stream = StringIO()
    with redirect_stdout(stream):
        m.captured_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert stream.getvalue() == msg
def test_err(capfd):
    """Same raw-vs-captured distinction, but for the error stream."""
    msg = "Something that should not show up in log"
    stream = StringIO()
    with redirect_stderr(stream):
        m.raw_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == msg
    assert stream.getvalue() == ''
    stream = StringIO()
    with redirect_stderr(stream):
        m.captured_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert stream.getvalue() == msg
def test_multi_captured(capfd):
    """Interleaved captured and raw writes end up in their respective sinks."""
    stream = StringIO()
    with redirect_stdout(stream):
        m.captured_output("a")
        m.raw_output("b")
        m.captured_output("c")
        m.raw_output("d")
    stdout, stderr = capfd.readouterr()
    assert stdout == 'bd'
    assert stream.getvalue() == 'ac'
def test_dual(capsys):
    """Capture stdout and stderr simultaneously from a single C++ call."""
    m.captured_dual("a", "b")
    stdout, stderr = capsys.readouterr()
    assert stdout == "a"
    assert stderr == "b"
def test_redirect(capfd):
    """m.ostream_redirect() routes raw C++ std::cout into the active Python stdout."""
    msg = "Should not be in log!"
    stream = StringIO()
    with redirect_stdout(stream):
        m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stream.getvalue() == ''
    stream = StringIO()
    with redirect_stdout(stream):
        with m.ostream_redirect():
            m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stream.getvalue() == msg
    # After the redirect context exits, raw output hits the real stdout again:
    stream = StringIO()
    with redirect_stdout(stream):
        m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stream.getvalue() == ''
def test_redirect_err(capfd):
    """Redirect only the error stream; stdout is left untouched."""
    msg = "StdOut"
    msg2 = "StdErr"
    stream = StringIO()
    with redirect_stderr(stream):
        with m.ostream_redirect(stdout=False):
            m.raw_output(msg)
            m.raw_err(msg2)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stderr == ''
    assert stream.getvalue() == msg2
def test_redirect_both(capfd):
    """Redirect both C++ streams into separate Python StringIO sinks."""
    msg = "StdOut"
    msg2 = "StdErr"
    stream = StringIO()
    stream2 = StringIO()
    with redirect_stdout(stream):
        with redirect_stderr(stream2):
            with m.ostream_redirect():
                m.raw_output(msg)
                m.raw_err(msg2)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert stream.getvalue() == msg
    assert stream2.getvalue() == msg2
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_exceptions.py | third_party/unitree_legged_sdk/pybind11/tests/test_exceptions.py | # -*- coding: utf-8 -*-
import sys
import pytest
from pybind11_tests import exceptions as m
import pybind11_cross_module_tests as cm
def test_std_exception(msg):
    """A thrown std::exception maps to a Python RuntimeError carrying its what() text."""
    with pytest.raises(RuntimeError) as excinfo:
        m.throw_std_exception()
    assert msg(excinfo.value) == "This exception was intentionally thrown."
def test_error_already_set(msg):
    """py::error_already_set thrown with/without a Python error actually set."""
    with pytest.raises(RuntimeError) as excinfo:
        m.throw_already_set(False)
    assert msg(excinfo.value) == "Unknown internal error occurred"
    with pytest.raises(ValueError) as excinfo:
        m.throw_already_set(True)
    assert msg(excinfo.value) == "foo"
def test_cross_module_exceptions():
    """Exceptions raised in one pybind11 module translate correctly for callers
    going through another module."""
    with pytest.raises(RuntimeError) as excinfo:
        cm.raise_runtime_error()
    assert str(excinfo.value) == "My runtime error"
    with pytest.raises(ValueError) as excinfo:
        cm.raise_value_error()
    assert str(excinfo.value) == "My value error"
    with pytest.raises(ValueError) as excinfo:
        cm.throw_pybind_value_error()
    assert str(excinfo.value) == "pybind11 value error"
    with pytest.raises(TypeError) as excinfo:
        cm.throw_pybind_type_error()
    assert str(excinfo.value) == "pybind11 type error"
    # Only the exception type matters here, so don't bind an unused ExceptionInfo:
    with pytest.raises(StopIteration):
        cm.throw_stop_iteration()
def test_python_call_in_catch():
    """Calling back into Python from a C++ destructor while an exception is in flight."""
    d = {}
    assert m.python_call_in_destructor(d) is True
    # The destructor's Python call must have executed and recorded its result:
    assert d["good"] is True
def test_python_alreadyset_in_destructor(monkeypatch, capsys):
    """An error_already_set destroyed during unwinding is reported as an
    unraisable exception (sys.unraisablehook on 3.8+, stderr otherwise)."""
    hooked = False
    triggered = [False]  # mutable, so Python 2.7 closure can modify it
    if hasattr(sys, 'unraisablehook'):  # Python 3.8+
        hooked = True
        default_hook = sys.unraisablehook
        def hook(unraisable_hook_args):
            exc_type, exc_value, exc_tb, err_msg, obj = unraisable_hook_args
            if obj == 'already_set demo':
                triggered[0] = True
            default_hook(unraisable_hook_args)
            return
        # Use monkeypatch so pytest can apply and remove the patch as appropriate
        monkeypatch.setattr(sys, 'unraisablehook', hook)
    assert m.python_alreadyset_in_destructor('already_set demo') is True
    if hooked:
        assert triggered[0] is True
    _, captured_stderr = capsys.readouterr()
    # Error message is different in Python 2 and 3, check for words that appear in both
    assert 'ignored' in captured_stderr and 'already_set demo' in captured_stderr
def test_exception_matches():
    """Exception matching from C++ (exact type and base-class matches)."""
    assert m.exception_matches()
    assert m.exception_matches_base()
    assert m.modulenotfound_exception_matches_base()
def test_custom(msg):
    """Custom C++ exception types: registration, translation to standard Python
    exceptions, rethrow delegation and subclassing."""
    # Can we catch a MyException?
    with pytest.raises(m.MyException) as excinfo:
        m.throws1()
    assert msg(excinfo.value) == "this error should go to a custom type"
    # Can we translate to standard Python exceptions?
    with pytest.raises(RuntimeError) as excinfo:
        m.throws2()
    assert msg(excinfo.value) == "this error should go to a standard Python exception"
    # Can we handle unknown exceptions?
    with pytest.raises(RuntimeError) as excinfo:
        m.throws3()
    assert msg(excinfo.value) == "Caught an unknown exception!"
    # Can we delegate to another handler by rethrowing?
    with pytest.raises(m.MyException) as excinfo:
        m.throws4()
    assert msg(excinfo.value) == "this error is rethrown"
    # Can we fall-through to the default handler?
    with pytest.raises(RuntimeError) as excinfo:
        m.throws_logic_error()
    assert msg(excinfo.value) == "this error should fall through to the standard handler"
    # OverFlow error translation. (Only the type is checked, so no ExceptionInfo binding.)
    with pytest.raises(OverflowError):
        m.throws_overflow_error()
    # Can we handle a helper-declared exception?
    with pytest.raises(m.MyException5) as excinfo:
        m.throws5()
    assert msg(excinfo.value) == "this is a helper-defined translated exception"
    # Exception subclassing:
    with pytest.raises(m.MyException5) as excinfo:
        m.throws5_1()
    assert msg(excinfo.value) == "MyException5 subclass"
    assert isinstance(excinfo.value, m.MyException5_1)
    with pytest.raises(m.MyException5_1) as excinfo:
        m.throws5_1()
    assert msg(excinfo.value) == "MyException5 subclass"
    # Catching the subclass must not intercept the parent exception:
    with pytest.raises(m.MyException5) as excinfo:
        try:
            m.throws5()
        except m.MyException5_1:
            raise RuntimeError("Exception error: caught child from parent")
    assert msg(excinfo.value) == "this is a helper-defined translated exception"
def test_nested_throws(capture):
    """Tests nested (e.g. C++ -> Python -> C++) exception handling"""
    def throw_myex():
        raise m.MyException("nested error")
    def throw_myex5():
        raise m.MyException5("nested error 5")
    # In the comments below, the exception is caught in the first step, thrown in the last step
    # C++ -> Python
    with capture:
        m.try_catch(m.MyException5, throw_myex5)
    assert str(capture).startswith("MyException5: nested error 5")
    # Python -> C++ -> Python
    with pytest.raises(m.MyException) as excinfo:
        m.try_catch(m.MyException5, throw_myex)
    assert str(excinfo.value) == "nested error"
    # Python-level catcher: prints (captures) any MyException raised by f
    def pycatch(exctype, f, *args):
        try:
            f(*args)
        except m.MyException as e:
            print(e)
    # C++ -> Python -> C++ -> Python
    with capture:
        m.try_catch(
            m.MyException5, pycatch, m.MyException, m.try_catch, m.MyException, throw_myex5)
    assert str(capture).startswith("MyException5: nested error 5")
    # C++ -> Python -> C++
    with capture:
        m.try_catch(m.MyException, pycatch, m.MyException5, m.throws4)
    assert capture == "this error is rethrown"
    # Python -> C++ -> Python -> C++
    with pytest.raises(m.MyException5) as excinfo:
        m.try_catch(m.MyException, pycatch, m.MyException, m.throws5)
    assert str(excinfo.value) == "this is a helper-defined translated exception"
# This can often happen if you wrap a pybind11 class in a Python wrapper
def test_invalid_repr():
    """A raising __repr__ must not crash error-message construction; the
    original TypeError is still raised."""
    class MyRepr(object):
        def __repr__(self):
            raise AttributeError("Example error")
    with pytest.raises(TypeError):
        m.simple_bool_passthrough(MyRepr())
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_sequences_and_iterators.py | third_party/unitree_legged_sdk/pybind11/tests/test_sequences_and_iterators.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import sequences_and_iterators as m
from pybind11_tests import ConstructorStats
def isclose(a, b, rel_tol=1e-05, abs_tol=0.0):
    """Like math.isclose() from Python 3.5"""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0):
    """True iff every corresponding pair of elements is approximately equal."""
    for left, right in zip(a_list, b_list):
        if not isclose(left, right, rel_tol=rel_tol, abs_tol=abs_tol):
            return False
    return True
def test_generalized_iterators():
    """Custom C++ iterators over (key, value) pairs, full pairs vs. keys only."""
    assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)]
    assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)]
    assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == []
    assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3]
    assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1]
    assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == []
    # __next__ must continue to raise StopIteration
    it = m.IntPairs([(0, 0)]).nonzero()
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(it)
    it = m.IntPairs([(0, 0)]).nonzero_keys()
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(it)
def test_sliceable():
    """__getitem__ with a py::slice receives normalized (start, stop, step)."""
    sliceable = m.Sliceable(100)
    assert sliceable[::] == (0, 100, 1)
    assert sliceable[10::] == (10, 100, 1)
    assert sliceable[:10:] == (0, 10, 1)
    assert sliceable[::10] == (0, 100, 10)
    # Negative indices and steps are normalized against the length:
    assert sliceable[-10::] == (90, 100, 1)
    assert sliceable[:-10:] == (0, 90, 1)
    assert sliceable[::-10] == (99, -1, -10)
    assert sliceable[50:60:1] == (50, 60, 1)
    assert sliceable[50:60:-1] == (50, 60, -1)
def test_sequence():
    """Full sequence protocol on a C++ type (len, get/set item, contains,
    reversed, slice assignment) plus construction/destruction accounting."""
    cstats = ConstructorStats.get(m.Sequence)
    s = m.Sequence(5)
    assert cstats.values() == ['of size', '5']
    assert "Sequence" in repr(s)
    assert len(s) == 5
    assert s[0] == 0 and s[3] == 0
    assert 12.34 not in s
    s[0], s[3] = 12.34, 56.78
    assert 12.34 in s
    assert isclose(s[0], 12.34) and isclose(s[3], 56.78)
    rev = reversed(s)
    assert cstats.values() == ['of size', '5']
    rev2 = s[::-1]
    assert cstats.values() == ['of size', '5']
    it = iter(m.Sequence(0))
    for _ in range(3):  # __next__ must continue to raise StopIteration
        with pytest.raises(StopIteration):
            next(it)
    assert cstats.values() == ['of size', '0']
    expected = [0, 56.78, 0, 0, 12.34]
    assert allclose(rev, expected)
    assert allclose(rev2, expected)
    assert rev == rev2
    # Extended-slice assignment from another Sequence:
    rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
    assert cstats.values() == ['of size', '3', 'from std::vector']
    assert allclose(rev, [2, 56.78, 2, 0, 2])
    # Each del must release exactly one live instance:
    assert cstats.alive() == 4
    del it
    assert cstats.alive() == 3
    del s
    assert cstats.alive() == 2
    del rev
    assert cstats.alive() == 1
    del rev2
    assert cstats.alive() == 0
    assert cstats.values() == []
    assert cstats.default_constructions == 0
    assert cstats.copy_constructions == 0
    assert cstats.move_constructions >= 1
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
def test_sequence_length():
    """#2076: Exception raised by len(arg) should be propagated """
    class BadLen(RuntimeError):
        pass
    class SequenceLike():
        def __getitem__(self, i):
            return None
        def __len__(self):
            raise BadLen()
    # A raising __len__ must surface the original exception, not a generic one:
    with pytest.raises(BadLen):
        m.sequence_length(SequenceLike())
    assert m.sequence_length([1, 2, 3]) == 3
    assert m.sequence_length("hello") == 5
def test_map_iterator():
    """A bound StringMap behaves like a mapping: lookup, len, KeyError on a
    missing key, item assignment, key iteration and .items()."""
    smap = m.StringMap({'hi': 'bye', 'black': 'white'})
    assert len(smap) == 2
    assert smap['hi'] == 'bye'
    assert smap['black'] == 'white'
    with pytest.raises(KeyError):
        smap['orange']
    smap['orange'] = 'banana'
    assert smap['orange'] == 'banana'
    expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}
    for key in smap:
        assert smap[key] == expected[key]
    for key, value in smap.items():
        assert expected[key] == value
    # An exhausted iterator must keep raising StopIteration on every __next__.
    empty_it = iter(m.StringMap({}))
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(empty_it)
def test_python_iterator_in_cpp():
    """C++ code can consume Python iterables/iterators; errors raised by the
    Python side (non-iterable input, a raising __next__) must propagate."""
    t = (1, 2, 3)
    assert m.object_to_list(t) == [1, 2, 3]
    assert m.object_to_list(iter(t)) == [1, 2, 3]
    assert m.iterator_to_list(iter(t)) == [1, 2, 3]
    with pytest.raises(TypeError) as excinfo:
        m.object_to_list(1)
    assert "object is not iterable" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.iterator_to_list(1)
    assert "incompatible function arguments" in str(excinfo.value)
    def bad_next_call():
        # iter(callable, sentinel) invokes this on every __next__.
        raise RuntimeError("py::iterator::advance() should propagate errors")
    with pytest.raises(RuntimeError) as excinfo:
        m.iterator_to_list(iter(bad_next_call, None))
    assert str(excinfo.value) == "py::iterator::advance() should propagate errors"
    lst = [1, None, 0, None]
    assert m.count_none(lst) == 2
    assert m.find_none(lst) is True
    assert m.count_nonzeros({"a": 0, "b": 1, "c": 2}) == 2
    r = range(5)
    assert all(m.tuple_iterator(tuple(r)))
    assert all(m.list_iterator(list(r)))
    assert all(m.sequence_iterator(r))
def test_iterator_passthrough():
    """#181: iterator passthrough did not compile"""
    from pybind11_tests.sequences_and_iterators import iterator_passthrough
    values = [3, 5, 7, 9, 11, 13, 15]
    assert list(iterator_passthrough(iter(values))) == values
def test_iterator_rvp():
    """#388: Can't make iterators via make_iterator() with different r/v policies """
    import pybind11_tests.sequences_and_iterators as m
    # Both factories yield the same values ...
    assert list(m.make_iterator_1()) == [1, 2, 3]
    assert list(m.make_iterator_2()) == [1, 2, 3]
    # ... but the differing return-value policies produce distinct iterator types.
    assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2()))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_pytypes.py | third_party/unitree_legged_sdk/pybind11/tests/test_pytypes.py | # -*- coding: utf-8 -*-
from __future__ import division
import pytest
import sys
import env # noqa: F401
from pybind11_tests import pytypes as m
from pybind11_tests import debug_enabled
def test_int(doc):
    """A C++ int return type is rendered as `int` in the generated signature."""
    assert doc(m.get_int) == "get_int() -> int"
def test_iterator(doc):
    """A py::iterator return type is rendered as `Iterator` in the signature."""
    assert doc(m.get_iterator) == "get_iterator() -> Iterator"
def test_iterable(doc):
    """A py::iterable return type is rendered as `Iterable` in the signature."""
    assert doc(m.get_iterable) == "get_iterable() -> Iterable"
def test_list(capture, doc):
    """py::list round-trips: a list built in C++ is mutable from Python and
    printable from C++; generated signatures use the `list` type name."""
    with capture:
        lst = m.get_list()
        assert lst == ["inserted-0", "overwritten", "inserted-2"]
        lst.append("value2")
        m.print_list(lst)
    assert capture.unordered == """
        Entry at position 0: value
        list item 0: inserted-0
        list item 1: overwritten
        list item 2: inserted-2
        list item 3: value2
    """
    assert doc(m.get_list) == "get_list() -> list"
    assert doc(m.print_list) == "print_list(arg0: list) -> None"
def test_none(capture, doc):
    """None-typed parameters and returns are rendered as `None` in docstrings."""
    expectations = [
        (m.get_none, "get_none() -> None"),
        (m.print_none, "print_none(arg0: None) -> None"),
    ]
    for func, signature in expectations:
        assert doc(func) == signature
def test_set(capture, doc):
    """py::set round-trips: a set built in C++ is mutable from Python,
    printable from C++, and `in`-style containment works via set_contains."""
    s = m.get_set()
    assert s == {"key1", "key2", "key3"}
    with capture:
        s.add("key4")
        m.print_set(s)
    assert capture.unordered == """
        key: key1
        key: key2
        key: key3
        key: key4
    """
    assert not m.set_contains(set([]), 42)
    assert m.set_contains({42}, 42)
    assert m.set_contains({"foo"}, "foo")
    assert doc(m.get_list) == "get_list() -> list"
    assert doc(m.print_list) == "print_list(arg0: list) -> None"
def test_dict(capture, doc):
    """py::dict round-trips: mutation from Python, printing from C++,
    containment via dict_contains, and the keyword-argument constructor."""
    d = m.get_dict()
    assert d == {"key": "value"}
    with capture:
        d["key2"] = "value2"
        m.print_dict(d)
    assert capture.unordered == """
        key: key, value=value
        key: key2, value=value2
    """
    assert not m.dict_contains({}, 42)
    assert m.dict_contains({42: None}, 42)
    assert m.dict_contains({"foo": None}, "foo")
    assert doc(m.get_dict) == "get_dict() -> dict"
    assert doc(m.print_dict) == "print_dict(arg0: dict) -> None"
    assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
def test_str(doc):
    """py::str construction from strings/bytes/objects, str.format from C++,
    and the (known-broken, #2380) handling of malformed UTF-8 bytes."""
    assert m.str_from_string().encode().decode() == "baz"
    assert m.str_from_bytes().encode().decode() == "boo"
    assert doc(m.str_from_bytes) == "str_from_bytes() -> str"
    class A(object):
        def __str__(self):
            return "this is a str"
        def __repr__(self):
            return "this is a repr"
    assert m.str_from_object(A()) == "this is a str"
    assert m.repr_from_object(A()) == "this is a repr"
    assert m.str_from_handle(A()) == "this is a str"
    s1, s2 = m.str_format()
    assert s1 == "1 + 2 = 3"
    assert s1 == s2
    malformed_utf8 = b"\x80"
    assert m.str_from_object(malformed_utf8) is malformed_utf8  # To be fixed; see #2380
    if env.PY2:
        # with pytest.raises(UnicodeDecodeError):
        #     m.str_from_object(malformed_utf8)
        with pytest.raises(UnicodeDecodeError):
            m.str_from_handle(malformed_utf8)
    else:
        # assert m.str_from_object(malformed_utf8) == "b'\\x80'"
        assert m.str_from_handle(malformed_utf8) == "b'\\x80'"
def test_bytes(doc):
    """py::bytes round-trips, and its docstring type name is `bytes`
    (`str` on Python 2, where bytes is str)."""
    assert m.bytes_from_string().decode() == "foo"
    assert m.bytes_from_str().decode() == "bar"
    type_name = "str" if env.PY2 else "bytes"
    assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format(type_name)
def test_capsule(capture):
    """Capsules created in C++ run their destructor when the Python wrapper
    is garbage-collected, for all three destructor/name variants."""
    pytest.gc_collect()
    with capture:
        a = m.return_capsule_with_destructor()
        del a
        pytest.gc_collect()
    assert capture.unordered == """
        creating capsule
        destructing capsule
    """
    with capture:
        a = m.return_capsule_with_destructor_2()
        del a
        pytest.gc_collect()
    assert capture.unordered == """
        creating capsule
        destructing capsule: 1234
    """
    with capture:
        a = m.return_capsule_with_name_and_destructor()
        del a
        pytest.gc_collect()
    assert capture.unordered == """
        created capsule (1234, 'pointer type description')
        destructing capsule (1234, 'pointer type description')
    """
def test_accessors():
    """C++-side object accessors (attr, item, call, iteration, implicit
    conversion) observed through accessor_api / tuple_accessor /
    accessor_assignment on a plain Python object."""
    class SubTestObject:
        attr_obj = 1
        attr_char = 2
    class TestObject:
        basic_attr = 1
        begin_end = [1, 2, 3]
        d = {"operator[object]": 1, "operator[char *]": 2}
        sub = SubTestObject()
        def func(self, x, *args):
            return self.basic_attr + x + sum(args)
    d = m.accessor_api(TestObject())
    assert d["basic_attr"] == 1
    assert d["begin_end"] == [1, 2, 3]
    assert d["operator[object]"] == 1
    assert d["operator[char *]"] == 2
    assert d["attr(object)"] == 1
    assert d["attr(char *)"] == 2
    assert d["missing_attr_ptr"] == "raised"
    assert d["missing_attr_chain"] == "raised"
    assert d["is_none"] is False
    assert d["operator()"] == 2
    assert d["operator*"] == 7
    assert d["implicit_list"] == [1, 2, 3]
    assert all(x in TestObject.__dict__ for x in d["implicit_dict"])
    assert m.tuple_accessor(tuple()) == (0, 1, 2)
    d = m.accessor_assignment()
    assert d["get"] == 0
    assert d["deferred_get"] == 0
    assert d["set"] == 1
    assert d["deferred_set"] == 1
    assert d["var"] == 99
def test_constructors():
    """C++ default and converting constructors are equivalent to type calls in Python"""
    # Default constructors: py::bytes(), py::str(), ... match bytes(), str(), ...
    types = [bytes, str, bool, int, float, tuple, list, dict, set]
    expected = {t.__name__: t() for t in types}
    if env.PY2:
        # Note that bytes.__name__ == 'str' in Python 2.
        # pybind11::str is unicode even under Python 2.
        expected["bytes"] = bytes()
        expected["str"] = unicode()  # noqa: F821
    assert m.default_constructors() == expected
    # Converting constructors: each input should convert as the Python type
    # call would (e.g. int("42") -> 42, set([...]) deduplicates).
    data = {
        bytes: b'41',  # Currently no supported or working conversions.
        str: 42,
        bool: "Not empty",
        int: "42",
        float: "+1e3",
        tuple: range(3),
        list: range(3),
        dict: [("two", 2), ("one", 1), ("three", 3)],
        set: [4, 4, 5, 6, 6, 6],
        memoryview: b'abc'
    }
    inputs = {k.__name__: v for k, v in data.items()}
    expected = {k.__name__: k(v) for k, v in data.items()}
    if env.PY2:  # Similar to the above. See comments above.
        inputs["bytes"] = b'41'
        inputs["str"] = 42
        expected["bytes"] = b'41'
        expected["str"] = u"42"
    assert m.converting_constructors(inputs) == expected
    assert m.cast_functions(inputs) == expected
    # Converting constructors and cast functions should just reference rather
    # than copy when no conversion is needed:
    noconv1 = m.converting_constructors(expected)
    for k in noconv1:
        assert noconv1[k] is expected[k]
    noconv2 = m.cast_functions(expected)
    for k in noconv2:
        assert noconv2[k] is expected[k]
def test_non_converting_constructors():
    """Non-converting py::bytes/none/ellipsis constructors raise TypeError,
    with a message naming both the actual and the expected type."""
    rejected = [
        ("bytes", range(10)),
        ("none", 42),
        ("ellipsis", 42),
    ]
    message = "Object of type '{}' is not an instance of '{}'"
    for type_name, value in rejected:
        with pytest.raises(TypeError) as excinfo:
            m.nonconverting_constructor(type_name, value)
        assert str(excinfo.value) == message.format(type(value).__name__, type_name)
def test_pybind11_str_raw_str():
    # specifically to exercise pybind11::str::raw_str
    cvt = m.convert_to_pybind11_str
    assert cvt(u"Str") == u"Str"
    # NOTE(review): due to conditional-expression precedence the next line
    # parses as `assert (cvt(...) == u"Bytes") if env.PY2 else "b'Bytes'"`,
    # i.e. on Python 3 it only evaluates a truthy string and tests nothing.
    # The intended form is presumably `== (u"Bytes" if env.PY2 else "b'Bytes'")`,
    # but note the bytes-passthrough checks at the bottom of this test suggest
    # cvt(bytes) currently returns bytes unchanged (see #2380) -- confirm the
    # expected Python 3 value before parenthesizing.
    assert cvt(b'Bytes') == u"Bytes" if env.PY2 else "b'Bytes'"
    assert cvt(None) == u"None"
    assert cvt(False) == u"False"
    assert cvt(True) == u"True"
    assert cvt(42) == u"42"
    assert cvt(2**65) == u"36893488147419103232"
    assert cvt(-1.50) == u"-1.5"
    assert cvt(()) == u"()"
    assert cvt((18,)) == u"(18,)"
    assert cvt([]) == u"[]"
    assert cvt([28]) == u"[28]"
    assert cvt({}) == u"{}"
    assert cvt({3: 4}) == u"{3: 4}"
    # NOTE(review): same precedence issue as above -- on Python 3 these two
    # asserts are vacuous; intended: `== (u"set([])" if env.PY2 else "set()")`.
    assert cvt(set()) == u"set([])" if env.PY2 else "set()"
    assert cvt({3, 3}) == u"set([3])" if env.PY2 else "{3}"
    valid_orig = u"Η±"
    valid_utf8 = valid_orig.encode("utf-8")
    valid_cvt = cvt(valid_utf8)
    assert type(valid_cvt) == bytes  # Probably surprising.
    assert valid_cvt == b'\xc7\xb1'
    malformed_utf8 = b'\x80'
    malformed_cvt = cvt(malformed_utf8)
    assert type(malformed_cvt) == bytes  # Probably surprising.
    assert malformed_cvt == b'\x80'
def test_implicit_casting():
    """Tests implicit casting when assigning or appending to dicts and lists."""
    z = m.get_implicit_casting()
    assert z['d'] == {
        'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
        'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
        'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
    }
    assert z['l'] == [3, 6, 9, 12, 15]
def test_print(capture):
    """py::print forwards args/sep/end/file/flush to Python's print; printing
    an unregistered C++ type raises with a debug-dependent message."""
    with capture:
        m.print_function()
    assert capture == """
        Hello, World!
        1 2.0 three True -- multiple args
        *args-and-a-custom-separator
        no new line here -- next print
        flush
        py::print + str.format = this
    """
    assert capture.stderr == "this goes to stderr"
    with pytest.raises(RuntimeError) as excinfo:
        m.print_failure()
    assert str(excinfo.value) == "make_tuple(): unable to convert " + (
        "argument of type 'UnregisteredType' to Python object"
        if debug_enabled else
        "arguments to Python object (compile in debug mode for details)"
    )
def test_hash():
    """C++-side py::hash delegates to __hash__ and raises TypeError for
    unhashable objects."""
    class ReportsValueAsHash(object):
        def __init__(self, value):
            self.value = value

        def __hash__(self):
            return self.value

    class Unhashable(object):
        __hash__ = None

    assert m.hash_function(ReportsValueAsHash(42)) == 42
    with pytest.raises(TypeError):
        m.hash_function(Unhashable())
def test_number_protocol():
    """C++-side number-protocol operations agree with the Python operators,
    in the fixed order produced by m.test_number_protocol."""
    for lhs, rhs in [(1, 1), (3, 5)]:
        expected = [
            lhs == rhs, lhs != rhs,
            lhs < rhs, lhs <= rhs, lhs > rhs, lhs >= rhs,
            lhs + rhs, lhs - rhs, lhs * rhs, lhs / rhs,
            lhs | rhs, lhs & rhs, lhs ^ rhs, lhs >> rhs, lhs << rhs,
        ]
        assert m.test_number_protocol(lhs, rhs) == expected
def test_list_slicing():
    """Slicing performed on the C++ side matches Python's own list slicing."""
    data = list(range(100))
    assert m.test_list_slicing(data) == data[::2]
def test_issue2361():
    # See issue #2361
    # str(None) works implicitly, but iterating None for a dict copy must
    # raise the usual TypeError.
    assert m.issue2361_str_implicit_copy_none() == "None"
    with pytest.raises(TypeError) as excinfo:
        assert m.issue2361_dict_implicit_copy_none()
    assert "'NoneType' object is not iterable" in str(excinfo.value)
@pytest.mark.parametrize('method, args, fmt, expected_view', [
    (m.test_memoryview_object, (b'red',), 'B', b'red'),
    (m.test_memoryview_buffer_info, (b'green',), 'B', b'green'),
    (m.test_memoryview_from_buffer, (False,), 'h', [3, 1, 4, 1, 5]),
    (m.test_memoryview_from_buffer, (True,), 'H', [2, 7, 1, 8]),
    (m.test_memoryview_from_buffer_nativeformat, (), '@i', [4, 7, 5]),
])
def test_memoryview(method, args, fmt, expected_view):
    """Each memoryview factory yields a view with the expected format code
    and element values."""
    view = method(*args)
    assert isinstance(view, memoryview)
    assert view.format == fmt
    if isinstance(expected_view, bytes) or not env.PY2:
        view_as_list = list(view)
    else:
        # Using max to pick non-zero byte (big-endian vs little-endian).
        view_as_list = [max([ord(c) for c in s]) for s in view]
    assert view_as_list == list(expected_view)
@pytest.mark.xfail("env.PYPY", reason="getrefcount is not available")
@pytest.mark.parametrize('method', [
    m.test_memoryview_object,
    m.test_memoryview_buffer_info,
])
def test_memoryview_refcount(method):
    """Creating a memoryview over a bytes object must hold a reference to it."""
    buf = b'\x0a\x0b\x0c\x0d'
    ref_before = sys.getrefcount(buf)
    view = method(buf)
    ref_after = sys.getrefcount(buf)
    assert ref_before < ref_after
    assert list(view) == list(buf)
def test_memoryview_from_buffer_empty_shape():
    """A zero-dimensional buffer yields an empty view on Python 3; Python 2's
    bytes(view) falls back to the object repr."""
    view = m.test_memoryview_from_buffer_empty_shape()
    assert isinstance(view, memoryview)
    assert view.format == 'B'
    if env.PY2:
        # Python 2 behavior is weird, but Python 3 (the future) is fine.
        # PyPy3 has <memoryview, while CPython 2 has <memory
        assert bytes(view).startswith(b'<memory')
    else:
        assert bytes(view) == b''
def test_test_memoryview_from_buffer_invalid_strides():
    """Inconsistent strides passed to memoryview::from_buffer raise RuntimeError."""
    with pytest.raises(RuntimeError):
        m.test_memoryview_from_buffer_invalid_strides()
def test_test_memoryview_from_buffer_nullptr():
    """from_buffer(nullptr) raises ValueError on Python 3; on Python 2 the
    call is only expected not to crash."""
    if env.PY2:
        m.test_memoryview_from_buffer_nullptr()
        return
    with pytest.raises(ValueError):
        m.test_memoryview_from_buffer_nullptr()
@pytest.mark.skipif("env.PY2")
def test_memoryview_from_memory():
    """memoryview::from_memory exposes a raw byte range with format 'B'."""
    view = m.test_memoryview_from_memory()
    assert isinstance(view, memoryview)
    assert view.format == 'B'
    assert bytes(view) == b'\xff\xe1\xab\x37'
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_enum.py | third_party/unitree_legged_sdk/pybind11/tests/test_enum.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import enums as m
def test_unscoped_enum():
    """Unscoped (C-style) enums: str/repr, read-only .name and __members__,
    docstrings, implicit int comparisons/ordering, and bitwise arithmetic."""
    assert str(m.UnscopedEnum.EOne) == "UnscopedEnum.EOne"
    assert str(m.UnscopedEnum.ETwo) == "UnscopedEnum.ETwo"
    assert str(m.EOne) == "UnscopedEnum.EOne"
    assert repr(m.UnscopedEnum.EOne) == "<UnscopedEnum.EOne: 1>"
    assert repr(m.UnscopedEnum.ETwo) == "<UnscopedEnum.ETwo: 2>"
    assert repr(m.EOne) == "<UnscopedEnum.EOne: 1>"
    # name property
    assert m.UnscopedEnum.EOne.name == "EOne"
    assert m.UnscopedEnum.ETwo.name == "ETwo"
    assert m.EOne.name == "EOne"
    # name readonly
    with pytest.raises(AttributeError):
        m.UnscopedEnum.EOne.name = ""
    # name returns a copy
    # NOTE(review): rebinding the local `foo` below cannot mutate the
    # attribute regardless of copy semantics, so this check is vacuous.
    foo = m.UnscopedEnum.EOne.name
    foo = "bar"
    assert m.UnscopedEnum.EOne.name == "EOne"
    # __members__ property
    assert m.UnscopedEnum.__members__ == \
        {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
    # __members__ readonly
    with pytest.raises(AttributeError):
        m.UnscopedEnum.__members__ = {}
    # __members__ returns a copy (mutating the returned dict must not leak back)
    foo = m.UnscopedEnum.__members__
    foo["bar"] = "baz"
    assert m.UnscopedEnum.__members__ == \
        {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
    for docstring_line in '''An unscoped enumeration
Members:
  EOne : Docstring for EOne
  ETwo : Docstring for ETwo
  EThree : Docstring for EThree'''.split('\n'):
        assert docstring_line in m.UnscopedEnum.__doc__
    # Unscoped enums will accept ==/!= int comparisons
    y = m.UnscopedEnum.ETwo
    assert y == 2
    assert 2 == y
    assert y != 3
    assert 3 != y
    # Compare with None
    assert (y != None)  # noqa: E711
    assert not (y == None)  # noqa: E711
    # Compare with an object
    assert (y != object())
    assert not (y == object())
    # Compare with string
    assert y != "2"
    assert "2" != y
    assert not ("2" == y)
    assert not (y == "2")
    with pytest.raises(TypeError):
        y < object()
    with pytest.raises(TypeError):
        y <= object()
    with pytest.raises(TypeError):
        y > object()
    with pytest.raises(TypeError):
        y >= object()
    with pytest.raises(TypeError):
        y | object()
    with pytest.raises(TypeError):
        y & object()
    with pytest.raises(TypeError):
        y ^ object()
    assert int(m.UnscopedEnum.ETwo) == 2
    assert str(m.UnscopedEnum(2)) == "UnscopedEnum.ETwo"
    # order
    assert m.UnscopedEnum.EOne < m.UnscopedEnum.ETwo
    assert m.UnscopedEnum.EOne < 2
    assert m.UnscopedEnum.ETwo > m.UnscopedEnum.EOne
    assert m.UnscopedEnum.ETwo > 1
    assert m.UnscopedEnum.ETwo <= 2
    assert m.UnscopedEnum.ETwo >= 2
    assert m.UnscopedEnum.EOne <= m.UnscopedEnum.ETwo
    assert m.UnscopedEnum.EOne <= 2
    assert m.UnscopedEnum.ETwo >= m.UnscopedEnum.EOne
    assert m.UnscopedEnum.ETwo >= 1
    assert not (m.UnscopedEnum.ETwo < m.UnscopedEnum.EOne)
    assert not (2 < m.UnscopedEnum.EOne)
    # arithmetic
    assert m.UnscopedEnum.EOne & m.UnscopedEnum.EThree == m.UnscopedEnum.EOne
    assert m.UnscopedEnum.EOne | m.UnscopedEnum.ETwo == m.UnscopedEnum.EThree
    assert m.UnscopedEnum.EOne ^ m.UnscopedEnum.EThree == m.UnscopedEnum.ETwo
def test_scoped_enum():
    """Scoped (enum class) values: never equal to ints, TypeError on int
    ordering comparisons, but fully ordered among themselves."""
    assert m.test_scoped_enum(m.ScopedEnum.Three) == "ScopedEnum::Three"
    z = m.ScopedEnum.Two
    assert m.test_scoped_enum(z) == "ScopedEnum::Two"
    # Scoped enums will *NOT* accept ==/!= int comparisons (Will always return False)
    assert not z == 3
    assert not 3 == z
    assert z != 3
    assert 3 != z
    # Compare with None
    assert (z != None)  # noqa: E711
    assert not (z == None)  # noqa: E711
    # Compare with an object
    assert (z != object())
    assert not (z == object())
    # Scoped enums will *NOT* accept >, <, >= and <= int comparisons (Will throw exceptions)
    with pytest.raises(TypeError):
        z > 3
    with pytest.raises(TypeError):
        z < 3
    with pytest.raises(TypeError):
        z >= 3
    with pytest.raises(TypeError):
        z <= 3
    # order
    assert m.ScopedEnum.Two < m.ScopedEnum.Three
    assert m.ScopedEnum.Three > m.ScopedEnum.Two
    assert m.ScopedEnum.Two <= m.ScopedEnum.Three
    assert m.ScopedEnum.Two <= m.ScopedEnum.Two
    assert m.ScopedEnum.Two >= m.ScopedEnum.Two
    assert m.ScopedEnum.Three >= m.ScopedEnum.Two
def test_implicit_conversion():
    """Class-scoped unscoped enums convert implicitly to int when passed to a
    bound function, and remain usable/hashable as dict keys."""
    assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "EMode.EFirstMode"
    assert str(m.ClassWithUnscopedEnum.EFirstMode) == "EMode.EFirstMode"
    assert repr(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "<EMode.EFirstMode: 1>"
    assert repr(m.ClassWithUnscopedEnum.EFirstMode) == "<EMode.EFirstMode: 1>"
    f = m.ClassWithUnscopedEnum.test_function
    first = m.ClassWithUnscopedEnum.EFirstMode
    second = m.ClassWithUnscopedEnum.ESecondMode
    assert f(first) == 1
    assert f(first) == f(first)
    assert not f(first) != f(first)
    assert f(first) != f(second)
    assert not f(first) == f(second)
    assert f(first) == int(f(first))
    assert not f(first) != int(f(first))
    assert f(first) != int(f(second))
    assert not f(first) == int(f(second))
    # noinspection PyDictCreation
    x = {f(first): 1, f(second): 2}
    x[f(first)] = 3
    x[f(second)] = 4
    # Hashing test
    assert repr(x) == "{<EMode.EFirstMode: 1>: 3, <EMode.ESecondMode: 2>: 4}"
def test_binary_operators():
    """Bitwise |, &, ^ and ~ on an arithmetic-enabled Flags enum, including
    mixed enum/int operands."""
    assert int(m.Flags.Read) == 4
    assert int(m.Flags.Write) == 2
    assert int(m.Flags.Execute) == 1
    assert int(m.Flags.Read | m.Flags.Write | m.Flags.Execute) == 7
    assert int(m.Flags.Read | m.Flags.Write) == 6
    assert int(m.Flags.Read | m.Flags.Execute) == 5
    assert int(m.Flags.Write | m.Flags.Execute) == 3
    assert int(m.Flags.Write | 1) == 3
    assert ~m.Flags.Write == -3
    state = m.Flags.Read | m.Flags.Write
    assert (state & m.Flags.Read) != 0
    assert (state & m.Flags.Write) != 0
    assert (state & m.Flags.Execute) == 0
    assert (state & 1) == 0
    state2 = ~state
    assert state2 == -7
    assert int(state ^ state2) == -1
def test_enum_to_int():
    """Enum values convert cleanly to int, unsigned and long long on the
    C++ side (calls only need to not raise)."""
    converters = (
        m.test_enum_to_int,
        m.test_enum_to_uint,
        m.test_enum_to_long_long,
    )
    for convert in converters:
        convert(m.Flags.Read)
        convert(m.ClassWithUnscopedEnum.EMode.EFirstMode)
def test_duplicate_enum_name():
    """Registering the same element name twice on one enum raises ValueError."""
    expected_message = 'SimpleEnum: element "ONE" already exists!'
    with pytest.raises(ValueError) as excinfo:
        m.register_bad_enum()
    assert str(excinfo.value) == expected_message
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_numpy_vectorize.py | third_party/unitree_legged_sdk/pybind11/tests/test_numpy_vectorize.py | # -*- coding: utf-8 -*-
import pytest
from pybind11_tests import numpy_vectorize as m
np = pytest.importorskip("numpy")
def test_vectorize(capture):
    """py::vectorize broadcasts scalars, 0-d arrays, vectors and matrices,
    preserves F-contiguity when all inputs are F-ordered, and handles
    broadcasting of rows/columns and non-contiguous slices; the captured
    per-element call log pins the exact evaluation order."""
    assert np.isclose(m.vectorized_func3(np.array(3 + 7j)), [6 + 14j])
    for f in [m.vectorized_func, m.vectorized_func2]:
        with capture:
            assert np.isclose(f(1, 2, 3), 6)
        assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
        with capture:
            assert np.isclose(f(np.array(1), np.array(2), 3), 6)
        assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
        with capture:
            assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36])
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=3)
            my_func(x:int=3, y:float=4, z:float=3)
        """
        with capture:
            a = np.array([[1, 2], [3, 4]], order='F')
            b = np.array([[10, 20], [30, 40]], order='F')
            c = 3
            result = f(a, b, c)
            assert np.allclose(result, a * b * c)
            assert result.flags.f_contiguous
        # All inputs are F order and full or singletons, so we the result is in col-major order:
        assert capture == """
            my_func(x:int=1, y:float=10, z:float=3)
            my_func(x:int=3, y:float=30, z:float=3)
            my_func(x:int=2, y:float=20, z:float=3)
            my_func(x:int=4, y:float=40, z:float=3)
        """
        with capture:
            a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=3)
            my_func(x:int=3, y:float=4, z:float=3)
            my_func(x:int=5, y:float=6, z:float=3)
            my_func(x:int=7, y:float=8, z:float=3)
            my_func(x:int=9, y:float=10, z:float=3)
            my_func(x:int=11, y:float=12, z:float=3)
        """
        with capture:
            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=2)
            my_func(x:int=2, y:float=3, z:float=2)
            my_func(x:int=3, y:float=4, z:float=2)
            my_func(x:int=4, y:float=2, z:float=2)
            my_func(x:int=5, y:float=3, z:float=2)
            my_func(x:int=6, y:float=4, z:float=2)
        """
        with capture:
            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=2)
            my_func(x:int=2, y:float=2, z:float=2)
            my_func(x:int=3, y:float=2, z:float=2)
            my_func(x:int=4, y:float=3, z:float=2)
            my_func(x:int=5, y:float=3, z:float=2)
            my_func(x:int=6, y:float=3, z:float=2)
        """
        with capture:
            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=2)
            my_func(x:int=2, y:float=2, z:float=2)
            my_func(x:int=3, y:float=2, z:float=2)
            my_func(x:int=4, y:float=3, z:float=2)
            my_func(x:int=5, y:float=3, z:float=2)
            my_func(x:int=6, y:float=3, z:float=2)
        """
        with capture:
            a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=2)
            my_func(x:int=3, y:float=2, z:float=2)
            my_func(x:int=4, y:float=3, z:float=2)
            my_func(x:int=6, y:float=3, z:float=2)
        """
        with capture:
            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2
            assert np.allclose(f(a, b, c), a * b * c)
        assert capture == """
            my_func(x:int=1, y:float=2, z:float=2)
            my_func(x:int=3, y:float=2, z:float=2)
            my_func(x:int=4, y:float=3, z:float=2)
            my_func(x:int=6, y:float=3, z:float=2)
        """
def test_type_selection():
    """A vectorized overload set dispatches on the input ndarray's dtype."""
    branch_by_dtype = [
        ([1], np.int32, "Int branch taken."),
        ([1.0], np.float32, "Float branch taken."),
        ([1.0j], np.complex64, "Complex float branch taken."),
    ]
    for values, dtype, message in branch_by_dtype:
        assert m.selective_func(np.array(values, dtype=dtype)) == message
def test_docs(doc):
    """A vectorized function's signature advertises ndarray argument types."""
    assert doc(m.vectorized_func) == """
        vectorized_func(arg0: numpy.ndarray[numpy.int32], arg1: numpy.ndarray[numpy.float32], arg2: numpy.ndarray[numpy.float64]) -> object
    """  # noqa: E501 line too long
def test_trivial_broadcasting():
    """Classify broadcasts as c_trivial / f_trivial / non_trivial based on the
    contiguity and ordering of the inputs, and check the output array's
    contiguity flags match the chosen path."""
    trivial, vectorized_is_trivial = m.trivial, m.vectorized_is_trivial
    assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial
    assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial
    assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial
    assert trivial.c_trivial == vectorized_is_trivial(
        np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3)
    assert vectorized_is_trivial(
        np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial
    assert vectorized_is_trivial(
        np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial
    z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32')
    z2 = np.array(z1, dtype='float32')
    z3 = np.array(z1, dtype='float64')
    assert vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial
    assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial
    assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial
    assert vectorized_is_trivial(z1, z2, 1) == trivial.c_trivial
    assert vectorized_is_trivial(z1[::2, ::2], 1, 1) == trivial.non_trivial
    assert vectorized_is_trivial(1, 1, z1[::2, ::2]) == trivial.c_trivial
    assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial
    assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial
    y1 = np.array(z1, order='F')
    y2 = np.array(y1)
    y3 = np.array(y1)
    assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial
    assert vectorized_is_trivial(y1, 1, 1) == trivial.f_trivial
    assert vectorized_is_trivial(1, y2, 1) == trivial.f_trivial
    assert vectorized_is_trivial(1, 1, y3) == trivial.f_trivial
    assert vectorized_is_trivial(y1, z2, 1) == trivial.non_trivial
    assert vectorized_is_trivial(z1[1::4, 1::4], y2, 1) == trivial.f_trivial
    assert vectorized_is_trivial(y1[1::4, 1::4], z2, 1) == trivial.c_trivial
    assert m.vectorized_func(z1, z2, z3).flags.c_contiguous
    assert m.vectorized_func(y1, y2, y3).flags.f_contiguous
    assert m.vectorized_func(z1, 1, 1).flags.c_contiguous
    assert m.vectorized_func(1, y2, 1).flags.f_contiguous
    assert m.vectorized_func(z1[1::4, 1::4], y2, 1).flags.f_contiguous
    assert m.vectorized_func(y1[1::4, 1::4], z2, 1).flags.c_contiguous
def test_passthrough_arguments(doc):
    """Non-vectorized (pass-through) parameters mix with vectorized ndarray
    parameters in both the generated signature and the broadcast result."""
    assert doc(m.vec_passthrough) == (
        "vec_passthrough(" + ", ".join([
            "arg0: float",
            "arg1: numpy.ndarray[numpy.float64]",
            "arg2: numpy.ndarray[numpy.float64]",
            "arg3: numpy.ndarray[numpy.int32]",
            "arg4: int",
            "arg5: m.numpy_vectorize.NonPODClass",
            "arg6: numpy.ndarray[numpy.float64]"]) + ") -> object")
    b = np.array([[10, 20, 30]], dtype='float64')
    c = np.array([100, 200])  # NOT a vectorized argument
    d = np.array([[1000], [2000], [3000]], dtype='int')
    g = np.array([[1000000, 2000000, 3000000]], dtype='int')  # requires casting
    assert np.all(
        m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) ==
        np.array([[1111111, 2111121, 3111131],
                  [1112111, 2112121, 3112131],
                  [1113111, 2113121, 3113131]]))
def test_method_vectorization():
    """A vectorized bound method broadcasts a row vector against a column
    vector, adding the instance state to every element."""
    instance = m.VectorizeTestClass(3)
    row = np.array([1, 2], dtype='int')
    column = np.array([[10], [20]], dtype='float32')
    expected = [[14, 15], [24, 25]]
    assert np.all(instance.method(row, column) == expected)
def test_array_collapse():
    """Scalar/0-d inputs collapse the result to a scalar; any array-shaped
    input keeps the broadcast shape as an ndarray."""
    assert not isinstance(m.vectorized_func(1, 2, 3), np.ndarray)
    assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray)
    one_dim = m.vectorized_func([1], 2, 3)
    assert isinstance(one_dim, np.ndarray)
    assert one_dim.shape == (1,)
    three_dim = m.vectorized_func(1, [[[2]]], 3)
    assert isinstance(three_dim, np.ndarray)
    assert three_dim.shape == (1, 1, 1)
def test_vectorized_noreturn():
    """A void vectorized function is invoked once per element purely for its
    side effect on the mutable accumulator object."""
    accumulator = m.NonPODClass(0)
    assert accumulator.value == 0
    steps = [
        ([1, 2, 3, 4], 10),       # 0 + 1+2+3+4
        (1, 11),                  # scalar argument
        ([[1, 1], [2, 3]], 18),   # nested (2x2) argument
    ]
    for argument, running_total in steps:
        m.add_to(accumulator, argument)
        assert accumulator.value == running_total
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_virtual_functions.py | third_party/unitree_legged_sdk/pybind11/tests/test_virtual_functions.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
m = pytest.importorskip("pybind11_tests.virtual_functions")
from pybind11_tests import ConstructorStats # noqa: E402
def test_override(capture, msg):
    """Python subclasses can override virtual (and pure virtual) C++ methods;
    un-overridden pure virtuals raise, and ConstructorStats confirms no
    extra copies are made across the override boundary."""
    class ExtendedExampleVirt(m.ExampleVirt):
        def __init__(self, state):
            super(ExtendedExampleVirt, self).__init__(state + 1)
            self.data = "Hello world"
        def run(self, value):
            print('ExtendedExampleVirt::run(%i), calling parent..' % value)
            return super(ExtendedExampleVirt, self).run(value + 1)
        def run_bool(self):
            print('ExtendedExampleVirt::run_bool()')
            return False
        def get_string1(self):
            return "override1"
        def pure_virtual(self):
            print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
    class ExtendedExampleVirt2(ExtendedExampleVirt):
        def __init__(self, state):
            super(ExtendedExampleVirt2, self).__init__(state + 1)
        def get_string2(self):
            return "override2"
    ex12 = m.ExampleVirt(10)
    with capture:
        assert m.runExampleVirt(ex12, 20) == 30
    assert capture == """
        Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
    """  # noqa: E501 line too long
    # Calling a pure virtual with no Python override must raise.
    with pytest.raises(RuntimeError) as excinfo:
        m.runExampleVirtVirtual(ex12)
    assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
    ex12p = ExtendedExampleVirt(10)
    with capture:
        assert m.runExampleVirt(ex12p, 20) == 32
    assert capture == """
        ExtendedExampleVirt::run(20), calling parent..
        Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
    """  # noqa: E501 line too long
    with capture:
        assert m.runExampleVirtBool(ex12p) is False
    assert capture == "ExtendedExampleVirt::run_bool()"
    with capture:
        m.runExampleVirtVirtual(ex12p)
    assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"
    ex12p2 = ExtendedExampleVirt2(15)
    with capture:
        assert m.runExampleVirt(ex12p2, 50) == 68
    assert capture == """
        ExtendedExampleVirt::run(50), calling parent..
        Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
    """  # noqa: E501 line too long
    cstats = ConstructorStats.get(m.ExampleVirt)
    assert cstats.alive() == 3
    del ex12, ex12p, ex12p2
    assert cstats.alive() == 0
    assert cstats.values() == ['10', '11', '17']
    assert cstats.copy_constructions == 0
    assert cstats.move_constructions >= 0
def test_alias_delay_initialization1(capture):
    """`A` only initializes its trampoline class when we inherit from it
    If we just create and use an A instance directly, the trampoline initialization is
    bypassed and we only initialize an A() instead (for performance reasons).
    """
    class B(m.A):
        def __init__(self):
            super(B, self).__init__()
        def f(self):
            print("In python f()")
    # C++ version: no PyA trampoline output expected.
    with capture:
        a = m.A()
        m.call_f(a)
        del a
        pytest.gc_collect()
    assert capture == "A.f()"
    # Python version: trampoline ctor/dtor and dispatch are visible.
    with capture:
        b = B()
        m.call_f(b)
        del b
        pytest.gc_collect()
    assert capture == """
        PyA.PyA()
        PyA.f()
        In python f()
        PyA.~PyA()
    """
def test_alias_delay_initialization2(capture):
    """`A2`, unlike the above, is configured to always initialize the alias
    While the extra initialization and extra class layer has small virtual dispatch
    performance penalty, it also allows us to do more things with the trampoline
    class such as defining local variables and performing construction/destruction.
    """
    class B2(m.A2):
        def __init__(self):
            super(B2, self).__init__()
        def f(self):
            print("In python B2.f()")
    # No python subclass version: trampoline is constructed even for plain A2.
    with capture:
        a2 = m.A2()
        m.call_f(a2)
        del a2
        pytest.gc_collect()
        a3 = m.A2(1)
        m.call_f(a3)
        del a3
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
    """
    # Python subclass version: dispatch reaches the Python override.
    with capture:
        b2 = B2()
        m.call_f(b2)
        del b2
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        In python B2.f()
        PyA2.~PyA2()
    """
# PyPy: Reference count > 1 causes call with noncopyable instance
# to fail in ncv1.print_nc()
@pytest.mark.xfail("env.PYPY")
@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
def test_move_support():
class NCVirtExt(m.NCVirt):
def get_noncopyable(self, a, b):
# Constructs and returns a new instance:
nc = m.NonCopyable(a * a, b * b)
return nc
def get_movable(self, a, b):
# Return a referenced copy
self.movable = m.Movable(a, b)
return self.movable
class NCVirtExt2(m.NCVirt):
def get_noncopyable(self, a, b):
# Keep a reference: this is going to throw an exception
self.nc = m.NonCopyable(a, b)
return self.nc
def get_movable(self, a, b):
# Return a new instance without storing it
return m.Movable(a, b)
ncv1 = NCVirtExt()
assert ncv1.print_nc(2, 3) == "36"
assert ncv1.print_movable(4, 5) == "9"
ncv2 = NCVirtExt2()
assert ncv2.print_movable(7, 7) == "14"
# Don't check the exception message here because it differs under debug/non-debug mode
with pytest.raises(RuntimeError):
ncv2.print_nc(9, 9)
nc_stats = ConstructorStats.get(m.NonCopyable)
mv_stats = ConstructorStats.get(m.Movable)
assert nc_stats.alive() == 1
assert mv_stats.alive() == 1
del ncv1, ncv2
assert nc_stats.alive() == 0
assert mv_stats.alive() == 0
assert nc_stats.values() == ['4', '9', '9', '9']
assert mv_stats.values() == ['4', '5', '7', '7']
assert nc_stats.copy_constructions == 0
assert mv_stats.copy_constructions == 1
assert nc_stats.move_constructions >= 0
assert mv_stats.move_constructions >= 0
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
class PyClass1(m.DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(m.DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return m.dispatch_issue_go(p)
b = PyClass2()
assert m.dispatch_issue_go(b) == "Yay.."
def test_override_ref():
"""#392/397: overriding reference-returning functions"""
o = m.OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_inherited_virtuals():
class AR(m.A_Repeat):
def unlucky_number(self):
return 99
class AT(m.A_Tpl):
def unlucky_number(self):
return 999
obj = AR()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 99
assert obj.say_everything() == "hi 99"
obj = AT()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 999
assert obj.say_everything() == "hi 999"
for obj in [m.B_Repeat(), m.B_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 13
assert obj.lucky_number() == 7.0
assert obj.say_everything() == "B says hi 1 times 13"
for obj in [m.C_Repeat(), m.C_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CR(m.C_Repeat):
def lucky_number(self):
return m.C_Repeat.lucky_number(self) + 1.25
obj = CR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 889.25
assert obj.say_everything() == "B says hi 1 times 4444"
class CT(m.C_Tpl):
pass
obj = CT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CCR(CR):
def lucky_number(self):
return CR.lucky_number(self) * 10
obj = CCR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 8892.5
assert obj.say_everything() == "B says hi 1 times 4444"
class CCT(CT):
def lucky_number(self):
return CT.lucky_number(self) * 1000
obj = CCT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888000.0
assert obj.say_everything() == "B says hi 1 times 4444"
class DR(m.D_Repeat):
def unlucky_number(self):
return 123
def lucky_number(self):
return 42.0
for obj in [m.D_Repeat(), m.D_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
obj = DR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 123
assert obj.lucky_number() == 42.0
assert obj.say_everything() == "B says hi 1 times 123"
class DT(m.D_Tpl):
def say_something(self, times):
return "DT says:" + (' quack' * times)
def unlucky_number(self):
return 1234
def lucky_number(self):
return -4.25
obj = DT()
assert obj.say_something(3) == "DT says: quack quack quack"
assert obj.unlucky_number() == 1234
assert obj.lucky_number() == -4.25
assert obj.say_everything() == "DT says: quack 1234"
class DT2(DT):
def say_something(self, times):
return "DT2: " + ('QUACK' * times)
def unlucky_number(self):
return -3
class BT(m.B_Tpl):
def say_something(self, times):
return "BT" * times
def unlucky_number(self):
return -7
def lucky_number(self):
return -1.375
obj = BT()
assert obj.say_something(3) == "BTBTBT"
assert obj.unlucky_number() == -7
assert obj.lucky_number() == -1.375
assert obj.say_everything() == "BT -7"
def test_issue_1454():
# Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
m.test_gil()
m.test_gil_from_thread()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_async.py | third_party/unitree_legged_sdk/pybind11/tests/test_async.py | # -*- coding: utf-8 -*-
import pytest
asyncio = pytest.importorskip("asyncio")
m = pytest.importorskip("pybind11_tests.async_module")
@pytest.fixture
def event_loop():
loop = asyncio.new_event_loop()
yield loop
loop.close()
async def get_await_result(x):
return await x
def test_await(event_loop):
assert 5 == event_loop.run_until_complete(get_await_result(m.SupportsAsync()))
def test_await_missing(event_loop):
with pytest.raises(TypeError):
event_loop.run_until_complete(get_await_result(m.DoesNotSupportAsync()))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_gil_scoped.py | third_party/unitree_legged_sdk/pybind11/tests/test_gil_scoped.py | # -*- coding: utf-8 -*-
import multiprocessing
import threading
from pybind11_tests import gil_scoped as m
def _run_in_process(target, *args, **kwargs):
"""Runs target in process and returns its exitcode after 10s (None if still alive)."""
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
# Do not need to wait much, 10s should be more than enough.
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.terminate()
def _python_to_cpp_to_python():
"""Calls different C++ functions that come back to Python."""
class ExtendedVirtClass(m.VirtClass):
def virtual_func(self):
pass
def pure_virtual_func(self):
pass
extended = ExtendedVirtClass()
m.test_callback_py_obj(lambda: None)
m.test_callback_std_func(lambda: None)
m.test_callback_virtual_func(extended)
m.test_callback_pure_virtual_func(extended)
def _python_to_cpp_to_python_from_threads(num_threads, parallel=False):
"""Calls different C++ functions that come back to Python, from Python threads."""
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=_python_to_cpp_to_python)
thread.daemon = True
thread.start()
if parallel:
threads.append(thread)
else:
thread.join()
for thread in threads:
thread.join()
# TODO: FIXME, sometimes returns -11 (segfault) instead of 0 on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread():
"""Makes sure there is no GIL deadlock when running in a thread.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0
# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_parallel():
"""Makes sure there is no GIL deadlock when running in a thread multiple times in parallel.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0
# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_sequential():
"""Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_process():
"""Makes sure there is no GIL deadlock when using processes.
This test is for completion, but it was never an issue.
"""
assert _run_in_process(_python_to_cpp_to_python) == 0
def test_cross_module_gil():
"""Makes sure that the GIL can be acquired by another module from a GIL-released state."""
m.test_cross_module_gil() # Should not raise a SIGSEGV
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_constants_and_functions.py | third_party/unitree_legged_sdk/pybind11/tests/test_constants_and_functions.py | # -*- coding: utf-8 -*-
import pytest
m = pytest.importorskip("pybind11_tests.constants_and_functions")
def test_constants():
assert m.some_constant == 14
def test_function_overloading():
assert m.test_function() == "test_function()"
assert m.test_function(7) == "test_function(7)"
assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)"
assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)"
assert m.test_function() == "test_function()"
assert m.test_function("abcd") == "test_function(char *)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(2.0, 2) == "test_function(float, int)"
def test_bytes():
assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]"
def test_exception_specifiers():
c = m.C()
assert c.m1(2) == 1
assert c.m2(3) == 1
assert c.m3(5) == 2
assert c.m4(7) == 3
assert c.m5(10) == 5
assert c.m6(14) == 8
assert c.m7(20) == 13
assert c.m8(29) == 21
assert m.f1(33) == 34
assert m.f2(53) == 55
assert m.f3(86) == 89
assert m.f4(140) == 144
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_stl_binders.py | third_party/unitree_legged_sdk/pybind11/tests/test_stl_binders.py | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import stl_binders as m
def test_vector_int():
v_int = m.VectorInt([0, 0])
assert len(v_int) == 2
assert bool(v_int) is True
# test construction from a generator
v_int1 = m.VectorInt(x for x in range(5))
assert v_int1 == m.VectorInt([0, 1, 2, 3, 4])
v_int2 = m.VectorInt([0, 0])
assert v_int == v_int2
v_int2[1] = 1
assert v_int != v_int2
v_int2.append(2)
v_int2.insert(0, 1)
v_int2.insert(0, 2)
v_int2.insert(0, 3)
v_int2.insert(6, 3)
assert str(v_int2) == "VectorInt[3, 2, 1, 0, 1, 2, 3]"
with pytest.raises(IndexError):
v_int2.insert(8, 4)
v_int.append(99)
v_int2[2:-2] = v_int
assert v_int2 == m.VectorInt([3, 2, 0, 0, 99, 2, 3])
del v_int2[1:3]
assert v_int2 == m.VectorInt([3, 0, 99, 2, 3])
del v_int2[0]
assert v_int2 == m.VectorInt([0, 99, 2, 3])
v_int2.extend(m.VectorInt([4, 5]))
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5])
v_int2.extend([6, 7])
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
# test error handling, and that the vector is unchanged
with pytest.raises(RuntimeError):
v_int2.extend([8, 'a'])
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
# test extending from a generator
v_int2.extend(x for x in range(5))
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4])
# test negative indexing
assert v_int2[-1] == 4
# insert with negative index
v_int2.insert(-1, 88)
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88, 4])
# delete negative index
del v_int2[-1]
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88])
v_int2.clear()
assert len(v_int2) == 0
# Older PyPy's failed here, related to the PyPy's buffer protocol.
def test_vector_buffer():
b = bytearray([1, 2, 3, 4])
v = m.VectorUChar(b)
assert v[1] == 2
v[2] = 5
mv = memoryview(v) # We expose the buffer interface
if not env.PY2:
assert mv[2] == 5
mv[2] = 6
else:
assert mv[2] == '\x05'
mv[2] = '\x06'
assert v[2] == 6
if not env.PY2:
mv = memoryview(b)
v = m.VectorUChar(mv[::2])
assert v[1] == 3
with pytest.raises(RuntimeError) as excinfo:
m.create_undeclstruct() # Undeclared struct contents, no buffer interface
assert "NumPy type info missing for " in str(excinfo.value)
def test_vector_buffer_numpy():
np = pytest.importorskip("numpy")
a = np.array([1, 2, 3, 4], dtype=np.int32)
with pytest.raises(TypeError):
m.VectorInt(a)
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.uintc)
v = m.VectorInt(a[0, :])
assert len(v) == 4
assert v[2] == 3
ma = np.asarray(v)
ma[2] = 5
assert v[2] == 5
v = m.VectorInt(a[:, 1])
assert len(v) == 3
assert v[2] == 10
v = m.get_vectorstruct()
assert v[0].x == 5
ma = np.asarray(v)
ma[1]['x'] = 99
assert v[1].x == 99
v = m.VectorStruct(np.zeros(3, dtype=np.dtype([('w', 'bool'), ('x', 'I'),
('y', 'float64'), ('z', 'bool')], align=True)))
assert len(v) == 3
b = np.array([1, 2, 3, 4], dtype=np.uint8)
v = m.VectorUChar(b[::2])
assert v[1] == 3
def test_vector_bool():
import pybind11_cross_module_tests as cm
vv_c = cm.VectorBool()
for i in range(10):
vv_c.append(i % 2 == 0)
for i in range(10):
assert vv_c[i] == (i % 2 == 0)
assert str(vv_c) == "VectorBool[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]"
def test_vector_custom():
v_a = m.VectorEl()
v_a.append(m.El(1))
v_a.append(m.El(2))
assert str(v_a) == "VectorEl[El{1}, El{2}]"
vv_a = m.VectorVectorEl()
vv_a.append(v_a)
vv_b = vv_a[0]
assert str(vv_b) == "VectorEl[El{1}, El{2}]"
def test_map_string_double():
mm = m.MapStringDouble()
mm['a'] = 1
mm['b'] = 2.5
assert list(mm) == ['a', 'b']
assert list(mm.items()) == [('a', 1), ('b', 2.5)]
assert str(mm) == "MapStringDouble{a: 1, b: 2.5}"
um = m.UnorderedMapStringDouble()
um['ua'] = 1.1
um['ub'] = 2.6
assert sorted(list(um)) == ['ua', 'ub']
assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
assert "UnorderedMapStringDouble" in str(um)
def test_map_string_double_const():
mc = m.MapStringDoubleConst()
mc['a'] = 10
mc['b'] = 20.5
assert str(mc) == "MapStringDoubleConst{a: 10, b: 20.5}"
umc = m.UnorderedMapStringDoubleConst()
umc['a'] = 11
umc['b'] = 21.5
str(umc)
def test_noncopyable_containers():
# std::vector
vnc = m.get_vnc(5)
for i in range(0, 5):
assert vnc[i].value == i + 1
for i, j in enumerate(vnc, start=1):
assert j.value == i
# std::deque
dnc = m.get_dnc(5)
for i in range(0, 5):
assert dnc[i].value == i + 1
i = 1
for j in dnc:
assert(j.value == i)
i += 1
# std::map
mnc = m.get_mnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# std::unordered_map
mnc = m.get_umnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# nested std::map<std::vector>
nvnc = m.get_nvnc(5)
for i in range(1, 6):
for j in range(0, 5):
assert nvnc[i][j].value == j + 1
# Note: maps do not have .values()
for _, v in nvnc.items():
for i, j in enumerate(v, start=1):
assert j.value == i
# nested std::map<std::map>
nmnc = m.get_nmnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert nmnc[i][j].value == 10 * j
vsum = 0
for _, v_o in nmnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
# nested std::unordered_map<std::unordered_map>
numnc = m.get_numnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert numnc[i][j].value == 10 * j
vsum = 0
for _, v_o in numnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
def test_map_delitem():
mm = m.MapStringDouble()
mm['a'] = 1
mm['b'] = 2.5
assert list(mm) == ['a', 'b']
assert list(mm.items()) == [('a', 1), ('b', 2.5)]
del mm['a']
assert list(mm) == ['b']
assert list(mm.items()) == [('b', 2.5)]
um = m.UnorderedMapStringDouble()
um['ua'] = 1.1
um['ub'] = 2.6
assert sorted(list(um)) == ['ua', 'ub']
assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
del um['ua']
assert sorted(list(um)) == ['ub']
assert sorted(list(um.items())) == [('ub', 2.6)]
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/env.py | third_party/unitree_legged_sdk/pybind11/tests/env.py | # -*- coding: utf-8 -*-
import platform
import sys
LINUX = sys.platform.startswith("linux")
MACOS = sys.platform.startswith("darwin")
WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin")
CPYTHON = platform.python_implementation() == "CPython"
PYPY = platform.python_implementation() == "PyPy"
PY2 = sys.version_info.major == 2
PY = sys.version_info
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/extra_setuptools/test_setuphelper.py | third_party/unitree_legged_sdk/pybind11/tests/extra_setuptools/test_setuphelper.py | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
from textwrap import dedent
import pytest
DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
@pytest.mark.parametrize("parallel", [False, True])
@pytest.mark.parametrize("std", [11, 0])
def test_simple_setup_py(monkeypatch, tmpdir, parallel, std):
monkeypatch.chdir(tmpdir)
monkeypatch.syspath_prepend(MAIN_DIR)
(tmpdir / "setup.py").write_text(
dedent(
u"""\
import sys
sys.path.append({MAIN_DIR!r})
from setuptools import setup, Extension
from pybind11.setup_helpers import build_ext, Pybind11Extension
std = {std}
ext_modules = [
Pybind11Extension(
"simple_setup",
sorted(["main.cpp"]),
cxx_std=std,
),
]
cmdclass = dict()
if std == 0:
cmdclass["build_ext"] = build_ext
parallel = {parallel}
if parallel:
from pybind11.setup_helpers import ParallelCompile
ParallelCompile().install()
setup(
name="simple_setup_package",
cmdclass=cmdclass,
ext_modules=ext_modules,
)
"""
).format(MAIN_DIR=MAIN_DIR, std=std, parallel=parallel),
encoding="ascii",
)
(tmpdir / "main.cpp").write_text(
dedent(
u"""\
#include <pybind11/pybind11.h>
int f(int x) {
return x * 3;
}
PYBIND11_MODULE(simple_setup, m) {
m.def("f", &f);
}
"""
),
encoding="ascii",
)
subprocess.check_call(
[sys.executable, "setup.py", "build_ext", "--inplace"],
stdout=sys.stdout,
stderr=sys.stderr,
)
# Debug helper printout, normally hidden
for item in tmpdir.listdir():
print(item.basename)
assert (
len([f for f in tmpdir.listdir() if f.basename.startswith("simple_setup")]) == 1
)
assert len(list(tmpdir.listdir())) == 4 # two files + output + build_dir
(tmpdir / "test.py").write_text(
dedent(
u"""\
import simple_setup
assert simple_setup.f(3) == 9
"""
),
encoding="ascii",
)
subprocess.check_call(
[sys.executable, "test.py"], stdout=sys.stdout, stderr=sys.stderr
)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_cmake_build/test.py | third_party/unitree_legged_sdk/pybind11/tests/test_cmake_build/test.py | # -*- coding: utf-8 -*-
import sys
import test_cmake_build
assert test_cmake_build.add(1, 2) == 3
print("{} imports, runs, and adds: 1 + 2 = 3".format(sys.argv[1]))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/extra_python_package/test_files.py | third_party/unitree_legged_sdk/pybind11/tests/extra_python_package/test_files.py | # -*- coding: utf-8 -*-
import contextlib
import os
import string
import subprocess
import sys
import tarfile
import zipfile
# These tests must be run explicitly
# They require CMake 3.15+ (--install)
DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
main_headers = {
"include/pybind11/attr.h",
"include/pybind11/buffer_info.h",
"include/pybind11/cast.h",
"include/pybind11/chrono.h",
"include/pybind11/common.h",
"include/pybind11/complex.h",
"include/pybind11/eigen.h",
"include/pybind11/embed.h",
"include/pybind11/eval.h",
"include/pybind11/functional.h",
"include/pybind11/iostream.h",
"include/pybind11/numpy.h",
"include/pybind11/operators.h",
"include/pybind11/options.h",
"include/pybind11/pybind11.h",
"include/pybind11/pytypes.h",
"include/pybind11/stl.h",
"include/pybind11/stl_bind.h",
}
detail_headers = {
"include/pybind11/detail/class.h",
"include/pybind11/detail/common.h",
"include/pybind11/detail/descr.h",
"include/pybind11/detail/init.h",
"include/pybind11/detail/internals.h",
"include/pybind11/detail/typeid.h",
}
cmake_files = {
"share/cmake/pybind11/FindPythonLibsNew.cmake",
"share/cmake/pybind11/pybind11Common.cmake",
"share/cmake/pybind11/pybind11Config.cmake",
"share/cmake/pybind11/pybind11ConfigVersion.cmake",
"share/cmake/pybind11/pybind11NewTools.cmake",
"share/cmake/pybind11/pybind11Targets.cmake",
"share/cmake/pybind11/pybind11Tools.cmake",
}
py_files = {
"__init__.py",
"__main__.py",
"_version.py",
"commands.py",
"setup_helpers.py",
}
headers = main_headers | detail_headers
src_files = headers | cmake_files
all_files = src_files | py_files
sdist_files = {
"pybind11",
"pybind11/include",
"pybind11/include/pybind11",
"pybind11/include/pybind11/detail",
"pybind11/share",
"pybind11/share/cmake",
"pybind11/share/cmake/pybind11",
"pyproject.toml",
"setup.cfg",
"setup.py",
"LICENSE",
"MANIFEST.in",
"README.rst",
"PKG-INFO",
}
local_sdist_files = {
".egg-info",
".egg-info/PKG-INFO",
".egg-info/SOURCES.txt",
".egg-info/dependency_links.txt",
".egg-info/not-zip-safe",
".egg-info/top_level.txt",
}
def test_build_sdist(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
out = subprocess.check_output(
[
sys.executable,
"setup.py",
"sdist",
"--formats=tar",
"--dist-dir",
str(tmpdir),
]
)
if hasattr(out, "decode"):
out = out.decode()
(sdist,) = tmpdir.visit("*.tar")
with tarfile.open(str(sdist)) as tar:
start = tar.getnames()[0] + "/"
version = start[9:-1]
simpler = set(n.split("/", 1)[-1] for n in tar.getnames()[1:])
with contextlib.closing(
tar.extractfile(tar.getmember(start + "setup.py"))
) as f:
setup_py = f.read()
with contextlib.closing(
tar.extractfile(tar.getmember(start + "pyproject.toml"))
) as f:
pyproject_toml = f.read()
files = set("pybind11/{}".format(n) for n in all_files)
files |= sdist_files
files |= set("pybind11{}".format(n) for n in local_sdist_files)
files.add("pybind11.egg-info/entry_points.txt")
files.add("pybind11.egg-info/requires.txt")
assert simpler == files
with open(os.path.join(MAIN_DIR, "tools", "setup_main.py.in"), "rb") as f:
contents = (
string.Template(f.read().decode())
.substitute(version=version, extra_cmd="")
.encode()
)
assert setup_py == contents
with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
contents = f.read()
assert pyproject_toml == contents
def test_build_global_dist(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
out = subprocess.check_output(
[
sys.executable,
"setup.py",
"sdist",
"--formats=tar",
"--dist-dir",
str(tmpdir),
]
)
if hasattr(out, "decode"):
out = out.decode()
(sdist,) = tmpdir.visit("*.tar")
with tarfile.open(str(sdist)) as tar:
start = tar.getnames()[0] + "/"
version = start[16:-1]
simpler = set(n.split("/", 1)[-1] for n in tar.getnames()[1:])
with contextlib.closing(
tar.extractfile(tar.getmember(start + "setup.py"))
) as f:
setup_py = f.read()
with contextlib.closing(
tar.extractfile(tar.getmember(start + "pyproject.toml"))
) as f:
pyproject_toml = f.read()
files = set("pybind11/{}".format(n) for n in all_files)
files |= sdist_files
files |= set("pybind11_global{}".format(n) for n in local_sdist_files)
assert simpler == files
with open(os.path.join(MAIN_DIR, "tools", "setup_global.py.in"), "rb") as f:
contents = (
string.Template(f.read().decode())
.substitute(version=version, extra_cmd="")
.encode()
)
assert setup_py == contents
with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
contents = f.read()
assert pyproject_toml == contents
def tests_build_wheel(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
subprocess.check_output(
[sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)]
)
(wheel,) = tmpdir.visit("*.whl")
files = set("pybind11/{}".format(n) for n in all_files)
files |= {
"dist-info/LICENSE",
"dist-info/METADATA",
"dist-info/RECORD",
"dist-info/WHEEL",
"dist-info/entry_points.txt",
"dist-info/top_level.txt",
}
with zipfile.ZipFile(str(wheel)) as z:
names = z.namelist()
trimmed = set(n for n in names if "dist-info" not in n)
trimmed |= set(
"dist-info/{}".format(n.split("/", 1)[-1]) for n in names if "dist-info" in n
)
assert files == trimmed
def tests_build_global_wheel(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
subprocess.check_output(
[sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)]
)
(wheel,) = tmpdir.visit("*.whl")
files = set("data/data/{}".format(n) for n in src_files)
files |= set("data/headers/{}".format(n[8:]) for n in headers)
files |= {
"dist-info/LICENSE",
"dist-info/METADATA",
"dist-info/WHEEL",
"dist-info/top_level.txt",
"dist-info/RECORD",
}
with zipfile.ZipFile(str(wheel)) as z:
names = z.namelist()
beginning = names[0].split("/", 1)[0].rsplit(".", 1)[0]
trimmed = set(n[len(beginning) + 1 :] for n in names)
assert files == trimmed
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/tests/test_embed/test_interpreter.py | third_party/unitree_legged_sdk/pybind11/tests/test_embed/test_interpreter.py | # -*- coding: utf-8 -*-
from widget_module import Widget
class DerivedWidget(Widget):
def __init__(self, message):
super(DerivedWidget, self).__init__(message)
def the_answer(self):
return 42
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/pybind11/setup_helpers.py | third_party/unitree_legged_sdk/pybind11/pybind11/setup_helpers.py | # -*- coding: utf-8 -*-
"""
This module provides helpers for C++11+ projects using pybind11.
LICENSE:
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import contextlib
import os
import shutil
import sys
import tempfile
import threading
import warnings
try:
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools import Extension as _Extension
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
from distutils.extension import Extension as _Extension
import distutils.errors
import distutils.ccompiler
WIN = sys.platform.startswith("win32")
PY2 = sys.version_info[0] < 3
MACOS = sys.platform.startswith("darwin")
STD_TMPL = "/std:c++{}" if WIN else "-std=c++{}"


# It is recommended to use PEP 518 builds if using this module. However, this
# file explicitly supports being copied into a user's project directory
# standalone, and pulling pybind11 with the deprecated setup_requires feature.
# If you copy the file, remember to add it to your MANIFEST.in, and add the current
# directory into your path if it sits beside your setup.py.


class Pybind11Extension(_Extension):
    """
    Build a C++11+ Extension module with pybind11. This automatically adds the
    recommended flags when you init the extension and assumes C++ sources - you
    can further modify the options yourself.

    The customizations are:

    * ``/EHsc`` and ``/bigobj`` on Windows
    * ``stdlib=libc++`` on macOS
    * ``visibility=hidden`` and ``-g0`` on Unix

    Finally, you can set ``cxx_std`` via constructor or afterwords to enable
    flags for C++ std, and a few extra helper flags related to the C++ standard
    level. It is _highly_ recommended you either set this, or use the provided
    ``build_ext``, which will search for the highest supported extension for
    you if the ``cxx_std`` property is not set. Do not set the ``cxx_std``
    property more than once, as flags are added when you set it. Set the
    property to None to disable the addition of C++ standard flags.

    If you want to add pybind11 headers manually, for example for an exact
    git checkout, then set ``include_pybind11=False``.

    Warning: do not use property-based access to the instance on Python 2 -
    this is an ugly old-style class due to Distutils.
    """

    def _add_cflags(self, *flags):
        # Append each flag to the compile-flag list, skipping duplicates.
        for flag in flags:
            if flag not in self.extra_compile_args:
                self.extra_compile_args.append(flag)

    def _add_lflags(self, *flags):
        # Append each flag to the link-flag list, skipping duplicates.
        # BUG FIX: this previously tested membership against
        # extra_compile_args while appending to extra_link_args, so a flag
        # already added as a cflag (e.g. "-stdlib=libc++" on macOS) was
        # silently never added to the link flags at all.
        for flag in flags:
            if flag not in self.extra_link_args:
                self.extra_link_args.append(flag)

    def __init__(self, *args, **kwargs):

        self._cxx_level = 0
        cxx_std = kwargs.pop("cxx_std", 0)

        if "language" not in kwargs:
            kwargs["language"] = "c++"

        include_pybind11 = kwargs.pop("include_pybind11", True)

        # Can't use super here because distutils has old-style classes in
        # Python 2!
        _Extension.__init__(self, *args, **kwargs)

        # Include the installed package pybind11 headers
        if include_pybind11:
            # If using setup_requires, this fails the first time - that's okay
            try:
                import pybind11

                pyinc = pybind11.get_include()

                if pyinc not in self.include_dirs:
                    self.include_dirs.append(pyinc)
            except ImportError:
                pass

        # Have to use the accessor manually to support Python 2 distutils
        Pybind11Extension.cxx_std.__set__(self, cxx_std)

        if WIN:
            self._add_cflags("/EHsc", "/bigobj")
        else:
            self._add_cflags("-fvisibility=hidden", "-g0")
            if MACOS:
                self._add_cflags("-stdlib=libc++")
                self._add_lflags("-stdlib=libc++")

    @property
    def cxx_std(self):
        """
        The CXX standard level. If set, will add the required flags. If left
        at 0, it will trigger an automatic search when pybind11's build_ext
        is used. If None, will have no effect. Besides just the flags, this
        may add a register warning/error fix for Python 2 or macos-min 10.9
        or 10.14.
        """
        return self._cxx_level

    @cxx_std.setter
    def cxx_std(self, level):

        if self._cxx_level:
            warnings.warn("You cannot safely change the cxx_level after setting it!")

        # MSVC 2015 Update 3 and later only have 14 (and later 17) modes, so
        # force a valid flag here.
        if WIN and level == 11:
            level = 14

        self._cxx_level = level

        # 0 means "auto-detect later"; None means "do not add std flags".
        if not level:
            return

        self.extra_compile_args.append(STD_TMPL.format(level))

        if MACOS and "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
            # C++17 requires a higher min version of macOS. An earlier version
            # can be set manually via environment variable if you are careful
            # in your feature usage, but 10.14 is the safest setting for
            # general use.
            macosx_min = "-mmacosx-version-min=" + ("10.9" if level < 17 else "10.14")
            self.extra_compile_args.append(macosx_min)
            self.extra_link_args.append(macosx_min)

        if PY2:
            if WIN:
                # Will be ignored on MSVC 2015, where C++17 is not supported so
                # this flag is not valid.
                self.extra_compile_args.append("/wd5033")
            elif level >= 17:
                self.extra_compile_args.append("-Wno-register")
            elif level >= 14:
                self.extra_compile_args.append("-Wno-deprecated-register")
# Just in case someone clever tries to multithread
tmp_chdir_lock = threading.Lock()
cpp_cache_lock = threading.Lock()


@contextlib.contextmanager
def tmp_chdir():
    """Enter a fresh temporary directory; restore the cwd and delete it on exit."""
    # Serialized so concurrent callers never observe each other's cwd.
    with tmp_chdir_lock:
        saved_cwd = os.getcwd()
        scratch = tempfile.mkdtemp()
        try:
            os.chdir(scratch)
            yield scratch
        finally:
            os.chdir(saved_cwd)
            shutil.rmtree(scratch)
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flag):
    """
    Return True if ``flag`` is accepted by ``compiler``, otherwise False.

    Probes by compiling a trivial C++ file inside a throwaway directory.
    """
    with tmp_chdir():
        probe = "flagcheck.cpp"
        with open(probe, "w") as src:
            src.write("int main (int argc, char **argv) { return 0; }")

        try:
            compiler.compile([probe], extra_postargs=[flag])
        except distutils.errors.CompileError:
            return False

    return True
# Every call will cache the result
cpp_flag_cache = None  # module-level memo: holds the detected level once probed


def auto_cpp_level(compiler):
    """
    Return the max supported C++ std level (17, 14, or 11). Returns latest on Windows.

    The first successful probe is memoized in ``cpp_flag_cache`` so the slow
    test-compilation in ``has_flag`` only runs once per process.
    """

    if WIN:
        # MSVC understands /std:c++latest; no probing needed.
        return "latest"

    global cpp_flag_cache

    # If this has been previously calculated with the same args, return that
    with cpp_cache_lock:
        if cpp_flag_cache:
            return cpp_flag_cache

    levels = [17, 14, 11]

    # Probe newest-first; the first -std= flag the compiler accepts wins.
    for level in levels:
        if has_flag(compiler, STD_TMPL.format(level)):
            with cpp_cache_lock:
                cpp_flag_cache = level
            return level

    msg = "Unsupported compiler -- at least C++11 support is needed!"
    raise RuntimeError(msg)
class build_ext(_build_ext):  # noqa: N801
    """
    Customized build_ext that allows an auto-search for the highest supported
    C++ level for Pybind11Extension.
    """

    def build_extensions(self):
        """
        Build extensions, injecting C++ std for Pybind11Extension if needed.
        """

        for ext in self.extensions:
            # A _cxx_level of 0 means "not chosen yet": probe the compiler
            # (auto_cpp_level caches across extensions) and assign through
            # the descriptor so the flag side effects run.
            if hasattr(ext, "_cxx_level") and ext._cxx_level == 0:
                # Python 2 syntax - old-style distutils class
                ext.__class__.cxx_std.__set__(ext, auto_cpp_level(self.compiler))

        # Python 2 doesn't allow super here, since distutils uses old-style
        # classes!
        _build_ext.build_extensions(self)
# Optional parallel compile utility
# inspired by: http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
# and: https://github.com/tbenthompson/cppimport/blob/stable/cppimport/build_module.py
# and NumPy's parallel distutils module:
# https://github.com/numpy/numpy/blob/master/numpy/distutils/ccompiler.py
class ParallelCompile(object):
    """
    Make a parallel compile function. Inspired by
    numpy.distutils.ccompiler.CCompiler_compile and cppimport.

    This takes several arguments that allow you to customize the compile
    function created:

    envvar: Set an environment variable to control the compilation threads, like NPY_NUM_BUILD_JOBS
    default: 0 will automatically multithread, or 1 will only multithread if the envvar is set.
    max: The limit for automatic multithreading if non-zero

    To use::

        ParallelCompile("NPY_NUM_BUILD_JOBS").install()

    or::

        with ParallelCompile("NPY_NUM_BUILD_JOBS"):
            setup(...)
    """

    __slots__ = ("envvar", "default", "max", "old")

    def __init__(self, envvar=None, default=0, max=0):
        # envvar: name of the env var holding the thread count (optional).
        self.envvar = envvar
        # default: thread count when the env var is unset (0 = one per core).
        self.default = default
        # max: cap on the auto-detected thread count (0 = no cap).
        self.max = max
        # old: stack of previously installed compile functions, so the
        # context-manager form can nest and restore correctly.
        self.old = []

    def function(self):
        """
        Builds a function object usable as distutils.ccompiler.CCompiler.compile.
        """

        def compile_function(
            compiler,
            sources,
            output_dir=None,
            macros=None,
            include_dirs=None,
            debug=0,
            extra_preargs=None,
            extra_postargs=None,
            depends=None,
        ):

            # These lines are directly from distutils.ccompiler.CCompiler
            macros, objects, extra_postargs, pp_opts, build = compiler._setup_compile(
                output_dir, macros, include_dirs, sources, depends, extra_postargs
            )
            cc_args = compiler._get_cc_args(pp_opts, debug, extra_preargs)

            # The number of threads; start with default.
            threads = self.default

            # Determine the number of compilation threads, unless set by an environment variable.
            if self.envvar is not None:
                threads = int(os.environ.get(self.envvar, self.default))

            def _single_compile(obj):
                # Objects whose sources are already up to date are absent
                # from `build`; skip them silently.
                try:
                    src, ext = build[obj]
                except KeyError:
                    return
                compiler._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

            try:
                import multiprocessing
                from multiprocessing.pool import ThreadPool
            except ImportError:
                # Platform without multiprocessing support: compile serially.
                threads = 1

            if threads == 0:
                # 0 means "auto": one thread per CPU, optionally capped by max.
                try:
                    threads = multiprocessing.cpu_count()
                    threads = self.max if self.max and self.max < threads else threads
                except NotImplementedError:
                    threads = 1

            if threads > 1:
                # A thread pool (not processes) is enough here: the heavy work
                # happens in the external compiler subprocess.
                for _ in ThreadPool(threads).imap_unordered(_single_compile, objects):
                    pass
            else:
                for ob in objects:
                    _single_compile(ob)

            return objects

        return compile_function

    def install(self):
        # Monkey-patch distutils globally; returns self so calls can chain.
        distutils.ccompiler.CCompiler.compile = self.function()
        return self

    def __enter__(self):
        # Save the current compile function so __exit__ can restore it.
        self.old.append(distutils.ccompiler.CCompiler.compile)
        return self.install()

    def __exit__(self, *args):
        distutils.ccompiler.CCompiler.compile = self.old.pop()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/pybind11/commands.py | third_party/unitree_legged_sdk/pybind11/pybind11/commands.py | # -*- coding: utf-8 -*-
import os
DIR = os.path.abspath(os.path.dirname(__file__))
def get_include(user=False):
    """Return the path to pybind11's headers, preferring the installed copy.

    ``user`` is accepted for backward compatibility and ignored.
    """
    installed = os.path.join(DIR, "include")
    if os.path.exists(installed):
        return installed
    return os.path.join(os.path.dirname(DIR), "include")
def get_cmake_dir():
    """Return the directory holding pybind11's CMake package files.

    Raises ImportError when pybind11 is not installed (files absent).
    """
    candidate = os.path.join(DIR, "share", "cmake", "pybind11")
    if not os.path.exists(candidate):
        msg = "pybind11 not installed, installation required to access the CMake files"
        raise ImportError(msg)
    return candidate
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/pybind11/_version.py | third_party/unitree_legged_sdk/pybind11/pybind11/_version.py | # -*- coding: utf-8 -*-
def _to_int(s):
try:
return int(s)
except ValueError:
return s
# Canonical package version string; bump here for releases.
__version__ = "2.6.0rc1"
# Tuple form, e.g. (2, 6, "0rc1") -- non-numeric components stay strings.
version_info = tuple(_to_int(s) for s in __version__.split("."))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/pybind11/__main__.py | third_party/unitree_legged_sdk/pybind11/pybind11/__main__.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import sys
import sysconfig
from .commands import get_include, get_cmake_dir
def print_includes():
    """Print -I compiler flags for the Python and pybind11 header dirs."""
    candidates = [
        sysconfig.get_path("include"),
        sysconfig.get_path("platinclude"),
        get_include(),
    ]

    # Make unique but preserve order
    ordered = []
    for path in candidates:
        if path not in ordered:
            ordered.append(path)

    print(" ".join("-I" + path for path in ordered))
def main():
    """Command-line entry point: report include flags and/or the CMake dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--includes",
        action="store_true",
        help="Include flags for both pybind11 and Python headers.",
    )
    parser.add_argument(
        "--cmakedir",
        action="store_true",
        help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
    )
    args = parser.parse_args()
    # With no arguments at all, show usage (then fall through: both flags
    # are False, so nothing further is printed).
    if not sys.argv[1:]:
        parser.print_help()
    if args.includes:
        print_includes()
    if args.cmakedir:
        print(get_cmake_dir())


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/pybind11/__init__.py | third_party/unitree_legged_sdk/pybind11/pybind11/__init__.py | # -*- coding: utf-8 -*-
from ._version import version_info, __version__
from .commands import get_include, get_cmake_dir
__all__ = (
"version_info",
"__version__",
"get_include",
"get_cmake_dir",
)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/docs/benchmark.py | third_party/unitree_legged_sdk/pybind11/docs/benchmark.py | # -*- coding: utf-8 -*-
import random
import os
import time
import datetime as dt
nfns = 4 # Functions per class
nargs = 4 # Arguments per function
def generate_dummy_code_pybind11(nclasses=10):
decl = ""
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += '\n'
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += "public:\n"
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % \
(fn, cl, fn)
decl += "};\n\n"
bindings += ' ;\n'
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
result += decl + '\n'
result += "PYBIND11_MODULE(example, m) {\n"
result += bindings
result += "}"
return result
def generate_dummy_code_boost(nclasses=10):
    """Generate an equivalent Boost.Python binding source with ``nclasses`` classes."""
    decl_parts = []
    binding_parts = []

    # Forward-declare every class first.
    for cl in range(nclasses):
        decl_parts.append("class cl%03i;\n" % cl)
    decl_parts.append("\n")

    for cl in range(nclasses):
        decl_parts.append("class cl%03i {\n" % cl)
        decl_parts.append("public:\n")
        binding_parts.append('    py::class_<cl%03i>("cl%03i")\n' % (cl, cl))
        for fn in range(nfns):
            # Same random draw order as always: return type, then parameters.
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl_parts.append("    cl%03i *fn_%03i(" % (ret, fn))
            decl_parts.append(", ".join("cl%03i *" % p for p in params))
            decl_parts.append(");\n")
            binding_parts.append(
                '        .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
                % (fn, cl, fn))
        decl_parts.append("};\n\n")
        binding_parts.append("    ;\n")

    return (
        "#include <boost/python.hpp>\n\n"
        + "namespace py = boost::python;\n\n"
        + "".join(decl_parts)
        + "\n"
        + "BOOST_PYTHON_MODULE(example) {\n"
        + "".join(binding_parts)
        + "}"
    )
# Benchmark driver: for each binding framework, compile generated modules of
# doubling size and report (function count, compile seconds, .so bytes) as a
# C-style initializer list for easy pasting into plots/tables.
# NOTE(review): the g++ invocation hardcodes a macOS framework include path
# and -undefined dynamic_lookup, so this only works as-is on macOS.
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
    print ("{")
    for i in range(0, 10):
        nclasses = 2 ** i
        with open("test.cpp", "w") as f:
            f.write(codegen(nclasses))
        n1 = dt.datetime.now()
        # Compile the generated module; timing includes the whole g++ run.
        os.system("g++ -Os -shared -rdynamic -undefined dynamic_lookup "
                  "-fvisibility=hidden -std=c++14 test.cpp -I include "
                  "-I /System/Library/Frameworks/Python.framework/Headers -o test.so")
        n2 = dt.datetime.now()
        elapsed = (n2 - n1).total_seconds()
        size = os.stat('test.so').st_size
        print("    {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
    print ("}")
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/third_party/unitree_legged_sdk/pybind11/docs/conf.py | third_party/unitree_legged_sdk/pybind11/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pybind11 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:23:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe', 'sphinxcontrib.rsvgconverter', 'sphinxcontrib.moderncmakedomain']
breathe_projects = {'pybind11': '.build/doxygenxml/'}
breathe_default_project = 'pybind11'
breathe_domain_by_extension = {'h': 'cpp'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybind11'
copyright = '2017, Wenzel Jakob'
author = 'Wenzel Jakob'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Read the listed version
with open("../pybind11/_version.py") as f:
code = compile(f.read(), "../pybind11/_version.py", "exec")
loc = {}
exec(code, loc)
# The full version, including alpha/beta/rc tags.
version = loc["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', 'release.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<version> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybind11doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'''
\DeclareUnicodeCharacter{00A0}{}
\DeclareUnicodeCharacter{2194}{<->}
''',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybind11.tex', 'pybind11 Documentation',
'Wenzel Jakob', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybind11', 'pybind11 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybind11', 'pybind11 Documentation',
author, 'pybind11', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'cpp'
highlight_language = 'cpp'
def generate_doxygen_xml(app):
    """Run doxygen in the Sphinx config dir so breathe can consume its XML."""
    out_dir = os.path.join(app.confdir, '.build')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    try:
        # Log the doxygen version, then run it where the Doxyfile lives.
        subprocess.call(['doxygen', '--version'])
        status = subprocess.call(['doxygen'], cwd=app.confdir)
        if status < 0:
            sys.stderr.write("doxygen error code: {}\n".format(-status))
    except OSError as err:
        # doxygen missing entirely -- report but do not abort the docs build.
        sys.stderr.write("doxygen execution failed: {}\n".format(err))
def setup(app):
    """Add hook for building doxygen xml when needed"""
    # Sphinx extension entry point: run doxygen once the builder is ready.
    app.connect("builder-inited", generate_doxygen_xml)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/a1_sim.py | mpc_controller/a1_sim.py | import re
import numpy as np
URDF_NAME = "a1/a1.urdf"
START_POS = [0, 0, 0.32]
MPC_BODY_MASS = 108 / 9.8
MPC_BODY_INERTIA = np.array(
(0.017, 0, 0, 0, 0.057, 0, 0, 0, 0.064)) * 0.1#* 2
MPC_BODY_HEIGHT = 0.24
MPC_VELOCITY_MULTIPLIER = 0.5
ACTION_REPEAT = 5
_IDENTITY_ORIENTATION=[0,0,0,1]
HIP_NAME_PATTERN = re.compile(r"\w+_hip_\w+")
UPPER_NAME_PATTERN = re.compile(r"\w+_upper_\w+")
LOWER_NAME_PATTERN = re.compile(r"\w+_lower_\w+")
TOE_NAME_PATTERN = re.compile(r"\w+_toe\d*")
IMU_NAME_PATTERN = re.compile(r"imu\d*")
_DEFAULT_HIP_POSITIONS = (
(0.17, -0.14, 0),
(0.17, 0.14, 0),
(-0.17, -0.14, 0),
(-0.17, 0.14, 0),
)
ABDUCTION_P_GAIN = 100.0
ABDUCTION_D_GAIN = 1.
HIP_P_GAIN = 100.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 100.0
KNEE_D_GAIN = 2.0
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0
KNEE_JOINT_OFFSET = 0
LAIKAGO_DEFAULT_ABDUCTION_ANGLE = 0
LAIKAGO_DEFAULT_HIP_ANGLE = 0.9
LAIKAGO_DEFAULT_KNEE_ANGLE = -1.8
NUM_LEGS = 4
NUM_MOTORS = 12
# Bases on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([
    LAIKAGO_DEFAULT_ABDUCTION_ANGLE,
    LAIKAGO_DEFAULT_HIP_ANGLE,
    LAIKAGO_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)

MOTOR_NAMES = [
    "FR_hip_joint",
    "FR_upper_joint",
    "FR_lower_joint",
    "FL_hip_joint",
    "FL_upper_joint",
    "FL_lower_joint",
    "RR_hip_joint",
    "RR_upper_joint",
    "RR_lower_joint",
    "RL_hip_joint",
    "RL_upper_joint",
    "RL_lower_joint",
]

# Use a PD controller
MOTOR_CONTROL_POSITION = 1
# Apply motor torques directly.
MOTOR_CONTROL_TORQUE = 2
# Apply a tuple (q, qdot, kp, kd, tau) for each motor. Here q, qdot are motor
# position and velocities. kp and kd are PD gains. tau is the additional
# motor torque. This is the most flexible control mode.
MOTOR_CONTROL_HYBRID = 3
MOTOR_CONTROL_PWM = 4  # only for Minitaur

MOTOR_COMMAND_DIMENSION = 5

# These values represent the indices of each field in the motor command tuple
POSITION_INDEX = 0
POSITION_GAIN_INDEX = 1
VELOCITY_INDEX = 2
VELOCITY_GAIN_INDEX = 3
TORQUE_INDEX = 4


class LaikagoMotorModel(object):
  """A simple motor model for Laikago.

  When in POSITION mode, the torque is calculated according to the difference
  between current and desired joint angle, as well as the joint velocity.
  For more information about PD control, please refer to:
  https://en.wikipedia.org/wiki/PID_controller.

  The model supports a HYBRID mode in which each motor command can be a tuple
  (desired_motor_angle, position_gain, desired_motor_velocity, velocity_gain,
  torque).
  """

  def __init__(self,
               kp,
               kd,
               torque_limits=None,
               motor_control_mode=MOTOR_CONTROL_POSITION):
    """Initializes the motor model.

    Args:
      kp: Proportional gain(s): a scalar or a NUM_MOTORS-length array.
      kd: Derivative gain(s): a scalar or a NUM_MOTORS-length array.
      torque_limits: Optional symmetric torque bound(s); a scalar is
        broadcast to all NUM_MOTORS motors, a sequence/array is used as-is.
      motor_control_mode: Default control mode (one of the MOTOR_CONTROL_*
        constants) used when convert_to_torque is called without a mode.
    """
    self._kp = kp
    self._kd = kd
    self._torque_limits = torque_limits
    if torque_limits is not None:
      # BUG FIX: this used `collections.Sequence`, but `collections` is not
      # imported in this module (NameError for list inputs) and
      # `collections.Sequence` was removed in Python 3.10 anyway
      # (moved to collections.abc). A numpy rank check covers both cases.
      limits = np.asarray(torque_limits)
      if limits.ndim == 0:
        # Scalar limit: broadcast to one bound per motor.
        limits = np.full(NUM_MOTORS, torque_limits)
      self._torque_limits = limits
    self._motor_control_mode = motor_control_mode
    self._strength_ratios = np.full(NUM_MOTORS, 1)

  def set_strength_ratios(self, ratios):
    """Set the strength of each motors relative to the default value.

    Args:
      ratios: The relative strength of motor output. A numpy array ranging from
        0.0 to 1.0.
    """
    self._strength_ratios = ratios

  def set_motor_gains(self, kp, kd):
    """Set the gains of all motors.

    These gains are PD gains for motor positional control. kp is the
    proportional gain and kd is the derivative gain.

    Args:
      kp: proportional gain of the motors.
      kd: derivative gain of the motors.
    """
    self._kp = kp
    self._kd = kd

  def set_voltage(self, voltage):
    # Voltage is not modeled; kept for interface compatibility.
    pass

  def get_voltage(self):
    # Voltage is not modeled; kept for interface compatibility.
    return 0.0

  def set_viscous_damping(self, viscous_damping):
    # Viscous damping is not modeled; kept for interface compatibility.
    pass

  def get_viscous_dampling(self):
    # Viscous damping is not modeled; kept for interface compatibility.
    return 0.0

  def convert_to_torque(self,
                        motor_commands,
                        motor_angle,
                        motor_velocity,
                        true_motor_velocity,
                        motor_control_mode=None):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      motor_angle: The motor angle observed at the current time step. It is
        actually the true motor angle observed a few milliseconds ago (pd
        latency).
      motor_velocity: The motor velocity observed at the current time step, it
        is actually the true motor velocity a few milliseconds ago (pd latency).
      true_motor_velocity: The true motor velocity. The true velocity is used to
        compute back EMF voltage and viscous damping. (Unused in this model.)
      motor_control_mode: A MotorControlMode enum.

    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.

    Raises:
      ValueError: If the torque limits do not match the number of motors.
    """
    del true_motor_velocity
    if not motor_control_mode:
      motor_control_mode = self._motor_control_mode

    # No processing for motor torques: scale by strength and return directly
    # (note: torque limits are NOT applied in raw torque mode).
    if motor_control_mode is MOTOR_CONTROL_TORQUE:
      assert len(motor_commands) == NUM_MOTORS
      motor_torques = self._strength_ratios * motor_commands
      return motor_torques, motor_torques

    desired_motor_angles = None
    desired_motor_velocities = None
    kp = None
    kd = None
    additional_torques = np.full(NUM_MOTORS, 0)
    if motor_control_mode is MOTOR_CONTROL_POSITION:
      assert len(motor_commands) == NUM_MOTORS
      kp = self._kp
      kd = self._kd
      desired_motor_angles = motor_commands
      desired_motor_velocities = np.full(NUM_MOTORS, 0)
    elif motor_control_mode is MOTOR_CONTROL_HYBRID:
      # The input should be a 60 dimension vector: 5 fields per motor,
      # interleaved as (q, kp, qdot, kd, tau).
      assert len(motor_commands) == MOTOR_COMMAND_DIMENSION * NUM_MOTORS
      kp = motor_commands[POSITION_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      kd = motor_commands[VELOCITY_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_angles = motor_commands[
          POSITION_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_velocities = motor_commands[
          VELOCITY_INDEX::MOTOR_COMMAND_DIMENSION]
      additional_torques = motor_commands[TORQUE_INDEX::MOTOR_COMMAND_DIMENSION]
    # PD law: tau = -kp * (q - q_des) - kd * (qdot - qdot_des) + tau_extra.
    motor_torques = -1 * (kp * (motor_angle - desired_motor_angles)) - kd * (
        motor_velocity - desired_motor_velocities) + additional_torques
    motor_torques = self._strength_ratios * motor_torques
    if self._torque_limits is not None:
      if len(self._torque_limits) != len(motor_torques):
        raise ValueError(
            "Torque limits dimension does not match the number of motors.")
      motor_torques = np.clip(motor_torques, -1.0 * self._torque_limits,
                              self._torque_limits)

    return motor_torques, motor_torques
class SimpleRobot(object):
  def __init__(self, pybullet_client, robot_uid, simulation_time_step):
    """Wraps an already-loaded quadruped body with motor/state bookkeeping.

    Args:
      pybullet_client: The pybullet client (or a wrapper exposing its API).
      robot_uid: Body unique id of the robot, as returned by loadURDF.
      simulation_time_step: Simulation time step in seconds.
    """
    self.pybullet_client = pybullet_client
    self.time_step = simulation_time_step
    self.quadruped = robot_uid
    self.num_legs = NUM_LEGS
    self.num_motors = NUM_MOTORS
    # Build joint/link lookup tables from the URDF, then snap to init pose.
    # (These helpers are defined later in this class, outside this view.)
    self._BuildJointNameToIdDict()
    self._BuildUrdfIds()
    self._BuildMotorIdList()
    self.ResetPose()
    self._motor_enabled_list = [True] * self.num_motors
    self._step_counter = 0
    self._state_action_counter = 0
    self._motor_offset= np.array([0]*12)
    self._motor_direction= np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    self.ReceiveObservation()
    self._kp = self.GetMotorPositionGains()
    self._kd = self.GetMotorVelocityGains()
    # The hybrid-mode motor model converts (q, kp, qdot, kd, tau) commands
    # into joint torques.
    self._motor_model = LaikagoMotorModel(kp=self._kp, kd=self._kd, motor_control_mode=MOTOR_CONTROL_HYBRID)
    self._SettleDownForReset(reset_time=1.0)
    # Reset the counter so settling steps do not count toward the episode.
    self._step_counter = 0
def ResetPose(self):
for name in self._joint_name_to_id:
joint_id = self._joint_name_to_id[name]
self.pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(joint_id),
controlMode=self.pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
if "hip_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
elif "upper_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
elif "lower_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
else:
raise ValueError("The name %s is not recognized as a motor joint." %
name)
self.pybullet_client.resetJointState(
self.quadruped, self._joint_name_to_id[name], angle, targetVelocity=0)
def _SettleDownForReset(self, reset_time):
self.ReceiveObservation()
if reset_time <= 0:
return
for _ in range(500):
self._StepInternal(
INIT_MOTOR_ANGLES,
motor_control_mode=MOTOR_CONTROL_POSITION)
def _GetMotorNames(self):
return MOTOR_NAMES
def _BuildMotorIdList(self):
self._motor_id_list = [
self._joint_name_to_id[motor_name]
for motor_name in self._GetMotorNames()
]
def GetMotorPositionGains(self):
return np.array([ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN, ABDUCTION_P_GAIN,
HIP_P_GAIN, KNEE_P_GAIN, ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN])
def GetMotorVelocityGains(self):
return np.array([ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN, ABDUCTION_D_GAIN,
HIP_D_GAIN, KNEE_D_GAIN, ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN])
def compute_jacobian(self, robot, link_id):
"""Computes the Jacobian matrix for the given link.
Args:
robot: A robot instance.
link_id: The link id as returned from loadURDF.
Returns:
The 3 x N transposed Jacobian matrix. where N is the total DoFs of the
robot. For a quadruped, the first 6 columns of the matrix corresponds to
the CoM translation and rotation. The columns corresponds to a leg can be
extracted with indices [6 + leg_id * 3: 6 + leg_id * 3 + 3].
"""
all_joint_angles = [state[0] for state in robot._joint_states]
zero_vec = [0] * len(all_joint_angles)
jv, _ = self.pybullet_client.calculateJacobian(robot.quadruped, link_id,
(0, 0, 0), all_joint_angles,
zero_vec, zero_vec)
jacobian = np.array(jv)
assert jacobian.shape[0] == 3
return jacobian
def ComputeJacobian(self, leg_id):
"""Compute the Jacobian for a given leg."""
# Does not work for Minitaur which has the four bar mechanism for now.
assert len(self._foot_link_ids) == self.num_legs
return self.compute_jacobian(
robot=self,
link_id=self._foot_link_ids[leg_id],
)
def MapContactForceToJointTorques(self, leg_id, contact_force):
"""Maps the foot contact force to the leg joint torques."""
jv = self.ComputeJacobian(leg_id)
all_motor_torques = np.matmul(contact_force, jv)
motor_torques = {}
motors_per_leg = self.num_motors // self.num_legs
com_dof = 6
for joint_id in range(leg_id * motors_per_leg,
(leg_id + 1) * motors_per_leg):
motor_torques[joint_id] = all_motor_torques[
com_dof + joint_id] * self._motor_direction[joint_id]
return motor_torques
def GetBaseRollPitchYaw(self):
"""Get minitaur's base orientation in euler angle in the world frame.
Returns:
A tuple (roll, pitch, yaw) of the base in world frame.
"""
orientation = self.GetTrueBaseOrientation()
roll_pitch_yaw = self.pybullet_client.getEulerFromQuaternion(orientation)
return np.asarray(roll_pitch_yaw)
def joint_angles_from_link_position(
self,
robot,
link_position,
link_id,
joint_ids,
position_in_world_frame,
base_translation = (0, 0, 0),
base_rotation = (0, 0, 0, 1)):
"""Uses Inverse Kinematics to calculate joint angles.
Args:
robot: A robot instance.
link_position: The (x, y, z) of the link in the body or the world frame,
depending on whether the argument position_in_world_frame is true.
link_id: The link id as returned from loadURDF.
joint_ids: The positional index of the joints. This can be different from
the joint unique ids.
position_in_world_frame: Whether the input link_position is specified
in the world frame or the robot's base frame.
base_translation: Additional base translation.
base_rotation: Additional base rotation.
Returns:
A list of joint angles.
"""
if not position_in_world_frame:
# Projects to local frame.
base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)#robot.GetBasePosition(), robot.GetBaseOrientation()
base_position, base_orientation = robot.pybullet_client.multiplyTransforms(
base_position, base_orientation, base_translation, base_rotation)
# Projects to world space.
world_link_pos, _ = robot.pybullet_client.multiplyTransforms(
base_position, base_orientation, link_position, _IDENTITY_ORIENTATION)
else:
world_link_pos = link_position
ik_solver = 0
all_joint_angles = robot.pybullet_client.calculateInverseKinematics(
robot.quadruped, link_id, world_link_pos, solver=ik_solver)
# Extract the relevant joint angles.
joint_angles = [all_joint_angles[i] for i in joint_ids]
return joint_angles
def ComputeMotorAnglesFromFootLocalPosition(self, leg_id,
foot_local_position):
"""Use IK to compute the motor angles, given the foot link's local position.
Args:
leg_id: The leg index.
foot_local_position: The foot link's position in the base frame.
Returns:
A tuple. The position indices and the angles for all joints along the
leg. The position indices is consistent with the joint orders as returned
by GetMotorAngles API.
"""
return self._EndEffectorIK(
leg_id, foot_local_position, position_in_world_frame=False)
def _EndEffectorIK(self, leg_id, position, position_in_world_frame):
"""Calculate the joint positions from the end effector position."""
assert len(self._foot_link_ids) == self.num_legs
toe_id = self._foot_link_ids[leg_id]
motors_per_leg = self.num_motors // self.num_legs
joint_position_idxs = [
i for i in range(leg_id * motors_per_leg, leg_id * motors_per_leg +
motors_per_leg)
]
joint_angles = self.joint_angles_from_link_position(
robot=self,
link_position=position,
link_id=toe_id,
joint_ids=joint_position_idxs,
position_in_world_frame=position_in_world_frame)
# Joint offset is necessary for Laikago.
joint_angles = np.multiply(
np.asarray(joint_angles) -
np.asarray(self._motor_offset)[joint_position_idxs],
self._motor_direction[joint_position_idxs])
# Return the joing index (the same as when calling GetMotorAngles) as well
# as the angles.
return joint_position_idxs, joint_angles.tolist()
def GetTimeSinceReset(self):
return self._step_counter * self.time_step
def GetHipPositionsInBaseFrame(self):
return _DEFAULT_HIP_POSITIONS
def GetBaseVelocity(self):
"""Get the linear velocity of minitaur's base.
Returns:
The velocity of minitaur's base.
"""
velocity, _ = self.pybullet_client.getBaseVelocity(self.quadruped)
return velocity
def GetTrueBaseOrientation(self):
pos,orn = self.pybullet_client.getBasePositionAndOrientation(
self.quadruped)
return orn
def TransformAngularVelocityToLocalFrame(self, angular_velocity, orientation):
"""Transform the angular velocity from world frame to robot's frame.
Args:
angular_velocity: Angular velocity of the robot in world frame.
orientation: Orientation of the robot represented as a quaternion.
Returns:
angular velocity of based on the given orientation.
"""
# Treat angular velocity as a position vector, then transform based on the
# orientation given by dividing (or multiplying with inverse).
# Get inverse quaternion assuming the vector is at 0,0,0 origin.
_, orientation_inversed = self.pybullet_client.invertTransform([0, 0, 0],
orientation)
# Transform the angular_velocity at neutral orientation using a neutral
# translation and reverse of the given orientation.
relative_velocity, _ = self.pybullet_client.multiplyTransforms(
[0, 0, 0], orientation_inversed, angular_velocity,
self.pybullet_client.getQuaternionFromEuler([0, 0, 0]))
return np.asarray(relative_velocity)
def GetBaseRollPitchYawRate(self):
"""Get the rate of orientation change of the minitaur's base in euler angle.
Returns:
rate of (roll, pitch, yaw) change of the minitaur's base.
"""
angular_velocity = self.pybullet_client.getBaseVelocity(self.quadruped)[1]
orientation = self.GetTrueBaseOrientation()
return self.TransformAngularVelocityToLocalFrame(angular_velocity,
orientation)
def GetFootContacts(self):
all_contacts = self.pybullet_client.getContactPoints(bodyA=self.quadruped)
contacts = [False, False, False, False]
for contact in all_contacts:
# Ignore self contacts
if contact[_BODY_B_FIELD_NUMBER] == self.quadruped:
continue
try:
toe_link_index = self._foot_link_ids.index(
contact[_LINK_A_FIELD_NUMBER])
contacts[toe_link_index] = True
except ValueError:
continue
return contacts
def GetTrueMotorAngles(self):
"""Gets the eight motor angles at the current moment, mapped to [-pi, pi].
Returns:
Motor angles, mapped to [-pi, pi].
"""
self.ReceiveObservation()
motor_angles = [state[0] for state in self._joint_states]
motor_angles = np.multiply(
np.asarray(motor_angles) - np.asarray(self._motor_offset),
self._motor_direction)
return motor_angles
def GetPDObservation(self):
self.ReceiveObservation()
observation = []
observation.extend(self.GetTrueMotorAngles())
observation.extend(self.GetTrueMotorVelocities())
q = observation[0:self.num_motors]
qdot = observation[self.num_motors:2 * self.num_motors]
return (np.array(q), np.array(qdot))
def GetTrueMotorVelocities(self):
"""Get the velocity of all eight motors.
Returns:
Velocities of all eight motors.
"""
motor_velocities = [state[1] for state in self._joint_states]
motor_velocities = np.multiply(motor_velocities, self._motor_direction)
return motor_velocities
def GetTrueObservation(self):
self.ReceiveObservation()
observation = []
observation.extend(self.GetTrueMotorAngles())
observation.extend(self.GetTrueMotorVelocities())
observation.extend(self.GetTrueMotorTorques())
observation.extend(self.GetTrueBaseOrientation())
observation.extend(self.GetTrueBaseRollPitchYawRate())
return observation
def ApplyAction(self, motor_commands, motor_control_mode):
"""Apply the motor commands using the motor model.
Args:
motor_commands: np.array. Can be motor angles, torques, hybrid commands
motor_control_mode: A MotorControlMode enum.
"""
motor_commands = np.asarray(motor_commands)
q, qdot = self.GetPDObservation()
qdot_true = self.GetTrueMotorVelocities()
actual_torque, observed_torque = self._motor_model.convert_to_torque(
motor_commands, q, qdot, qdot_true, motor_control_mode)
# The torque is already in the observation space because we use
# GetMotorAngles and GetMotorVelocities.
self._observed_motor_torques = observed_torque
# Transform into the motor space when applying the torque.
self._applied_motor_torque = np.multiply(actual_torque,
self._motor_direction)
motor_ids = []
motor_torques = []
for motor_id, motor_torque, motor_enabled in zip(self._motor_id_list,
self._applied_motor_torque,
self._motor_enabled_list):
if motor_enabled:
motor_ids.append(motor_id)
motor_torques.append(motor_torque)
else:
motor_ids.append(motor_id)
motor_torques.append(0)
self._SetMotorTorqueByIds(motor_ids, motor_torques)
def _SetMotorTorqueByIds(self, motor_ids, torques):
self.pybullet_client.setJointMotorControlArray(
bodyIndex=self.quadruped,
jointIndices=motor_ids,
controlMode=self.pybullet_client.TORQUE_CONTROL,
forces=torques)
def ReceiveObservation(self):
self._joint_states = self.pybullet_client.getJointStates(self.quadruped, self._motor_id_list)
def _StepInternal(self, action, motor_control_mode):
self.ApplyAction(action, motor_control_mode)
self.pybullet_client.stepSimulation()
self.ReceiveObservation()
self._state_action_counter += 1
def Step(self, action):
"""Steps simulation."""
#if self._enable_action_filter:
# action = self._FilterAction(action)
for i in range(ACTION_REPEAT):
#proc_action = self.ProcessAction(action, i)
proc_action = action
self._StepInternal(proc_action, motor_control_mode=MOTOR_CONTROL_HYBRID)
self._step_counter += 1
def _BuildJointNameToIdDict(self):
num_joints = self.pybullet_client.getNumJoints(self.quadruped)
self._joint_name_to_id = {}
for i in range(num_joints):
joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
self._joint_name_to_id[joint_info[1].decode("UTF-8")] = joint_info[0]
def _BuildUrdfIds(self):
"""Build the link Ids from its name in the URDF file.
Raises:
ValueError: Unknown category of the joint name.
"""
num_joints = self.pybullet_client.getNumJoints(self.quadruped)
self._hip_link_ids = [-1]
self._leg_link_ids = []
self._motor_link_ids = []
self._lower_link_ids = []
self._foot_link_ids = []
self._imu_link_ids = []
for i in range(num_joints):
joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
joint_name = joint_info[1].decode("UTF-8")
joint_id = self._joint_name_to_id[joint_name]
if HIP_NAME_PATTERN.match(joint_name):
self._hip_link_ids.append(joint_id)
elif UPPER_NAME_PATTERN.match(joint_name):
self._motor_link_ids.append(joint_id)
# We either treat the lower leg or the toe as the foot link, depending on
# the urdf version used.
elif LOWER_NAME_PATTERN.match(joint_name):
self._lower_link_ids.append(joint_id)
elif TOE_NAME_PATTERN.match(joint_name):
#assert self._urdf_filename == URDF_WITH_TOES
self._foot_link_ids.append(joint_id)
elif IMU_NAME_PATTERN.match(joint_name):
self._imu_link_ids.append(joint_id)
else:
raise ValueError("Unknown category of joint %s" % joint_name)
self._leg_link_ids.extend(self._lower_link_ids)
self._leg_link_ids.extend(self._foot_link_ids)
#assert len(self._foot_link_ids) == NUM_LEGS
self._hip_link_ids.sort()
self._motor_link_ids.sort()
self._lower_link_ids.sort()
self._foot_link_ids.sort()
self._leg_link_ids.sort()
return
def link_position_in_base_frame( self, link_id ):
"""Computes the link's local position in the robot frame.
Args:
robot: A robot instance.
link_id: The link to calculate its relative position.
Returns:
The relative position of the link.
"""
base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)
inverse_translation, inverse_rotation = self.pybullet_client.invertTransform(
base_position, base_orientation)
link_state = self.pybullet_client.getLinkState(self.quadruped, link_id)
link_position = link_state[0]
link_local_position, _ = self.pybullet_client.multiplyTransforms(
inverse_translation, inverse_rotation, link_position, (0, 0, 0, 1))
return np.array(link_local_position)
def GetFootLinkIDs(self):
"""Get list of IDs for all foot links."""
return self._foot_link_ids
def GetFootPositionsInBaseFrame(self):
"""Get the robot's foot position in the base frame."""
assert len(self._foot_link_ids) == self.num_legs
foot_positions = []
for foot_id in self.GetFootLinkIDs():
foot_positions.append(
self.link_position_in_base_frame(link_id=foot_id)
)
return np.array(foot_positions)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/laikago_sim.py | mpc_controller/laikago_sim.py | import re
import numpy as np
URDF_NAME = "laikago/laikago_toes_zup.urdf"
# Initial base spawn position (x, y, z); presumably meters — confirm.
START_POS = [0, 0, 0.48]
# Mass used by the MPC: 215 / 9.8, presumably weight in newtons over g.
MPC_BODY_MASS = 215/9.8
# Row-major 3x3 body inertia for the MPC; off-diagonal terms are zero.
MPC_BODY_INERTIA = (0.07335, 0, 0, 0, 0.25068, 0, 0, 0, 0.25447)
# Nominal standing body height used by the MPC.
MPC_BODY_HEIGHT = 0.42
MPC_VELOCITY_MULTIPLIER = 1.0
# Number of internal simulation sub-steps per Step() call.
ACTION_REPEAT = 10
# Identity quaternion in pybullet's (x, y, z, w) convention.
_IDENTITY_ORIENTATION=[0,0,0,1]
# Regexes that classify URDF joint names into link categories.
CHASSIS_NAME_PATTERN = re.compile(r"\w+_chassis_\w+")
MOTOR_NAME_PATTERN = re.compile(r"\w+_hip_motor_\w+")
KNEE_NAME_PATTERN = re.compile(r"\w+_lower_leg_\w+")
TOE_NAME_PATTERN = re.compile(r"jtoe\d*")
# Nominal hip positions in the base frame, one per leg.
# NOTE(review): order appears to match MOTOR_NAMES (FR, FL, RR, RL) — confirm.
_DEFAULT_HIP_POSITIONS = (
    (0.21, -0.1157, 0),
    (0.21, 0.1157, 0),
    (-0.21, -0.1157, 0),
    (-0.21, 0.1157, 0),
)
# Tuple indices into pybullet getContactPoints() results.
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
# Offsets (radians) between URDF joint zero and the motor-model zero.
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = -0.6
KNEE_JOINT_OFFSET = 0.66
# Default standing joint angles, in radians.
LAIKAGO_DEFAULT_ABDUCTION_ANGLE = 0
LAIKAGO_DEFAULT_HIP_ANGLE = 0.67
LAIKAGO_DEFAULT_KNEE_ANGLE = -1.25
NUM_LEGS = 4
NUM_MOTORS = 12
# Based on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([
    LAIKAGO_DEFAULT_ABDUCTION_ANGLE,
    LAIKAGO_DEFAULT_HIP_ANGLE,
    LAIKAGO_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)
# Actuated joint names, three per leg, grouped FR, FL, RR, RL.
MOTOR_NAMES = [
    "FR_hip_motor_2_chassis_joint",
    "FR_upper_leg_2_hip_motor_joint",
    "FR_lower_leg_2_upper_leg_joint",
    "FL_hip_motor_2_chassis_joint",
    "FL_upper_leg_2_hip_motor_joint",
    "FL_lower_leg_2_upper_leg_joint",
    "RR_hip_motor_2_chassis_joint",
    "RR_upper_leg_2_hip_motor_joint",
    "RR_lower_leg_2_upper_leg_joint",
    "RL_hip_motor_2_chassis_joint",
    "RL_upper_leg_2_hip_motor_joint",
    "RL_lower_leg_2_upper_leg_joint",
]
# Use a PD controller.
MOTOR_CONTROL_POSITION = 1
# Apply motor torques directly.
MOTOR_CONTROL_TORQUE = 2
# Apply a tuple (q, qdot, kp, kd, tau) for each motor. Here q, qdot are motor
# position and velocities. kp and kd are PD gains. tau is the additional
# motor torque. This is the most flexible control mode.
MOTOR_CONTROL_HYBRID = 3
MOTOR_CONTROL_PWM = 4  # only for Minitaur
MOTOR_COMMAND_DIMENSION = 5
# These values represent the indices of each field in the motor command tuple.
POSITION_INDEX = 0
POSITION_GAIN_INDEX = 1
VELOCITY_INDEX = 2
VELOCITY_GAIN_INDEX = 3
TORQUE_INDEX = 4
class LaikagoMotorModel(object):
  """A simple motor model for Laikago.

  When in POSITION mode, the torque is calculated according to the difference
  between current and desired joint angle, as well as the joint velocity.
  For more information about PD control, please refer to:
  https://en.wikipedia.org/wiki/PID_controller.

  The model supports a HYBRID mode in which each motor command can be a tuple
  (desired_motor_angle, position_gain, desired_motor_velocity, velocity_gain,
  torque).
  """

  def __init__(self,
               kp,
               kd,
               torque_limits=None,
               motor_control_mode=MOTOR_CONTROL_POSITION):
    """Initializes the motor model.

    Args:
      kp: Proportional gain(s); scalar or per-motor array.
      kd: Derivative gain(s); scalar or per-motor array.
      torque_limits: Optional symmetric torque bound. A scalar is broadcast to
        all motors; a sequence/array is used per motor.
      motor_control_mode: Default MOTOR_CONTROL_* mode used when
        convert_to_torque is called without an explicit mode.
    """
    self._kp = kp
    self._kd = kd
    self._torque_limits = torque_limits
    if torque_limits is not None:
      # Bug fix: the original check used collections.Sequence, but this
      # module never imports `collections` (NameError at runtime) and the
      # alias was removed from `collections` in Python 3.10. np.isscalar
      # distinguishes a single bound from a per-motor sequence instead.
      if np.isscalar(torque_limits):
        self._torque_limits = np.full(NUM_MOTORS, torque_limits)
      else:
        self._torque_limits = np.asarray(torque_limits)
    self._motor_control_mode = motor_control_mode
    self._strength_ratios = np.full(NUM_MOTORS, 1)

  def set_strength_ratios(self, ratios):
    """Set the strength of each motors relative to the default value.

    Args:
      ratios: The relative strength of motor output. A numpy array ranging from
        0.0 to 1.0.
    """
    self._strength_ratios = ratios

  def set_motor_gains(self, kp, kd):
    """Set the gains of all motors.

    These gains are PD gains for motor positional control. kp is the
    proportional gain and kd is the derivative gain.

    Args:
      kp: proportional gain of the motors.
      kd: derivative gain of the motors.
    """
    self._kp = kp
    self._kd = kd

  def set_voltage(self, voltage):
    # No-op: this simplified model does not simulate motor electronics.
    pass

  def get_voltage(self):
    # Always 0 — voltage is not modeled.
    return 0.0

  def set_viscous_damping(self, viscous_damping):
    # No-op: viscous damping is not modeled.
    pass

  def get_viscous_dampling(self):
    # Misspelled name kept for backward compatibility with existing callers.
    return 0.0

  def convert_to_torque(self,
                        motor_commands,
                        motor_angle,
                        motor_velocity,
                        true_motor_velocity,
                        motor_control_mode=None):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      motor_angle: The motor angle observed at the current time step. It is
        actually the true motor angle observed a few milliseconds ago (pd
        latency).
      motor_velocity: The motor velocity observed at the current time step, it
        is actually the true motor velocity a few milliseconds ago (pd latency).
      true_motor_velocity: The true motor velocity. The true velocity is used to
        compute back EMF voltage and viscous damping.
      motor_control_mode: A MotorControlMode enum.

    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    del true_motor_velocity  # Unused in this simplified model.
    if not motor_control_mode:
      motor_control_mode = self._motor_control_mode
    # No processing for motor torques
    if motor_control_mode is MOTOR_CONTROL_TORQUE:
      assert len(motor_commands) == NUM_MOTORS
      motor_torques = self._strength_ratios * motor_commands
      return motor_torques, motor_torques
    desired_motor_angles = None
    desired_motor_velocities = None
    kp = None
    kd = None
    additional_torques = np.full(NUM_MOTORS, 0)
    if motor_control_mode is MOTOR_CONTROL_POSITION:
      assert len(motor_commands) == NUM_MOTORS
      kp = self._kp
      kd = self._kd
      desired_motor_angles = motor_commands
      desired_motor_velocities = np.full(NUM_MOTORS, 0)
    elif motor_control_mode is MOTOR_CONTROL_HYBRID:
      # The input should be a 60 dimension vector: 5 fields per motor,
      # interleaved as (q, kp, qdot, kd, tau).
      assert len(motor_commands) == MOTOR_COMMAND_DIMENSION * NUM_MOTORS
      kp = motor_commands[POSITION_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      kd = motor_commands[VELOCITY_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_angles = motor_commands[
          POSITION_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_velocities = motor_commands[
          VELOCITY_INDEX::MOTOR_COMMAND_DIMENSION]
      additional_torques = motor_commands[TORQUE_INDEX::MOTOR_COMMAND_DIMENSION]
    # PD law plus feed-forward torque.
    motor_torques = -1 * (kp * (motor_angle - desired_motor_angles)) - kd * (
        motor_velocity - desired_motor_velocities) + additional_torques
    motor_torques = self._strength_ratios * motor_torques
    if self._torque_limits is not None:
      if len(self._torque_limits) != len(motor_torques):
        raise ValueError(
            "Torque limits dimension does not match the number of motors.")
      motor_torques = np.clip(motor_torques, -1.0 * self._torque_limits,
                              self._torque_limits)
    return motor_torques, motor_torques
class SimpleRobot(object):
def __init__(self, pybullet_client, robot_uid, simulation_time_step):
self.pybullet_client = pybullet_client
self.time_step = simulation_time_step
self.quadruped = robot_uid
self.num_legs = NUM_LEGS
self.num_motors = NUM_MOTORS
self._BuildJointNameToIdDict()
self._BuildUrdfIds()
self._BuildMotorIdList()
self.ResetPose()
self._motor_enabled_list = [True] * self.num_motors
self._step_counter = 0
self._state_action_counter = 0
self._motor_offset= np.array([ 0., -0.6, 0.66, 0., -0.6, 0.66, 0., -0.6, 0.66, 0., -0.6, 0.66])
self._motor_direction= np.array([-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1])
self.ReceiveObservation()
self._kp = self.GetMotorPositionGains()
self._kd = self.GetMotorVelocityGains()
self._motor_model = LaikagoMotorModel(kp=self._kp, kd=self._kd, motor_control_mode=MOTOR_CONTROL_HYBRID)
self._SettleDownForReset(reset_time=1.0)
def ResetPose(self):
for name in self._joint_name_to_id:
joint_id = self._joint_name_to_id[name]
self.pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(joint_id),
controlMode=self.pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
if "hip_motor_2_chassis_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
elif "upper_leg_2_hip_motor_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
elif "lower_leg_2_upper_leg_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
else:
raise ValueError("The name %s is not recognized as a motor joint." %
name)
self.pybullet_client.resetJointState(
self.quadruped, self._joint_name_to_id[name], angle, targetVelocity=0)
def _SettleDownForReset(self, reset_time):
self.ReceiveObservation()
if reset_time <= 0:
return
for _ in range(500):
self._StepInternal(
INIT_MOTOR_ANGLES,
motor_control_mode=MOTOR_CONTROL_POSITION)
def _GetMotorNames(self):
return MOTOR_NAMES
def _BuildMotorIdList(self):
self._motor_id_list = [
self._joint_name_to_id[motor_name]
for motor_name in self._GetMotorNames()
]
def GetMotorPositionGains(self):
return [220.]*self.num_motors
def GetMotorVelocityGains(self):
return np.array([1., 2., 2., 1., 2., 2., 1., 2., 2., 1., 2., 2.])
def compute_jacobian(self, robot, link_id):
"""Computes the Jacobian matrix for the given link.
Args:
robot: A robot instance.
link_id: The link id as returned from loadURDF.
Returns:
The 3 x N transposed Jacobian matrix. where N is the total DoFs of the
robot. For a quadruped, the first 6 columns of the matrix corresponds to
the CoM translation and rotation. The columns corresponds to a leg can be
extracted with indices [6 + leg_id * 3: 6 + leg_id * 3 + 3].
"""
all_joint_angles = [state[0] for state in robot._joint_states]
zero_vec = [0] * len(all_joint_angles)
jv, _ = self.pybullet_client.calculateJacobian(robot.quadruped, link_id,
(0, 0, 0), all_joint_angles,
zero_vec, zero_vec)
jacobian = np.array(jv)
assert jacobian.shape[0] == 3
return jacobian
def ComputeJacobian(self, leg_id):
"""Compute the Jacobian for a given leg."""
# Does not work for Minitaur which has the four bar mechanism for now.
assert len(self._foot_link_ids) == self.num_legs
return self.compute_jacobian(
robot=self,
link_id=self._foot_link_ids[leg_id],
)
def MapContactForceToJointTorques(self, leg_id, contact_force):
"""Maps the foot contact force to the leg joint torques."""
jv = self.ComputeJacobian(leg_id)
all_motor_torques = np.matmul(contact_force, jv)
motor_torques = {}
motors_per_leg = self.num_motors // self.num_legs
com_dof = 6
for joint_id in range(leg_id * motors_per_leg,
(leg_id + 1) * motors_per_leg):
motor_torques[joint_id] = all_motor_torques[
com_dof + joint_id] * self._motor_direction[joint_id]
return motor_torques
def GetBaseRollPitchYaw(self):
"""Get minitaur's base orientation in euler angle in the world frame.
Returns:
A tuple (roll, pitch, yaw) of the base in world frame.
"""
orientation = self.GetTrueBaseOrientation()
roll_pitch_yaw = self.pybullet_client.getEulerFromQuaternion(orientation)
return np.asarray(roll_pitch_yaw)
def joint_angles_from_link_position(
self,
robot,
link_position,
link_id,
joint_ids,
position_in_world_frame,
base_translation = (0, 0, 0),
base_rotation = (0, 0, 0, 1)):
"""Uses Inverse Kinematics to calculate joint angles.
Args:
robot: A robot instance.
link_position: The (x, y, z) of the link in the body or the world frame,
depending on whether the argument position_in_world_frame is true.
link_id: The link id as returned from loadURDF.
joint_ids: The positional index of the joints. This can be different from
the joint unique ids.
position_in_world_frame: Whether the input link_position is specified
in the world frame or the robot's base frame.
base_translation: Additional base translation.
base_rotation: Additional base rotation.
Returns:
A list of joint angles.
"""
if not position_in_world_frame:
# Projects to local frame.
base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)#robot.GetBasePosition(), robot.GetBaseOrientation()
base_position, base_orientation = robot.pybullet_client.multiplyTransforms(
base_position, base_orientation, base_translation, base_rotation)
# Projects to world space.
world_link_pos, _ = robot.pybullet_client.multiplyTransforms(
base_position, base_orientation, link_position, _IDENTITY_ORIENTATION)
else:
world_link_pos = link_position
ik_solver = 0
all_joint_angles = robot.pybullet_client.calculateInverseKinematics(
robot.quadruped, link_id, world_link_pos, solver=ik_solver)
# Extract the relevant joint angles.
joint_angles = [all_joint_angles[i] for i in joint_ids]
return joint_angles
def ComputeMotorAnglesFromFootLocalPosition(self, leg_id,
foot_local_position):
"""Use IK to compute the motor angles, given the foot link's local position.
Args:
leg_id: The leg index.
foot_local_position: The foot link's position in the base frame.
Returns:
A tuple. The position indices and the angles for all joints along the
leg. The position indices is consistent with the joint orders as returned
by GetMotorAngles API.
"""
return self._EndEffectorIK(
leg_id, foot_local_position, position_in_world_frame=False)
def _EndEffectorIK(self, leg_id, position, position_in_world_frame):
"""Calculate the joint positions from the end effector position."""
assert len(self._foot_link_ids) == self.num_legs
toe_id = self._foot_link_ids[leg_id]
motors_per_leg = self.num_motors // self.num_legs
joint_position_idxs = [
i for i in range(leg_id * motors_per_leg, leg_id * motors_per_leg +
motors_per_leg)
]
joint_angles = self.joint_angles_from_link_position(
robot=self,
link_position=position,
link_id=toe_id,
joint_ids=joint_position_idxs,
position_in_world_frame=position_in_world_frame)
# Joint offset is necessary for Laikago.
joint_angles = np.multiply(
np.asarray(joint_angles) -
np.asarray(self._motor_offset)[joint_position_idxs],
self._motor_direction[joint_position_idxs])
# Return the joing index (the same as when calling GetMotorAngles) as well
# as the angles.
return joint_position_idxs, joint_angles.tolist()
def GetTimeSinceReset(self):
return self._step_counter * self.time_step
def GetHipPositionsInBaseFrame(self):
return _DEFAULT_HIP_POSITIONS
def GetBaseVelocity(self):
"""Get the linear velocity of minitaur's base.
Returns:
The velocity of minitaur's base.
"""
velocity, _ = self.pybullet_client.getBaseVelocity(self.quadruped)
return velocity
def GetTrueBaseOrientation(self):
pos,orn = self.pybullet_client.getBasePositionAndOrientation(
self.quadruped)
return orn
def TransformAngularVelocityToLocalFrame(self, angular_velocity, orientation):
"""Transform the angular velocity from world frame to robot's frame.
Args:
angular_velocity: Angular velocity of the robot in world frame.
orientation: Orientation of the robot represented as a quaternion.
Returns:
angular velocity of based on the given orientation.
"""
# Treat angular velocity as a position vector, then transform based on the
# orientation given by dividing (or multiplying with inverse).
# Get inverse quaternion assuming the vector is at 0,0,0 origin.
_, orientation_inversed = self.pybullet_client.invertTransform([0, 0, 0],
orientation)
# Transform the angular_velocity at neutral orientation using a neutral
# translation and reverse of the given orientation.
relative_velocity, _ = self.pybullet_client.multiplyTransforms(
[0, 0, 0], orientation_inversed, angular_velocity,
self.pybullet_client.getQuaternionFromEuler([0, 0, 0]))
return np.asarray(relative_velocity)
def GetBaseRollPitchYawRate(self):
"""Get the rate of orientation change of the minitaur's base in euler angle.
Returns:
rate of (roll, pitch, yaw) change of the minitaur's base.
"""
angular_velocity = self.pybullet_client.getBaseVelocity(self.quadruped)[1]
orientation = self.GetTrueBaseOrientation()
return self.TransformAngularVelocityToLocalFrame(angular_velocity,
orientation)
def GetFootContacts(self):
all_contacts = self.pybullet_client.getContactPoints(bodyA=self.quadruped)
contacts = [False, False, False, False]
for contact in all_contacts:
# Ignore self contacts
if contact[_BODY_B_FIELD_NUMBER] == self.quadruped:
continue
try:
toe_link_index = self._foot_link_ids.index(
contact[_LINK_A_FIELD_NUMBER])
contacts[toe_link_index] = True
except ValueError:
continue
return contacts
def GetTrueMotorAngles(self):
"""Gets the eight motor angles at the current moment, mapped to [-pi, pi].
Returns:
Motor angles, mapped to [-pi, pi].
"""
self.ReceiveObservation()
motor_angles = [state[0] for state in self._joint_states]
motor_angles = np.multiply(
np.asarray(motor_angles) - np.asarray(self._motor_offset),
self._motor_direction)
return motor_angles
def GetPDObservation(self):
self.ReceiveObservation()
observation = []
observation.extend(self.GetTrueMotorAngles())
observation.extend(self.GetTrueMotorVelocities())
q = observation[0:self.num_motors]
qdot = observation[self.num_motors:2 * self.num_motors]
return (np.array(q), np.array(qdot))
def GetTrueMotorVelocities(self):
"""Get the velocity of all eight motors.
Returns:
Velocities of all eight motors.
"""
motor_velocities = [state[1] for state in self._joint_states]
motor_velocities = np.multiply(motor_velocities, self._motor_direction)
return motor_velocities
def GetTrueObservation(self):
self.ReceiveObservation()
observation = []
observation.extend(self.GetTrueMotorAngles())
observation.extend(self.GetTrueMotorVelocities())
observation.extend(self.GetTrueMotorTorques())
observation.extend(self.GetTrueBaseOrientation())
observation.extend(self.GetTrueBaseRollPitchYawRate())
return observation
def ApplyAction(self, motor_commands, motor_control_mode):
"""Apply the motor commands using the motor model.
Args:
motor_commands: np.array. Can be motor angles, torques, hybrid commands
motor_control_mode: A MotorControlMode enum.
"""
motor_commands = np.asarray(motor_commands)
q, qdot = self.GetPDObservation()
qdot_true = self.GetTrueMotorVelocities()
actual_torque, observed_torque = self._motor_model.convert_to_torque(
motor_commands, q, qdot, qdot_true, motor_control_mode)
# The torque is already in the observation space because we use
# GetMotorAngles and GetMotorVelocities.
self._observed_motor_torques = observed_torque
# Transform into the motor space when applying the torque.
self._applied_motor_torque = np.multiply(actual_torque,
self._motor_direction)
motor_ids = []
motor_torques = []
for motor_id, motor_torque, motor_enabled in zip(self._motor_id_list,
self._applied_motor_torque,
self._motor_enabled_list):
if motor_enabled:
motor_ids.append(motor_id)
motor_torques.append(motor_torque)
else:
motor_ids.append(motor_id)
motor_torques.append(0)
self._SetMotorTorqueByIds(motor_ids, motor_torques)
def _SetMotorTorqueByIds(self, motor_ids, torques):
self.pybullet_client.setJointMotorControlArray(
bodyIndex=self.quadruped,
jointIndices=motor_ids,
controlMode=self.pybullet_client.TORQUE_CONTROL,
forces=torques)
  def ReceiveObservation(self):
    """Polls pybullet and caches the joint states of all motors.

    `self._joint_states` holds one pybullet joint-state tuple per motor;
    the other getters read angle (index 0) and velocity (index 1) from this
    cache rather than querying the simulator again.
    """
    self._joint_states = self.pybullet_client.getJointStates(self.quadruped, self._motor_id_list)
  def _StepInternal(self, action, motor_control_mode):
    """Applies one action and advances the simulation by a single substep.

    Refreshes the joint-state cache afterwards so subsequent reads observe
    the post-step state, then bumps the substep counter.
    """
    self.ApplyAction(action, motor_control_mode)
    self.pybullet_client.stepSimulation()
    self.ReceiveObservation()
    self._state_action_counter += 1
def Step(self, action):
"""Steps simulation."""
#if self._enable_action_filter:
# action = self._FilterAction(action)
for i in range(ACTION_REPEAT):
#proc_action = self.ProcessAction(action, i)
proc_action = action
self._StepInternal(proc_action, motor_control_mode=MOTOR_CONTROL_HYBRID)
self._step_counter += 1
def _BuildJointNameToIdDict(self):
num_joints = self.pybullet_client.getNumJoints(self.quadruped)
self._joint_name_to_id = {}
for i in range(num_joints):
joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
self._joint_name_to_id[joint_info[1].decode("UTF-8")] = joint_info[0]
def _BuildUrdfIds(self):
"""Build the link Ids from its name in the URDF file.
Raises:
ValueError: Unknown category of the joint name.
"""
num_joints = self.pybullet_client.getNumJoints(self.quadruped)
self._chassis_link_ids = [-1]
self._leg_link_ids = []
self._motor_link_ids = []
self._knee_link_ids = []
self._foot_link_ids = []
for i in range(num_joints):
joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
joint_name = joint_info[1].decode("UTF-8")
joint_id = self._joint_name_to_id[joint_name]
if CHASSIS_NAME_PATTERN.match(joint_name):
self._chassis_link_ids.append(joint_id)
elif MOTOR_NAME_PATTERN.match(joint_name):
self._motor_link_ids.append(joint_id)
# We either treat the lower leg or the toe as the foot link, depending on
# the urdf version used.
elif KNEE_NAME_PATTERN.match(joint_name):
self._knee_link_ids.append(joint_id)
elif TOE_NAME_PATTERN.match(joint_name):
#assert self._urdf_filename == URDF_WITH_TOES
self._foot_link_ids.append(joint_id)
else:
raise ValueError("Unknown category of joint %s" % joint_name)
self._leg_link_ids.extend(self._knee_link_ids)
self._leg_link_ids.extend(self._foot_link_ids)
#assert len(self._foot_link_ids) == NUM_LEGS
self._chassis_link_ids.sort()
self._motor_link_ids.sort()
self._knee_link_ids.sort()
self._foot_link_ids.sort()
self._leg_link_ids.sort()
return
def link_position_in_base_frame( self, link_id ):
"""Computes the link's local position in the robot frame.
Args:
robot: A robot instance.
link_id: The link to calculate its relative position.
Returns:
The relative position of the link.
"""
base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)
inverse_translation, inverse_rotation = self.pybullet_client.invertTransform(
base_position, base_orientation)
link_state = self.pybullet_client.getLinkState(self.quadruped, link_id)
link_position = link_state[0]
link_local_position, _ = self.pybullet_client.multiplyTransforms(
inverse_translation, inverse_rotation, link_position, (0, 0, 0, 1))
return np.array(link_local_position)
  def GetFootLinkIDs(self):
    """Get list of IDs for all foot links.

    Returns:
      The sorted pybullet link ids collected by _BuildUrdfIds. Note this is
      the internal list itself, not a copy.
    """
    return self._foot_link_ids
def GetFootPositionsInBaseFrame(self):
"""Get the robot's foot position in the base frame."""
assert len(self._foot_link_ids) == self.num_legs
foot_positions = []
for foot_id in self.GetFootLinkIDs():
foot_positions.append(
self.link_position_in_base_frame(link_id=foot_id)
)
return np.array(foot_positions)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/openloop_gait_generator.py | mpc_controller/openloop_gait_generator.py | """Gait pattern planning module."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import logging
import math
import numpy as np
from typing import Any, Sequence
from mpc_controller import gait_generator
# Initial per-leg states for a trot: two legs start in swing and two in
# stance. (Which physical legs these indices map to follows the robot class's
# leg ordering — verify against the robot implementation.)
LAIKAGO_TROTTING = (
    gait_generator.LegState.SWING,
    gait_generator.LegState.STANCE,
    gait_generator.LegState.STANCE,
    gait_generator.LegState.SWING,
)
# Defaults: 0.3 s of stance per leg with a 50% duty factor, i.e. a full
# swing+stance cycle of 0.6 s.
_NOMINAL_STANCE_DURATION = (0.3, 0.3, 0.3, 0.3)
_NOMINAL_DUTY_FACTOR = (0.5, 0.5, 0.5, 0.5)
# Contact-based corrections are suppressed until a leg is at least this far
# (normalized phase) into its current swing/stance stage.
_NOMINAL_CONTACT_DETECTION_PHASE = 0.1
class OpenloopGaitGenerator(gait_generator.GaitGenerator):
  """Generates openloop gaits for quadruped robots.

  A flexible open-loop gait generator. Each leg has its own cycle and duty
  factor. And the state of each leg alternates between stance and swing. One
  can easily formulate a set of common quadruped gaits like trotting, pacing,
  pronking, bounding, etc by tweaking the input parameters.
  """
  def __init__(
      self,
      robot: Any,
      stance_duration: Sequence[float] = _NOMINAL_STANCE_DURATION,
      duty_factor: Sequence[float] = _NOMINAL_DUTY_FACTOR,
      initial_leg_state: Sequence[gait_generator.LegState] = LAIKAGO_TROTTING,
      initial_leg_phase: Sequence[float] = (0, 0, 0, 0),
      contact_detection_phase_threshold:
      float = _NOMINAL_CONTACT_DETECTION_PHASE,
  ):
    """Initializes the class.

    Args:
      robot: A quadruped robot that at least implements the GetFootContacts API
        and num_legs property.
      stance_duration: The desired stance duration.
      duty_factor: The ratio stance_duration / total_gait_cycle.
      initial_leg_state: The desired initial swing/stance state of legs indexed
        by their id.
      initial_leg_phase: The desired initial phase [0, 1] of the legs within the
        full swing + stance cycle.
      contact_detection_phase_threshold: Updates the state of each leg based on
        contact info, when the current normalized phase is greater than this
        threshold. This is essential to remove false positives in contact
        detection when phase switches. For example, a swing foot at the
        beginning of the gait cycle might be still on the ground.
    """
    self._robot = robot
    self._stance_duration = stance_duration
    self._duty_factor = duty_factor
    # swing = full_cycle - stance = stance / duty - stance.
    self._swing_duration = np.array(stance_duration) / np.array(
        duty_factor) - np.array(stance_duration)
    if len(initial_leg_phase) != self._robot.num_legs:
      raise ValueError(
          "The number of leg phases should be the same as number of legs.")
    self._initial_leg_phase = initial_leg_phase
    if len(initial_leg_state) != self._robot.num_legs:
      raise ValueError(
          "The number of leg states should be the same of number of legs.")
    self._initial_leg_state = initial_leg_state
    self._next_leg_state = []
    # The ratio in cycle is duty factor if initial state of the leg is STANCE,
    # and 1 - duty_factor if the initial state of the leg is SWING.
    self._initial_state_ratio_in_cycle = []
    for state, duty in zip(initial_leg_state, duty_factor):
      if state == gait_generator.LegState.SWING:
        self._initial_state_ratio_in_cycle.append(1 - duty)
        self._next_leg_state.append(gait_generator.LegState.STANCE)
      else:
        self._initial_state_ratio_in_cycle.append(duty)
        self._next_leg_state.append(gait_generator.LegState.SWING)
    self._contact_detection_phase_threshold = contact_detection_phase_threshold
    # The normalized phase within swing or stance duration.
    self._normalized_phase = None
    self._leg_state = None
    self._desired_leg_state = None
    self.reset(0)
  def reset(self, current_time):
    """Resets all legs to their initial phase and swing/stance state.

    Args:
      current_time: Unused here; kept for interface compatibility.
    """
    # The normalized phase within swing or stance duration.
    self._normalized_phase = np.zeros(self._robot.num_legs)
    self._leg_state = list(self._initial_leg_state)
    self._desired_leg_state = list(self._initial_leg_state)
  @property
  def desired_leg_state(self) -> Sequence[gait_generator.LegState]:
    """The desired leg SWING/STANCE states.

    Returns:
      The SWING/STANCE states for all legs.
    """
    return self._desired_leg_state
  @property
  def leg_state(self) -> Sequence[gait_generator.LegState]:
    """The leg state after considering contact with ground.

    Returns:
      The actual state of each leg after accounting for contacts.
    """
    return self._leg_state
  @property
  def swing_duration(self) -> Sequence[float]:
    # Per-leg seconds spent in swing each cycle.
    return self._swing_duration
  @property
  def stance_duration(self) -> Sequence[float]:
    # Per-leg seconds spent in stance each cycle.
    return self._stance_duration
  @property
  def normalized_phase(self) -> Sequence[float]:
    """The phase within the current swing or stance cycle.

    Reflects the leg's phase within the current swing or stance stage. For
    example, at the end of the current swing duration, the phase will
    be set to 1 for all swing legs. Same for stance legs.

    Returns:
      Normalized leg phase for all legs.
    """
    return self._normalized_phase
  def update(self, current_time):
    """Advances the open-loop gait to `current_time`, then applies contact
    corrections (EARLY_CONTACT / LOSE_CONTACT) based on foot contacts."""
    contact_state = self._robot.GetFootContacts()
    for leg_id in range(self._robot.num_legs):
      # Here is the explanation behind this logic: We use the phase within the
      # full swing/stance cycle to determine if a swing/stance switch occurs
      # for a leg. The threshold value is the "initial_state_ratio_in_cycle" as
      # explained before. If the current phase is less than the initial state
      # ratio, the leg is either in the initial state or has switched back after
      # one or more full cycles.
      full_cycle_period = (self._stance_duration[leg_id] /
                           self._duty_factor[leg_id])
      # To account for the non-zero initial phase, we offset the time duration
      # with the effect time contribution from the initial leg phase.
      augmented_time = current_time + self._initial_leg_phase[
          leg_id] * full_cycle_period
      phase_in_full_cycle = math.fmod(augmented_time,
                                      full_cycle_period) / full_cycle_period
      ratio = self._initial_state_ratio_in_cycle[leg_id]
      if phase_in_full_cycle < ratio:
        self._desired_leg_state[leg_id] = self._initial_leg_state[leg_id]
        self._normalized_phase[leg_id] = phase_in_full_cycle / ratio
      else:
        # A phase switch happens for this leg.
        self._desired_leg_state[leg_id] = self._next_leg_state[leg_id]
        self._normalized_phase[leg_id] = (phase_in_full_cycle -
                                          ratio) / (1 - ratio)
      self._leg_state[leg_id] = self._desired_leg_state[leg_id]
      # No contact detection at the beginning of each SWING/STANCE phase.
      if (self._normalized_phase[leg_id] <
          self._contact_detection_phase_threshold):
        continue
      if (self._leg_state[leg_id] == gait_generator.LegState.SWING
          and contact_state[leg_id]):
        logging.info("early touch down detected.")
        self._leg_state[leg_id] = gait_generator.LegState.EARLY_CONTACT
      if (self._leg_state[leg_id] == gait_generator.LegState.STANCE
          and not contact_state[leg_id]):
        logging.info("lost contact detected.")
        self._leg_state[leg_id] = gait_generator.LegState.LOSE_CONTACT
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/torque_stance_leg_controller.py | mpc_controller/torque_stance_leg_controller.py | # Lint as: python3
"""A torque based stance controller framework."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from typing import Any, Sequence, Tuple
import numpy as np
import pybullet as p # pytype: disable=import-error
# `sys` is needed by the failure handlers below; it was previously referenced
# without being imported, turning a missing-dependency message into a
# NameError.
import sys

try:
  from mpc_controller import gait_generator as gait_generator_lib
  from mpc_controller import leg_controller
except ImportError:
  print("You need to install motion_imitation")
  print("Either run python3 setup.py install --user in this repo")
  print("or use pip3 install motion_imitation --user")
  sys.exit()

try:
  import mpc_osqp as convex_mpc # pytype: disable=import-error
except ImportError:
  print("You need to install motion_imitation")
  print("Either run python3 setup.py install --user in this repo")
  print("or use pip3 install motion_imitation --user")
  sys.exit()
# Each predicted contact force is a 3D vector (x, y, z) per leg.
_FORCE_DIMENSION = 3
# The QP weights in the convex MPC formulation. See the MIT paper for details:
# https://ieeexplore.ieee.org/document/8594448/
# Intuitively, this is the weights of each state dimension when tracking a
# desired CoM trajectory. The full CoM state is represented by
# (roll_pitch_yaw, position, angular_velocity, velocity, gravity_place_holder).
# _MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0.5, 0.5, 0.2, 0.2, 0.2, 0.1, 0)
# This worked well for in-place stepping in the real robot.
# _MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0., 0., 0.2, 1., 1., 0., 0)
_MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0., 0., 1., 1., 1., 0., 0)
# The MPC plans 10 steps of 0.025 s each, i.e. a 0.25 s lookahead.
_PLANNING_HORIZON_STEPS = 10
_PLANNING_TIMESTEP = 0.025
class TorqueStanceLegController(leg_controller.LegController):
  """A torque based stance leg controller framework.

  Takes in high level parameters like walking speed and turning speed, and
  generates the necessary torques for stance legs.
  """
  def __init__(
      self,
      robot: Any,
      gait_generator: Any,
      state_estimator: Any,
      desired_speed: Tuple[float, float] = (0, 0),
      desired_twisting_speed: float = 0,
      desired_body_height: float = 0.45,
      body_mass: float = 220 / 9.8,
      body_inertia: Tuple[float, float, float, float, float, float, float,
                          float, float] = (0.07335, 0, 0, 0, 0.25068, 0, 0, 0,
                                           0.25447),
      num_legs: int = 4,
      friction_coeffs: Sequence[float] = (0.45, 0.45, 0.45, 0.45),
  ):
    """Initializes the class.

    Tracks the desired position/velocity of the robot by computing proper joint
    torques using MPC module.

    Args:
      robot: A robot instance.
      gait_generator: Used to query the locomotion phase and leg states.
      state_estimator: Estimate the robot states (e.g. CoM velocity).
      desired_speed: desired CoM speed in x-y plane.
      desired_twisting_speed: desired CoM rotating speed in z direction.
      desired_body_height: The standing height of the robot.
      body_mass: The total mass of the robot.
      body_inertia: The inertia matrix in the body principle frame. We assume
        the body principle coordinate frame has x-forward and z-up.
      num_legs: The number of legs used for force planning.
      friction_coeffs: The friction coeffs on the contact surfaces.
    """
    self._robot = robot
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self.desired_speed = desired_speed
    self.desired_twisting_speed = desired_twisting_speed
    self._desired_body_height = desired_body_height
    self._body_mass = body_mass
    self._num_legs = num_legs
    self._friction_coeffs = np.array(friction_coeffs)
    body_inertia_list = list(body_inertia)
    weights_list = list(_MPC_WEIGHTS)
    # The C++ convex-MPC solver is configured once here and reused on every
    # get_action call.
    self._cpp_mpc = convex_mpc.ConvexMpc(
        body_mass,
        body_inertia_list,
        self._num_legs,
        _PLANNING_HORIZON_STEPS,
        _PLANNING_TIMESTEP,
        weights_list,
    )
  def reset(self, current_time):
    # No per-episode state to clear; kept for the LegController interface.
    del current_time
  def update(self, current_time):
    # get_action reads everything it needs fresh from the robot/estimator,
    # so there is nothing to precompute here.
    del current_time
  def get_action(self):
    """Computes the torque for stance legs.

    Returns:
      A tuple (action, contact_forces): `action` maps joint id to a 5-tuple
      hybrid command whose last entry is the torque (PD terms zeroed), and
      `contact_forces` maps leg id to its predicted 3D contact force.
    """
    desired_com_position = np.array((0., 0., self._desired_body_height),
                                    dtype=np.float64)
    desired_com_velocity = np.array(
        (self.desired_speed[0], self.desired_speed[1], 0.), dtype=np.float64)
    desired_com_roll_pitch_yaw = np.array((0., 0., 0.), dtype=np.float64)
    desired_com_angular_velocity = np.array(
        (0., 0., self.desired_twisting_speed), dtype=np.float64)
    # A leg bears load when it is in stance or has touched down early.
    foot_contact_state = np.array(
        [(leg_state in (gait_generator_lib.LegState.STANCE,
                        gait_generator_lib.LegState.EARLY_CONTACT))
         for leg_state in self._gait_generator.desired_leg_state],
        dtype=np.int32)
    # We use the body yaw aligned world frame for MPC computation.
    com_roll_pitch_yaw = np.array(self._robot.GetBaseRollPitchYaw(),
                                  dtype=np.float64)
    com_roll_pitch_yaw[2] = 0
    #predicted_contact_forces=[0]*self._num_legs*_FORCE_DIMENSION
    # print("Com Vel: {}".format(self._state_estimator.com_velocity_body_frame))
    # print("Com RPY: {}".format(self._robot.GetBaseRollPitchYawRate()))
    # print("Com RPY Rate: {}".format(self._robot.GetBaseRollPitchYawRate()))
    p.submitProfileTiming("predicted_contact_forces")
    predicted_contact_forces = self._cpp_mpc.compute_contact_forces(
        [0],  #com_position
        np.asarray(self._state_estimator.com_velocity_body_frame,
                   dtype=np.float64),  #com_velocity
        np.array(com_roll_pitch_yaw, dtype=np.float64),  #com_roll_pitch_yaw
        # Angular velocity in the yaw aligned world frame is actually different
        # from rpy rate. We use it here as a simple approximation.
        np.asarray(self._robot.GetBaseRollPitchYawRate(),
                   dtype=np.float64),  #com_angular_velocity
        foot_contact_state,  #foot_contact_states
        np.array(self._robot.GetFootPositionsInBaseFrame().flatten(),
                 dtype=np.float64),  #foot_positions_base_frame
        self._friction_coeffs,  #foot_friction_coeffs
        desired_com_position,  #desired_com_position
        desired_com_velocity,  #desired_com_velocity
        desired_com_roll_pitch_yaw,  #desired_com_roll_pitch_yaw
        desired_com_angular_velocity  #desired_com_angular_velocity
    )
    p.submitProfileTiming()
    # sol = np.array(predicted_contact_forces).reshape((-1, 12))
    # x_dim = np.array([0, 3, 6, 9])
    # y_dim = x_dim + 1
    # z_dim = y_dim + 1
    # print("Y_forces: {}".format(sol[:, y_dim]))
    # Unpack the flat solver output into one 3-vector per leg.
    contact_forces = {}
    for i in range(self._num_legs):
      contact_forces[i] = np.array(
          predicted_contact_forces[i * _FORCE_DIMENSION:(i + 1) *
                                   _FORCE_DIMENSION])
    action = {}
    for leg_id, force in contact_forces.items():
      # While "Lose Contact" is useful in simulation, in real environment it's
      # susceptible to sensor noise. Disabling for now.
      # if self._gait_generator.leg_state[
      #     leg_id] == gait_generator_lib.LegState.LOSE_CONTACT:
      #   force = (0, 0, 0)
      motor_torques = self._robot.MapContactForceToJointTorques(leg_id, force)
      for joint_id, torque in motor_torques.items():
        # Pure-torque hybrid command: PD terms zeroed, torque in the last slot.
        action[joint_id] = (0, 0, 0, 0, torque)
    return action, contact_forces
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/raibert_swing_leg_controller.py | mpc_controller/raibert_swing_leg_controller.py | """The swing leg controller class."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import copy
import math
import numpy as np
from typing import Any, Mapping, Sequence, Tuple
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import leg_controller
# The position correction coefficients in Raibert's formula. Applied to the
# hip velocity error when choosing the foot landing point.
_KP = np.array([0.01, 0.01, 0.01]) * 3
# At the end of swing, we leave a small clearance to prevent unexpected foot
# collision.
_FOOT_CLEARANCE_M = 0.01
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
"""Gets a point on a parabola y = a x^2 + b x + c.
The Parabola is determined by three points (0, start), (0.5, mid), (1, end) in
the plane.
Args:
phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
start: The y value at x == 0.
mid: The y value at x == 0.5.
end: The y value at x == 1.
Returns:
The y value at x == phase.
"""
mid_phase = 0.5
delta_1 = mid - start
delta_2 = end - start
delta_3 = mid_phase**2 - mid_phase
coef_a = (delta_1 - delta_2 * mid_phase) / delta_3
coef_b = (delta_2 * mid_phase**2 - delta_1) / delta_3
coef_c = start
return coef_a * phase**2 + coef_b * phase + coef_c
def _gen_swing_foot_trajectory(input_phase: float, start_pos: Sequence[float],
                               end_pos: Sequence[float]) -> Tuple[float]:
  """Generates the swing trajectory using a parabola.

  Args:
    input_phase: the swing/stance phase value between [0, 1].
    start_pos: The foot's position at the beginning of swing cycle.
    end_pos: The foot's desired position at the end of swing cycle.

  Returns:
    The desired foot position at the current phase.
  """
  # Re-time the swing so 80% of the horizontal travel finishes in the first
  # half of the cycle and the remaining 20% in the second half. Moving the
  # swing foot to its landing spot early (while staying above the ground)
  # makes the controller more robust to body perturbations that could drop
  # the foot prematurely — a common practice similar to the MIT cheetah and
  # Marc Raibert's original controllers.
  if input_phase <= 0.5:
    blend = 0.8 * math.sin(input_phase * math.pi)
  else:
    blend = 0.8 + (input_phase - 0.5) * 0.4
  # Horizontal coordinates interpolate linearly in the re-timed phase.
  foot_x = (1 - blend) * start_pos[0] + blend * end_pos[0]
  foot_y = (1 - blend) * start_pos[1] + blend * end_pos[1]
  # The vertical coordinate follows a parabola whose apex clears the higher
  # endpoint by a fixed margin.
  max_clearance = 0.1
  apex = max(end_pos[2], start_pos[2]) + max_clearance
  foot_z = _gen_parabola(blend, start_pos[2], apex, end_pos[2])
  # PyType detects the wrong return type here.
  return (foot_x, foot_y, foot_z)  # pytype: disable=bad-return-type
class RaibertSwingLegController(leg_controller.LegController):
  """Controls the swing leg position using Raibert's formula.

  For details, please refer to chapter 2 in "Legged robots that balance" by
  Marc Raibert. The key idea is to stabilize the swing foot's location based
  on the CoM moving speed.
  """
  def __init__(
      self,
      robot: Any,
      gait_generator: Any,
      state_estimator: Any,
      desired_speed: Tuple[float, float],
      desired_twisting_speed: float,
      desired_height: float,
      foot_clearance: float,
  ):
    """Initializes the class.

    Args:
      robot: A robot instance.
      gait_generator: Generates the stance/swing pattern.
      state_estimator: Estimates the CoM speeds.
      desired_speed: Behavior parameters. X-Y speed.
      desired_twisting_speed: Behavior control parameters.
      desired_height: Desired standing height.
      foot_clearance: The foot clearance on the ground at the end of the swing
        cycle.
    """
    self._robot = robot
    self._state_estimator = state_estimator
    self._gait_generator = gait_generator
    self._last_leg_state = gait_generator.desired_leg_state
    self.desired_speed = np.array((desired_speed[0], desired_speed[1], 0))
    self.desired_twisting_speed = desired_twisting_speed
    # Touchdown height target, expressed as a base-frame z offset.
    self._desired_height = np.array((0, 0, desired_height - foot_clearance))
    self._joint_angles = None
    self._phase_switch_foot_local_position = None
    self.reset(0)
  def reset(self, current_time: float) -> None:
    """Called during the start of a swing cycle.

    Args:
      current_time: The wall time in seconds.
    """
    del current_time
    self._last_leg_state = self._gait_generator.desired_leg_state
    # Remember each foot's base-frame position at the start of its swing.
    self._phase_switch_foot_local_position = (
        self._robot.GetFootPositionsInBaseFrame())
    self._joint_angles = {}
  def update(self, current_time: float) -> None:
    """Called at each control step.

    Args:
      current_time: The wall time in seconds.
    """
    del current_time
    new_leg_state = self._gait_generator.desired_leg_state
    # Detects phase switch for each leg so we can remember the feet position at
    # the beginning of the swing phase.
    for leg_id, state in enumerate(new_leg_state):
      if (state == gait_generator_lib.LegState.SWING
          and state != self._last_leg_state[leg_id]):
        self._phase_switch_foot_local_position[leg_id] = (
            self._robot.GetFootPositionsInBaseFrame()[leg_id])
    self._last_leg_state = copy.deepcopy(new_leg_state)
  def get_action(self) -> Mapping[Any, Any]:
    """Returns hybrid PD position commands for all swing-leg joints."""
    com_velocity = self._state_estimator.com_velocity_body_frame
    # Only the horizontal CoM velocity matters for foot placement.
    com_velocity = np.array((com_velocity[0], com_velocity[1], 0))
    _, _, yaw_dot = self._robot.GetBaseRollPitchYawRate()
    hip_positions = self._robot.GetHipPositionsInBaseFrame()
    for leg_id, leg_state in enumerate(self._gait_generator.leg_state):
      # Legs on the ground are handled by the stance controller.
      if leg_state in (gait_generator_lib.LegState.STANCE,
                       gait_generator_lib.LegState.EARLY_CONTACT):
        continue
      # For now we did not consider the body pitch/roll and all calculation is
      # in the body frame. TODO(b/143378213): Calculate the foot_target_position
      # in world frame and then project back to calculate the joint angles.
      hip_offset = hip_positions[leg_id]
      twisting_vector = np.array((-hip_offset[1], hip_offset[0], 0))
      hip_horizontal_velocity = com_velocity + yaw_dot * twisting_vector
      # print("Leg: {}, ComVel: {}, Yaw_dot: {}".format(leg_id, com_velocity,
      #                                                 yaw_dot))
      # print(hip_horizontal_velocity)
      target_hip_horizontal_velocity = (
          self.desired_speed + self.desired_twisting_speed * twisting_vector)
      # Raibert heuristic: neutral point (v * T_stance / 2) plus a velocity
      # error correction, offset to hip position and touchdown height.
      foot_target_position = (
          hip_horizontal_velocity *
          self._gait_generator.stance_duration[leg_id] / 2 - _KP *
          (target_hip_horizontal_velocity - hip_horizontal_velocity)
      ) - self._desired_height + np.array((hip_offset[0], hip_offset[1], 0))
      foot_position = _gen_swing_foot_trajectory(
          self._gait_generator.normalized_phase[leg_id],
          self._phase_switch_foot_local_position[leg_id], foot_target_position)
      joint_ids, joint_angles = (
          self._robot.ComputeMotorAnglesFromFootLocalPosition(
              leg_id, foot_position))
      # Update the stored joint angles as needed.
      for joint_id, joint_angle in zip(joint_ids, joint_angles):
        self._joint_angles[joint_id] = (joint_angle, leg_id)
    action = {}
    kps = self._robot.GetMotorPositionGains()
    kds = self._robot.GetMotorVelocityGains()
    for joint_id, joint_angle_leg_id in self._joint_angles.items():
      leg_id = joint_angle_leg_id[1]
      if self._gait_generator.desired_leg_state[
          leg_id] == gait_generator_lib.LegState.SWING:
        # This is a hybrid action for PD control.
        action[joint_id] = (joint_angle_leg_id[0], kps[joint_id], 0,
                            kds[joint_id], 0)
    return action
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/locomotion_controller_example.py | mpc_controller/locomotion_controller_example.py |
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
import scipy.interpolate
import numpy as np
import pybullet_data as pd
from pybullet_utils import bullet_client
import time
import pybullet
import random
from mpc_controller import com_velocity_estimator
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import locomotion_controller
from mpc_controller import openloop_gait_generator
from mpc_controller import raibert_swing_leg_controller
from mpc_controller import torque_stance_leg_controller
#uncomment the robot of choice
#from mpc_controller import laikago_sim as robot_sim
from mpc_controller import a1_sim as robot_sim
FLAGS = flags.FLAGS
_NUM_SIMULATION_ITERATION_STEPS = 300
_STANCE_DURATION_SECONDS = [
    0.3
] * 4  # For faster trotting (v > 1.5 m/s) reduce this to 0.13s.
# Standing
# _DUTY_FACTOR = [1.] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0., 0., 0.]
# _MAX_TIME_SECONDS = 5
# _INIT_LEG_STATE = (
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.STANCE,
# )
# Tripod
# _DUTY_FACTOR = [.8] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0.25, 0.5, 0.]
# _MAX_TIME_SECONDS = 5
# _INIT_LEG_STATE = (
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.STANCE,
#     gait_generator_lib.LegState.SWING,
# )
# Trotting: two legs start in swing, two in stance, with a 0.9 phase offset.
_DUTY_FACTOR = [0.6] * 4
_INIT_PHASE_FULL_CYCLE = [0.9, 0, 0, 0.9]
_MAX_TIME_SECONDS = 50
_INIT_LEG_STATE = (
    gait_generator_lib.LegState.SWING,
    gait_generator_lib.LegState.STANCE,
    gait_generator_lib.LegState.STANCE,
    gait_generator_lib.LegState.SWING,
)
def _generate_example_linear_angular_speed(t):
  """Creates an example speed profile based on time for demo purpose.

  Returns a (linear_speed_xyz, angular_speed_z) pair sampled from a
  piecewise-constant ("previous"-sample) schedule.
  """
  forward = 0.6 * robot_sim.MPC_VELOCITY_MULTIPLIER
  lateral = 0.2 * robot_sim.MPC_VELOCITY_MULTIPLIER
  twist = 0.8 * robot_sim.MPC_VELOCITY_MULTIPLIER
  # (time, (vx, vy, vz, wz)) knots; each value holds until the next knot.
  schedule = (
      (0, (0, 0, 0, 0)),
      (5, (0, 0, 0, twist)),
      (10, (forward, 0, 0, 0)),
      (15, (0, 0, 0, -twist)),
      (20, (0, -lateral, 0, 0)),
      (25, (0, 0, 0, 0)),
      (30, (0, 0, 0, twist)),
  )
  time_points = [knot[0] for knot in schedule]
  speed_points = [knot[1] for knot in schedule]
  sample = scipy.interpolate.interp1d(
      time_points,
      speed_points,
      kind="previous",
      fill_value="extrapolate",
      axis=0)(t)
  return sample[0:3], sample[3]
def _setup_controller(robot):
  """Demonstrates how to create a locomotion controller.

  Wires together the open-loop gait generator, the CoM velocity estimator,
  the Raibert swing-leg controller and the torque-based MPC stance-leg
  controller into a single LocomotionController.
  """
  initial_speed = (0, 0)
  initial_twisting_speed = 0
  gait_gen = openloop_gait_generator.OpenloopGaitGenerator(
      robot,
      stance_duration=_STANCE_DURATION_SECONDS,
      duty_factor=_DUTY_FACTOR,
      initial_leg_phase=_INIT_PHASE_FULL_CYCLE,
      initial_leg_state=_INIT_LEG_STATE)
  velocity_estimator = com_velocity_estimator.COMVelocityEstimator(
      robot, window_size=20)
  swing_controller = raibert_swing_leg_controller.RaibertSwingLegController(
      robot,
      gait_gen,
      velocity_estimator,
      desired_speed=initial_speed,
      desired_twisting_speed=initial_twisting_speed,
      desired_height=robot_sim.MPC_BODY_HEIGHT,
      foot_clearance=0.01)
  stance_controller = torque_stance_leg_controller.TorqueStanceLegController(
      robot,
      gait_gen,
      velocity_estimator,
      desired_speed=initial_speed,
      desired_twisting_speed=initial_twisting_speed,
      desired_body_height=robot_sim.MPC_BODY_HEIGHT,
      body_mass=robot_sim.MPC_BODY_MASS,
      body_inertia=robot_sim.MPC_BODY_INERTIA)
  return locomotion_controller.LocomotionController(
      robot=robot,
      gait_generator=gait_gen,
      state_estimator=velocity_estimator,
      swing_leg_controller=swing_controller,
      stance_leg_controller=stance_controller,
      clock=robot.GetTimeSinceReset)
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def _run_example(max_time=_MAX_TIME_SECONDS):
  """Runs the locomotion controller example.

  Sets up a pybullet GUI world (flat plane or random heightfield), loads the
  robot, builds the MPC locomotion controller, then steps the simulation
  until `max_time` seconds of simulated time have elapsed.

  Args:
    max_time: Simulated seconds to run before returning.
  """
  #recording video requires ffmpeg in the path
  record_video = False
  if record_video:
    # Use the raw pybullet module so the --mp4 connect option is honored.
    p = pybullet
    p.connect(p.GUI, options="--width=1280 --height=720 --mp4=\"test.mp4\" --mp4fps=100")
    p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING,1)
  else:
    p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
    p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)
    # Rendering is disabled while the world is built and re-enabled below.
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)
  p.setAdditionalSearchPath(pd.getDataPath())
  num_bullet_solver_iterations = 30
  # NOTE(review): numSolverIterations, enableConeFriction and the data search
  # path are each set twice in this function; the repeats are redundant but
  # harmless.
  p.setPhysicsEngineParameter(numSolverIterations=num_bullet_solver_iterations)
  p.setPhysicsEngineParameter(enableConeFriction=0)
  p.setPhysicsEngineParameter(numSolverIterations=30)
  simulation_time_step = 0.001
  p.setTimeStep(simulation_time_step)
  p.setGravity(0, 0, -9.8)
  p.setPhysicsEngineParameter(enableConeFriction=0)
  p.setAdditionalSearchPath(pd.getDataPath())
  #random.seed(10)
  #p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)
  heightPerturbationRange = 0.06
  # Toggle between a flat plane (True) and a random bumpy heightfield (False).
  plane = True
  if plane:
    p.loadURDF("plane.urdf")
    #planeShape = p.createCollisionShape(shapeType = p.GEOM_PLANE)
    #ground_id = p.createMultiBody(0, planeShape)
  else:
    numHeightfieldRows = 256
    numHeightfieldColumns = 256
    heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns
    # Fill 2x2 cells with the same random height so the terrain stays
    # piecewise flat at the grid resolution.
    for j in range (int(numHeightfieldColumns/2)):
      for i in range (int(numHeightfieldRows/2) ):
        height = random.uniform(0,heightPerturbationRange)
        heightfieldData[2*i+2*j*numHeightfieldRows]=height
        heightfieldData[2*i+1+2*j*numHeightfieldRows]=height
        heightfieldData[2*i+(2*j+1)*numHeightfieldRows]=height
        heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows]=height
    terrainShape = p.createCollisionShape(shapeType = p.GEOM_HEIGHTFIELD, meshScale=[.05,.05,1], heightfieldTextureScaling=(numHeightfieldRows-1)/2, heightfieldData=heightfieldData, numHeightfieldRows=numHeightfieldRows, numHeightfieldColumns=numHeightfieldColumns)
    ground_id = p.createMultiBody(0, terrainShape)
    #p.resetBasePositionAndOrientation(ground_id,[0,0,0], [0,0,0,1])
  #p.changeDynamics(ground_id, -1, lateralFriction=1.0)
  robot_uid = p.loadURDF(robot_sim.URDF_NAME, robot_sim.START_POS)
  robot = robot_sim.SimpleRobot(p, robot_uid, simulation_time_step=simulation_time_step)
  controller = _setup_controller(robot)
  controller.reset()
  # World construction is done; turn rendering back on.
  p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
  #while p.isConnected():
  #  pos,orn = p.getBasePositionAndOrientation(robot_uid)
  #  print("pos=",pos)
  #  p.stepSimulation()
  #  time.sleep(1./240)
  current_time = robot.GetTimeSinceReset()
  #logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "mpc.json")
  while current_time < max_time:
    #pos,orn = p.getBasePositionAndOrientation(robot_uid)
    #print("pos=",pos, " orn=",orn)
    p.submitProfileTiming("loop")
    # Updates the controller behavior parameters.
    lin_speed, ang_speed = _generate_example_linear_angular_speed(current_time)
    #lin_speed, ang_speed = (0., 0., 0.), 0.
    _update_controller_params(controller, lin_speed, ang_speed)
    # Needed before every call to get_action().
    controller.update()
    hybrid_action, info = controller.get_action()
    robot.Step(hybrid_action)
    #if record_video:
    p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING,1)
    # Slow the loop slightly so the GUI stays watchable.
    time.sleep(0.003)
    current_time = robot.GetTimeSinceReset()
    p.submitProfileTiming()
  #p.stopStateLogging(logId)
  #while p.isConnected():
  #  time.sleep(0.1)
def main(argv):
  """absl entry point: ignores command-line arguments and runs the demo."""
  del argv  # Unused; the parameter is required by the absl.app.run interface.
  _run_example()
# Script entry point: delegate flag parsing to absl and run the example.
if __name__ == "__main__":
  app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/spirit40_sim.py | mpc_controller/spirit40_sim.py | import re
import numpy as np
# Robot model and MPC physical parameters for the Spirit40 quadruped.
URDF_NAME = "quadruped/spirit40newer.urdf"
START_POS = [0, 0, 0.43]
MPC_BODY_MASS = 12
# Flattened 3x3 body inertia (row-major): only the diagonal is non-zero.
MPC_BODY_INERTIA = (0.07335, 0, 0, 0, 0.25068, 0, 0, 0, 0.25447)
MPC_BODY_HEIGHT = 0.32
# Physics step in seconds; one Step() call advances ACTION_REPEAT such steps.
time_step = 0.001
ACTION_REPEAT = 10
MPC_VELOCITY_MULTIPLIER = 0.7
_IDENTITY_ORIENTATION=[0,0,0,1]
# Joint-name patterns used to classify URDF joints into link categories.
HIP_NAME_PATTERN = re.compile(r"\w+_hip_\w+")
UPPER_NAME_PATTERN = re.compile(r"\w+_upper_\w+")
LOWER_NAME_PATTERN = re.compile(r"\w+_lower_\w+")
TOE_NAME_PATTERN = re.compile(r"\w+_toe\d*")
IMU_NAME_PATTERN = re.compile(r"imu\d*")
# Nominal hip positions in the base frame, one per leg (x forward, y left).
_DEFAULT_HIP_POSITIONS = (
    (0.23, -0.12, 0),
    (0.23, 0.12, 0),
    (-0.23, -0.12, 0),
    (-0.23, 0.12, 0),
)
# Field indices into pybullet getContactPoints() tuples.
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0
KNEE_JOINT_OFFSET = 0
# Default standing pose (radians) for each of the three joints per leg.
SPIRIT40_DEFAULT_ABDUCTION_ANGLE = 0
SPIRIT40_DEFAULT_HIP_ANGLE = -0.7
SPIRIT40_DEFAULT_KNEE_ANGLE = 1.4
NUM_LEGS = 4
NUM_MOTORS = 12
# Based on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([
    SPIRIT40_DEFAULT_ABDUCTION_ANGLE,
    SPIRIT40_DEFAULT_HIP_ANGLE,
    SPIRIT40_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)
# Motor ordering: (FR, FL, RR, RL) x (hip, upper, lower).
MOTOR_NAMES = [
    "FR_hip_joint",
    "FR_upper_joint",
    "FR_lower_joint",
    "FL_hip_joint",
    "FL_upper_joint",
    "FL_lower_joint",
    "RR_hip_joint",
    "RR_upper_joint",
    "RR_lower_joint",
    "RL_hip_joint",
    "RL_upper_joint",
    "RL_lower_joint",
]
#Use a PD controller
MOTOR_CONTROL_POSITION = 1
# Apply motor torques directly.
MOTOR_CONTROL_TORQUE = 2
# Apply a tuple (q, qdot, kp, kd, tau) for each motor. Here q, qdot are motor
# position and velocities. kp and kd are PD gains. tau is the additional
# motor torque. This is the most flexible control mode.
MOTOR_CONTROL_HYBRID = 3
MOTOR_CONTROL_PWM = 4 #only for Minitaur
MOTOR_COMMAND_DIMENSION = 5
# These values represent the indices of each field in the motor command tuple
POSITION_INDEX = 0
POSITION_GAIN_INDEX = 1
VELOCITY_INDEX = 2
VELOCITY_GAIN_INDEX = 3
TORQUE_INDEX = 4
class LaikagoMotorModel(object):
  """A simple motor model for Laikago.

  When in POSITION mode, the torque is calculated according to the difference
  between current and desired joint angle, as well as the joint velocity.
  For more information about PD control, please refer to:
  https://en.wikipedia.org/wiki/PID_controller.

  The model supports a HYBRID mode in which each motor command can be a tuple
  (desired_motor_angle, position_gain, desired_motor_velocity, velocity_gain,
  torque).
  """

  def __init__(self,
               kp,
               kd,
               torque_limits=None,
               motor_control_mode=MOTOR_CONTROL_POSITION):
    """Initializes the motor model.

    Args:
      kp: Proportional (position) gains; scalar or per-motor array.
      kd: Derivative (velocity) gains; scalar or per-motor array.
      torque_limits: Optional symmetric torque bound. Either a scalar applied
        to all motors or a per-motor sequence/array of length NUM_MOTORS.
      motor_control_mode: Mode used when convert_to_torque() is called
        without an explicit mode.
    """
    self._kp = kp
    self._kd = kd
    self._torque_limits = torque_limits
    if torque_limits is not None:
      # Bug fix: the original tested isinstance(torque_limits,
      # collections.Sequence) but `collections` was never imported in this
      # module (NameError at runtime), and the `collections.Sequence` alias
      # was removed in Python 3.10 (it lives in collections.abc). numpy's
      # scalar test covers the same scalar-vs-sequence distinction.
      if np.isscalar(torque_limits):
        self._torque_limits = np.full(NUM_MOTORS, torque_limits)
      else:
        self._torque_limits = np.asarray(torque_limits)
    self._motor_control_mode = motor_control_mode
    self._strength_ratios = np.full(NUM_MOTORS, 1)

  def set_strength_ratios(self, ratios):
    """Set the strength of each motors relative to the default value.

    Args:
      ratios: The relative strength of motor output. A numpy array ranging from
        0.0 to 1.0.
    """
    self._strength_ratios = ratios

  def set_motor_gains(self, kp, kd):
    """Set the gains of all motors.

    These gains are PD gains for motor positional control. kp is the
    proportional gain and kd is the derivative gain.

    Args:
      kp: proportional gain of the motors.
      kd: derivative gain of the motors.
    """
    self._kp = kp
    self._kd = kd

  def set_voltage(self, voltage):
    # Voltage is not modeled; kept for interface compatibility.
    pass

  def get_voltage(self):
    # Voltage is not modeled; always reports zero.
    return 0.0

  def set_viscous_damping(self, viscous_damping):
    # Viscous damping is not modeled; kept for interface compatibility.
    pass

  def get_viscous_dampling(self):
    # Viscous damping is not modeled; always reports zero.
    return 0.0

  def convert_to_torque(self,
                        motor_commands,
                        motor_angle,
                        motor_velocity,
                        true_motor_velocity,
                        motor_control_mode=None):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      motor_angle: The motor angle observed at the current time step. It is
        actually the true motor angle observed a few milliseconds ago (pd
        latency).
      motor_velocity: The motor velocity observed at the current time step, it
        is actually the true motor velocity a few milliseconds ago (pd latency).
      true_motor_velocity: The true motor velocity. The true velocity is used to
        compute back EMF voltage and viscous damping.
      motor_control_mode: A MotorControlMode enum.

    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    del true_motor_velocity
    if motor_control_mode is None:
      # Fall back to the mode configured at construction time.
      motor_control_mode = self._motor_control_mode
    # No processing for motor torques. Fix: compare the plain-int mode
    # constants with == rather than `is` — identity comparison of ints only
    # works by CPython's small-int caching accident.
    if motor_control_mode == MOTOR_CONTROL_TORQUE:
      assert len(motor_commands) == NUM_MOTORS
      motor_torques = self._strength_ratios * motor_commands
      return motor_torques, motor_torques
    desired_motor_angles = None
    desired_motor_velocities = None
    kp = None
    kd = None
    additional_torques = np.full(NUM_MOTORS, 0)
    if motor_control_mode == MOTOR_CONTROL_POSITION:
      assert len(motor_commands) == NUM_MOTORS
      kp = self._kp
      kd = self._kd
      desired_motor_angles = motor_commands
      desired_motor_velocities = np.full(NUM_MOTORS, 0)
    elif motor_control_mode == MOTOR_CONTROL_HYBRID:
      # The input should be a 60 dimension vector: 5 fields per motor, strided
      # by MOTOR_COMMAND_DIMENSION.
      assert len(motor_commands) == MOTOR_COMMAND_DIMENSION * NUM_MOTORS
      kp = motor_commands[POSITION_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      kd = motor_commands[VELOCITY_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_angles = motor_commands[
          POSITION_INDEX::MOTOR_COMMAND_DIMENSION]
      desired_motor_velocities = motor_commands[
          VELOCITY_INDEX::MOTOR_COMMAND_DIMENSION]
      additional_torques = motor_commands[TORQUE_INDEX::MOTOR_COMMAND_DIMENSION]
    # Classic PD law plus feedforward torque.
    motor_torques = -1 * (kp * (motor_angle - desired_motor_angles)) - kd * (
        motor_velocity - desired_motor_velocities) + additional_torques
    motor_torques = self._strength_ratios * motor_torques
    if self._torque_limits is not None:
      if len(self._torque_limits) != len(motor_torques):
        raise ValueError(
            "Torque limits dimension does not match the number of motors.")
      motor_torques = np.clip(motor_torques, -1.0 * self._torque_limits,
                              self._torque_limits)
    return motor_torques, motor_torques
class SimpleRobot(object):
  """Minimal pybullet wrapper around an already-loaded quadruped body.

  Exposes the state queries (joint angles/velocities, base pose, foot
  positions and contacts), the kinematics helpers (Jacobian, IK) and the
  hybrid motor-command stepping interface that the MPC swing/stance
  controllers expect.
  """
  def __init__(self, pybullet_client, robot_uid):
    """Builds lookup tables, resets the pose and lets the robot settle.

    Args:
      pybullet_client: A pybullet module or BulletClient instance.
      robot_uid: Body unique id as returned by pybullet's loadURDF.
    """
    self.pybullet_client = pybullet_client
    self.quadruped = robot_uid
    self.num_legs = NUM_LEGS
    self.num_motors = NUM_MOTORS
    self._BuildJointNameToIdDict()
    self._BuildUrdfIds()
    self._BuildMotorIdList()
    self.ResetPose()
    self._motor_enabled_list = [True] * self.num_motors
    self._step_counter = 0
    self._state_action_counter = 0
    # Zero offsets and all-positive directions: in simulation the joint
    # conventions already match the motor conventions.
    self._motor_offset= np.array([0]*12)
    self._motor_direction= np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    self.ReceiveObservation()
    self._kp = self.GetMotorPositionGains()
    self._kd = self.GetMotorVelocityGains()
    self._motor_model = LaikagoMotorModel(kp=self._kp, kd=self._kd, motor_control_mode=MOTOR_CONTROL_HYBRID)
    self._SettleDownForReset(reset_time=1.0)
  def ResetPose(self):
    """Disables pybullet's default velocity motors and sets the init pose."""
    for name in self._joint_name_to_id:
      joint_id = self._joint_name_to_id[name]
      # force=0 releases the implicit velocity motor so torque control works.
      self.pybullet_client.setJointMotorControl2(
          bodyIndex=self.quadruped,
          jointIndex=(joint_id),
          controlMode=self.pybullet_client.VELOCITY_CONTROL,
          targetVelocity=0,
          force=0)
    for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
      if "hip_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
      elif "upper_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
      elif "lower_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
      else:
        raise ValueError("The name %s is not recognized as a motor joint." %
                         name)
      self.pybullet_client.resetJointState(
          self.quadruped, self._joint_name_to_id[name], angle, targetVelocity=0)
  def _SettleDownForReset(self, reset_time):
    """Runs position control at the init pose so the robot settles.

    Args:
      reset_time: Settling is skipped entirely when <= 0.
        NOTE(review): any positive value runs the same fixed 500 steps; the
        actual duration does not depend on this argument.
    """
    self.ReceiveObservation()
    if reset_time <= 0:
      return
    for _ in range(500):
      self._StepInternal(
          INIT_MOTOR_ANGLES,
          motor_control_mode=MOTOR_CONTROL_POSITION)
  def _GetMotorNames(self):
    # Ordered list of the 12 actuated joint names.
    return MOTOR_NAMES
  def _BuildMotorIdList(self):
    # pybullet joint indices in MOTOR_NAMES order.
    self._motor_id_list = [
        self._joint_name_to_id[motor_name]
        for motor_name in self._GetMotorNames()
    ]
  def GetMotorPositionGains(self):
    """Returns the per-motor proportional (position) gains."""
    return [220.]*self.num_motors
  def GetMotorVelocityGains(self):
    """Returns the per-motor derivative (velocity) gains."""
    # 1.0 for abduction joints, 2.0 for upper/lower joints, per leg.
    return np.array([1., 2., 2., 1., 2., 2., 1., 2., 2., 1., 2., 2.])
  def compute_jacobian(self, robot, link_id):
    """Computes the Jacobian matrix for the given link.

    Args:
      robot: A robot instance.
      link_id: The link id as returned from loadURDF.

    Returns:
      The 3 x N transposed Jacobian matrix. where N is the total DoFs of the
      robot. For a quadruped, the first 6 columns of the matrix corresponds to
      the CoM translation and rotation. The columns corresponds to a leg can be
      extracted with indices [6 + leg_id * 3: 6 + leg_id * 3 + 3].
    """
    all_joint_angles = [state[0] for state in robot._joint_states]
    zero_vec = [0] * len(all_joint_angles)
    # Only the translational part (jv) of the Jacobian is used.
    jv, _ = self.pybullet_client.calculateJacobian(robot.quadruped, link_id,
                                                   (0, 0, 0), all_joint_angles,
                                                   zero_vec, zero_vec)
    jacobian = np.array(jv)
    assert jacobian.shape[0] == 3
    return jacobian
  def ComputeJacobian(self, leg_id):
    """Compute the Jacobian for a given leg."""
    # Does not work for Minitaur which has the four bar mechanism for now.
    assert len(self._foot_link_ids) == self.num_legs
    return self.compute_jacobian(
        robot=self,
        link_id=self._foot_link_ids[leg_id],
    )
  def MapContactForceToJointTorques(self, leg_id, contact_force):
    """Maps the foot contact force to the leg joint torques."""
    jv = self.ComputeJacobian(leg_id)
    # tau = J^T * F. The first com_dof columns belong to the floating base,
    # hence the offset when indexing the per-joint torques below.
    all_motor_torques = np.matmul(contact_force, jv)
    motor_torques = {}
    motors_per_leg = self.num_motors // self.num_legs
    com_dof = 6
    for joint_id in range(leg_id * motors_per_leg,
                          (leg_id + 1) * motors_per_leg):
      motor_torques[joint_id] = all_motor_torques[
          com_dof + joint_id] * self._motor_direction[joint_id]
    return motor_torques
  def GetBaseRollPitchYaw(self):
    """Get minitaur's base orientation in euler angle in the world frame.

    Returns:
      A tuple (roll, pitch, yaw) of the base in world frame.
    """
    orientation = self.GetTrueBaseOrientation()
    roll_pitch_yaw = self.pybullet_client.getEulerFromQuaternion(orientation)
    return np.asarray(roll_pitch_yaw)
  def joint_angles_from_link_position(
      self,
      robot,
      link_position,
      link_id,
      joint_ids,
      position_in_world_frame,
      base_translation = (0, 0, 0),
      base_rotation = (0, 0, 0, 1)):
    """Uses Inverse Kinematics to calculate joint angles.

    Args:
      robot: A robot instance.
      link_position: The (x, y, z) of the link in the body or the world frame,
        depending on whether the argument position_in_world_frame is true.
      link_id: The link id as returned from loadURDF.
      joint_ids: The positional index of the joints. This can be different from
        the joint unique ids.
      position_in_world_frame: Whether the input link_position is specified
        in the world frame or the robot's base frame.
      base_translation: Additional base translation.
      base_rotation: Additional base rotation.

    Returns:
      A list of joint angles.
    """
    if not position_in_world_frame:
      # Projects to local frame.
      base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)#robot.GetBasePosition(), robot.GetBaseOrientation()
      base_position, base_orientation = robot.pybullet_client.multiplyTransforms(
          base_position, base_orientation, base_translation, base_rotation)
      # Projects to world space.
      world_link_pos, _ = robot.pybullet_client.multiplyTransforms(
          base_position, base_orientation, link_position, _IDENTITY_ORIENTATION)
    else:
      world_link_pos = link_position
    ik_solver = 0
    all_joint_angles = robot.pybullet_client.calculateInverseKinematics(
        robot.quadruped, link_id, world_link_pos, solver=ik_solver)
    # Extract the relevant joint angles.
    joint_angles = [all_joint_angles[i] for i in joint_ids]
    return joint_angles
  def ComputeMotorAnglesFromFootLocalPosition(self, leg_id,
                                              foot_local_position):
    """Use IK to compute the motor angles, given the foot link's local position.

    Args:
      leg_id: The leg index.
      foot_local_position: The foot link's position in the base frame.

    Returns:
      A tuple. The position indices and the angles for all joints along the
      leg. The position indices is consistent with the joint orders as returned
      by GetMotorAngles API.
    """
    return self._EndEffectorIK(
        leg_id, foot_local_position, position_in_world_frame=False)
  def _EndEffectorIK(self, leg_id, position, position_in_world_frame):
    """Calculate the joint positions from the end effector position."""
    assert len(self._foot_link_ids) == self.num_legs
    toe_id = self._foot_link_ids[leg_id]
    motors_per_leg = self.num_motors // self.num_legs
    joint_position_idxs = [
        i for i in range(leg_id * motors_per_leg, leg_id * motors_per_leg +
                         motors_per_leg)
    ]
    joint_angles = self.joint_angles_from_link_position(
        robot=self,
        link_position=position,
        link_id=toe_id,
        joint_ids=joint_position_idxs,
        position_in_world_frame=position_in_world_frame)
    # Joint offset is necessary for Laikago.
    joint_angles = np.multiply(
        np.asarray(joint_angles) -
        np.asarray(self._motor_offset)[joint_position_idxs],
        self._motor_direction[joint_position_idxs])
    # Return the joint index (the same as when calling GetMotorAngles) as well
    # as the angles.
    return joint_position_idxs, joint_angles.tolist()
  def GetTimeSinceReset(self):
    """Returns elapsed simulated time: physics steps taken * time_step."""
    return self._step_counter * time_step
  def GetHipPositionsInBaseFrame(self):
    # Nominal hip positions are hard-coded rather than read from the URDF.
    return _DEFAULT_HIP_POSITIONS
  def GetBaseVelocity(self):
    """Get the linear velocity of minitaur's base.

    Returns:
      The velocity of minitaur's base.
    """
    velocity, _ = self.pybullet_client.getBaseVelocity(self.quadruped)
    return velocity
  def GetTrueBaseOrientation(self):
    """Returns the base orientation quaternion (x, y, z, w) in world frame."""
    pos,orn = self.pybullet_client.getBasePositionAndOrientation(
        self.quadruped)
    return orn
  def TransformAngularVelocityToLocalFrame(self, angular_velocity, orientation):
    """Transform the angular velocity from world frame to robot's frame.

    Args:
      angular_velocity: Angular velocity of the robot in world frame.
      orientation: Orientation of the robot represented as a quaternion.

    Returns:
      angular velocity of based on the given orientation.
    """
    # Treat angular velocity as a position vector, then transform based on the
    # orientation given by dividing (or multiplying with inverse).
    # Get inverse quaternion assuming the vector is at 0,0,0 origin.
    _, orientation_inversed = self.pybullet_client.invertTransform([0, 0, 0],
                                                                   orientation)
    # Transform the angular_velocity at neutral orientation using a neutral
    # translation and reverse of the given orientation.
    relative_velocity, _ = self.pybullet_client.multiplyTransforms(
        [0, 0, 0], orientation_inversed, angular_velocity,
        self.pybullet_client.getQuaternionFromEuler([0, 0, 0]))
    return np.asarray(relative_velocity)
  def GetBaseRollPitchYawRate(self):
    """Get the rate of orientation change of the minitaur's base in euler angle.

    Returns:
      rate of (roll, pitch, yaw) change of the minitaur's base.
    """
    angular_velocity = self.pybullet_client.getBaseVelocity(self.quadruped)[1]
    orientation = self.GetTrueBaseOrientation()
    return self.TransformAngularVelocityToLocalFrame(angular_velocity,
                                                     orientation)
  def GetFootContacts(self):
    """Returns a 4-element bool list: toe link i touches a non-self body."""
    all_contacts = self.pybullet_client.getContactPoints(bodyA=self.quadruped)
    contacts = [False, False, False, False]
    for contact in all_contacts:
      # Ignore self contacts
      if contact[_BODY_B_FIELD_NUMBER] == self.quadruped:
        continue
      try:
        toe_link_index = self._foot_link_ids.index(
            contact[_LINK_A_FIELD_NUMBER])
        contacts[toe_link_index] = True
      except ValueError:
        # Contact on a non-toe link; not relevant here.
        continue
    return contacts
  def GetTrueMotorAngles(self):
    """Gets the eight motor angles at the current moment, mapped to [-pi, pi].

    Returns:
      Motor angles, mapped to [-pi, pi].
    """
    self.ReceiveObservation()
    motor_angles = [state[0] for state in self._joint_states]
    motor_angles = np.multiply(
        np.asarray(motor_angles) - np.asarray(self._motor_offset),
        self._motor_direction)
    return motor_angles
  def GetPDObservation(self):
    """Returns (angles, velocities) arrays for the PD/motor model."""
    self.ReceiveObservation()
    observation = []
    observation.extend(self.GetTrueMotorAngles())
    observation.extend(self.GetTrueMotorVelocities())
    q = observation[0:self.num_motors]
    qdot = observation[self.num_motors:2 * self.num_motors]
    return (np.array(q), np.array(qdot))
  def GetTrueMotorVelocities(self):
    """Get the velocity of all eight motors.

    Returns:
      Velocities of all eight motors.
    """
    motor_velocities = [state[1] for state in self._joint_states]
    motor_velocities = np.multiply(motor_velocities, self._motor_direction)
    return motor_velocities
  def GetTrueObservation(self):
    """Concatenates angles, velocities, torques and base state.

    NOTE(review): this calls GetTrueMotorTorques() and
    GetTrueBaseRollPitchYawRate(), neither of which is defined on this class
    (only GetBaseRollPitchYawRate exists) — calling this method would raise
    AttributeError. Confirm the intended method names.
    """
    self.ReceiveObservation()
    observation = []
    observation.extend(self.GetTrueMotorAngles())
    observation.extend(self.GetTrueMotorVelocities())
    observation.extend(self.GetTrueMotorTorques())
    observation.extend(self.GetTrueBaseOrientation())
    observation.extend(self.GetTrueBaseRollPitchYawRate())
    return observation
  def ApplyAction(self, motor_commands, motor_control_mode):
    """Apply the motor commands using the motor model.

    Args:
      motor_commands: np.array. Can be motor angles, torques, hybrid commands
      motor_control_mode: A MotorControlMode enum.
    """
    motor_commands = np.asarray(motor_commands)
    q, qdot = self.GetPDObservation()
    qdot_true = self.GetTrueMotorVelocities()
    actual_torque, observed_torque = self._motor_model.convert_to_torque(
        motor_commands, q, qdot, qdot_true, motor_control_mode)
    # The torque is already in the observation space because we use
    # GetMotorAngles and GetMotorVelocities.
    self._observed_motor_torques = observed_torque
    # Transform into the motor space when applying the torque.
    self._applied_motor_torque = np.multiply(actual_torque,
                                             self._motor_direction)
    motor_ids = []
    motor_torques = []
    # Disabled motors still receive a command, but with zero torque.
    for motor_id, motor_torque, motor_enabled in zip(self._motor_id_list,
                                                     self._applied_motor_torque,
                                                     self._motor_enabled_list):
      if motor_enabled:
        motor_ids.append(motor_id)
        motor_torques.append(motor_torque)
      else:
        motor_ids.append(motor_id)
        motor_torques.append(0)
    self._SetMotorTorqueByIds(motor_ids, motor_torques)
  def _SetMotorTorqueByIds(self, motor_ids, torques):
    """Applies raw torques to the given pybullet joint ids."""
    self.pybullet_client.setJointMotorControlArray(
        bodyIndex=self.quadruped,
        jointIndices=motor_ids,
        controlMode=self.pybullet_client.TORQUE_CONTROL,
        forces=torques)
  def ReceiveObservation(self):
    """Caches the latest joint states from pybullet into _joint_states."""
    self._joint_states = self.pybullet_client.getJointStates(self.quadruped, self._motor_id_list)
  def _StepInternal(self, action, motor_control_mode):
    """Applies one action and advances the physics by a single step."""
    self.ApplyAction(action, motor_control_mode)
    self.pybullet_client.stepSimulation()
    self.ReceiveObservation()
    self._state_action_counter += 1
  def Step(self, action):
    """Steps simulation."""
    #if self._enable_action_filter:
    #  action = self._FilterAction(action)
    # The same hybrid command is applied for ACTION_REPEAT physics steps.
    for i in range(ACTION_REPEAT):
      #proc_action = self.ProcessAction(action, i)
      proc_action = action
      self._StepInternal(proc_action, motor_control_mode=MOTOR_CONTROL_HYBRID)
      self._step_counter += 1
  def _BuildJointNameToIdDict(self):
    """Builds the {joint name: pybullet joint id} lookup table."""
    num_joints = self.pybullet_client.getNumJoints(self.quadruped)
    self._joint_name_to_id = {}
    for i in range(num_joints):
      joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
      self._joint_name_to_id[joint_info[1].decode("UTF-8")] = joint_info[0]
  def _BuildUrdfIds(self):
    """Build the link Ids from its name in the URDF file.

    Raises:
      ValueError: Unknown category of the joint name.
    """
    num_joints = self.pybullet_client.getNumJoints(self.quadruped)
    # -1 is the base link in pybullet conventions.
    self._hip_link_ids = [-1]
    self._leg_link_ids = []
    self._motor_link_ids = []
    self._lower_link_ids = []
    self._foot_link_ids = []
    self._imu_link_ids = []
    for i in range(num_joints):
      joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
      joint_name = joint_info[1].decode("UTF-8")
      joint_id = self._joint_name_to_id[joint_name]
      if HIP_NAME_PATTERN.match(joint_name):
        self._hip_link_ids.append(joint_id)
      elif UPPER_NAME_PATTERN.match(joint_name):
        self._motor_link_ids.append(joint_id)
      # We either treat the lower leg or the toe as the foot link, depending on
      # the urdf version used.
      elif LOWER_NAME_PATTERN.match(joint_name):
        self._lower_link_ids.append(joint_id)
      elif TOE_NAME_PATTERN.match(joint_name):
        #assert self._urdf_filename == URDF_WITH_TOES
        self._foot_link_ids.append(joint_id)
      elif IMU_NAME_PATTERN.match(joint_name):
        self._imu_link_ids.append(joint_id)
      else:
        raise ValueError("Unknown category of joint %s" % joint_name)
    self._leg_link_ids.extend(self._lower_link_ids)
    self._leg_link_ids.extend(self._foot_link_ids)
    #assert len(self._foot_link_ids) == NUM_LEGS
    self._hip_link_ids.sort()
    self._motor_link_ids.sort()
    self._lower_link_ids.sort()
    self._foot_link_ids.sort()
    self._leg_link_ids.sort()
    return
  def link_position_in_base_frame( self, link_id ):
    """Computes the link's local position in the robot frame.

    Args:
      robot: A robot instance.
      link_id: The link to calculate its relative position.

    Returns:
      The relative position of the link.
    """
    base_position, base_orientation = self.pybullet_client.getBasePositionAndOrientation(self.quadruped)
    inverse_translation, inverse_rotation = self.pybullet_client.invertTransform(
        base_position, base_orientation)
    link_state = self.pybullet_client.getLinkState(self.quadruped, link_id)
    link_position = link_state[0]
    link_local_position, _ = self.pybullet_client.multiplyTransforms(
        inverse_translation, inverse_rotation, link_position, (0, 0, 0, 1))
    return np.array(link_local_position)
  def GetFootLinkIDs(self):
    """Get list of IDs for all foot links."""
    return self._foot_link_ids
  def GetFootPositionsInBaseFrame(self):
    """Get the robot's foot position in the base frame."""
    assert len(self._foot_link_ids) == self.num_legs
    foot_positions = []
    for foot_id in self.GetFootLinkIDs():
      foot_positions.append(
          self.link_position_in_base_frame(link_id=foot_id)
      )
    return np.array(foot_positions)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/static_gait_controller.py | mpc_controller/static_gait_controller.py | # Lint as: python3
"""A static gait controller for a quadruped robot. Experimental code."""
import os
import inspect
# Make the package importable when this file is run directly: prepend the
# repository root (the parent of mpc_controller/) to sys.path.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import numpy as np
from mpc_controller import foot_stepper
# Reference toe positions in the base frame, one row (x, y, z) per leg.
# NOTE(review): row order appears to be front-right, front-left, rear-right,
# rear-left (x forward, y left) — confirm against FootStepper's conventions.
toe_pos_local_ref = np.array([[0.1478, -0.11459, -0.45576],
                              [0.1478, 0.11688, -0.45576],
                              [-0.2895, -0.11459, -0.45576],
                              [-0.2895, 0.11688, -0.45576]])
class StaticGaitController(object):
  """A static gait controller for a quadruped robot."""

  def __init__(self, robot):
    """Wraps the robot and creates the FootStepper state machine.

    Args:
      robot: A robot instance exposing urdf_loader, pybullet_client,
        robot_id, GetTimeSinceReset and motor_angles_from_foot_positions.
    """
    self._robot = robot
    self._toe_ids = tuple(robot.urdf_loader.get_end_effector_id_dict().values())
    # Counts consecutive stable act() calls between steps.
    self._wait_count = 0
    self._stepper = foot_stepper.FootStepper(self._robot.pybullet_client,
                                             self._toe_ids, toe_pos_local_ref)
  def act(self, observation):
    """Computes actions based on observations."""
    del observation
    p = self._robot.pybullet_client
    quadruped = self._robot.robot_id
    step_input = foot_stepper.StepInput()
    ls = p.getLinkStates(
        quadruped, self._toe_ids, computeForwardKinematics=True)
    toe_pos_world = np.array([ls[0][0], ls[1][0], ls[2][0], ls[3][0]])
    base_com_pos, base_com_orn = p.getBasePositionAndOrientation(quadruped)
    new_pos_world = np.array([0, 0, 0])
    # Only advance the gait while the CoM is stable and no swing is active.
    if self._stepper.is_com_stable() and not self._stepper.move_swing_foot:
      self._wait_count += 1
      # NOTE(review): the wait thresholds (20, 50) are counted in act() calls,
      # not seconds — confirm the intended control rate.
      if self._wait_count == 20:
        self._stepper.next_foot()
      if self._wait_count > 50:
        self._wait_count = 0
        step_dist = 0.15
        print("time {}, make a step of {}".format(
            self._robot.GetTimeSinceReset(), step_dist))
        # Move the swing foot forward by step_dist in the base frame, then
        # transform the target into world coordinates.
        new_pos_local = self._stepper.get_reference_pos_swing_foot()
        new_pos_local[0] += step_dist
        new_pos_world, _ = p.multiplyTransforms(base_com_pos, base_com_orn,
                                                new_pos_local, [0, 0, 0, 1])
        self._stepper.swing_foot()
    step_input.new_pos_world = new_pos_world
    step_input.base_com_pos = base_com_pos
    step_input.base_com_orn = base_com_orn
    step_input.toe_pos_world = toe_pos_world
    step_input.dt = 1.0 / 250
    step_output = self._stepper.update(step_input)
    # Finds joint poses to achieve toePosWorld
    desired_joint_angles = self._robot.motor_angles_from_foot_positions(
        foot_positions=step_output.new_toe_pos_world,
        position_in_world_frame=True)[1]
    return desired_joint_angles
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/locomotion_controller.py | mpc_controller/locomotion_controller.py | """A model based controller framework."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
# Make the package importable when this file is run directly: prepend the
# grandparent directory of this file to sys.path.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import time
from typing import Any, Callable
class LocomotionController(object):
  """Coordinates the sub-controllers that generate quadruped locomotion.

  The resulting behavior is determined entirely by the composition of the
  individual subcomponents passed in at construction time.
  """

  def __init__(
      self,
      robot: Any,
      gait_generator,
      state_estimator,
      swing_leg_controller,
      stance_leg_controller,
      clock,
  ):
    """Initializes the class.

    Args:
      robot: A robot instance.
      gait_generator: Generates the leg swing/stance pattern.
      state_estimator: Estimates the state of the robot (e.g. center of mass
        position or velocity that may not be observable from sensors).
      swing_leg_controller: Generates motor actions for swing legs.
      stance_leg_controller: Generates motor actions for stance legs.
      clock: A real or fake clock source.
    """
    self._robot = robot
    self._clock = clock
    self._reset_time = self._clock()
    self._time_since_reset = 0
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self._swing_leg_controller = swing_leg_controller
    self._stance_leg_controller = stance_leg_controller

  @property
  def swing_leg_controller(self):
    return self._swing_leg_controller

  @property
  def stance_leg_controller(self):
    return self._stance_leg_controller

  @property
  def gait_generator(self):
    return self._gait_generator

  @property
  def state_estimator(self):
    return self._state_estimator

  def _sub_components(self):
    # Order matters: gait pattern first, then state estimate, then the two
    # leg controllers that consume both.
    yield self._gait_generator
    yield self._state_estimator
    yield self._swing_leg_controller
    yield self._stance_leg_controller

  def reset(self):
    """Rewinds the internal clock and resets every subcomponent."""
    self._reset_time = self._clock()
    self._time_since_reset = 0
    for component in self._sub_components():
      component.reset(self._time_since_reset)

  def update(self):
    """Refreshes every subcomponent with the current time since reset."""
    self._time_since_reset = self._clock() - self._reset_time
    for component in self._sub_components():
      component.update(self._time_since_reset)

  def get_action(self):
    """Returns the control ouputs (e.g. positions/torques) for all motors."""
    swing_action = self._swing_leg_controller.get_action()
    stance_action, qp_sol = self._stance_leg_controller.get_action()
    commands = []
    for joint_id in range(self._robot.num_motors):
      per_joint = swing_action.get(joint_id)
      if per_joint is None:
        # Every motor must be owned by exactly one of the two controllers.
        assert joint_id in stance_action
        per_joint = stance_action[joint_id]
      commands.extend(per_joint)
    return np.array(commands, dtype=np.float32), dict(qp_sol=qp_sol)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/model_predictive_control.py | mpc_controller/model_predictive_control.py | # Lint as: python3
"""Classic model predictive control methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import math
from typing import Sequence
import numpy as np
# Clamp on the measured roll/pitch (radians) fed into the PD torque law.
_MAX_ABS_RPY = 0.3
# Clamp on the measured angular velocity (radians/second).
_MAX_ABS_ANGULAR_VELOCITY = math.pi
# The center of mass torque is computed using a simple PD control: tau = -KP *
# delta_rotation - KD * delta_angular_velocity
_TORQUE_KP = 2000
_TORQUE_KD = 150
# For center of mass force, we only need to track position in the z direction
# (i.e. maintain the body height), and speed in x-y plane.
_FORCE_KP = 500
_FORCE_KD = 200
def compute_contact_force_projection_matrix(
    foot_positions_in_com_frame: Sequence[Sequence[float]],
    stance_foot_ids: Sequence[int],
) -> np.ndarray:
  r"""Computes the 6 x 3n matrix to map contact force to com dynamics.

  Each stance foot contributes one 6 x 3 column block [I; [r]_x], where I is
  the 3x3 identity (force -> linear term) and [r]_x is the skew-symmetric
  cross-product matrix of the foot position r (force -> torque term). This is
  the vectorized rhs of the com dynamics equation:

    ma = \sum f
    I\omega_dot = \sum r \cross f

  where the summation is over all feet in contact with ground.

  Caveat: the com rotation is not taken into account; ideally the foot
  positions would be rotated into a world frame centered at the com using
  (roll, pitch, 0), since absolute yaw drifts. TODO(b/143378213): Fix this.

  Args:
    foot_positions_in_com_frame: the local position of each foot.
    stance_foot_ids: The stance foot to be used to assemble the matrix.

  Returns:
    The contact force projection matrix.
  """
  column_blocks = []
  for foot_id in stance_foot_ids:
    x, y, z = foot_positions_in_com_frame[foot_id][:3]
    cross_matrix = np.array(((0.0, -z, y), (z, 0.0, -x), (-y, x, 0.0)))
    column_blocks.append(np.vstack((np.identity(3), cross_matrix)))
  return np.hstack(column_blocks)
def plan_foot_contact_force(
    mass: float,
    inertia: np.ndarray,
    com_position: np.ndarray,
    com_velocity: np.ndarray,
    com_roll_pitch_yaw: np.ndarray,
    com_angular_velocity: np.ndarray,
    foot_positions_in_com_frame: Sequence[Sequence[float]],
    foot_contact_state: Sequence[bool],
    desired_com_position: np.ndarray,
    desired_com_velocity: np.ndarray,
    desired_com_roll_pitch_yaw: np.ndarray,
    desired_com_angular_velocity: np.ndarray,
):
  """Plans stance-foot contact forces from the robot state via PD control.

  TODO(b/143382305): Wrap this interface in a MPC class so we can use other
  planning algorithms.

  Args:
    mass: The total mass of the robot.
    inertia: The diagonal elements [Ixx, Iyy, Izz] of the robot (unused).
    com_position: Center of mass position in world frame (unused; hard to
      measure accurately without motion capture).
    com_velocity: Center of mass velocity in world frame.
    com_roll_pitch_yaw: CoM rotation wrt world frame in Euler angles. Note:
      the roll/pitch entries are clipped in place.
    com_angular_velocity: The angular velocity (roll_dot, pitch_dot, yaw_dot).
    foot_positions_in_com_frame: Positions of all feet in the body frame.
    foot_contact_state: Whether each foot is in contact with the ground.
    desired_com_position: Only the body height (z) is tracked.
    desired_com_velocity: In world frame.
    desired_com_roll_pitch_yaw: Target roll/pitch; yaw measurement can be
      unreliable so only its rate is tracked.
    desired_com_angular_velocity: Roll/pitch rates are usually zero; the yaw
      rate is the robot's turning speed.

  Returns:
    Dict mapping each stance foot id to its desired 3-D contact force.
  """
  # This PD formulation uses neither the inertia nor the absolute position.
  del inertia
  del com_position
  # Collect stance feet; the mean (absolute) stance-foot depth doubles as
  # the body-height estimate used to scale the torque gains.
  stance_foot_ids = [
      foot_id for foot_id, in_contact in enumerate(foot_contact_state)
      if in_contact
  ]
  stance_heights = [
      foot_positions_in_com_frame[foot_id][2] for foot_id in stance_foot_ids
  ]
  avg_body_height = abs(sum(stance_heights) / len(stance_heights))

  rpy = com_roll_pitch_yaw
  rpy[:2] = np.clip(rpy[:2], -_MAX_ABS_RPY, _MAX_ABS_RPY)
  rpy_rate = np.clip(com_angular_velocity, -_MAX_ABS_ANGULAR_VELOCITY,
                     _MAX_ABS_ANGULAR_VELOCITY)
  # PD torque on the body orientation, scaled by the body height.
  com_torque = -avg_body_height * (
      _TORQUE_KP * (rpy - desired_com_roll_pitch_yaw) + _TORQUE_KD * rpy_rate)
  # Absolute yaw drifts, so stabilize the yaw *rate* instead.
  com_torque[2] = -avg_body_height * _TORQUE_KD * (
      rpy_rate[2] - desired_com_angular_velocity[2])
  # Damping on the CoM velocity error...
  com_force = -_FORCE_KD * (com_velocity - desired_com_velocity)
  # ...plus gravity compensation and body-height tracking along z.
  com_force[2] += mass * 9.8 - _FORCE_KP * (
      avg_body_height - desired_com_position[2])
  wrench = np.concatenate((com_force, com_torque)).transpose()

  # Invert the force-to-wrench map (least squares via pseudo-inverse) to
  # distribute the wrench over the stance feet.
  projection = compute_contact_force_projection_matrix(
      foot_positions_in_com_frame, stance_foot_ids)
  stacked_forces = -np.matmul(np.linalg.pinv(projection), wrench).transpose()
  return {
      foot_id: stacked_forces[3 * i:3 * i + 3]
      for i, foot_id in enumerate(stance_foot_ids)
  }
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/__init__.py | mpc_controller/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/torque_stance_leg_controller_quadprog.py | mpc_controller/torque_stance_leg_controller_quadprog.py | # Lint as: python3
"""A torque based stance controller framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Sequence, Tuple
import numpy as np
# import time
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import leg_controller
from mpc_controller import qp_torque_optimizer
# Each foot contact force is a 3-vector (fx, fy, fz).
_FORCE_DIMENSION = 3
# PD gains over the 6-DoF CoM state ordered (x, y, z, roll, pitch, yaw);
# get_action() stacks position before roll-pitch-yaw in this order.
KP = np.array((0., 0., 100., 100., 100., 0.))
KD = np.array((40., 30., 10., 10., 10., 30.))
# Clamp on the desired CoM acceleration produced by the PD law.
MAX_DDQ = np.array((10., 10., 10., 20., 20., 20.))
MIN_DDQ = -MAX_DDQ
class TorqueStanceLegController(leg_controller.LegController):
  """A torque based stance leg controller framework.

  Takes in high level parameters like walking speed and turning speed, and
  generates the necessary torques for the stance legs.
  """

  def __init__(
      self,
      robot: Any,
      gait_generator: Any,
      state_estimator: Any,
      desired_speed: Tuple[float, float] = (0, 0),
      desired_twisting_speed: float = 0,
      desired_body_height: float = 0.45,
      num_legs: int = 4,
      friction_coeffs: Sequence[float] = (0.45, 0.45, 0.45, 0.45),
  ):
    """Initializes the class.

    Tracks the desired position/velocity of the robot by computing proper joint
    torques using a QP-based force optimizer.

    Args:
      robot: A robot instance.
      gait_generator: Used to query the locomotion phase and leg states.
      state_estimator: Estimate the robot states (e.g. CoM velocity).
      desired_speed: desired CoM speed in x-y plane.
      desired_twisting_speed: desired CoM rotating speed in z direction.
      desired_body_height: The standing height of the robot.
      num_legs: The number of legs used for force planning.
      friction_coeffs: The friction coeffs on the contact surfaces.
    """
    self._robot = robot
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self.desired_speed = desired_speed
    self.desired_twisting_speed = desired_twisting_speed
    self._desired_body_height = desired_body_height
    self._num_legs = num_legs
    self._friction_coeffs = np.array(friction_coeffs)

  def reset(self, current_time):
    """No internal state to reset; required by the LegController interface."""
    del current_time

  def update(self, current_time):
    """No per-step bookkeeping needed; required by the LegController interface."""
    del current_time

  def _estimate_robot_height(self, contacts):
    """Estimates body height as the mean depth of the feet in contact.

    Args:
      contacts: Per-leg 0/1 flags marking which feet touch the ground.

    Returns:
      The estimated body height; falls back to the desired height when no
      foot is in contact.
    """
    if np.sum(contacts) == 0:
      # All foot in air, no way to estimate
      return self._desired_body_height
    else:
      base_orientation = self._robot.GetBaseOrientation()
      rot_mat = self._robot.pybullet_client.getMatrixFromQuaternion(
          base_orientation)
      rot_mat = np.array(rot_mat).reshape((3, 3))
      foot_positions = self._robot.GetFootPositionsInBaseFrame()
      # Rotate foot positions into a world-aligned frame so -z measures
      # each foot's depth below the base.
      foot_positions_world_frame = (rot_mat.dot(foot_positions.T)).T
      # pylint: disable=unsubscriptable-object
      useful_heights = contacts * (-foot_positions_world_frame[:, 2])
      return np.sum(useful_heights) / np.sum(contacts)

  def get_action(self):
    """Computes the torque for stance legs.

    Returns:
      A tuple (action, contact_forces) where action maps joint id to a
      (kp, kd, target_pos, target_vel, torque) tuple, and contact_forces is
      the per-leg force array returned by the QP optimizer.
    """
    # Actual q and dq
    contacts = np.array(
        [(leg_state in (gait_generator_lib.LegState.STANCE,
                        gait_generator_lib.LegState.EARLY_CONTACT))
         for leg_state in self._gait_generator.desired_leg_state],
        dtype=np.int32)

    robot_com_position = np.array(
        (0., 0., self._estimate_robot_height(contacts)))
    robot_com_velocity = self._state_estimator.com_velocity_body_frame
    robot_com_roll_pitch_yaw = np.array(self._robot.GetBaseRollPitchYaw())
    robot_com_roll_pitch_yaw[2] = 0  # To prevent yaw drifting
    robot_com_roll_pitch_yaw_rate = self._robot.GetBaseRollPitchYawRate()
    robot_q = np.hstack((robot_com_position, robot_com_roll_pitch_yaw))
    robot_dq = np.hstack((robot_com_velocity, robot_com_roll_pitch_yaw_rate))

    # Desired q and dq
    desired_com_position = np.array((0., 0., self._desired_body_height),
                                    dtype=np.float64)
    desired_com_velocity = np.array(
        (self.desired_speed[0], self.desired_speed[1], 0.), dtype=np.float64)
    desired_com_roll_pitch_yaw = np.array((0., 0., 0.), dtype=np.float64)
    desired_com_angular_velocity = np.array(
        (0., 0., self.desired_twisting_speed), dtype=np.float64)
    desired_q = np.hstack((desired_com_position, desired_com_roll_pitch_yaw))
    desired_dq = np.hstack(
        (desired_com_velocity, desired_com_angular_velocity))
    # Desired ddq: PD law on the 6-DoF CoM state, clamped for safety.
    desired_ddq = KP * (desired_q - robot_q) + KD * (desired_dq - robot_dq)
    desired_ddq = np.clip(desired_ddq, MIN_DDQ, MAX_DDQ)
    # Solve a QP for the stance-foot contact forces realizing desired_ddq.
    contact_forces = qp_torque_optimizer.compute_contact_force(
        self._robot, desired_ddq, contacts=contacts)

    action = {}
    for leg_id, force in enumerate(contact_forces):
      # While "Lose Contact" is useful in simulation, in real environment it's
      # susceptible to sensor noise. Disabling for now.
      # if self._gait_generator.leg_state[
      #     leg_id] == gait_generator_lib.LegState.LOSE_CONTACT:
      #   force = (0, 0, 0)
      motor_torques = self._robot.MapContactForceToJointTorques(leg_id, force)
      for joint_id, torque in motor_torques.items():
        action[joint_id] = (0, 0, 0, 0, torque)
    return action, contact_forces
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/foot_stepper.py | mpc_controller/foot_stepper.py | # Lint as: python3
"""A state machine that steps each foot for a static gait. Experimental code."""
import copy
import math
import numpy as np
class StepInput(object):
  """Per-update inputs to FootStepper: base pose and world toe positions.

  NOTE(review): FootStepper.update() also reads a `dt` attribute from this
  object; callers must set it before calling update().
  """

  def __init__(self):
    # Base center-of-mass position in world coordinates.
    self.base_com_pos = np.array([0, 0, 0])
    # Base orientation quaternion (x, y, z, w).
    self.base_com_orn = np.array([0, 0, 0, 1])
    # World-frame toe positions, one entry per foot (indexed by update()).
    self.toe_pos_world = np.array([0, 0, 0] * 4)
    # World-frame target position for the current swing foot.
    self.new_pos_world = np.array([0, 0, 0])
class StepOutput(object):
  """Result of FootStepper.update(): new world-frame toe targets per foot."""

  def __init__(self, new_toe_pos_world):
    # List of world-frame toe positions, one per foot.
    self.new_toe_pos_world = new_toe_pos_world
class FootStepper(object):
  """This class computes desired foot placement for a quadruped robot.

  Implements a simple static-gait state machine: the CoM is shifted over the
  support triangle of the three non-swing feet, small yaw corrections are
  applied, and the swing foot is interpolated toward its target with a
  sinusoidal height profile.
  """

  def __init__(self, bullet_client, toe_ids, toe_pos_local_ref):
    self.bullet_client = bullet_client
    # Swing-phase progress in [0, 1].
    self.state_time = 0.
    self.toe_ids = toe_ids
    # Reference toe positions in the base-local frame; mutated by update().
    self.toe_pos_local_ref = toe_pos_local_ref
    self.sphere_uid = self.bullet_client.loadURDF(
        "sphere_small.urdf", [0, 0, 0], useFixedBase=True)
    self.is_far = True
    # Maximum CoM shift applied per update step.
    self.max_shift = 0.0008
    # Hysteresis bounds on the CoM-to-centroid distance (far -> close).
    self.far_bound = 0.005
    self.close_bound = 0.03
    self.move_swing_foot = False
    # Apex height of the sinusoidal swing-foot trajectory.
    self.amp = 0.2
    alpha = 1
    # Loads/draws spheres for debugging purpose. The spheres visualize the
    # target COM, the current COM and the target foothold location.
    self.sphere_uid_centroid = self.bullet_client.loadURDF(
        "sphere_small.urdf", [0, 0, 0], useFixedBase=True)
    self.bullet_client.changeVisualShape(
        self.sphere_uid_centroid, -1, rgbaColor=[1, 1, 0, alpha])
    # Disable collision since visualization spheres should not collide with the
    # robot.
    self.bullet_client.setCollisionFilterGroupMask(self.sphere_uid_centroid, -1,
                                                   0, 0)
    self.sphere_uid_com = self.bullet_client.loadURDF(
        "sphere_small.urdf", [0, 0, 0], useFixedBase=True)
    self.bullet_client.changeVisualShape(
        self.sphere_uid_com, -1, rgbaColor=[1, 0, 1, alpha])
    self.bullet_client.setCollisionFilterGroupMask(self.sphere_uid_com, -1, 0,
                                                   0)
    self.bullet_client.setCollisionFilterGroupMask(self.sphere_uid, -1, 0, 0)
    # Fixed stepping order of the feet.
    self.feetindices = [1, 3, 0, 2]
    self.swing_foot_index1 = 0
    self.swing_foot_index = self.feetindices[self.swing_foot_index1]
    # Debug-marker colors, one per foot index.
    self.colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]]
    # For each swing foot, the indices of the three support feet.
    self.support_vertices = [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2]]
    self.local_diff_y_threshold = 0.05
    self.local_diff_y = 100
    self.is_far = True
    self.get_reference_pos_swing_foot()

  def next_foot(self):
    """Advances to the next swing foot in the fixed stepping order."""
    self.swing_foot_index1 = (self.swing_foot_index1 + 1) % 4
    self.swing_foot_index = self.feetindices[self.swing_foot_index1]

  def swing_foot(self):
    """Starts the swing-phase interpolation for the current swing foot."""
    self.move_swing_foot = True

  def get_reference_pos_swing_foot(self):
    """Resets and returns the local-frame target for the current swing foot."""
    self.new_pos_local = np.array(
        self.toe_pos_local_ref[self.swing_foot_index])
    return self.new_pos_local

  def set_reference_pos_swing_foot(self, new_pos_local):
    """Overrides the local-frame target position for the swing foot."""
    self.new_pos_local = new_pos_local

  def is_com_stable(self):
    """True when the CoM is close to the support centroid and yaw error is small."""
    ld2 = self.local_diff_y * self.local_diff_y
    yaw_ok = ld2 < (self.local_diff_y_threshold * self.local_diff_y_threshold)
    com_ok = not self.is_far
    return com_ok and yaw_ok

  def update(self, step_input):
    """Updates the state machine and toe movements per state."""
    base_com_pos = step_input.base_com_pos
    base_com_orn = step_input.base_com_orn
    base_com_pos_inv, base_com_orn_inv = self.bullet_client.invertTransform(
        base_com_pos, base_com_orn)
    dt = step_input.dt
    # Move the foothold debug marker and color it by the active swing foot.
    self.bullet_client.resetBasePositionAndOrientation(self.sphere_uid,
                                                       step_input.new_pos_world,
                                                       [0, 0, 0, 1])
    self.bullet_client.changeVisualShape(
        self.sphere_uid, -1, rgbaColor=self.colors[self.swing_foot_index])
    # Express all toe positions in the base-local frame.
    all_toes_pos_locals = []
    for toe_pos_world in step_input.toe_pos_world:
      toe_pos_local, _ = self.bullet_client.multiplyTransforms(
          base_com_pos_inv, base_com_orn_inv, toe_pos_world, [0, 0, 0, 1])
      all_toes_pos_locals.append(toe_pos_local)
    all_toes_pos_locals = np.array(all_toes_pos_locals)
    # Centroid of the support triangle formed by the three non-swing feet.
    centroid_world = np.zeros(3)
    for v in self.support_vertices[self.swing_foot_index]:
      vtx_pos_world = step_input.toe_pos_world[v]
      centroid_world += vtx_pos_world
    centroid_world /= 3.
    sphere_z_offset = 0.05
    # Horizontal offset from the support centroid to the CoM.
    self.diff_world = base_com_pos - centroid_world
    self.diff_world[2] = 0.
    self.bullet_client.resetBasePositionAndOrientation(self.sphere_uid_centroid,
                                                       centroid_world,
                                                       [0, 0, 0, 1])
    self.bullet_client.resetBasePositionAndOrientation(
        self.sphere_uid_com,
        [base_com_pos[0], base_com_pos[1], sphere_z_offset], [0, 0, 0, 1])
    l = np.linalg.norm(self.diff_world)
    # Hysteresis: use a tighter bound while "far" and a looser one once close.
    if self.is_far:
      bound = self.far_bound
    else:
      bound = self.close_bound
    if l > bound:
      # Still far: shift the CoM a bounded amount toward the centroid.
      self.diff_world *= self.max_shift * 0.5 / l
      if not self.is_far:
        self.is_far = True
    else:
      if self.is_far:
        self.is_far = False
    if not self.is_far:
      self.diff_world = np.zeros(3)
    # Apply the CoM shift by moving all local toe references the same amount.
    for i in range(len(self.toe_pos_local_ref)):
      toe = self.toe_pos_local_ref[i]
      toe = [
          toe[0] + self.diff_world[0], toe[1] + self.diff_world[1],
          toe[2] + self.diff_world[2]
      ]
      self.toe_pos_local_ref[i] = toe
    # Yaw misalignment proxy: front-pair y minus rear-pair y of the toes.
    self.local_diff_y = self.toe_pos_local_ref[0][
        1] + self.toe_pos_local_ref[1][1] - self.toe_pos_local_ref[
            2][1] - self.toe_pos_local_ref[3][1]
    # Small fixed-rate yaw correction when the misalignment exceeds threshold.
    self.yaw = 0
    if self.local_diff_y < -self.local_diff_y_threshold:
      self.yaw = 0.001
    if self.local_diff_y > self.local_diff_y_threshold:
      self.yaw = -0.001
    yaw_trans = self.bullet_client.getQuaternionFromEuler([0, 0, self.yaw])
    if not self.is_far:
      for i in range(len(self.toe_pos_local_ref)):
        toe = self.toe_pos_local_ref[i]
        toe, _ = self.bullet_client.multiplyTransforms([0, 0, 0], yaw_trans,
                                                       toe, [0, 0, 0, 1])
        self.toe_pos_local_ref[i] = toe
    new_toe_pos_world = []
    # Moves the swing foot to the target location.
    if self.move_swing_foot:
      if self.state_time <= 1:
        self.state_time += 4 * dt
      if self.state_time >= 1:
        # Swing finished: latch the target as the new reference position.
        self.move_swing_foot = False
        self.state_time = 0
        self.toe_pos_local_ref[self.swing_foot_index] = self.new_pos_local
    toe_pos_local_ref_copy = copy.deepcopy(self.toe_pos_local_ref)
    # Linear interpolation toward the target with a sinusoidal lift in z.
    old_pos = self.toe_pos_local_ref[self.swing_foot_index]
    new_pos = [
        old_pos[0] * (1 - self.state_time) + self.new_pos_local[0] *
        (self.state_time), old_pos[1] * (1 - self.state_time) +
        self.new_pos_local[1] * (self.state_time),
        old_pos[2] * (1 - self.state_time) + self.new_pos_local[2] *
        (self.state_time) + self.amp * math.sin(self.state_time * math.pi)
    ]
    toe_pos_local_ref_copy[self.swing_foot_index] = new_pos
    # Convert all local toe references back to world coordinates.
    for toe_pos_local in toe_pos_local_ref_copy:
      new_toe_pos_world.append(self.bullet_client.multiplyTransforms(
          base_com_pos, base_com_orn, toe_pos_local, [0, 0, 0, 1])[0])
    step_output = StepOutput(new_toe_pos_world)
    return step_output
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/gait_generator.py | mpc_controller/gait_generator.py | """Gait pattern planning module."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import abc
import enum
class LegState(enum.Enum):
  """The state of a leg during locomotion."""
  # Leg is in the air, moving toward its next foothold.
  SWING = 0
  # Leg is on the ground, supporting the body.
  STANCE = 1
  # A swing leg that collides with the ground.
  EARLY_CONTACT = 2
  # A stance leg that loses contact.
  LOSE_CONTACT = 3
class GaitGenerator(object):  # pytype: disable=ignored-metaclass
  """Generates the leg swing/stance pattern for the robot."""
  # NOTE(review): `__metaclass__` is a Python 2 construct and is ignored by
  # Python 3, so the abstract methods below are not actually enforced here
  # (the pytype suppression above acknowledges this).
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def reset(self, current_time):
    """Resets the generator's internal state to `current_time`."""
    pass

  @abc.abstractmethod
  def update(self, current_time):
    """Advances the gait pattern to `current_time`."""
    pass
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/leg_controller.py | mpc_controller/leg_controller.py | """The leg controller class interface."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import abc
from typing import Any
class LegController(object):  # pytype: disable=ignored-metaclass
  """Generates the leg control signal."""
  # NOTE(review): `__metaclass__` is ignored in Python 3, so abstractness is
  # not actually enforced (acknowledged by the pytype suppression above).
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def reset(self, current_time: float):
    """Resets the controller's internal state."""
    pass

  @abc.abstractmethod
  def update(self, current_time: float):
    """Updates the controller's internal state."""
    pass

  @abc.abstractmethod
  def get_action(self) -> Any:
    """Gets the control signal e.g. torques/positions for the leg."""
    pass
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/com_velocity_estimator.py | mpc_controller/com_velocity_estimator.py | """State estimator."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
from typing import Any, Sequence
import collections
_DEFAULT_WINDOW_SIZE = 20
class MovingWindowFilter(object):
  """A numerically stable O(1) moving-average filter.

  Maintains a running window sum using Neumaier's compensated summation so
  that long streams of floating-point samples do not accumulate rounding
  error.
  """

  def __init__(self, window_size: int):
    """Initializes the filter.

    Args:
      window_size: Number of most-recent samples to average over; must be
        positive.
    """
    assert window_size > 0
    self._window_size = window_size
    self._history = collections.deque(maxlen=window_size)
    # Running sum of the samples currently inside the window.
    self._sum = 0
    # Compensation term recovering low-order bits lost during summation.
    self._correction = 0

  def _compensated_add(self, value: float):
    """Adds `value` to the running sum with Neumaier error compensation.

    See
    https://en.wikipedia.org/wiki/Kahan_summation_algorithm#Further_enhancements
    """
    updated = self._sum + value
    if abs(self._sum) >= abs(value):
      # The low-order digits of `value` were lost; recover them.
      self._correction += (self._sum - updated) + value
    else:
      # The low-order digits of the previous sum were lost instead.
      self._correction += (value - updated) + self._sum
    self._sum = updated

  def calculate_average(self, new_value: float) -> float:
    """Pushes `new_value` into the window and returns the window average.

    Note: the divisor is always `window_size`, so outputs ramp up from zero
    until the window has filled.
    """
    if len(self._history) == self._history.maxlen:
      # Window full: retire the oldest sample from the running sum.
      self._compensated_add(-self._history[0])
    self._compensated_add(new_value)
    self._history.append(new_value)
    return (self._sum + self._correction) / self._window_size
class COMVelocityEstimator(object):
  """Estimates the CoM velocity using on board sensors.

  Requires the base velocity in the world frame (e.g. from a MoCap system)
  and low-pass filters it with per-axis moving-window averages, so the
  result is smooth enough for controllers to use reliably.
  """

  def __init__(
      self,
      robot: Any,
      window_size: int = _DEFAULT_WINDOW_SIZE,
  ):
    self._robot = robot
    self._window_size = window_size
    self.reset(0)

  @property
  def com_velocity_body_frame(self) -> Sequence[float]:
    """The base velocity projected in the body aligned inertial frame.

    The body aligned frame is an inertial frame that coincides with the
    body frame but has zero velocity/angular velocity relative to the world
    frame.

    Returns:
      The CoM velocity in the body aligned frame.
    """
    return self._com_velocity_body_frame

  @property
  def com_velocity_world_frame(self) -> Sequence[float]:
    """The filtered CoM velocity in the world frame."""
    return self._com_velocity_world_frame

  def reset(self, current_time):
    """Re-creates the per-axis filters and zeroes the velocity estimates."""
    del current_time
    # One moving-window filter per axis to suppress high-frequency noise.
    self._axis_filters = [
        MovingWindowFilter(window_size=self._window_size) for _ in range(3)
    ]
    self._com_velocity_world_frame = np.array((0, 0, 0))
    self._com_velocity_body_frame = np.array((0, 0, 0))

  def update(self, current_time):
    """Filters the latest base velocity and rotates it into the body frame."""
    del current_time
    raw_velocity = self._robot.GetBaseVelocity()
    filtered = [
        axis_filter.calculate_average(component)
        for axis_filter, component in zip(self._axis_filters, raw_velocity)
    ]
    self._com_velocity_world_frame = np.array(filtered)
    base_orientation = self._robot.GetTrueBaseOrientation()
    _, inverse_rotation = self._robot.pybullet_client.invertTransform(
        (0, 0, 0), base_orientation)
    self._com_velocity_body_frame, _ = (
        self._robot.pybullet_client.multiplyTransforms(
            (0, 0, 0), inverse_rotation, self._com_velocity_world_frame,
            (0, 0, 0, 1)))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/mpc_controller/qp_torque_optimizer.py | mpc_controller/qp_torque_optimizer.py | """Set up the zeroth-order QP problem for stance leg control.
For details, please refer to section XX of this paper:
https://arxiv.org/abs/2009.10019
"""
import numpy as np
# import numba
import quadprog # pytype:disable=import-error
np.set_printoptions(precision=3, suppress=True)
ACC_WEIGHT = np.array([1., 1., 1., 10., 10, 1.])
# @numba.jit(nopython=True, parallel=True, cache=True)
def compute_mass_matrix(robot_mass, robot_inertia, foot_positions):
  """Builds the 6 x 12 matrix mapping 4 foot forces to CoM accelerations.

  Args:
    robot_mass: Total robot mass.
    robot_inertia: 3x3 body inertia matrix.
    foot_positions: Per-leg foot positions in the base frame, 4 rows of xyz.

  Returns:
    A (6, 12) matrix; rows are (linear xyz, angular xyz) accelerations,
    columns are the stacked per-leg force components.
  """
  # Yaw is assumed zero (commands are expressed locally), so the yaw
  # rotation reduces to the identity.
  rot_z = np.eye(3)
  inv_mass = np.eye(3) / robot_mass
  inv_inertia = np.linalg.inv(robot_inertia)
  mass_mat = np.zeros((6, 12))

  for leg_id in range(4):
    cols = slice(leg_id * 3, leg_id * 3 + 3)
    # Linear block: F = m * a  =>  a = F / m.
    mass_mat[:3, cols] = inv_mass
    px, py, pz = foot_positions[leg_id]
    # Angular block: torque = r x F via the skew-symmetric matrix of r.
    skew = np.array([[0, -pz, py], [pz, 0, -px], [-py, px, 0]])
    mass_mat[3:6, cols] = rot_z.T.dot(inv_inertia).dot(skew)
  return mass_mat
# @numba.jit(nopython=True, parallel=True, cache=True)
def compute_constraint_matrix(mpc_body_mass,
                              contacts,
                              friction_coef=0.8,
                              f_min_ratio=0.1,
                              f_max_ratio=10):
  """Builds the QP inequality constraints on the 12 contact-force variables.

  Encodes per-leg normal-force bounds and a linearized friction pyramid in
  the form expected by quadprog: C^T x >= lb.

  Args:
    mpc_body_mass: Robot mass used to scale the normal-force bounds.
    contacts: Per-leg truthy flags; non-contact legs get (numerically) zero
      normal force.
    friction_coef: Friction coefficient of the linearized cone.
    f_min_ratio: Normal-force lower bound as a fraction of body weight.
    f_max_ratio: Normal-force upper bound as a multiple of body weight.

  Returns:
    Tuple (C, lb) with C of shape (12, 24) and lb of shape (24,).
  """
  weight = mpc_body_mass * 9.8
  min_normal_force = f_min_ratio * weight
  max_normal_force = f_max_ratio * weight
  ineq_mat = np.zeros((24, 12))
  ineq_lb = np.zeros(24)
  for leg in range(4):
    fz_col = leg * 3 + 2
    # Normal force bounds: fz >= f_min and -fz >= -f_max for stance legs;
    # swing legs are pinned to ~zero normal force.
    ineq_mat[2 * leg, fz_col] = 1
    ineq_mat[2 * leg + 1, fz_col] = -1
    if contacts[leg]:
      ineq_lb[2 * leg] = min_normal_force
      ineq_lb[2 * leg + 1] = -max_normal_force
    else:
      ineq_lb[2 * leg] = ineq_lb[2 * leg + 1] = -1e-7
    # Linearized friction pyramid: |fx| <= mu * fz and |fy| <= mu * fz.
    base_row = 8 + 4 * leg
    base_col = 3 * leg
    ineq_mat[base_row, base_col:base_col + 3] = (1, 0, friction_coef)
    ineq_mat[base_row + 1, base_col:base_col + 3] = (-1, 0, friction_coef)
    ineq_mat[base_row + 2, base_col:base_col + 3] = (0, 1, friction_coef)
    ineq_mat[base_row + 3, base_col:base_col + 3] = (0, -1, friction_coef)
  return ineq_mat.T, ineq_lb
# @numba.jit(nopython=True, cache=True)
def compute_objective_matrix(mass_matrix, desired_acc, acc_weight, reg_weight):
  """Assembles the quadratic and linear terms of the stance-force QP.

  NOTE(review): `reg_weight` is added to *every* entry of the quadratic
  term (row-broadcast of a 12-vector), not only to the diagonal; a diagonal
  `reg_weight * np.eye(12)` may have been intended — confirm before
  changing, since the solver later adds its own diagonal jitter.

  Args:
    mass_matrix: (6, 12) map from foot forces to CoM accelerations.
    desired_acc: Desired 6-DoF CoM acceleration.
    acc_weight: Per-axis weights for the acceleration tracking error.
    reg_weight: Regularization weight on the force magnitudes.

  Returns:
    Tuple (quad_term, linear_term) for quadprog's 0.5 x^T G x - a^T x form.
  """
  gravity = np.array([0., 0., 9.8, 0., 0., 0.])
  acc_weight_mat = np.diag(acc_weight)
  reg_term = np.ones(12) * reg_weight
  quad_term = mass_matrix.T.dot(acc_weight_mat).dot(mass_matrix) + reg_term
  # Gravity compensation is folded into the tracking target.
  linear_term = (gravity + desired_acc).T.dot(acc_weight_mat).dot(mass_matrix)
  return quad_term, linear_term
def compute_contact_force(robot,
                          desired_acc,
                          contacts,
                          acc_weight=ACC_WEIGHT,
                          reg_weight=1e-4,
                          friction_coef=0.45,
                          f_min_ratio=0.1,
                          f_max_ratio=10.):
  """Solves the stance-force QP for a desired CoM acceleration.

  Args:
    robot: Robot instance providing MPC_BODY_MASS, MPC_BODY_INERTIA and
      GetFootPositionsInBaseFrame().
    desired_acc: Desired 6-DoF CoM acceleration (linear then angular).
    contacts: Per-leg truthy flags for feet in ground contact.
    acc_weight: Per-axis weights on the acceleration tracking error.
    reg_weight: Regularization weight on the contact forces.
    friction_coef: Friction coefficient for the linearized friction cone.
    f_min_ratio: Normal-force lower bound as a fraction of body weight.
    f_max_ratio: Normal-force upper bound as a multiple of body weight.

  Returns:
    A (4, 3) array of contact forces, one row per leg.
  """
  mass_matrix = compute_mass_matrix(
      robot.MPC_BODY_MASS,
      np.array(robot.MPC_BODY_INERTIA).reshape((3, 3)),
      robot.GetFootPositionsInBaseFrame())
  G, a = compute_objective_matrix(mass_matrix, desired_acc, acc_weight,
                                  reg_weight)
  C, b = compute_constraint_matrix(robot.MPC_BODY_MASS, contacts,
                                   friction_coef, f_min_ratio, f_max_ratio)
  # Small diagonal jitter keeps G positive definite for the solver.
  G += 1e-4 * np.eye(12)
  result = quadprog.solve_qp(G, a, C, b)
  # NOTE(review): the solution is negated — presumably converting between
  # the solver's and the controller's force sign conventions; confirm.
  return -result[0].reshape((4, 3))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/retarget_motion/retarget_config_vision60.py | retarget_motion/retarget_config_vision60.py | import numpy as np
from motion_imitation.utilities import pose3d
from pybullet_utils import transformations
# Robot model loaded for retargeting.
URDF_FILENAME = "quadruped/vision60.urdf"

# Uniform scale applied to reference (mocap) positions before retargeting.
REF_POS_SCALE = 1
# Initial root position and orientation (xyzw quaternion) of the sim robot.
INIT_POS = np.array([0, 0, 0])
INIT_ROT = np.array([0, 0, 0, 1.0])

# pybullet joint indices of the toe links (see per-entry leg labels).
SIM_TOE_JOINT_IDS = [
    3, # left hand
    7, # left foot
    11, # right hand
    15 # right foot
]
# pybullet joint indices of the hips, same leg order as SIM_TOE_JOINT_IDS.
SIM_HIP_JOINT_IDS = [0, 4, 8, 12]

# Offset added to the retargeted root position.
SIM_ROOT_OFFSET = np.array([0, 0, 0])
# Per-toe local-frame offsets, same leg order as SIM_TOE_JOINT_IDS.
SIM_TOE_OFFSET_LOCAL = [
    np.array([0, -0.05, 0.0]),
    np.array([0, -0.05, 0.01]),
    np.array([0, 0.05, 0.0]),
    np.array([0, 0.05, 0.01])
]

# Default joint angles, 3 per leg — presumably the IK rest pose; confirm
# against retarget_motion.py usage.
DEFAULT_JOINT_POSE = np.array([0, 0.7, 1.5, 0, 0.7, 1.5, 0, 0.7, 1.5, 0, 0.7, 1.5])
# Per-joint damping — presumably passed to pybullet IK as joint damping.
JOINT_DAMPING = [0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01]

# Added to the pelvis->neck heading vector when computing the root rotation.
FORWARD_DIR_OFFSET = np.array([0, 0, 0])
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/retarget_motion/retarget_config_a1.py | retarget_motion/retarget_config_a1.py | import numpy as np
# Robot model loaded for retargeting.
URDF_FILENAME = "a1/a1.urdf"

# Uniform scale applied to reference (mocap) positions before retargeting.
REF_POS_SCALE = 0.825
# Initial root position and orientation (xyzw quaternion) of the sim robot.
INIT_POS = np.array([0, 0, 0.32])
INIT_ROT = np.array([0, 0, 0, 1.0])

# pybullet joint indices of the toe links (see per-entry leg labels).
SIM_TOE_JOINT_IDS = [
    5, # right hand
    15, # right foot
    10, # left hand
    20, # left foot
]
# pybullet joint indices of the hips, same leg order as SIM_TOE_JOINT_IDS.
SIM_HIP_JOINT_IDS = [1, 11, 6, 16]

# Offset added to the retargeted root position.
SIM_ROOT_OFFSET = np.array([0, 0, -0.06])
# Per-toe local-frame offsets, same leg order as SIM_TOE_JOINT_IDS.
SIM_TOE_OFFSET_LOCAL = [
    np.array([0, -0.05, 0.0]),
    np.array([0, -0.05, 0.01]),
    np.array([0, 0.05, 0.0]),
    np.array([0, 0.05, 0.01])
]

# Default joint angles, 3 per leg — presumably the IK rest pose; confirm
# against retarget_motion.py usage.
DEFAULT_JOINT_POSE = np.array([0, 0.9, -1.8, 0, 0.9, -1.8, 0, 0.9, -1.8, 0, 0.9, -1.8])
# Per-joint damping — presumably passed to pybullet IK as joint damping.
JOINT_DAMPING = [0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01,
                 0.1, 0.05, 0.01]

# Added to the pelvis->neck heading vector when computing the root rotation.
FORWARD_DIR_OFFSET = np.array([0, 0, 0])
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/retarget_motion/retarget_config_laikago.py | retarget_motion/retarget_config_laikago.py | import numpy as np
from motion_imitation.utilities import pose3d
from pybullet_utils import transformations
# Robot model loaded for retargeting.
URDF_FILENAME = "laikago/laikago_toes.urdf"

# Uniform scale applied to reference (mocap) positions before retargeting.
REF_POS_SCALE = 1
# Initial root position; orientation built from Euler angles (sxyz order).
INIT_POS = np.array([0, 0, 0])
INIT_ROT = transformations.quaternion_from_euler(ai=np.pi / 2.0, aj=0, ak=np.pi / 2.0, axes="sxyz")

# pybullet joint indices of the toe links (see per-entry leg labels).
SIM_TOE_JOINT_IDS = [
    7, # left hand
    15, # left foot
    3, # right hand
    11 # right foot
]
# pybullet joint indices of the hips, same leg order as SIM_TOE_JOINT_IDS.
SIM_HIP_JOINT_IDS = [4, 12, 0, 8]

# Offset added to the retargeted root position.
SIM_ROOT_OFFSET = np.array([0, 0, 0])
# Per-toe local-frame offsets, same leg order as SIM_TOE_JOINT_IDS.
SIM_TOE_OFFSET_LOCAL = [
    np.array([-0.02, 0.0, 0.0]),
    np.array([-0.02, 0.0, 0.01]),
    np.array([-0.02, 0.0, 0.0]),
    np.array([-0.02, 0.0, 0.01])
]

# Default joint angles, 3 per leg — presumably the IK rest pose; confirm
# against retarget_motion.py usage.
DEFAULT_JOINT_POSE = np.array([0, 0.67, -1.25, 0, 0.67, -1.25, 0, 0.67, -1.25, 0, 0.67, -1.25])
# Per-joint damping — presumably passed to pybullet IK as joint damping.
JOINT_DAMPING = [0.5, 0.05, 0.01,
                 0.5, 0.05, 0.01,
                 0.5, 0.05, 0.01,
                 0.5, 0.05, 0.01]

# Added to the pelvis->neck heading vector when computing the root rotation.
FORWARD_DIR_OFFSET = np.array([0, 0, 0.025])
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/retarget_motion/retarget_motion.py | retarget_motion/retarget_motion.py | """Run from motion_imitation/retarget_motion to find data correctly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import numpy as np
import time
import collections
from motion_imitation.utilities import motion_util
from motion_imitation.utilities import pose3d
from pybullet_utils import transformations
import pybullet
import pybullet_data as pd
# import retarget_config_laikago as config
# import retarget_config_vision60 as config
import retarget_config_a1 as config
# Layout of a pose vector: [root position (3), root quaternion (4), joints].
POS_SIZE = 3
ROT_SIZE = 4
# Identity orientation, used when teleporting marker bodies.
DEFAULT_ROT = np.array([0, 0, 0, 1])
FORWARD_DIR = np.array([1, 0, 0])

OUTPUT_FILENAME = "retarget_motion.txt"
GROUND_URDF_FILENAME = "plane_implicit.urdf"

# reference motion
# Duration of one reference frame (~1/60; presumably seconds).
FRAME_DURATION = 0.01667
# Rotations bringing raw mocap keypoints into the retargeting frame.
REF_COORD_ROT = transformations.quaternion_from_euler(0.5 * np.pi, 0, 0)
REF_POS_OFFSET = np.array([0, 0, 0])
REF_ROOT_ROT = transformations.quaternion_from_euler(0, 0, 0.47 * np.pi)

# Indices of key joints within the reference (mocap) skeleton.
REF_PELVIS_JOINT_ID = 0
REF_NECK_JOINT_ID = 3
REF_HIP_JOINT_IDS = [6, 16, 11, 20]
REF_TOE_JOINT_IDS = [10, 19, 15, 23]

# NOTE(review): never populated in this module — appears unused; confirm.
MOBILE_JOINT_LIMITS = collections.OrderedDict()

# Mocap clips to retarget: [name, joint-position file, start frame, end frame].
mocap_motions = [
    ["pace", "data/dog_walk00_joint_pos.txt",162,201],
    ["trot", "data/dog_walk03_joint_pos.txt",448,481 ],
    ["trot2", "data/dog_run04_joint_pos.txt",630,663 ],
    ["canter", "data/dog_run00_joint_pos.txt", 430, 459],
    ["left turn0", "data/dog_walk09_joint_pos.txt",1085,1124 ],
    ["right turn0", "data/dog_walk09_joint_pos.txt", 2404,2450],
]

# right turn0
#JOINT_POS_FILENAME = "data/dog_walk09_joint_pos.txt"
#FRAME_START = 2404
#FRAME_END = 2450
def build_markers(num_markers):
  """Creates massless visual marker spheres, one per reference keypoint.

  Neck/pelvis/hip keypoints are colored blue, toes red, all others green.

  Args:
    num_markers: Number of marker bodies to create.

  Returns:
    List of pybullet body unique ids for the created markers.
  """
  marker_radius = 0.02

  markers = []
  for i in range(num_markers):
    if (i == REF_NECK_JOINT_ID) or (i == REF_PELVIS_JOINT_ID)\
        or (i in REF_HIP_JOINT_IDS):
      col = [0, 0, 1, 1]
    elif (i in REF_TOE_JOINT_IDS):
      col = [1, 0, 0, 1]
    else:
      col = [0, 1, 0, 1]

    virtual_shape_id = pybullet.createVisualShape(shapeType=pybullet.GEOM_SPHERE,
                                                  radius=marker_radius,
                                                  rgbaColor=col)
    body_id = pybullet.createMultiBody(baseMass=0,
                                       baseCollisionShapeIndex=-1,
                                       baseVisualShapeIndex=virtual_shape_id,
                                       basePosition=[0,0,0],
                                       useMaximalCoordinates=True)
    markers.append(body_id)

  return markers
def get_joint_limits(robot):
  """Collects the (low, high) limits of all prismatic/revolute joints.

  Args:
    robot: pybullet body unique id of the robot.

  Returns:
    Tuple (joint_limit_low, joint_limit_high): parallel lists over the
    robot's movable (prismatic or revolute) joints, in joint-index order.
  """
  num_joints = pybullet.getNumJoints(robot)
  joint_limit_low = []
  joint_limit_high = []

  for i in range(num_joints):
    joint_info = pybullet.getJointInfo(robot, i)
    # joint_info[2] is the joint type; fixed/spherical joints are skipped.
    joint_type = joint_info[2]

    if (joint_type == pybullet.JOINT_PRISMATIC or joint_type == pybullet.JOINT_REVOLUTE):
      joint_limit_low.append(joint_info[8])
      joint_limit_high.append(joint_info[9])

  return joint_limit_low, joint_limit_high
def get_root_pos(pose):
  """Returns the root-position slice (first POS_SIZE entries) of `pose`."""
  return pose[:POS_SIZE]
def get_root_rot(pose):
  """Returns the root-orientation quaternion slice of `pose`."""
  return pose[POS_SIZE:POS_SIZE + ROT_SIZE]
def get_joint_pose(pose):
  """Returns the joint-angle portion of `pose` (everything after the root)."""
  return pose[POS_SIZE + ROT_SIZE:]
def set_root_pos(root_pos, pose):
  """Writes `root_pos` into the root-position slice of `pose`, in place."""
  pose[:POS_SIZE] = root_pos
def set_root_rot(root_rot, pose):
  """Writes `root_rot` into the root-quaternion slice of `pose`, in place."""
  pose[POS_SIZE:POS_SIZE + ROT_SIZE] = root_rot
def set_joint_pose(joint_pose, pose):
  """Writes `joint_pose` into the joint portion of `pose`, in place."""
  pose[POS_SIZE + ROT_SIZE:] = joint_pose
def set_pose(robot, pose):
  """Applies a full pose vector (root transform + joint angles) to the robot.

  Args:
    robot: pybullet body unique id.
    pose: Flat array: root position, root quaternion, then per-joint
      positions located at each joint's q-index (joint_info[3]).
  """
  num_joints = pybullet.getNumJoints(robot)
  root_pos = get_root_pos(pose)
  root_rot = get_root_rot(pose)
  pybullet.resetBasePositionAndOrientation(robot, root_pos, root_rot)

  for j in range(num_joints):
    j_info = pybullet.getJointInfo(robot, j)
    j_state = pybullet.getJointStateMultiDof(robot, j)

    # joint_info[3] is the joint's first position index in the pose vector.
    j_pose_idx = j_info[3]
    j_pose_size = len(j_state[0])
    j_vel_size = len(j_state[1])

    if (j_pose_size > 0):
      j_pose = pose[j_pose_idx:(j_pose_idx + j_pose_size)]
      # Zero velocity: the robot is teleported to a kinematic pose.
      j_vel = np.zeros(j_vel_size)
      pybullet.resetJointStateMultiDof(robot, j, j_pose, j_vel)

  return
def set_maker_pos(marker_pos, marker_ids):
  """Move each marker body to its reference position.

  (Name keeps the historical "maker" typo because callers use it.)

  Args:
    marker_pos: array of shape (num_markers, 3) with world positions.
    marker_ids: pybullet body ids, one per marker row.
  """
  assert len(marker_ids) == marker_pos.shape[0]
  for body_id, pos in zip(marker_ids, marker_pos):
    pybullet.resetBasePositionAndOrientation(body_id, pos, DEFAULT_ROT)
  return
def process_ref_joint_pos_data(joint_pos):
  """Transform raw mocap joint positions into the simulation frame.

  Each position is rotated by REF_COORD_ROT then REF_ROOT_ROT, scaled by
  config.REF_POS_SCALE, and shifted by REF_POS_OFFSET. The input array is
  not modified; a transformed copy is returned.
  """
  proc = joint_pos.copy()
  for row in range(proc.shape[0]):
    pos = pose3d.QuaternionRotatePoint(proc[row], REF_COORD_ROT)
    pos = pose3d.QuaternionRotatePoint(pos, REF_ROOT_ROT)
    proc[row] = pos * config.REF_POS_SCALE + REF_POS_OFFSET
  return proc
def retarget_root_pose(ref_joint_pos):
  """Estimate the robot's root position and orientation from reference joints.

  Builds an orthonormal frame from the pelvis->neck direction (forward) and
  the averaged shoulder/hip left directions, then converts it to a quaternion.

  Args:
    ref_joint_pos: processed reference joint positions, indexed by the
      REF_*_JOINT_ID constants.

  Returns:
    (root_pos, root_rot): 3-vector position and unit quaternion.
  """
  pelvis_pos = ref_joint_pos[REF_PELVIS_JOINT_ID]
  neck_pos = ref_joint_pos[REF_NECK_JOINT_ID]
  left_shoulder_pos = ref_joint_pos[REF_HIP_JOINT_IDS[0]]
  right_shoulder_pos = ref_joint_pos[REF_HIP_JOINT_IDS[2]]
  left_hip_pos = ref_joint_pos[REF_HIP_JOINT_IDS[1]]
  right_hip_pos = ref_joint_pos[REF_HIP_JOINT_IDS[3]]
  # Forward axis: pelvis toward neck, with a configurable tweak, normalized.
  forward_dir = neck_pos - pelvis_pos
  forward_dir += config.FORWARD_DIR_OFFSET
  forward_dir = forward_dir / np.linalg.norm(forward_dir)
  # Left axis: average of the shoulder and hip right->left directions.
  delta_shoulder = left_shoulder_pos - right_shoulder_pos
  delta_hip = left_hip_pos - right_hip_pos
  dir_shoulder = delta_shoulder / np.linalg.norm(delta_shoulder)
  dir_hip = delta_hip / np.linalg.norm(delta_hip)
  left_dir = 0.5 * (dir_shoulder + dir_hip)
  # Orthonormalize: up = forward x left, then recompute left = up x forward.
  up_dir = np.cross(forward_dir, left_dir)
  up_dir = up_dir / np.linalg.norm(up_dir)
  left_dir = np.cross(up_dir, forward_dir)
  left_dir[2] = 0.0; # make the base more stable
  left_dir = left_dir / np.linalg.norm(left_dir)
  # Column-major 4x4 rotation matrix with axes as columns (x=fwd, y=left, z=up).
  rot_mat = np.array([[forward_dir[0], left_dir[0], up_dir[0], 0],
                      [forward_dir[1], left_dir[1], up_dir[1], 0],
                      [forward_dir[2], left_dir[2], up_dir[2], 0],
                      [0, 0, 0, 1]])
  root_pos = 0.5 * (pelvis_pos + neck_pos)
  #root_pos = 0.25 * (left_shoulder_pos + right_shoulder_pos + left_hip_pos + right_hip_pos)
  root_rot = transformations.quaternion_from_matrix(rot_mat)
  # Compose with the robot's initial orientation and renormalize.
  root_rot = transformations.quaternion_multiply(root_rot, config.INIT_ROT)
  root_rot = root_rot / np.linalg.norm(root_rot)
  return root_pos, root_rot
def retarget_pose(robot, default_pose, ref_joint_pos):
  """Compute a full robot pose (root pos+rot, joint angles) for one mocap frame.

  Places the root from the reference skeleton, builds per-leg toe targets,
  then solves the joint angles with pybullet inverse kinematics. Any solved
  joint outside the limits recorded in MOBILE_JOINT_LIMITS is reported.

  Args:
    robot: pybullet body id of the simulated robot.
    default_pose: rest pose used to regularize the IK solve.
    ref_joint_pos: processed reference joint positions for this frame.

  Returns:
    np.array concatenating [root_pos (3), root_rot (4), joint angles].
  """
  joint_lim_low, joint_lim_high = get_joint_limits(robot)
  root_pos, root_rot = retarget_root_pose(ref_joint_pos)
  root_pos += config.SIM_ROOT_OFFSET
  pybullet.resetBasePositionAndOrientation(robot, root_pos, root_rot)
  inv_init_rot = transformations.quaternion_inverse(config.INIT_ROT)
  heading_rot = motion_util.calc_heading_rot(
      transformations.quaternion_multiply(root_rot, inv_init_rot))
  tar_toe_pos = []
  for i in range(len(REF_TOE_JOINT_IDS)):
    ref_toe_id = REF_TOE_JOINT_IDS[i]
    ref_hip_id = REF_HIP_JOINT_IDS[i]
    sim_hip_id = config.SIM_HIP_JOINT_IDS[i]
    toe_offset_local = config.SIM_TOE_OFFSET_LOCAL[i]
    ref_toe_pos = ref_joint_pos[ref_toe_id]
    ref_hip_pos = ref_joint_pos[ref_hip_id]
    hip_link_state = pybullet.getLinkState(robot, sim_hip_id,
                                           computeForwardKinematics=True)
    sim_hip_pos = np.array(hip_link_state[4])
    # Rotate the per-leg toe offset into the world frame (heading only).
    toe_offset_world = pose3d.QuaternionRotatePoint(toe_offset_local,
                                                    heading_rot)
    # Place the toe relative to the sim hip using the reference hip->toe
    # vector, but keep the reference toe height so ground contact matches.
    ref_hip_toe_delta = ref_toe_pos - ref_hip_pos
    sim_tar_toe_pos = sim_hip_pos + ref_hip_toe_delta
    sim_tar_toe_pos[2] = ref_toe_pos[2]
    sim_tar_toe_pos += toe_offset_world
    tar_toe_pos.append(sim_tar_toe_pos)
  joint_pose = pybullet.calculateInverseKinematics2(
      bodyUniqueId=robot,
      endEffectorLinkIndices=config.SIM_TOE_JOINT_IDS,
      targetPositions=tar_toe_pos,
      jointDamping=config.JOINT_DAMPING,
      lowerLimits=joint_lim_low,
      upperLimits=joint_lim_high,
      restPoses=default_pose)
  joint_pose = np.array(joint_pose)
  assert len(joint_pose) == len(MOBILE_JOINT_LIMITS)
  for joint_name, joint_val in zip(MOBILE_JOINT_LIMITS, joint_pose):
    joint_min, joint_max = MOBILE_JOINT_LIMITS[joint_name]
    if joint_val < joint_min or joint_val > joint_max:
      # Bug fix: the second line of this message was a plain (non-f) string,
      # so "{joint_min}"/"{joint_max}" were printed literally instead of the
      # actual limit values.
      print(f"Joint out of bounds: {joint_name}, val: {joint_val}, "
            f"bounds: [{joint_min}, {joint_max}]")
  pose = np.concatenate([root_pos, root_rot, joint_pose])
  return pose
def update_camera(robot):
  """Re-center the debug camera on the robot base, keeping yaw/pitch/distance."""
  yaw, pitch, dist = pybullet.getDebugVisualizerCamera()[8:11]
  base_pos = np.array(pybullet.getBasePositionAndOrientation(robot)[0])
  pybullet.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)
  return
def load_ref_data(JOINT_POS_FILENAME, FRAME_START, FRAME_END):
  """Load mocap joint positions from a CSV file, clipped to a frame range.

  Args:
    JOINT_POS_FILENAME: path to a comma-separated file, one frame per row.
    FRAME_START: first frame to keep, or None for the beginning.
    FRAME_END: one-past-last frame to keep, or None for the end.

  Returns:
    np.ndarray of the selected rows.
  """
  data = np.loadtxt(JOINT_POS_FILENAME, delimiter=",")
  first = 0 if FRAME_START is None else FRAME_START
  last = data.shape[0] if FRAME_END is None else FRAME_END
  return data[first:last]
def retarget_motion(robot, joint_pos_data):
  """Retarget every mocap frame onto the robot.

  Applies each retargeted pose to the robot as it goes (so IK for frame k
  starts from frame k-1), then shifts all frames so the first frame's
  horizontal root position is at the origin.

  Returns:
    np.ndarray of shape (num_frames, pose_size).
  """
  num_frames = joint_pos_data.shape[0]
  for frame_idx in range(num_frames):
    ref_pos = np.reshape(joint_pos_data[frame_idx], [-1, POS_SIZE])
    ref_pos = process_ref_joint_pos_data(ref_pos)
    curr_pose = retarget_pose(robot, config.DEFAULT_JOINT_POSE, ref_pos)
    set_pose(robot, curr_pose)
    if frame_idx == 0:
      new_frames = np.zeros([num_frames, curr_pose.shape[-1]])
    new_frames[frame_idx] = curr_pose
  # Zero out the initial x/y so the clip starts at the origin.
  new_frames[:, 0:2] -= new_frames[0, 0:2]
  return new_frames
def output_motion(frames, out_filename):
  """Serialize retargeted frames to the motion-clip text format.

  The format is JSON-like: a header of clip settings followed by a "Frames"
  array with one [v, v, ...] row per frame, values printed to 5 decimals.
  """
  header = [
      "{\n",
      "\"LoopMode\": \"Wrap\",\n",
      "\"FrameDuration\": " + str(FRAME_DURATION) + ",\n",
      "\"EnableCycleOffsetPosition\": true,\n",
      "\"EnableCycleOffsetRotation\": true,\n",
      "\n",
      "\"Frames\":\n",
      "[",
  ]
  with open(out_filename, "w") as f:
    f.writelines(header)
    for row_idx in range(frames.shape[0]):
      if row_idx != 0:
        f.write(",")
      f.write("\n  [")
      f.write(", ".join("%.5f" % val for val in frames[row_idx]))
      f.write("]")
    f.write("\n]")
    f.write("\n}")
  return
def main():
  """Retarget every configured mocap clip, then replay each with markers.

  For each entry in the module-level mocap_motions list: resets the sim,
  loads the robot, records joint limits into MOBILE_JOINT_LIMITS, retargets
  the clip, writes it to OUTPUT_FILENAME, and replays it 5x in real time.
  """
  p = pybullet
  # Record the GUI session to an mp4 alongside the retargeting.
  p.connect(p.GUI, options="--mp4=\"retarget_motion.mp4\" --mp4fps=60")
  p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING,1)
  pybullet.setAdditionalSearchPath(pd.getDataPath())
  # mocap_motions entries look like (name, filename, frame_start, frame_end);
  # defined earlier in this module (outside this view).
  for mocap_motion in mocap_motions:
    pybullet.resetSimulation()
    # Gravity off: poses are set kinematically, not simulated.
    pybullet.setGravity(0, 0, 0)
    ground = pybullet.loadURDF(GROUND_URDF_FILENAME)
    robot = pybullet.loadURDF(config.URDF_FILENAME, config.INIT_POS, config.INIT_ROT)
    set_pose(robot, np.concatenate([config.INIT_POS, config.INIT_ROT, config.DEFAULT_JOINT_POSE]))
    # Cache per-joint limits so retarget_pose can report out-of-bounds solves.
    for i in range(p.getNumJoints(robot)):
      joint_info = p.getJointInfo(robot, i)
      name = joint_info[1].decode('utf-8')
      joint_type = joint_info[2]
      if joint_type in (p.JOINT_PRISMATIC, p.JOINT_REVOLUTE):
        MOBILE_JOINT_LIMITS[name] = (joint_info[8], joint_info[9])
    p.removeAllUserDebugItems()
    print("mocap_name=", mocap_motion[0])
    joint_pos_data = load_ref_data(mocap_motion[1],mocap_motion[2],mocap_motion[3])
    num_markers = joint_pos_data.shape[-1] // POS_SIZE
    marker_ids = build_markers(num_markers)
    retarget_frames = retarget_motion(robot, joint_pos_data)
    # NOTE(review): every clip overwrites the same OUTPUT_FILENAME — only the
    # last clip's retargeting survives; confirm this is intentional.
    output_motion(retarget_frames, OUTPUT_FILENAME)
    f = 0
    num_frames = joint_pos_data.shape[0]
    # Replay the clip five times, sleeping to approximate real time.
    for repeat in range (5*num_frames):
      time_start = time.time()
      f_idx = f % num_frames
      print("Frame {:d}".format(f_idx))
      ref_joint_pos = joint_pos_data[f_idx]
      ref_joint_pos = np.reshape(ref_joint_pos, [-1, POS_SIZE])
      ref_joint_pos = process_ref_joint_pos_data(ref_joint_pos)
      pose = retarget_frames[f_idx]
      set_pose(robot, pose)
      set_maker_pos(ref_joint_pos, marker_ids)
      update_camera(robot)
      p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING,1)
      f += 1
      time_end = time.time()
      # Sleep off whatever time is left in this frame's budget.
      sleep_dur = FRAME_DURATION - (time_end - time_start)
      sleep_dur = max(0, sleep_dur)
      time.sleep(sleep_dur)
    for m in marker_ids:
      p.removeBody(m)
    marker_ids = []
  pybullet.disconnect()
  return
if __name__ == "__main__":
main()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/safe_outdoor/resetters.py | safe_outdoor/resetters.py | """For resetting the robot to a ready pose."""
import inspect
import os
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
from motion_imitation.envs.env_wrappers import reset_task
from motion_imitation.robots import robot_config
from sac_dev.learning import sac_agent
import sac_dev.sac_configs
import tensorflow as tf
class RewardPlateau(object):
  """Terminal condition that returns True when the reward stops changing.

  Fires once the task reward has stayed within ``delta`` of its previous
  value for ``n`` consecutive queries while remaining above ``lowbar``.
  """

  def __init__(self, n=5, delta=.001, lowbar=0.8):
    # n: consecutive near-identical readings required to trigger.
    # delta: max reward change still counted as "unchanged".
    # lowbar: plateaus at rewards below this never trigger.
    self.n = n
    self.delta = delta
    # inf guarantees the very first reading never counts as a plateau.
    self.last_reward = float("inf")
    self.count_same = 0
    self.lowbar = lowbar

  def __call__(self, env):
    # Consistency fix: use the public env.task accessor (as GetupResetter
    # does) instead of reaching into the private env._task attribute.
    reward = env.task.reward(env)
    if reward > self.lowbar and abs(reward - self.last_reward) < self.delta:
      self.count_same += 1
    else:
      self.count_same = 0
    self.last_reward = reward
    return self.count_same >= self.n
class GetupResetter(object):
  """Single-policy resetter that first rolls over, then stands up."""
  def __init__(self, env, checkpoint_path):
    """Load the getup policy for `env` from `checkpoint_path`.

    Temporarily swaps the env's task to the reset task while the SAC agent
    is constructed (the agent reads observation/action specs from the env),
    then restores the original task.
    """
    self._env = env
    # Episode ends when the robot is upright, times out, or reward plateaus.
    timeout = lambda env: env.env_step_counter > 150
    upright = lambda env: env.task.reward(env) > .94
    # Here real_robot just means no starting pose randomization.
    self._reset_task = reset_task.ResetTask(
        terminal_conditions=(upright, timeout, RewardPlateau()),
        real_robot=True)
    old_task = self._env.task
    self._env.set_task(self._reset_task)
    # Isolated graph/session so the reset policy does not collide with the
    # main training graph.
    self._graph = tf.Graph()
    self._sess = tf.Session(graph=self._graph)
    agent_configs = sac_dev.sac_configs.SAC_CONFIGS["A1-Motion-Imitation-Vanilla-SAC-Pretrain"]
    self._reset_model = sac_agent.SACAgent(
        env=self._env, sess=self._sess, **agent_configs)
    self._reset_model.load_model(checkpoint_path)
    self._env.set_task(old_task)
  def _run_single_episode(self, task, policy):
    """Run one episode of `task` driven by `policy` (obs -> action)."""
    self._env.set_task(task)
    obs = self._env.reset()
    done = False
    # Flag read by the robot layer while the reset policy is in control.
    self._env.robot.running_reset_policy = True
    while not done:
      action = policy(obs)
      obs, _, done, _ = self._env.step(action)
    self._env.robot.running_reset_policy = False
  def __call__(self):
    """Attempt to stand the robot up, retrying on safety faults (max 5)."""
    for i in range(1, 6):
      print("Reset attempt {}/5".format(i))
      try:
        self._run_single_episode(
            self._reset_task,
            lambda x: self._reset_model.sample_action(x, True)[0])
        break
      except robot_config.SafetyError:
        continue
    self._env.robot.HoldCurrentPose()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/safe_outdoor/learning/yielding_sac_agent.py | safe_outdoor/learning/yielding_sac_agent.py | """SAC agent that yields after every environment episode."""
import collections
import numpy as np
import os
import tensorflow as tf
import time
import sac_dev.learning.sac_agent as sac_agent
import sac_dev.util.logger as logger
import sac_dev.util.mpi_util as mpi_util
import sac_dev.util.replay_buffer as replay_buffer
import pickle
class YieldingSACAgent(sac_agent.SACAgent):
  """SAC agent whose train loop is a generator yielding after each episode.

  The yields let a caller (e.g. train_multitask in run_sac.py) interleave
  several agents on one environment: it resumes whichever agent's generator
  matches the task currently set on the env. Requires a single MPI process.
  """
  def __init__(self, *args, **kwargs):
    # The yield-based scheduling assumes a single process; the mpi_util
    # reductions below then become no-ops.
    if mpi_util.get_num_procs() != 1:
      raise ValueError("YieldingSACAgent cannot run with multiprocessing")
    super().__init__(*args, **kwargs)
  def train(self, max_samples, test_episodes, output_dir, output_iters, variant=None):
    """Generator version of SACAgent.train; yields after every episode.

    Logs to output_dir (text log, tensorboard), checkpoints the model and
    pickles the replay buffer every output_iters iterations.
    """
    log_file = os.path.join(output_dir, "log.txt")
    self._logger = logger.Logger()
    self._logger.configure_output_file(log_file, variant=variant)
    video_dir = os.path.join(output_dir, "videos")
    if (mpi_util.is_root_proc()):
      os.makedirs(video_dir, exist_ok=True)
    model_dir = os.path.join(output_dir, "train")
    os.makedirs(model_dir, exist_ok=True)
    self._tf_writer = tf.summary.FileWriter(
        os.path.join(output_dir, "tensorboard"), graph=self._sess.graph)
    # NOTE(review): `iter` shadows the builtin; kept for byte-compatibility.
    iter = 0
    total_train_path_count = 0
    test_return = 0
    total_test_path_count = 0
    start_time = time.time()
    # Delegate to the generator-form _init_train, propagating its yields.
    init_train_func = self._init_train()
    for _ in init_train_func:
      yield
    num_procs = 1
    local_samples_per_iter = int(np.ceil(self._samples_per_iter / num_procs))
    local_test_episodes = int(np.ceil(test_episodes / num_procs))
    total_samples = 0
    print("Training")
    while (total_samples < max_samples):
      update_normalizer = self._enable_normalizer_update(total_samples)
      rollout_train_func = self._rollout_train(local_samples_per_iter, update_normalizer)
      for ret in rollout_train_func:
        yield
      # After the loop, `ret` holds the generator's final yielded tuple.
      train_return, train_path_count, new_sample_count, metrics = ret
      train_return = mpi_util.reduce_mean(train_return)
      train_path_count = mpi_util.reduce_sum(train_path_count)
      new_sample_count = mpi_util.reduce_sum(new_sample_count)
      total_train_path_count += train_path_count
      total_samples = self.get_total_samples()
      wall_time = time.time() - start_time
      wall_time /= 60 * 60 # store time in hours
      log_dict = {
          "Iteration": iter,
          "Wall_Time": wall_time,
          "Samples": total_samples,
          "Train_Return": train_return,
          "Train_Paths": total_train_path_count,
          "Test_Return": test_return,
          "Test_Paths": total_test_path_count}
      for metric_name, value in metrics.items():
        if metric_name == "max_torque":
          log_dict["Max_Torque"] = mpi_util.reduce_max(value)
          continue
        log_dict[metric_name] = mpi_util.reduce_mean(value)
      self._log(log_dict, iter)
      if (self._need_normalizer_update() and iter == 0):
        self._update_normalizers()
      self._update(iter, new_sample_count)
      if (self._need_normalizer_update()):
        self._update_normalizers()
      if (iter % output_iters == 0):
        rollout_test_func = self._rollout_test(local_test_episodes, print_info=False)
        for ret in rollout_test_func:
          yield
        test_return, test_path_count = ret
        test_return = mpi_util.reduce_mean(test_return)
        total_test_path_count += mpi_util.reduce_sum(test_path_count)
        self._log({
            "Test_Return": test_return,
            "Test_Paths": total_test_path_count
        }, iter)
        if (mpi_util.is_root_proc()):
          model_file = os.path.join(model_dir, f"model-{iter:06}.ckpt")
          self.save_model(model_file)
          # Persist the replay buffer so finetuning can resume with data.
          buffer_file = os.path.join(model_dir, f"buffer.pkl")
          file = open(buffer_file, "wb")
          pickle.dump(self._replay_buffer, file)
          file.close()
        self._logger.print_tabular()
        self._logger.dump_tabular()
      else:
        self._logger.print_tabular()
      iter += 1
    self._tf_writer.close()
    self._tf_writer = None
    return
  def _init_train(self):
    """Generator wrapper over base-class init plus initial sample collection."""
    super(sac_agent.SACAgent, self)._init_train()
    num_procs = mpi_util.get_num_procs()
    local_init_samples = int(np.ceil(self._init_samples / num_procs))
    collect_func = self._collect_init_samples(local_init_samples)
    for _ in collect_func:
      yield
  def _collect_init_samples(self, max_samples):
    """Collect max_samples seed transitions, yielding after each episode."""
    print("Collecting {} initial samples".format(max_samples))
    sample_count = 0
    next_benchmark = 1000
    update_normalizer = self._enable_normalizer_update(sample_count)
    start_time = time.time()
    while (sample_count < max_samples):
      # One episode per call: first next() runs the rollout up to its yield,
      # second next() retrieves the (return, paths, samples, metrics) tuple.
      rollout_func = self._rollout_train(1, update_normalizer)
      next(rollout_func)
      _, _, new_sample_count, _ = next(rollout_func)
      sample_count += new_sample_count
      print("samples: {}/{}".format(sample_count, max_samples))
      if sample_count >= next_benchmark:
        print("Collected {} initial samples in {} sec".format(
            sample_count, time.time() - start_time))
        next_benchmark += 1000
      yield
    if (self._need_normalizer_update()):
      self._update_normalizers()
    yield sample_count
  def _rollout_train(self, num_samples, update_normalizer):
    """Collect at least num_samples training samples; yields after each path.

    Final yield is (avg_return, path_count, new_sample_count, metrics).
    """
    new_sample_count = 0
    total_return = 0
    path_count = 0
    all_metrics = collections.defaultdict(list)
    while (new_sample_count < num_samples):
      path, _, metrics = self._rollout_path(test=False)
      yield
      path_id = self._replay_buffer.store(path)
      valid_path = path_id != replay_buffer.INVALID_IDX
      if not valid_path:
        assert False, "Invalid path detected"
      path_return = path.calc_return()
      if update_normalizer:
        self._record_normalizers(path)
      # metrics maps name -> (value, aggregate_fn); collect the values.
      for metric_name in metrics:
        all_metrics[metric_name].append(metrics[metric_name][0])
      all_metrics["max_torque"].append(path.calc_max_torque())
      new_sample_count += path.pathlength()
      total_return += path_return
      path_count += 1
    avg_return = total_return / path_count
    # NOTE(review): reuses the last path's `metrics` dict as the aggregation
    # spec; fragile but works since the spec is the same for every path.
    metrics["max_torque"] = (None, np.max)
    aggregate_metrics = {}
    for metric_name, val_list in all_metrics.items():
      aggregate_fn = metrics[metric_name][1]
      aggregate_metrics[metric_name] = aggregate_fn(val_list)
    yield avg_return, path_count, new_sample_count, aggregate_metrics
  def _rollout_test(self, num_episodes, print_info=False, task_name=""):
    """Run num_episodes evaluation episodes; final yield is (avg_return, n)."""
    total_return = 0
    for e in range(num_episodes):
      path, _, _ = self._rollout_path(test=True)
      yield
      path_return = path.calc_return()
      total_return += path_return
      if (print_info):
        logger.Logger.print("Task: "+task_name)
        logger.Logger.print("Episode: {:d}".format(e))
        logger.Logger.print("Curr_Return: {:.3f}".format(path_return))
        logger.Logger.print("Avg_Return: {:.3f}\n".format(total_return / (e + 1)))
      yield
    avg_return = total_return / num_episodes
    yield avg_return, num_episodes
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/run.py | motion_imitation/run.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import argparse
from mpi4py import MPI
import numpy as np
import os
import random
import tensorflow as tf
import time
import pybullet as p
import moviepy.editor as mpy
from motion_imitation.envs import env_builder as env_builder
from motion_imitation.learning import imitation_policies as imitation_policies
from motion_imitation.learning import ppo_imitation as ppo_imitation
from stable_baselines.common.callbacks import CheckpointCallback
TIMESTEPS_PER_ACTORBATCH = 4096
OPTIM_BATCHSIZE = 256
ENABLE_ENV_RANDOMIZER = True
def set_rand_seed(seed=None):
  """Seed TF/NumPy/stdlib RNGs, offset per MPI rank so workers diverge.

  Args:
    seed: base seed; None uses the current wall-clock time.
  """
  base = int(time.time()) if seed is None else seed
  rank_seed = base + 97 * MPI.COMM_WORLD.Get_rank()
  for seeder in (tf.set_random_seed, np.random.seed, random.seed):
    seeder(rank_seed)
def build_model(env, num_procs, timesteps_per_actorbatch, optim_batchsize, output_dir):
  """Construct the PPO imitation model with fixed hyperparameters.

  Args:
    env: training environment.
    num_procs: MPI worker count; batch sizes are split evenly across workers.
    timesteps_per_actorbatch: global rollout batch size per update.
    optim_batchsize: global minibatch size for the optimizer.
    output_dir: directory for tensorboard logs.

  Returns:
    A PPOImitation model ready for .learn().
  """
  # Two hidden layers each for policy and value networks.
  policy_kwargs = {
      "net_arch": [{"pi": [512, 256],
                    "vf": [512, 256]}],
      "act_fun": tf.nn.relu
  }
  # Per-worker shares of the global batch sizes (rounded up).
  timesteps_per_actorbatch = int(np.ceil(float(timesteps_per_actorbatch) / num_procs))
  optim_batchsize = int(np.ceil(float(optim_batchsize) / num_procs))
  model = ppo_imitation.PPOImitation(
      policy=imitation_policies.ImitationPolicy,
      env=env,
      gamma=0.95,
      timesteps_per_actorbatch=timesteps_per_actorbatch,
      clip_param=0.2,
      optim_epochs=1,
      optim_stepsize=1e-5,
      optim_batchsize=optim_batchsize,
      lam=0.99,
      adam_epsilon=1e-5,
      schedule='constant',
      policy_kwargs=policy_kwargs,
      tensorboard_log=output_dir,
      verbose=1)
  return model
def train(model, env, total_timesteps, output_dir="", int_save_freq=0):
  """Train `model` for `total_timesteps`, optionally saving checkpoints.

  Args:
    model: agent exposing learn(total_timesteps, save_path, callback).
    env: unused here; kept for signature symmetry with test().
    total_timesteps: number of environment samples to train for.
    output_dir: directory for the final "model.zip"; "" disables saving.
    int_save_freq: if > 0 (and output_dir set), checkpoint every n steps.
  """
  if output_dir == "":
    save_path = None
  else:
    save_path = os.path.join(output_dir, "model.zip")
    # exist_ok avoids the exists()/makedirs() race when several MPI workers
    # start at once (the original check-then-create could crash a worker).
    os.makedirs(output_dir, exist_ok=True)

  callbacks = []
  if output_dir != "" and int_save_freq > 0:
    # NOTE(review): directory name keeps the historical "intermedate" typo so
    # any tooling that reads existing checkpoints keeps working.
    int_dir = os.path.join(output_dir, "intermedate")
    callbacks.append(CheckpointCallback(save_freq=int_save_freq,
                                        save_path=int_dir,
                                        name_prefix='model'))

  model.learn(total_timesteps=total_timesteps, save_path=save_path,
              callback=callbacks)
  return
def test(model, env, num_procs, num_episodes=None):
  """Roll out the policy deterministically, record a video, report mean return.

  Args:
    model: policy exposing predict(obs, deterministic=...).
    env: gym-style environment.
    num_procs: MPI worker count; episodes are split across workers.
    num_episodes: total episodes across all workers; None runs forever.
  """
  curr_return = 0
  sum_return = 0
  episode_count = 0
  if num_episodes is not None:
    num_local_episodes = int(np.ceil(float(num_episodes) / num_procs))
  else:
    num_local_episodes = np.inf
  o = env.reset()
  video_frames = []
  while episode_count < num_local_episodes:
    a, _ = model.predict(o, deterministic=True)
    o, r, done, info = env.step(a)
    curr_return += r
    img = env.render(mode="rgb_array")
    video_frames.append(img)
    if done:
      o = env.reset()
      print(curr_return)
      sum_return += curr_return
      episode_count += 1
      # Bug fix: the running return was never cleared between episodes, so
      # every later episode's total included all earlier episodes' rewards
      # and the reported mean return was inflated.
      curr_return = 0
  if len(video_frames) > 0:
    clip = mpy.ImageSequenceClip(video_frames, fps=(1/.033))
    clip.write_videofile("testing_ppo.mp4")
  # Aggregate across MPI workers before reporting.
  sum_return = MPI.COMM_WORLD.allreduce(sum_return, MPI.SUM)
  episode_count = MPI.COMM_WORLD.allreduce(episode_count, MPI.SUM)
  mean_return = sum_return / episode_count
  if MPI.COMM_WORLD.Get_rank() == 0:
    print("Mean Return: " + str(mean_return))
    print("Episode Count: " + str(episode_count))
  return
def main():
  """Parse CLI flags, build the imitation env and PPO model, then train/test."""

  def _resolve_file(filename):
    """Return an existing path for filename, trying module and repo dirs.

    Bug fix: this module previously called find_file(), which is defined
    only in run_sac.py, so every run raised NameError before training began.
    """
    for candidate in (filename,
                      os.path.join(currentdir, filename),
                      os.path.join(parentdir, filename)):
      if os.path.isfile(candidate):
        return candidate
    raise ValueError("No such file: '{}'".format(filename))

  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument("--seed", dest="seed", type=int, default=None)
  arg_parser.add_argument("--mode", dest="mode", type=str, default="train")
  arg_parser.add_argument("--motion_file", dest="motion_file", type=str, default="motion_imitation/data/motions/dog_pace.txt")
  arg_parser.add_argument("--visualize", dest="visualize", action="store_true", default=False)
  arg_parser.add_argument("--output_dir", dest="output_dir", type=str, default="output")
  arg_parser.add_argument("--num_test_episodes", dest="num_test_episodes", type=int, default=None)
  arg_parser.add_argument("--model_file", dest="model_file", type=str, default="")
  arg_parser.add_argument("--total_timesteps", dest="total_timesteps", type=int, default=int(2e8))
  arg_parser.add_argument("--train_reset", dest="train_reset", action="store_true", default=False)
  arg_parser.add_argument("--int_save_freq", dest="int_save_freq", type=int, default=0) # save intermediate model every n policy steps
  arg_parser.add_argument("--real_robot", dest="real", action="store_true")
  arg_parser.add_argument("--sim_robot", dest="real", action="store_false")
  arg_parser.set_defaults(real=False)
  arg_parser.add_argument("--realistic_sim", dest="realistic_sim", action="store_true", default=False)
  # Bug fix: args.multitask was read below but no --multitask flag existed,
  # raising AttributeError; expose it explicitly (default False).
  arg_parser.add_argument("--multitask", dest="multitask", action="store_true", default=False)
  args = arg_parser.parse_args()

  if (args.seed is not None):
    set_rand_seed(args.seed)

  num_procs = MPI.COMM_WORLD.Get_size()
  # PPO training here is CPU-only.
  os.environ["CUDA_VISIBLE_DEVICES"] = '-1'

  enable_env_rand = ENABLE_ENV_RANDOMIZER and (args.mode != "test")
  env = env_builder.build_env("reset" if args.train_reset else "imitate",
                              motion_files=[_resolve_file(args.motion_file)],
                              num_parallel_envs=num_procs,
                              mode=args.mode,
                              enable_randomizer=enable_env_rand,
                              enable_rendering=args.visualize,
                              use_real_robot=args.real,
                              reset_at_current_position=args.multitask,
                              realistic_sim=args.realistic_sim)

  model = build_model(env=env,
                      num_procs=num_procs,
                      timesteps_per_actorbatch=TIMESTEPS_PER_ACTORBATCH,
                      optim_batchsize=OPTIM_BATCHSIZE,
                      output_dir=args.output_dir)

  if args.model_file != "":
    model.load_parameters(args.model_file)

  if args.mode == "train":
    train(model=model,
          env=env,
          total_timesteps=args.total_timesteps,
          output_dir=args.output_dir,
          int_save_freq=args.int_save_freq)
  elif args.mode == "test":
    test(model=model,
         env=env,
         num_procs=num_procs,
         num_episodes=args.num_test_episodes)
  else:
    assert False, "Unsupported mode: " + args.mode

  return
if __name__ == '__main__':
main()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/__init__.py | motion_imitation/__init__.py | """Set up gym interface for locomotion environments."""
import gym
from gym.envs.registration import registry, make, spec
def register(env_id, *args, **kvargs):
  """Register a gym environment, silently skipping already-registered ids."""
  if env_id not in registry.env_specs:
    return gym.envs.registration.register(env_id, *args, **kvargs)
# Register the A1 gym environment id at import time.
# NOTE(review): entry_point references a "locomotion" package while this
# repo's package is "motion_imitation" — verify the path is not stale.
register(
    env_id='A1GymEnv-v1',
    entry_point='locomotion.envs.gym_envs:A1GymEnv',
    max_episode_steps=2000,
    reward_threshold=2000.0,
)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/run_sac.py | motion_imitation/run_sac.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import argparse
import numpy as np
import os
import random
import tensorflow.compat.v1 as tf
import time
from motion_imitation.envs import env_builder as env_builder
from motion_imitation.utilities import motion_util
import sac_dev.learning.sac_agent as sac_agent
import sac_dev.util.mpi_util as mpi_util
import sac_dev.sac_configs
from motion_imitation.envs.env_wrappers import imitation_task
from motion_imitation.envs.env_wrappers import logging_wrapper
from safe_outdoor import resetters
from safe_outdoor.learning import yielding_sac_agent
ENABLE_ENV_RANDOMIZER = True
def find_file(filename):
  """Locate a file as given, or relative to this module's dir or its parent.

  Args:
    filename: absolute or relative path.

  Returns:
    The first existing candidate path.

  Raises:
    ValueError: if no candidate exists.
  """
  if os.path.isfile(filename):
    return filename
  for base in (currentdir, parentdir):
    candidate = os.path.join(base, filename)
    if os.path.isfile(candidate):
      return candidate
  raise ValueError("No such file: '{}'".format(filename))
def set_rand_seed(seed=None):
  """Seed TF/NumPy/stdlib RNGs, offset by MPI rank so workers differ.

  Args:
    seed: base seed; None uses the current wall-clock time.
  """
  chosen = int(time.time()) if seed is None else seed
  chosen += 97 * mpi_util.get_proc_rank()
  tf.set_random_seed(chosen)
  np.random.seed(chosen)
  random.seed(chosen)
def enable_gpus(gpu_str):
  """Restrict TensorFlow to the given GPUs; an empty string forces CPU-only.

  Args:
    gpu_str: value for CUDA_VISIBLE_DEVICES, e.g. "0" or "0,1"; "" hides all
      GPUs by setting the variable to "-1".
  """
  # Bug fix: the original used `gpu_str is not ""` — an object-identity
  # comparison against a string literal, which is unreliable (and a
  # SyntaxWarning on modern CPython). Use value equality instead.
  if gpu_str != "":
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
  else:
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
  return
def build_agent(env, variant, agent_cls=sac_agent.SACAgent):
  """Construct an agent of agent_cls with its own TF graph and session."""
  graph = tf.Graph()
  session = tf.Session(graph=graph)
  return agent_cls(env=env, sess=session, **variant)
def train_multitask(
    env,
    total_timesteps,
    output_dir,
    int_save_freq,
    test_episodes,
    variant,
    forward_model_file,
    reset_func,
    backward_motion_file="",
    backward_model_file="",
):
  """Alternate Forward/Backward agents on one env, one episode at a time.

  Each task gets its own YieldingSACAgent whose train() is a generator that
  yields after every episode. Before each episode, reset_func stands the
  robot up; if a backward motion is configured, the task is chosen by which
  direction points back toward the starting position, so the robot walks
  back and forth over the same stretch of ground.
  """
  forward_task = env.task
  task_dict = {"Forward": forward_task}
  model_file_dict = {"Forward": forward_model_file}
  if backward_motion_file != "":
    # Mirror the forward task's settings for the backward reference motion.
    backward_task = imitation_task.ImitationTask(
        ref_motion_filenames=[find_file(backward_motion_file)],
        enable_cycle_sync=forward_task.cycle_sync_enabled,
        tar_frame_steps=forward_task.tar_frame_steps,
        ref_state_init_prob=forward_task.ref_state_init_prob,
        enable_rand_init_time=forward_task.enable_rand_init_time,
        warmup_time=forward_task.warmup_time)
    task_dict["Backward"] = backward_task
    model_file_dict["Backward"] = backward_model_file
  # One suspended training generator per task; resumed round by round.
  train_funcs = {}
  for task_name, model_file in model_file_dict.items():
    my_variant = variant.copy()
    model = build_agent(env, my_variant, yielding_sac_agent.YieldingSACAgent)
    if model_file != "":
      model.load_model(model_file)
    train_funcs[task_name] = model.train(
        max_samples=total_timesteps,
        test_episodes=test_episodes,
        output_dir=os.path.join(output_dir, task_name),
        output_iters=int_save_freq,
        variant=my_variant)
  direction_label = -1
  counter = 0
  task_name = "Forward"
  while True:
    # Stand the robot up (real robot / realistic sim) before each episode.
    reset_func()
    if backward_motion_file != "":
      # Express the world origin in the robot's frame; a positive local x
      # means the start point is ahead, so walking Forward returns to it.
      pos = env.robot.GetBasePosition()
      init_mat = motion_util.to_matrix(pos,
                                       env.robot.GetTrueBaseRollPitchYaw())
      init_mat_inv = np.linalg.inv(init_mat)
      local_ox, local_oy = init_mat_inv[0:2, 2]
      if local_ox >= 0.0:
        task_name = "Forward"
      else:
        task_name = "Backward"
    # Refresh the on-screen label showing the active task.
    env.pybullet_client.removeUserDebugItem(direction_label)
    direction_label = env.pybullet_client.addUserDebugText(
        task_name, (0, 0, 0.1), (1, 1, 1),
        parentObjectUniqueId=env.robot.quadruped)
    env.set_task(task_dict[task_name])
    try:
      # Resume the selected agent for one episode; StopIteration means it
      # reached its sample budget.
      next(train_funcs[task_name])
    except StopIteration:
      return
def main():
  """CLI entry point for SAC pretraining/finetuning of imitation policies.

  Modes: "train"/"canary" (train; canary is a tiny smoke-test run), "test"
  (evaluate), "standup" (interactively run the getup resetter). --multitask
  switches to the alternating Forward/Backward training loop.
  """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument("--seed", dest="seed", type=int, default=None)
  arg_parser.add_argument("--mode", dest="mode", type=str, default="train")
  arg_parser.add_argument("--motion_file", dest="motion_file", type=str, default="motion_imitation/data/motions/pace.txt")
  arg_parser.add_argument("--backward_motion_file", dest="backward_motion_file", type=str, default="")
  arg_parser.add_argument("--visualize", dest="visualize", action="store_true", default=False)
  # Root output directory. An additional subdir with the datetime is added.
  arg_parser.add_argument("--output_dir", dest="output_dir", type=str, default="output")
  # Optionally add a descriptive suffix to the datetime.
  arg_parser.add_argument("--output_suffix", dest="output_suffix", type=str, default="")
  arg_parser.add_argument("--num_test_episodes", dest="num_test_episodes", type=int, default=1)
  arg_parser.add_argument("--model_file", dest="model_file", type=str, default="")
  arg_parser.add_argument("--backward_model_file", dest="backward_model_file", type=str, default="")
  arg_parser.add_argument("--getup_model_file", dest="getup_model_file", type=str, default="motion_imitation/data/policies/model-004050.ckpt")
  arg_parser.add_argument("--total_timesteps", dest="total_timesteps", type=int, default=2e8)
  arg_parser.add_argument("--int_save_freq", dest="int_save_freq", type=int, default=50) # save intermediate model every n policy steps
  arg_parser.add_argument("--gpu", dest="gpu", default="")
  arg_parser.add_argument("--train_reset", dest="train_reset", action="store_true", default=False)
  arg_parser.add_argument("--finetune", dest="finetune", action="store_true", default=False)
  arg_parser.add_argument("--use_redq", dest="use_redq", action="store_true", default=False)
  arg_parser.add_argument("--multitask", dest="multitask", action="store_true")
  arg_parser.add_argument("--no_multitask", dest="multitask", action="store_false")
  arg_parser.set_defaults(multitask=False)
  arg_parser.add_argument("--real_robot", dest="real", action="store_true")
  arg_parser.add_argument("--sim_robot", dest="real", action="store_false")
  arg_parser.set_defaults(real=False)
  arg_parser.add_argument("--realistic_sim", dest="realistic_sim", action="store_true", default=False)
  arg_parser.add_argument("--mocap_grpc_server", dest="mocap_grpc_server", type=str, default=None)
  arg_parser.add_argument("--no_env_logging", dest="env_logging", action="store_false", default=True)
  args = arg_parser.parse_args()
  tf.logging.set_verbosity(tf.logging.ERROR)
  agent_configs = {}
  # Select the SAC hyperparameter preset by algorithm variant and phase.
  env_id = "A1-Motion-Imitation"
  if args.use_redq:
    env_id += "-REDQ"
  else:
    env_id += "-Vanilla-SAC"
  if args.finetune:
    env_id += "-Finetune"
  else:
    env_id += "-Pretrain"
  agent_configs = sac_dev.sac_configs.SAC_CONFIGS[env_id]
  # Quick run to make sure bits are flowing.
  if args.mode == "canary":
    args.int_save_freq = 1
    args.num_test_episodes = 1
    args.output_dir = "/tmp/safe_outdoor_canary_runs"
    args.total_timesteps = 1
    agent_configs["init_samples"] = 10
  num_procs = mpi_util.get_num_procs()
  enable_gpus(args.gpu)
  set_rand_seed(int(time.time()))
  # Per-run output dir: <output_dir>/<datetime>[_<suffix>].
  suf = "_" + args.output_suffix if args.output_suffix else ""
  output_dir = os.path.join(args.output_dir,
                            time.strftime("%Y-%m-%d_%H%M_%S", time.localtime()) + suf)
  # Domain randomization is off during evaluation.
  enable_env_rand = ENABLE_ENV_RANDOMIZER and (args.mode != "test")
  env = env_builder.build_env("reset" if args.train_reset else "imitate",
                              motion_files=[find_file(args.motion_file)],
                              num_parallel_envs=num_procs,
                              mode=args.mode,
                              enable_randomizer=enable_env_rand,
                              enable_rendering=args.visualize,
                              use_real_robot=args.real,
                              reset_at_current_position=args.multitask,
                              realistic_sim=args.realistic_sim)
  if args.env_logging:
    env = logging_wrapper.LoggingWrapper(env, output_dir,
                                         args.mocap_grpc_server)
  if args.multitask:
    if args.mode == "test":
      raise NotImplementedError("Only training implemented for multitask")
    # A real getup policy is only needed when falls actually happen.
    if args.real or args.realistic_sim:
      resetter = resetters.GetupResetter(env, args.getup_model_file)
    else:
      resetter = lambda: None
    train_multitask(env=env,
                    backward_motion_file=args.backward_motion_file,
                    total_timesteps=int(args.total_timesteps),
                    output_dir=output_dir,
                    int_save_freq=args.int_save_freq,
                    test_episodes=args.num_test_episodes,
                    variant=agent_configs,
                    forward_model_file=args.model_file,
                    backward_model_file=args.backward_model_file,
                    reset_func=resetter)
    return
  if args.mode == "standup":
    # Interactive sanity check of the getup policy on the robot.
    resetter = resetters.GetupResetter(env, args.getup_model_file)
    for i in range(args.num_test_episodes):
      input("Strike <Enter> to stand.")
      resetter()
      input("Strike <Enter> to fall.")
      env.robot.Brake()
    return
  model = build_agent(env, variant=agent_configs)
  if args.model_file != "":
    model.load_model(args.model_file)
  if args.mode in ("train", "canary"):
    model.train(max_samples=args.total_timesteps,
                test_episodes=args.num_test_episodes,
                output_dir=output_dir,
                output_iters=args.int_save_freq,
                variant=agent_configs)
  elif args.mode == "test":
    model.eval(num_episodes=args.num_test_episodes)
  else:
    assert False, "Unsupported mode: " + args.mode
  return
if __name__ == '__main__':
main()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_builder.py | motion_imitation/envs/env_builder.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
from motion_imitation.envs import locomotion_gym_config
from motion_imitation.envs import locomotion_gym_env
from motion_imitation.envs.env_wrappers import imitation_task
from motion_imitation.envs.env_wrappers import imitation_wrapper_env
from motion_imitation.envs.env_wrappers import observation_dictionary_to_array_wrapper
from motion_imitation.envs.env_wrappers import reset_task
from motion_imitation.envs.env_wrappers import simple_openloop
from motion_imitation.envs.env_wrappers import trajectory_generator_wrapper_env
from motion_imitation.envs.sensors import environment_sensors
from motion_imitation.envs.sensors import robot_sensors
from motion_imitation.envs.sensors import sensor_wrappers
from motion_imitation.envs.utilities import controllable_env_randomizer_from_config
from motion_imitation.robots import a1
from motion_imitation.robots import a1_robot
from motion_imitation.robots import robot_config
def build_env(task,
              motion_files=None,
              num_parallel_envs=0,
              mode="train",
              enable_randomizer=True,
              enable_rendering=False,
              reset_at_current_position=False,
              use_real_robot=False,
              realistic_sim=False):
  """Assembles a fully wrapped locomotion gym environment.

  Args:
    task: The string "reset" builds a recovery (getup) task; any other value
      builds a motion-imitation task driven by `motion_files`.
    motion_files: Non-empty list of reference motion file paths.
    num_parallel_envs: Forwarded to the imitation curriculum wrapper.
    mode: "train" or "test"; in "test" the curriculum starts at its final
      episode length.
    enable_randomizer: Adds dynamics randomization and random reference-motion
      start times.
    enable_rendering: Runs pybullet with GUI rendering.
    reset_at_current_position: Forwarded to the simulation parameters.
    use_real_robot: Drives the physical A1 instead of the simulated one.
    realistic_sim: Uses real-robot-style reset and velocity estimation in sim.

  Returns:
    The wrapped gym environment.
  """
  assert len(motion_files) > 0

  is_reset_task = task == "reset"

  # Curriculum over episode length; the reset task uses a fixed length.
  if is_reset_task:
    ep_len_start = ep_len_end = 150
  else:
    ep_len_start, ep_len_end = 50, 600

  sim_cfg = locomotion_gym_config.SimulationParameters()
  sim_cfg.enable_rendering = enable_rendering
  sim_cfg.allow_knee_contact = True
  sim_cfg.motor_control_mode = robot_config.MotorControlMode.POSITION
  sim_cfg.reset_at_current_position = reset_at_current_position
  sim_cfg.num_action_repeat = 33
  gym_config = locomotion_gym_config.LocomotionGymConfig(
      simulation_parameters=sim_cfg)

  # Robot selection: real hardware vs simulation; "realistic" sim mimics the
  # hardware's safe reset and IMU-based velocity estimation.
  robot_class = a1_robot.A1Robot if use_real_robot else a1.A1
  robot_kwargs = {"self_collision_enabled": True}
  if use_real_robot or realistic_sim:
    robot_kwargs["reset_func_name"] = "_SafeJointsReset"
    robot_kwargs["velocity_source"] = a1.VelocitySource.IMU_FOOT_CONTACT
  else:
    robot_kwargs["reset_func_name"] = "_PybulletReset"

  num_motors = a1.NUM_MOTORS

  # Actions are offsets from the default standing pose, limited so the summed
  # pose stays within the joint bounds.
  joint_upper = np.array(
      [0.802851455917, 4.18879020479, -0.916297857297] * 4)
  default_pose = np.array([0, 0.9, -1.8] * 4)
  pose_generator = simple_openloop.A1PoseOffsetGenerator(
      action_limit=joint_upper - default_pose)

  def _with_history(wrapped):
    # Every observation keeps a 3-step history window.
    return sensor_wrappers.HistoricSensorWrapper(
        wrapped_sensor=wrapped, num_history=3)

  sensors = [
      _with_history(robot_sensors.MotorAngleSensor(num_motors=num_motors)),
      _with_history(robot_sensors.IMUSensor()),
      _with_history(
          environment_sensors.LastActionSensor(num_actions=num_motors)),
  ]

  if is_reset_task:
    task_instance = reset_task.ResetTask()
  else:
    task_instance = imitation_task.ImitationTask(
        ref_motion_filenames=motion_files,
        real_robot=use_real_robot,
        enable_cycle_sync=True,
        tar_frame_steps=[1, 2, 10, 30],
        ref_state_init_prob=0.0,
        enable_rand_init_time=enable_randomizer,
        warmup_time=.3)

  randomizers = []
  if enable_randomizer:
    randomizers.append(
        controllable_env_randomizer_from_config
        .ControllableEnvRandomizerFromConfig(verbose=False))

  env = locomotion_gym_env.LocomotionGymEnv(
      gym_config=gym_config,
      robot_class=robot_class,
      robot_kwargs=robot_kwargs,
      env_randomizers=randomizers,
      robot_sensors=sensors,
      task=task_instance)

  # Flatten dict observations, then add the pose-offset trajectory generator.
  env = observation_dictionary_to_array_wrapper.ObservationDictionaryToArrayWrapper(
      env)
  env = trajectory_generator_wrapper_env.TrajectoryGeneratorWrapperEnv(
      env, trajectory_generator=pose_generator)

  # At test time skip the curriculum and start at full episode length.
  if mode == "test":
    ep_len_start = ep_len_end

  env = imitation_wrapper_env.ImitationWrapperEnv(
      env,
      episode_length_start=ep_len_start,
      episode_length_end=ep_len_end,
      curriculum_steps=2000000,
      num_parallel_envs=num_parallel_envs)
  return env
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/locomotion_gym_env.py | motion_imitation/envs/locomotion_gym_env.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the locomotion gym env."""
import collections
import time
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet # pytype: disable=import-error
import pybullet_utils.bullet_client as bullet_client
import pybullet_data as pd
from motion_imitation.robots import robot_config
from motion_imitation.envs.sensors import sensor
from motion_imitation.envs.sensors import space_utils
# Tolerance for action-bound comparisons (not referenced in this file's
# visible code; presumably used by callers -- confirm before removing).
_ACTION_EPS = 0.01
# Total simulation sub-steps per "logical" control cycle; divided by the
# action repeat to derive the bullet solver iteration count.
_NUM_SIMULATION_ITERATION_STEPS = 300
# Capacity for state/action logging buffers (not referenced here).
_LOG_BUFFER_LENGTH = 5000
class LocomotionGymEnv(gym.Env):
  """The gym environment for the locomotion tasks."""
  # Render metadata consumed by gym's monitoring/video tooling.
  metadata = {
      'render.modes': ['human', 'rgb_array'],
      'video.frames_per_second': 100
  }

  def __init__(self,
               gym_config,
               robot_class=None,
               env_sensors=None,
               robot_sensors=None,
               task=None,
               env_randomizers=None,
               robot_kwargs=None):
    """Initializes the locomotion gym environment.

    Args:
      gym_config: An instance of LocomotionGymConfig.
      robot_class: A class of a robot. We provide a class rather than an
        instance due to hard_reset functionality. Parameters are expected to be
        configured with gin.
      env_sensors: A list of environmental sensors for observation.
      robot_sensors: A list of robot sensors, forwarded to the robot class.
      task: A callable function/class to calculate the reward and termination
        condition. Takes the gym env as the argument when calling.
      env_randomizers: A list of EnvRandomizer(s). An EnvRandomizer may
        randomize the physical property of minitaur, change the terrrain during
        reset(), or add perturbation forces during step().
      robot_kwargs: An optional dictionary of arguments to pass to the robot.

    Raises:
      ValueError: If the num_action_repeat is less than 1.
    """
    self.seed()
    self._gym_config = gym_config
    self._robot_class = robot_class
    self._robot_sensors = robot_sensors
    self._robot_kwargs = robot_kwargs or {}

    self._sensors = env_sensors if env_sensors is not None else list()
    if self._robot_class is None:
      raise ValueError('robot_class cannot be None.')

    # A dictionary containing the objects in the world other than the robot.
    self._world_dict = {}
    self._task = task

    self._env_randomizers = env_randomizers if env_randomizers else []

    # This is a workaround due to the issue in b/130128505#comment5
    # (a task that is also a Sensor contributes to the observation).
    if isinstance(self._task, sensor.Sensor):
      self._sensors.append(self._task)

    # Simulation related parameters.
    self._num_action_repeat = gym_config.simulation_parameters.num_action_repeat
    self._on_rack = gym_config.simulation_parameters.robot_on_rack
    if self._num_action_repeat < 1:
      raise ValueError('number of action repeats should be at least 1.')
    self._sim_time_step = gym_config.simulation_parameters.sim_time_step_s
    self._env_time_step = self._num_action_repeat * self._sim_time_step
    self._env_step_counter = 0

    # Solver iterations are spread over the action repeats so a full control
    # cycle always totals _NUM_SIMULATION_ITERATION_STEPS iterations.
    self._num_bullet_solver_iterations = int(_NUM_SIMULATION_ITERATION_STEPS /
                                             self._num_action_repeat)
    self._is_render = gym_config.simulation_parameters.enable_rendering

    # The wall-clock time at which the last frame is rendered.
    self._last_frame_time = 0.0
    self._show_reference_id = -1
    if self._is_render:
      self._pybullet_client = bullet_client.BulletClient(
          connection_mode=pybullet.GUI)
      pybullet.configureDebugVisualizer(
          pybullet.COV_ENABLE_GUI,
          gym_config.simulation_parameters.enable_rendering_gui)
      # Debug sliders: reference-model transparency (only for imitation-style
      # tasks that expose _draw_ref_model_alpha) and an artificial delay.
      if hasattr(self._task, '_draw_ref_model_alpha'):
        self._show_reference_id = pybullet.addUserDebugParameter(
            "show reference", 0, 1, self._task._draw_ref_model_alpha)
      self._delay_id = pybullet.addUserDebugParameter("delay", 0, 0.3, 0)
    else:
      self._pybullet_client = bullet_client.BulletClient(
          connection_mode=pybullet.DIRECT)
    self._pybullet_client.setAdditionalSearchPath(pd.getDataPath())
    if gym_config.simulation_parameters.egl_rendering:
      self._pybullet_client.loadPlugin('eglRendererPlugin')

    # The action list contains the name of all actions.
    self._build_action_space()

    # Set the default render options.
    self._camera_dist = gym_config.simulation_parameters.camera_distance
    self._camera_yaw = gym_config.simulation_parameters.camera_yaw
    self._camera_pitch = gym_config.simulation_parameters.camera_pitch
    self._render_width = gym_config.simulation_parameters.render_width
    self._render_height = gym_config.simulation_parameters.render_height

    # The first reset always rebuilds the world; afterwards honor the config.
    self._hard_reset = True
    self.reset()

    self._hard_reset = gym_config.simulation_parameters.enable_hard_reset

    # Construct the observation space from the list of sensors. Note that we
    # will reconstruct the observation_space after the robot is created.
    self.observation_space = (
        space_utils.convert_sensors_to_gym_space_dictionary(
            self.all_sensors()))

  def _build_action_space(self):
    """Builds action space based on motor control mode."""
    motor_mode = self._gym_config.simulation_parameters.motor_control_mode
    if motor_mode == robot_config.MotorControlMode.HYBRID:
      # Hybrid control: 5 values per configured action entry.
      action_upper_bound = []
      action_lower_bound = []
      action_config = self._robot_class.ACTION_CONFIG
      for action in action_config:
        action_upper_bound.extend([6.28] * 5)
        action_lower_bound.extend([-6.28] * 5)
      self.action_space = spaces.Box(np.array(action_lower_bound),
                                     np.array(action_upper_bound),
                                     dtype=np.float32)
    elif motor_mode == robot_config.MotorControlMode.TORQUE:
      # TODO (yuxiangy): figure out the torque limits of robots.
      torque_limits = np.array([100] * len(self._robot_class.ACTION_CONFIG))
      self.action_space = spaces.Box(-torque_limits,
                                     torque_limits,
                                     dtype=np.float32)
    else:
      # Position mode: bounds come directly from the robot's action config.
      action_upper_bound = []
      action_lower_bound = []
      action_config = self._robot_class.ACTION_CONFIG
      for action in action_config:
        action_upper_bound.append(action.upper_bound)
        action_lower_bound.append(action.lower_bound)
      self.action_space = spaces.Box(np.array(action_lower_bound),
                                     np.array(action_upper_bound),
                                     dtype=np.float32)

  def close(self):
    """Terminates the robot (if one was created) on environment shutdown."""
    if hasattr(self, '_robot') and self._robot:
      self._robot.Terminate()

  def seed(self, seed=None):
    """Seeds the environment's random number generator.

    Returns:
      A single-element list containing the seed actually used.
    """
    self.np_random, self.np_random_seed = seeding.np_random(seed)
    return [self.np_random_seed]

  def all_sensors(self):
    """Returns all robot and environmental sensors."""
    return self._robot.GetAllSensors() + self._sensors

  def sensor_by_name(self, name):
    """Returns the sensor with the given name, or None if not exist."""
    for sensor_ in self.all_sensors():
      if sensor_.get_name() == name:
        return sensor_
    return None

  def reset(self,
            initial_motor_angles=None,
            reset_duration=0.0,
            reset_visualization_camera=True):
    """Resets the robot's position in the world or rebuild the sim world.

    The simulation world will be rebuilt if self._hard_reset is True.

    Args:
      initial_motor_angles: A list of Floats. The desired joint angles after
        reset. If None, the robot will use its built-in value.
      reset_duration: Float. The time (in seconds) needed to rotate all motors
        to the desired initial values.
      reset_visualization_camera: Whether to reset debug visualization camera on
        reset.

    Returns:
      A numpy array contains the initial observation after reset.
    """
    if self._is_render:
      # Suspend rendering while the world is being (re)built.
      self._pybullet_client.configureDebugVisualizer(
          self._pybullet_client.COV_ENABLE_RENDERING, 0)

    # Clear the simulation world and rebuild the robot interface.
    if self._hard_reset:
      self._pybullet_client.resetSimulation()
      self._pybullet_client.setPhysicsEngineParameter(
          numSolverIterations=self._num_bullet_solver_iterations)
      self._pybullet_client.setTimeStep(self._sim_time_step)
      self._pybullet_client.setGravity(0, 0, -10)

      # Rebuild the world.
      self._world_dict = {
          "ground": self._pybullet_client.loadURDF("plane_implicit.urdf")
      }

      # Rebuild the robot
      self._robot = self._robot_class(
          pybullet_client=self._pybullet_client,
          sensors=self._robot_sensors,
          on_rack=self._on_rack,
          action_repeat=self._gym_config.simulation_parameters.
          num_action_repeat,
          motor_control_mode=self._gym_config.simulation_parameters.
          motor_control_mode,
          reset_time=self._gym_config.simulation_parameters.reset_time,
          reset_at_current_position=self._gym_config.simulation_parameters.
          reset_at_current_position,
          motor_torque_limits=self._gym_config.simulation_parameters.
          torque_limits,
          enable_clip_motor_commands=self._gym_config.simulation_parameters.
          enable_clip_motor_commands,
          enable_action_filter=self._gym_config.simulation_parameters.
          enable_action_filter,
          enable_action_interpolation=self._gym_config.simulation_parameters.
          enable_action_interpolation,
          allow_knee_contact=self._gym_config.simulation_parameters.
          allow_knee_contact,
          **self._robot_kwargs)

    # Reset the pose of the robot.
    self._robot.Reset(reload_urdf=False,
                      default_motor_angles=initial_motor_angles,
                      reset_time=reset_duration)

    self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
    self._env_step_counter = 0
    if reset_visualization_camera:
      self._pybullet_client.resetDebugVisualizerCamera(self._camera_dist,
                                                       self._camera_yaw,
                                                       self._camera_pitch,
                                                       [0, 0, 0])
    self._last_action = np.zeros(self.action_space.shape)

    if self._is_render:
      self._pybullet_client.configureDebugVisualizer(
          self._pybullet_client.COV_ENABLE_RENDERING, 1)

    for s in self.all_sensors():
      s.on_reset(self)

    if self._task and hasattr(self._task, 'reset'):
      self._task.reset(self)

    # Loop over all env randomizers.
    for env_randomizer in self._env_randomizers:
      env_randomizer.randomize_env(self)

    # Reset the observations again, since randomizers/task might change the env.
    self._robot.ClearObservationHistory()
    self._robot.ReceiveObservation()

    for s in self.all_sensors():
      s.on_reset(self)

    # NOTE(review): the clear/observe/on_reset sequence below repeats the one
    # above verbatim; presumably a copy-paste duplication -- confirm whether a
    # single pass suffices before removing.
    self._robot.ClearObservationHistory()
    self._robot.ReceiveObservation()

    for s in self.all_sensors():
      s.on_reset(self)

    return self._get_observation()

  def step(self, action):
    """Step forward the simulation, given the action.

    Args:
      action: Can be a list of desired motor angles for all motors when the
        robot is in position control mode; A list of desired motor torques. Or a
        list of tuples (q, qdot, kp, kd, tau) for hybrid control mode. The
        action must be compatible with the robot's motor control mode. Also, we
        are not going to use the leg space (swing/extension) definition at the
        gym level, since they are specific to Minitaur.

    Returns:
      observations: The observation dictionary. The keys are the sensor names
        and the values are the sensor readings.
      reward: The reward for the current state-action pair.
      done: Whether the episode has ended.
      info: A dictionary that stores diagnostic information.

    Raises:
      ValueError: The action dimension is not the same as the number of motors.
      ValueError: The magnitude of actions is out of bounds.
    """
    self._last_base_position = self._robot.GetBasePosition()
    self._last_action = action

    if self._is_render:
      # Sleep, otherwise the computation takes less time than real time,
      # which will make the visualization like a fast-forward video.
      time_spent = time.time() - self._last_frame_time
      self._last_frame_time = time.time()
      time_to_sleep = self._env_time_step - time_spent
      if time_to_sleep > 0:
        time.sleep(time_to_sleep)
      base_pos = self._robot.GetBasePosition()

      # Also keep the previous orientation of the camera set by the user.
      [yaw, pitch,
       dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
      self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch,
                                                       base_pos)
      self._pybullet_client.configureDebugVisualizer(
          self._pybullet_client.COV_ENABLE_SINGLE_STEP_RENDERING, 1)
      # Fade the task's reference model (if any) per the GUI slider value.
      alpha = 0.5
      if self._show_reference_id>0:
        alpha = self._pybullet_client.readUserDebugParameter(self._show_reference_id)
      ref_col = [1, 1, 1, alpha]
      if hasattr(self._task, '_ref_model'):
        self._pybullet_client.changeVisualShape(self._task._ref_model, -1, rgbaColor=ref_col)
        for l in range (self._pybullet_client.getNumJoints(self._task._ref_model)):
          self._pybullet_client.changeVisualShape(self._task._ref_model, l, rgbaColor=ref_col)

      # Optional artificial slow-down controlled from the GUI slider.
      delay = self._pybullet_client.readUserDebugParameter(self._delay_id)
      if (delay>0):
        time.sleep(delay)

    for env_randomizer in self._env_randomizers:
      env_randomizer.randomize_step(self)

    # TODO: consider moving the stepping logic into the robot class.
    self._robot.Step(action)

    for s in self.all_sensors():
      s.on_step(self)

    if self._task and hasattr(self._task, 'update'):
      self._task.update(self)

    reward = self._reward()
    done = self._termination()
    self._env_step_counter += 1
    if done:
      self._robot.Terminate()
    return self._get_observation(), reward, done, {}

  def render(self, mode='rgb_array'):
    """Renders an RGB image from a camera tracking the robot base.

    NOTE(review): only 'rgb_array' is implemented even though the class
    metadata also advertises 'human'.
    """
    if mode != 'rgb_array':
      raise ValueError('Unsupported render mode:{}'.format(mode))
    base_pos = self._robot.GetBasePosition()
    view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._camera_dist,
        yaw=self._camera_yaw,
        pitch=self._camera_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
        fov=60,
        aspect=float(self._render_width) / self._render_height,
        nearVal=0.1,
        farVal=100.0)
    (_, _, px, _, _) = self._pybullet_client.getCameraImage(
        width=self._render_width,
        height=self._render_height,
        renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix)
    # Drop the alpha channel; callers get an (H, W, 3) uint8-style array.
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array

  def get_ground(self):
    """Get simulation ground model."""
    return self._world_dict['ground']

  def set_ground(self, ground_id):
    """Set simulation ground model."""
    self._world_dict['ground'] = ground_id

  @property
  def rendering_enabled(self):
    # Whether the env was created with GUI rendering enabled.
    return self._is_render

  @property
  def last_base_position(self):
    # Base position recorded at the start of the most recent step().
    return self._last_base_position

  @property
  def world_dict(self):
    # Copy of the non-robot objects in the world (e.g. the ground).
    return self._world_dict.copy()

  @world_dict.setter
  def world_dict(self, new_dict):
    self._world_dict = new_dict.copy()

  def _termination(self):
    """Returns True if the episode should terminate.

    Robot safety is checked first; otherwise the task's done() decides.
    Note: sensors' on_terminate hooks only run when the task defines no
    done() method, because of the early return.
    """
    if not self._robot.is_safe:
      return True

    if self._task and hasattr(self._task, 'done'):
      return self._task.done(self)

    for s in self.all_sensors():
      s.on_terminate(self)

    return False

  def _reward(self):
    """Returns the task reward for the current step (0 when no task is set)."""
    if self._task:
      return self._task(self)
    return 0

  def _get_observation(self):
    """Get observation of this environment from a list of sensors.

    Returns:
      observations: sensory observation in the numpy array format
    """
    sensors_dict = {}
    for s in self.all_sensors():
      sensors_dict[s.get_name()] = s.get_observation()

    # Sorted so the observation ordering is deterministic across runs.
    observations = collections.OrderedDict(sorted(list(sensors_dict.items())))
    return observations

  def set_time_step(self, num_action_repeat, sim_step=0.001):
    """Sets the time step of the environment.

    Args:
      num_action_repeat: The number of simulation steps/action repeats to be
        executed when calling env.step().
      sim_step: The simulation time step in PyBullet. By default, the simulation
        step is 0.001s, which is a good trade-off between simulation speed and
        accuracy.

    Raises:
      ValueError: If the num_action_repeat is less than 1.
    """
    if num_action_repeat < 1:
      raise ValueError('number of action repeats should be at least 1.')
    self._sim_time_step = sim_step
    self._num_action_repeat = num_action_repeat
    self._env_time_step = sim_step * num_action_repeat
    self._num_bullet_solver_iterations = (_NUM_SIMULATION_ITERATION_STEPS /
                                          self._num_action_repeat)
    self._pybullet_client.setPhysicsEngineParameter(
        numSolverIterations=int(np.round(self._num_bullet_solver_iterations)))
    self._pybullet_client.setTimeStep(self._sim_time_step)
    self._robot.SetTimeSteps(self._num_action_repeat, self._sim_time_step)

  def get_time_since_reset(self):
    """Get the time passed (in seconds) since the last reset.

    Returns:
      Time in seconds since the last reset.
    """
    return self._robot.GetTimeSinceReset()

  @property
  def pybullet_client(self):
    return self._pybullet_client

  @property
  def robot(self):
    return self._robot

  @property
  def env_step_counter(self):
    return self._env_step_counter

  @property
  def hard_reset(self):
    return self._hard_reset

  @property
  def last_action(self):
    return self._last_action

  @property
  def env_time_step(self):
    # Seconds of simulated time per env.step() call.
    return self._env_time_step

  @property
  def task(self):
    return self._task

  def set_task(self, new_task):
    """Swaps in a new task object and resets it against this env."""
    # Hide the reference model from ImitationTask by moving it under the floor.
    if hasattr(self._task, "_ref_model") and self._task._ref_model is not None:
      self._pybullet_client.resetBasePositionAndOrientation(
          self._task._ref_model, (0, 0, -2), (0, 0, 0, 1))
    self._task = new_task
    self._task.reset(self)

  @property
  def robot_class(self):
    return self._robot_class
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/__init__.py | motion_imitation/envs/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/locomotion_gym_config.py | motion_imitation/envs/locomotion_gym_config.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A gin-config class for locomotion_gym_env.
This should be identical to locomotion_gym_config.proto.
"""
import attr
import typing
from motion_imitation.robots import robot_config
@attr.s
class SimulationParameters(object):
  """Parameters specific for the pyBullet simulation."""
  # NOTE: attr.ib declaration order defines the generated __init__'s
  # positional-argument order; do not reorder fields.
  sim_time_step_s = attr.ib(type=float, default=0.001)  # bullet step (s)
  num_action_repeat = attr.ib(type=int, default=33)  # sim steps per env step
  enable_hard_reset = attr.ib(type=bool, default=False)  # rebuild world on reset
  enable_rendering = attr.ib(type=bool, default=False)  # use the GUI client
  enable_rendering_gui = attr.ib(type=bool, default=True)  # show GUI panels
  robot_on_rack = attr.ib(type=bool, default=False)  # suspend robot in air
  # Debug-visualizer camera placement and offscreen render resolution.
  camera_distance = attr.ib(type=float, default=1.0)
  camera_yaw = attr.ib(type=float, default=0)
  camera_pitch = attr.ib(type=float, default=-30)
  render_width = attr.ib(type=int, default=480)
  render_height = attr.ib(type=int, default=360)
  egl_rendering = attr.ib(type=bool, default=False)  # hardware GPU rendering
  motor_control_mode = attr.ib(type=int,
                               default=robot_config.MotorControlMode.POSITION)
  # Robot reset duration in seconds; negative presumably means "use the
  # robot's default" -- confirm against the robot class.
  reset_time = attr.ib(type=float, default=-1)
  enable_action_filter = attr.ib(type=bool, default=True)
  enable_action_interpolation = attr.ib(type=bool, default=True)
  allow_knee_contact = attr.ib(type=bool, default=False)
  enable_clip_motor_commands = attr.ib(type=bool, default=True)
  reset_at_current_position = attr.ib(type=bool, default=False)
  torque_limits = attr.ib(type=float, default=35.5)  # per-motor torque cap
@attr.s
class ScalarField(object):
  """A named scalar space with bounds."""
  name = attr.ib(type=str)  # human-readable identifier (e.g. a joint name)
  upper_bound = attr.ib(type=float)
  lower_bound = attr.ib(type=float)
@attr.s
class LocomotionGymConfig(object):
  """Grouped Config Parameters for LocomotionGym."""
  simulation_parameters = attr.ib(type=SimulationParameters)
  log_path = attr.ib(type=typing.Text, default=None)  # optional episode logs
  profiling_path = attr.ib(type=typing.Text, default=None)  # optional profiler output
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/simple_forward_task.py | motion_imitation/envs/env_wrappers/simple_forward_task.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple locomotion task and termination condition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class SimpleForwardTask(object):
  """Rewards forward progress of the robot base along the x axis."""

  def __init__(self):
    """Starts both tracked base positions at the origin."""
    self.current_base_pos = np.zeros(3)
    self.last_base_pos = np.zeros(3)

  def __call__(self, env):
    """Allows the task object itself to be used as the reward callable."""
    return self.reward(env)

  def reset(self, env):
    """Snaps both tracked positions to the robot's current base position."""
    self._env = env
    base_pos = env.robot.GetBasePosition()
    self.last_base_pos = base_pos
    self.current_base_pos = base_pos

  def update(self, env):
    """Shifts the current position into last and samples a fresh one."""
    self.last_base_pos, self.current_base_pos = (
        self.current_base_pos, env.robot.GetBasePosition())

  def done(self, env):
    """Ends the episode when the base tilts too far from upright.

    The final entry of the base rotation matrix is the z component of the
    body's up vector; below 0.85 the robot is considered unstable.
    """
    orientation = env.robot.GetBaseOrientation()
    rotation = env.pybullet_client.getMatrixFromQuaternion(orientation)
    return rotation[-1] < 0.85

  def reward(self, env):
    """Returns forward (x) displacement since the previous update."""
    return self.current_base_pos[0] - self.last_base_pos[0]
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/survival_task.py | motion_imitation/envs/env_wrappers/survival_task.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple locomotion task and termination condition."""
class SurvivalTask(object):
  """Trivial task: constant reward of 1 per step, never terminates."""

  def __init__(self):
    """Sets up reference-model bookkeeping read by the env's render code."""
    self._draw_ref_model_alpha = 1.
    self._ref_model = -1

  def __call__(self, env):
    """Allows the task object itself to be used as the reward callable."""
    return self.reward(env)

  def reset(self, env):
    """Stores the environment handle; there is no other state to reset."""
    self._env = env

  def update(self, env):
    """Stateless task; nothing to update per step."""
    pass

  def done(self, env):
    """Never ends the episode on its own."""
    return False

  def reward(self, env):
    """Constant survival bonus."""
    return 1
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/reset_task.py | motion_imitation/envs/env_wrappers/reset_task.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pybullet_utils import transformations
from motion_imitation.robots import a1
from motion_imitation.utilities import motion_data
from motion_imitation.utilities import pose3d
# Pose layout: root position (3), root quaternion (4), then 12 joint angles
# (presumably 3 per leg in A1 order -- confirm against the a1 robot class).
SITTING_POSE = np.array([0.0, 0.0, 0.11, 0.0, 0.0, 0.0, 1.0] + [0, 1.17752553, -2.69719727]*4)
STANDING_POSE = np.array([0.0, 0.0, 0.25870023, 0.0, 0.0, 0.0, 1.0] + [0, 0.9, -1.8] * 4)
# Per-joint weights (repeated for each of the 4 legs) used in the pose reward.
JOINT_WEIGHTS = np.array([1.0, 0.75, 0.5] * 4)
class ResetTask(object):
"""Imitation reference motion task."""
  def __init__(self, terminal_conditions=(), real_robot=False):
    """Initializes the reset (recovery) task.

    Args:
      terminal_conditions: Iterable of callables taking the env and returning
        True when the episode should end.
      real_robot: If True, reset() never teleports the robot to a random pose.
    """
    self._terminal_conditions = terminal_conditions
    self._env = None
    self._default_pose = None
    self._joint_pose_idx = None
    self._joint_pose_size = None
    # Probabilities of starting an episode standing / sitting; the remaining
    # probability mass starts from a randomized fallen pose.
    self._stand_prob = 0.2
    self._sit_prob = 0.2
    # Roll range (radians) used when sampling the initial fallen orientation.
    self._fall_init_rot_x_min = -3.0 / 4.0 * np.pi
    self._fall_init_rot_x_max = 3.0 / 4.0 * np.pi
    self._real_robot = real_robot
    return
  def __call__(self, env):
    """Returns the current reward (task objects are used as callables)."""
    return self.reward(env)
  def reset(self, env):
    """Resets the internal state of the task and randomizes the start pose."""
    assert(self._stand_prob + self._sit_prob <= 1.0)
    self._env = env

    # Joint bookkeeping must be rebuilt whenever a hard reset recreates the
    # robot model.
    if (self._joint_pose_idx is None or self._env.hard_reset):
      self._build_joint_data()

    if self._real_robot:
      # Never teleport the physical robot; use whatever pose it is in.
      return

    # Sample the initial configuration: stand / sit / randomized fall.
    rand_val = self._rand_uniform(0, 1)
    if (rand_val < self._stand_prob):
      self._init_stand_pose()
    elif (rand_val < self._stand_prob + self._sit_prob):
      self._init_sit_pose()
    else:
      self._init_fall_pose(self._fall_init_rot_x_min, self._fall_init_rot_x_max)

    return
  def update(self, env):
    """Updates the internal state of the task (stateless; nothing to do)."""
    del env
    return
  def done(self, env):
    """Checks if the episode is over."""
    del env
    # The episode ends when any configured terminal condition fires; with an
    # empty tuple of conditions the task never terminates on its own.
    done = any([done_fn(self._env) for done_fn in self._terminal_conditions])
    return done
  def build_target_obs(self):
    """Returns the task-specific target observation (empty for this task)."""
    tar_obs = np.array([])
    return tar_obs
  def get_target_obs_bounds(self):
    """Returns (low, high) bounds for the target observation (both empty)."""
    low = np.array([])
    high = np.array([])
    return low, high
  def reward(self, env):
    """Get the reward without side effects.

    Equal-weighted blend of an uprightness ("roll") term and a standing-pose
    term; the standing term is only paid once the body is close to upright.
    """
    del env

    roll_w = 0.5
    stand_w = 0.5
    # The body-up vector must be within ~0.2*pi of world-up before the stand
    # reward contributes.
    roll_threshold = np.cos(0.2 * np.pi)

    r_roll, root_cos_dist = self._calc_reward_roll()
    if (root_cos_dist > roll_threshold):
      r_stand = self._calc_reward_stand()
    else:
      r_stand = 0.0

    reward = roll_w * r_roll + stand_w * r_stand
    return reward
  def _calc_reward_roll(self):
    """Returns (roll reward, cosine between the body-up and world-up axes)."""
    up = np.array([0, 0, 1])
    root_rot = self._env.robot.GetTrueBaseOrientation()
    root_up = pose3d.QuaternionRotatePoint(up, root_rot)
    cos_dist = up.dot(root_up)
    # Map the cosine from [-1, 1] to [0, 1], squared to sharpen near upright.
    r_roll = (0.5 * cos_dist + 0.5) ** 2
    return r_roll, cos_dist
  def _calc_reward_stand(self):
    """Reward for matching the canonical standing pose.

    Weighted blend of root-height, joint-pose, and joint-velocity terms
    (0.2 / 0.6 / 0.2).
    """
    tar_h = STANDING_POSE[2]
    pos_size = motion_data.MotionData.POS_SIZE
    rot_size = motion_data.MotionData.ROT_SIZE

    root_pos = self._env.robot.GetBasePosition()
    root_h = root_pos[2]
    # Height term: normalized shortfall from the target standing height,
    # clipped so overshooting is not penalized.
    h_err = tar_h - root_h
    h_err /= tar_h
    h_err = np.clip(h_err, 0.0, 1.0)
    r_height = 1.0 - h_err

    # Pose term: weighted squared joint-angle error vs the standing pose.
    tar_pose = STANDING_POSE[(pos_size + rot_size):]
    joint_pose = self._env.robot.GetTrueMotorAngles()
    pose_diff = tar_pose - joint_pose
    pose_diff = JOINT_WEIGHTS * JOINT_WEIGHTS * pose_diff * pose_diff
    pose_err = np.sum(pose_diff)
    r_pose = np.exp(-0.6 * pose_err)

    # Velocity term: penalizes residual joint motion (target is stationary).
    tar_vel = 0.0
    joint_vel = self._env.robot.GetMotorVelocities()
    vel_diff = tar_vel - joint_vel
    vel_diff = vel_diff * vel_diff
    vel_err = np.sum(vel_diff)
    r_vel = np.exp(-0.02 * vel_err)

    r_stand = 0.2 * r_height + 0.6 * r_pose + 0.2 * r_vel
    return r_stand
  def _calc_reward_end_effector(self, ref_joint_angles):
    """Reward for matching the foot placement implied by ref_joint_angles.

    Per foot, accumulates the squared base-frame XY error plus a weighted
    squared world-frame height error, then maps the total through an
    exponential. The 3.0 height weight and 40 overall scale match the
    imitation task's defaults.

    Args:
      ref_joint_angles: Joint angles defining the reference foot placement.

    Returns:
      Reward in (0, 1]; 1 when all feet match the reference exactly.
    """
    pos_size = motion_data.MotionData.POS_SIZE
    rot_size = motion_data.MotionData.ROT_SIZE
    # The reference base pose is the fixed standing pose, not the live robot.
    ref_base_pos = np.array(STANDING_POSE[:pos_size])
    ref_base_rot = np.array(STANDING_POSE[pos_size:(pos_size + rot_size)])
    rel_feet_pos_ref = a1.foot_positions_in_base_frame(ref_joint_angles)
    rel_feet_pos_robot = self._env.robot.GetFootPositionsInBaseFrame()
    end_eff_err = 0
    for rel_foot_pos_ref, rel_foot_pos_robot in zip(rel_feet_pos_ref,
                                                    rel_feet_pos_robot):
      # Horizontal error measured in the base frame.
      rel_foot_pos_diff = rel_foot_pos_ref - rel_foot_pos_robot
      end_eff_err += rel_foot_pos_diff[0]**2 + rel_foot_pos_diff[1]**2
      # Height error measured in the world frame, via each base pose.
      foot_height_ref = pose3d.PoseTransformPoint(
          point=rel_foot_pos_ref,
          position=ref_base_pos,
          quat=ref_base_rot)[2]
      foot_height_robot = pose3d.PoseTransformPoint(
          point=rel_foot_pos_robot,
          position=self._env.robot.GetBasePosition(),
          quat=self._env.robot.GetBaseOrientation())[2]
      end_eff_err += 3.0 * (foot_height_ref - foot_height_robot)**2
    r_end_eff = np.exp(-40 * end_eff_err)
    return r_end_eff
def _get_pybullet_client(self):
"""Get bullet client from the environment"""
return self._env._pybullet_client
def _get_num_joints(self):
"""Get the number of joints in the character's body."""
pyb = self._get_pybullet_client()
return pyb.getNumJoints(self._env.robot.quadruped)
def _init_stand_pose(self):
self._set_pose(STANDING_POSE)
self._env.robot.ReceiveObservation()
return
def _init_sit_pose(self):
self._set_pose(SITTING_POSE)
self._env.robot.ReceiveObservation()
return
def _init_fall_pose(self, rot_x_min, rot_x_max):
pyb = self._get_pybullet_client()
pos_size = motion_data.MotionData.POS_SIZE
rot_size = motion_data.MotionData.ROT_SIZE
pose = self._get_pose()
root_pos = np.array([0, 0, self._rand_uniform(low=0.4, high=0.5)])
root_rot = self._rand_uniform(low=[rot_x_min, -np.pi/4, -np.pi],
high=[rot_x_max, np.pi/4, np.pi])
root_rot = pyb.getQuaternionFromEuler(root_rot)
joint_lim_low = self._env.robot._joint_angle_lower_limits
joint_lim_high = self._env.robot._joint_angle_upper_limits
joint_pose_size = len(joint_lim_low)
stand_pose = STANDING_POSE[-joint_pose_size:]
joint_dir = self._randint(0, 2, joint_pose_size).astype(np.float32)
lim_pose = (1.0 - joint_dir) * joint_lim_low + joint_dir * joint_lim_high
pose_lerp = self._rand_uniform(low=0, high=1, size=joint_pose_size)
pose_lerp = pose_lerp * pose_lerp * pose_lerp
joint_pose = (1.0 - pose_lerp) * stand_pose + pose_lerp * lim_pose
pose = np.concatenate([root_pos, root_rot, joint_pose])
self._set_pose(pose)
for _ in range(1000):
pyb.stepSimulation()
self._env.robot.ReceiveObservation()
return
  def _build_joint_data(self):
    """Precomputes per-joint pose offsets/sizes for indexing pose arrays.

    Fills self._joint_pose_idx / self._joint_pose_size so that joint j's
    angles live at pose[idx:idx+size] in arrays produced by _get_pose.
    """
    num_joints = self._get_num_joints()
    self._joint_pose_idx = np.zeros(num_joints, dtype=np.int32)
    self._joint_pose_size = np.zeros(num_joints, dtype=np.int32)
    for j in range(num_joints):
      pyb = self._get_pybullet_client()
      j_info = pyb.getJointInfo(self._env.robot.quadruped, j)
      j_state = pyb.getJointStateMultiDof(self._env.robot.quadruped, j)
      j_pose_idx = j_info[3]  # qIndex reported by pybullet
      j_pose_size = len(j_state[0])
      if (j_pose_idx < 0):
        # Fixed joints report qIndex -1; assign a running offset instead.
        assert (j_pose_size == 0)
        if (j == 0):
          j_pose_idx = 0
        else:
          j_pose_idx = self._joint_pose_idx[j - 1] + self._joint_pose_size[j - 1]
      self._joint_pose_idx[j] = j_pose_idx
      self._joint_pose_size[j] = j_pose_size
    return
def _get_pose(self):
root_pos = self._env.robot.GetBasePosition()
root_rot = self._env.robot.GetTrueBaseOrientation()
joint_pose = self._env.robot.GetTrueMotorAngles()
pose = np.concatenate([root_pos, root_rot, joint_pose])
return pose
  def _set_pose(self, pose):
    """Set the robot to the given pose with zero joint velocities.

    Args:
      pose: Array laid out as [root position, root rotation quaternion,
        joint angles]; applied to the simulated robot model.
    """
    pyb = self._get_pybullet_client()
    phys_model = self._env.robot.quadruped
    root_pos = pose[0:motion_data.MotionData.POS_SIZE]
    root_rot = pose[motion_data.MotionData.POS_SIZE:(motion_data.MotionData.POS_SIZE + motion_data.MotionData.ROT_SIZE)]
    pyb.resetBasePositionAndOrientation(phys_model, root_pos, root_rot)
    num_joints = self._get_num_joints()
    for j in range(num_joints):
      q_idx = self._get_joint_pose_idx(j)
      q_size = self._get_joint_pose_size(j)
      if (q_size > 0):
        j_pose = pose[q_idx:(q_idx + q_size)]
        j_vel = np.zeros_like(j_pose)  # joints are reset at rest
        pyb.resetJointStateMultiDof(phys_model, j, j_pose, j_vel)
    return
def _get_joint_pose_idx(self, j):
"""Get the starting index of the pose data for a give joint in a pose array."""
idx = self._joint_pose_idx[j]
return idx
def _get_joint_pose_size(self, j):
"""Get the size of the pose data for a give joint in a pose array."""
pose_size = self._joint_pose_size[j]
assert (pose_size == 1 or
pose_size == 0), "Only support 1D and 0D joints at the moment."
return pose_size
def _rand_uniform(self, low, high, size=None):
"""Samples random float between [val_min, val_max]."""
if hasattr(self._env, "np_random"):
rand_val = self._env.np_random.uniform(low=low, high=high, size=size)
else:
rand_val = np.random.uniform(low=low, high=high, size=size)
return rand_val
def _randint(self, low, high, size=None):
"""Samples random integer between [val_min, val_max]."""
if hasattr(self._env, "np_random"):
rand_val = self._env.np_random.randint(low, high, size=size)
else:
rand_val = np.random.randint(low, high, size=size)
return rand_val
class RollTask(ResetTask):
  """Recovery subtask that rewards rolling the body upright.

  Episodes never start from a standing pose, and the effort spent righting
  the body is lightly penalized through the motor torques.
  """

  def __init__(self, terminal_conditions=()):
    super().__init__(terminal_conditions)
    # Start from a sitting pose 20% of the time, otherwise from a fall.
    self._stand_prob = 0.0
    self._sit_prob = 0.2

  def reward(self, env):
    """Get the reward without side effects."""
    del env
    upright_reward, _ = self._calc_reward_roll()
    torque_cost = np.sum(np.abs(self._env.robot.GetMotorTorques()))
    return upright_reward - 1e-3 * torque_cost
class StandTask(ResetTask):
  """Recovery subtask that rewards reaching a standing pose.

  Initial fall orientations are restricted to +/- pi/4 roll so episodes
  start roughly belly-down.
  """

  def __init__(self, terminal_conditions=()):
    super().__init__(terminal_conditions)
    self._fall_init_rot_x_min = -1.0 / 4.0 * np.pi
    self._fall_init_rot_x_max = 1.0 / 4.0 * np.pi

  def reward(self, env):
    """Get the reward without side effects."""
    del env
    return self._calc_reward_stand()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/observation_dictionary_to_array_wrapper.py | motion_imitation/envs/env_wrappers/observation_dictionary_to_array_wrapper.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An env wrapper that flattens the observation dictionary to an array."""
import gym
from motion_imitation.envs.utilities import env_utils
class ObservationDictionaryToArrayWrapper(gym.Env):
  """An env wrapper that flattens the observation dictionary to an array."""

  def __init__(self, gym_env, observation_excluded=()):
    """Initializes the wrapper.

    Args:
      gym_env: Environment whose dict observations should be flattened.
      observation_excluded: Keys left out of the flattened observation.
    """
    self.observation_excluded = observation_excluded
    self._gym_env = gym_env
    self.observation_space = self._flatten_observation_spaces(
        self._gym_env.observation_space)
    self.action_space = self._gym_env.action_space

  def __getattr__(self, attr):
    # Fall through to the wrapped environment for anything not defined here.
    return getattr(self._gym_env, attr)

  def _flatten_observation_spaces(self, observation_spaces):
    """Flatten a dict observation space into a single array-style space."""
    return env_utils.flatten_observation_spaces(
        observation_spaces=observation_spaces,
        observation_excluded=self.observation_excluded)

  def _flatten_observation(self, input_observation):
    """Flatten the dictionary to an array."""
    return env_utils.flatten_observations(
        observation_dict=input_observation,
        observation_excluded=self.observation_excluded)

  def reset(self, initial_motor_angles=None, reset_duration=0.0):
    """Reset the wrapped env and return the flattened observation."""
    raw_observation = self._gym_env.reset(
        initial_motor_angles=initial_motor_angles,
        reset_duration=reset_duration)
    return self._flatten_observation(raw_observation)

  def step(self, action):
    """Steps the wrapped environment.

    Args:
      action: Numpy array. The input action from an NN agent.

    Returns:
      The tuple containing the flattened observation, the reward, the
      episode end indicator, and the info dict.
    """
    raw_observation, reward, done, info = self._gym_env.step(action)
    return self._flatten_observation(raw_observation), reward, done, info

  def render(self, mode='human'):
    return self._gym_env.render(mode)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/default_task.py | motion_imitation/envs/env_wrappers/default_task.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple locomotion task and termination condition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class DefaultTask(object):
  """A no-op task: never ends the episode and pays a constant reward of 1."""

  def __init__(self):
    """Initializes the task."""
    self._draw_ref_model_alpha = 1.
    self._ref_model = -1

  def __call__(self, env):
    # Allow the task instance itself to be used as a reward callable.
    return self.reward(env)

  def reset(self, env):
    """Resets the internal state of the task."""
    self._env = env

  def update(self, env):
    """Updates the internal state of the task (no-op)."""
    del env

  def done(self, env):
    """Checks if the episode is over; this task never ends it."""
    del env
    return False

  def reward(self, env):
    """Get the reward without side effects; always 1."""
    del env
    return 1
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/boundary_terminal_conditions.py | motion_imitation/envs/env_wrappers/boundary_terminal_conditions.py | """Ends episode if robot is outside workspace bounds."""
class BoundaryTerminalCondition(object):
  """Ends episode if robot leaves a rectangle centered on the origin."""

  def __init__(self, x_space_m=5, y_space_m=5):
    """Constructor.

    :param x_space_m: Length of workspace in meters.
    :param y_space_m: Width of workspace in meters.
    """
    # The workspace is centered on the origin, so keep half-extents.
    self._x_bound = x_space_m / 2.0
    self._y_bound = y_space_m / 2.0

  def __call__(self, env):
    x, y = env.robot.GetBasePosition()[:2]
    return abs(x) > self._x_bound or abs(y) > self._y_bound
class CircularBoundaryTerminalCondition(object):
  """Ends episode if the robot leaves a circle centered on the origin."""

  def __init__(self, radius_m=2.5):
    # Compare squared distances to avoid a sqrt per query.
    self._radius_squared = radius_m ** 2

  def __call__(self, env):
    x, y = env.robot.GetBasePosition()[:2]
    return x * x + y * y > self._radius_squared
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/imitation_task.py | motion_imitation/envs/env_wrappers/imitation_task.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple locomotion task and termination condition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import logging
import os
import numpy as np
from motion_imitation.envs.env_wrappers import imitation_terminal_conditions
from motion_imitation.robots import a1
from motion_imitation.utilities import pose3d
from motion_imitation.utilities import motion_data
from motion_imitation.utilities import motion_util
from pybullet_utils import transformations
class ImitationTask(object):
"""Imitation reference motion task."""
  def __init__(self,
               real_robot=False,
               weight=1.0,
               terminal_conditions=(imitation_terminal_conditions.imitation_terminal_condition,),
               ref_motion_filenames=None,
               enable_cycle_sync=True,
               clip_velocity=None,
               tar_frame_steps=None,
               clip_time_min=np.inf,
               clip_time_max=np.inf,
               ref_state_init_prob=1.0,
               enable_rand_init_time=True,
               warmup_time=0.0,
               pose_weight=0.5,
               velocity_weight=0.05,
               end_effector_weight=0.2,
               root_pose_weight=0.15,
               root_velocity_weight=0.1,
               pose_err_scale=5.0,
               velocity_err_scale=0.1,
               end_effector_err_scale=40,
               end_effector_height_err_scale=3.0,
               root_pose_err_scale=20,
               root_velocity_err_scale=2,
               perturb_init_state_prob=0.0,
               tar_obs_noise=None,
               draw_ref_model_alpha=0.5):
    """Initializes the task.

    Args:
      real_robot: Boolean indicating whether this is running in the real world
        or in simulation.
      weight: Float. The scaling factor for the reward.
      terminal_conditions: Callable object or function. Determines if the task
        is done.
      ref_motion_filenames: List of files containing reference motion data.
      enable_cycle_sync: Boolean indicating if the root of the reference motion
        should be synchronized with the root of the simulated robot at the start
        of every cycle to mitigate drift.
      clip_velocity: if not None, we will clip the velocity with this value.
      tar_frame_steps: The number of steps to sample each target frame to
        include in the target observations.
      clip_time_min: Minimum amount of time a reference motion clip is active
        before switching to another clip.
      clip_time_max: Maximum amount of time a reference motion clip is active
        before switching to another clip.
      ref_state_init_prob: Probability of initializing the robot to a state from
        the reference at the start of each episode. When not initializing to a
        reference state, the robot will be initialized to the default state.
      enable_rand_init_time: Flag for enabling randomly initializing to
        different points in time alont a reference motion.
      warmup_time: Amount of time for the robot to move from default pose to
        reference pose at the start of each episode. This helps for deployment,
        so that the robot doesn't try to move to the reference pose too quickly
        from its default pose.
      pose_weight: Pose reward weight.
      velocity_weight: Velocity reward weight.
      end_effector_weight: End effector reward weight.
      root_pose_weight: Root position and rotation reward weight.
      root_velocity_weight: Root linear and angular velocity reward weight.
      pose_err_scale: Pose error scale for calculating pose reward.
      velocity_err_scale: Velocity error scale for calculating velocity reward.
      end_effector_err_scale: End effector error scale for calculating end
        effector reward.
      end_effector_height_err_scale: End effector height error scale for
        calculating the end effector reward.
      root_pose_err_scale: Root position and rotation error scale for
        calculating root position and rotation reward.
      root_velocity_err_scale: Root linear and angular velocity error scale for
        calculating root linear and angular velocity reward.
      perturb_init_state_prob: Probability of applying random pertubations to
        the initial state.
      tar_obs_noise: List of the standard deviations of the noise to be applied
        to the target observations [base rotation std, base position std].
      draw_ref_model_alpha: Color transparency for drawing the reference model.
    """
    self._using_real_robot = real_robot
    self._weight = weight
    self._terminal_conditions = terminal_conditions
    self._last_base_position = None
    self._clip_velocity = clip_velocity
    self._action_history_sensor = None
    self._env = None
    assert ref_motion_filenames is not None
    self._ref_state_init_prob = ref_state_init_prob
    if self._using_real_robot:
      # A real robot cannot be teleported into a reference state.
      self._ref_state_init_prob = 0
    self._enable_rand_init_time = enable_rand_init_time
    self._warmup_time = warmup_time
    self._curr_episode_warmup = False
    self._tar_frame_steps = tar_frame_steps
    self._ref_motion_filenames = ref_motion_filenames
    self._ref_motions = None  # loaded lazily on first reset
    if self._tar_frame_steps is None:
      # Default: observe the reference pose 1 and 2 control steps ahead.
      self._tar_frame_steps = [1, 2]
    self._clip_time_min = clip_time_min
    self._clip_time_max = clip_time_max
    self._clip_change_time = clip_time_min
    self._enable_cycle_sync = enable_cycle_sync
    self._active_motion_id = -1
    self._motion_time_offset = 0.0
    self._episode_start_time_offset = 0.0
    self._ref_model = None  # pybullet body used to visualize the reference
    self._ref_pose = None
    self._ref_vel = None
    self._default_pose = None
    self._perturb_init_state_prob = perturb_init_state_prob
    self._tar_obs_noise = tar_obs_noise
    self._draw_ref_model_alpha = draw_ref_model_alpha
    self._prev_motion_phase = 0
    # Offset applied to the reference motion's origin to track the robot.
    self._origin_offset_rot = np.array([0, 0, 0, 1])
    self._origin_offset_pos = np.zeros(motion_data.MotionData.POS_SIZE)
    # reward function parameters
    self._pose_weight = pose_weight
    self._velocity_weight = velocity_weight
    self._end_effector_weight = end_effector_weight
    self._root_pose_weight = root_pose_weight
    self._root_velocity_weight = root_velocity_weight
    self._pose_err_scale = pose_err_scale
    self._velocity_err_scale = velocity_err_scale
    self._end_effector_err_scale = end_effector_err_scale
    self._end_effector_height_err_scale = end_effector_height_err_scale
    self._root_pose_err_scale = root_pose_err_scale
    self._root_velocity_err_scale = root_velocity_err_scale
    return
  def __call__(self, env):
    # Allow the task instance itself to be used as a reward callable.
    return self.reward(env)
  def reset(self, env):
    """Resets the internal state of the task.

    Lazily (re)builds the reference motions, visualization model and joint
    bookkeeping on the first call or after a hard reset, then decides whether
    this episode starts from a reference-motion state, a warmup, and/or a
    perturbed initial state.
    """
    self._env = env
    self._last_base_position = self._get_sim_base_position()
    self._episode_start_time_offset = 0.0
    # Reference motions / model are rebuilt only when missing or after a
    # hard reset, since loading clips and URDFs is expensive.
    if (self._ref_motions is None or self._env.hard_reset):
      self._ref_motions = self._load_ref_motions(self._ref_motion_filenames)
      self._active_motion_id = self._sample_ref_motion()
    if (self._ref_model is None or self._env.hard_reset):
      self._ref_model = self._build_ref_model()
      self._build_joint_data()
    if self._default_pose is None or self._env.hard_reset:
      self._default_pose = self._record_default_pose()
    rand_val = self._rand_uniform(0.0, 1.0)
    ref_state_init = rand_val < self._ref_state_init_prob
    self._curr_episode_warmup = False
    # Warmup only applies when the episode starts from the default pose.
    if not ref_state_init and self._enable_warmup():
      self._curr_episode_warmup = True
    self._reset_ref_motion()
    perturb_state = False
    if self._enable_perturb_init_state():
      rand_val = self._rand_uniform(0.0, 1.0)
      perturb_state = rand_val < self._perturb_init_state_prob
    if ref_state_init:
      # Snap the simulated robot onto the (optionally perturbed) reference.
      self._sync_sim_model(perturb_state)
    return
def update(self, env):
"""Updates the internal state of the task."""
del env
self._update_ref_motion()
self._last_base_position = self._get_sim_base_position()
return
def done(self, env):
"""Checks if the episode is over."""
del env
done = any([done_fn(self._env) for done_fn in self._terminal_conditions])
return done
def get_num_motions(self):
"""Get the number of reference motions to be imitated.
Returns:
Number of reference motions.
"""
return len(self._ref_motions)
def get_num_tar_frames(self):
"""Get the number of target frames to include in the observations.
Returns:
Number of target frames.
"""
return len(self._tar_frame_steps)
def is_motion_over(self):
"""Checks if the current reference motion is over.
Returns:
Boolean indicating if the motion is over.
"""
time = self._get_motion_time()
motion = self.get_active_motion()
is_over = motion.is_over(time)
return is_over
def get_active_motion(self):
"""Get index of the active reference motion currently being imitated.
Returns:
Index of the active reference motion.
"""
return self._ref_motions[self._active_motion_id]
  def build_target_obs(self):
    """Constructs the target observations, consisting of a sequence of
    target frames for future timesteps. The target poses to include are
    specified by self._tar_frame_steps.

    Each future frame's root pose is expressed relative to the current
    reference base position, with the robot's heading rotated out, so the
    observation is invariant to the robot's world position and heading.

    Returns:
      An array containing the target frames.
    """
    tar_poses = []
    time0 = self._get_motion_time()
    dt = self._env.env_time_step
    motion = self.get_active_motion()
    robot = self._env.robot
    ref_base_pos = self.get_ref_base_position()
    sim_base_rot = np.array(robot.GetBaseOrientation())
    heading = motion_util.calc_heading(sim_base_rot)
    if self._tar_obs_noise is not None:
      # Optionally corrupt the heading for robustness during training.
      heading += self._randn(0, self._tar_obs_noise[0])
    inv_heading_rot = transformations.quaternion_about_axis(-heading, [0, 0, 1])
    for step in self._tar_frame_steps:
      tar_time = time0 + step * dt
      tar_pose = self._calc_ref_pose(tar_time)
      tar_root_pos = motion.get_frame_root_pos(tar_pose)
      tar_root_rot = motion.get_frame_root_rot(tar_pose)
      # Express the target root pose in the heading-aligned local frame.
      tar_root_pos -= ref_base_pos
      tar_root_pos = pose3d.QuaternionRotatePoint(tar_root_pos, inv_heading_rot)
      tar_root_rot = transformations.quaternion_multiply(
          inv_heading_rot, tar_root_rot)
      tar_root_rot = motion_util.standardize_quaternion(tar_root_rot)
      motion.set_frame_root_pos(tar_root_pos, tar_pose)
      motion.set_frame_root_rot(tar_root_rot, tar_pose)
      tar_poses.append(tar_pose)
    tar_obs = np.concatenate(tar_poses, axis=-1)
    return tar_obs
  def get_target_obs_bounds(self):
    """Get bounds for target observations.

    Joint bounds come from the min/max over all loaded motion frames; root
    position/rotation bounds are replaced with fixed ranges since the root
    pose in the observation is expressed in a local frame.

    Returns:
      low: Array containing the minimum value for each target observation
        features.
      high: Array containing the maximum value for each target observation
        features.
    """
    pos_bound = 2 * np.ones(motion_data.MotionData.POS_SIZE)
    rot_bound = 1 * np.ones(motion_data.MotionData.ROT_SIZE)
    pose_size = self.get_pose_size()
    low = np.inf * np.ones(pose_size)
    high = -np.inf * np.ones(pose_size)
    # Elementwise min/max across every frame of every clip.
    for m in self._ref_motions:
      curr_frames = m.get_frames()
      curr_low = np.min(curr_frames, axis=0)
      curr_high = np.max(curr_frames, axis=0)
      low = np.minimum(low, curr_low)
      high = np.maximum(high, curr_high)
    motion = self.get_active_motion()
    # Override root pose bounds with the fixed local-frame ranges.
    motion.set_frame_root_pos(-pos_bound, low)
    motion.set_frame_root_pos(pos_bound, high)
    motion.set_frame_root_rot(-rot_bound, low)
    motion.set_frame_root_rot(rot_bound, high)
    num_tar_frames = self.get_num_tar_frames()
    # Tile the per-frame bounds once per observed target frame.
    low = np.concatenate([low] * num_tar_frames, axis=-1)
    high = np.concatenate([high] * num_tar_frames, axis=-1)
    return low, high
  @property
  def ref_state_init_prob(self):
    # Probability of initializing an episode to a reference-motion state.
    return self._ref_state_init_prob
def set_ref_state_init_prob(self, prob):
self._ref_state_init_prob = prob
return
def reward(self, env):
"""Get the reward without side effects."""
del env
pose_reward = self._calc_reward_pose()
velocity_reward = self._calc_reward_velocity()
end_effector_reward = self._calc_reward_end_effector()
root_pose_reward = self._calc_reward_root_pose()
root_velocity_reward = self._calc_reward_root_velocity()
reward = self._pose_weight * pose_reward \
+ self._velocity_weight * velocity_reward \
+ self._end_effector_weight * end_effector_reward \
+ self._root_pose_weight * root_pose_reward \
+ self._root_velocity_weight * root_velocity_reward
return reward * self._weight
def _calc_reward_pose(self):
"""Get the reward for matching joint angles."""
ref_joint_angles = self.get_active_motion().get_frame_joints(self._ref_pose)
robot_joint_angles = self._env.robot.GetMotorAngles()
angle_err = robot_joint_angles - ref_joint_angles
return np.exp(-self._pose_err_scale * angle_err.dot(angle_err))
def _calc_reward_velocity(self):
"""Get the reward for matching joint velocities."""
ref_vels = self.get_active_motion().get_frame_joints_vel(self._ref_vel)
robot_vels = self._env.robot.GetMotorVelocities()
vel_err = robot_vels - ref_vels
return np.exp(-self._velocity_err_scale * vel_err.dot(vel_err))
def _calc_reward_end_effector(self):
"""Get the end effector reward."""
if issubclass(self._env.robot_class, a1.A1):
return self._calc_reward_end_effector_A1()
if self._using_real_robot:
raise NotImplementedError(
"End effector positions unknown for real robots other than A1")
return self._calc_reward_end_effector_sim()
  def _calc_reward_end_effector_A1(self):
    """Get the end effector reward for sim or real A1 robot.

    Per foot, accumulates the squared base-frame XY error plus a weighted
    squared world-frame height error against the current reference pose,
    then maps the total through an exponential.
    """
    ref_joint_angles = self.get_active_motion().get_frame_joints(self._ref_pose)
    rel_feet_pos_ref = a1.foot_positions_in_base_frame(ref_joint_angles)
    rel_feet_pos_robot = self._env.robot.GetFootPositionsInBaseFrame()
    end_eff_err = 0
    for rel_foot_pos_ref, rel_foot_pos_robot in zip(rel_feet_pos_ref,
                                                    rel_feet_pos_robot):
      # Horizontal error measured in the base frame.
      rel_foot_pos_diff = rel_foot_pos_ref - rel_foot_pos_robot
      end_eff_err += rel_foot_pos_diff[0]**2 + rel_foot_pos_diff[1]**2
      # Height error measured in the world frame, via each base pose.
      foot_height_ref = pose3d.PoseTransformPoint(
          point=rel_foot_pos_ref,
          position=self.get_ref_base_position(),
          quat=self.get_ref_base_rotation())[2]
      foot_height_robot = pose3d.PoseTransformPoint(
          point=rel_foot_pos_robot,
          position=self._env.robot.GetBasePosition(),
          quat=self._env.robot.GetBaseOrientation())[2]
      end_eff_err += self._end_effector_height_err_scale * (
          foot_height_ref - foot_height_robot)**2
    return np.exp(-self._end_effector_err_scale * end_eff_err)
  def _calc_reward_end_effector_sim(self):
    """Get the end effector reward using positions from pybullet.

    Compares each foot link's position on the reference model against the
    simulated robot, in each body's heading-aligned local frame for the
    horizontal error and in the world frame for the height error.
    """
    env = self._env
    robot = env.robot
    sim_model = robot.quadruped
    ref_model = self._ref_model
    pyb = self._get_pybullet_client()
    root_pos_ref = self.get_ref_base_position()
    root_rot_ref = self.get_ref_base_rotation()
    root_pos_sim = self._get_sim_base_position()
    root_rot_sim = self._get_sim_base_rotation()
    # Heading-only rotations used to cancel out each body's yaw.
    heading_rot_ref = self._calc_heading_rot(root_rot_ref)
    heading_rot_sim = self._calc_heading_rot(root_rot_sim)
    inv_heading_rot_ref = transformations.quaternion_conjugate(heading_rot_ref)
    inv_heading_rot_sim = transformations.quaternion_conjugate(heading_rot_sim)
    end_eff_err = 0.0
    num_joints = self._get_num_joints()
    height_err_scale = self._end_effector_height_err_scale
    for j in range(num_joints):
      is_end_eff = (j in robot._foot_link_ids)
      if (is_end_eff):
        end_state_ref = pyb.getLinkState(ref_model, j)
        end_state_sim = pyb.getLinkState(sim_model, j)
        end_pos_ref = np.array(end_state_ref[0])
        end_pos_sim = np.array(end_state_sim[0])
        # Express each foot position in its body's heading-aligned frame.
        rel_end_pos_ref = end_pos_ref - root_pos_ref
        rel_end_pos_ref = pose3d.QuaternionRotatePoint(rel_end_pos_ref,
                                                       inv_heading_rot_ref)
        rel_end_pos_sim = end_pos_sim - root_pos_sim
        rel_end_pos_sim = pose3d.QuaternionRotatePoint(rel_end_pos_sim,
                                                       inv_heading_rot_sim)
        rel_end_pos_diff = rel_end_pos_ref - rel_end_pos_sim
        # Height is compared in the world frame, with its own scale.
        end_pos_diff_height = end_pos_ref[2] - end_pos_sim[2]
        end_pos_err = (
            rel_end_pos_diff[0] * rel_end_pos_diff[0] +
            rel_end_pos_diff[1] * rel_end_pos_diff[1] +
            height_err_scale * end_pos_diff_height * end_pos_diff_height)
        end_eff_err += end_pos_err
    end_effector_reward = np.exp(-self._end_effector_err_scale * end_eff_err)
    return end_effector_reward
  def _calc_reward_root_pose(self):
    """Get the root pose reward.

    Combines the squared root position error with half the squared rotation
    error (the rotation-difference quaternion's angle, normalized to
    [-pi, pi]) and maps the sum through an exponential.
    """
    root_pos_ref = self.get_ref_base_position()
    root_rot_ref = self.get_ref_base_rotation()
    root_pos_robot = self._env.robot.GetBasePosition()
    root_rot_robot = self._env.robot.GetBaseOrientation()
    root_pos_diff = root_pos_ref - root_pos_robot
    root_pos_err = root_pos_diff.dot(root_pos_diff)
    # q_diff = q_ref * conj(q_robot): rotation taking the robot to the ref.
    root_rot_diff = transformations.quaternion_multiply(
        root_rot_ref, transformations.quaternion_conjugate(root_rot_robot))
    root_rot_diff /= np.linalg.norm(root_rot_diff)
    _, root_rot_diff_angle = pose3d.QuaternionToAxisAngle(root_rot_diff)
    root_rot_diff_angle = motion_util.normalize_rotation_angle(
        root_rot_diff_angle)
    root_rot_err = root_rot_diff_angle * root_rot_diff_angle
    root_pose_err = root_pos_err + 0.5 * root_rot_err
    root_pose_reward = np.exp(-self._root_pose_err_scale * root_pose_err)
    return root_pose_reward
def _calc_reward_root_velocity(self):
"""Get the root velocity reward."""
ref_lin_vel = self.get_active_motion().get_frame_root_vel(self._ref_vel)
ref_ang_vel = self.get_active_motion().get_frame_root_ang_vel(self._ref_vel)
robot_lin_vel = self._env.robot.GetBaseVelocity()
robot_ang_vel = self._env.robot.GetBaseRollPitchYawRate()
lin_vel_diff = ref_lin_vel - robot_lin_vel
lin_vel_err = lin_vel_diff.dot(lin_vel_diff)
ang_vel_diff = ref_ang_vel - robot_ang_vel
ang_vel_err = ang_vel_diff.dot(ang_vel_diff)
root_velocity_err = lin_vel_err + 0.1 * ang_vel_err
root_velocity_reward = np.exp(-self._root_velocity_err_scale *
root_velocity_err)
return root_velocity_reward
  def _load_ref_motions(self, filenames):
    """Load reference motions.

    Args:
      filenames: Paths of the motion clip files to be loaded.

    Returns: List of reference motions loaded from the files.

    Raises:
      ValueError: If no filenames are given.
    """
    num_files = len(filenames)
    if num_files == 0:
      raise ValueError("No reference motions specified.")
    total_time = 0.0
    motions = []
    for filename in filenames:
      curr_motion = motion_data.MotionData(filename)
      curr_duration = curr_motion.get_duration()
      total_time += curr_duration
      motions.append(curr_motion)
    logging.info("Loaded {:d} motion clips with {:.3f}s of motion data.".format(
        num_files, total_time))
    return motions
  def _build_ref_model(self):
    """Constructs simulated model for playing back the reference motion.

    The model is a semi-transparent, collision-free copy of the robot's
    URDF, put to sleep so it does not consume simulation time.

    Returns:
      Handle to the simulated model for the reference motion, or None when
      running on a real robot (no simulation to visualize in).
    """
    if self._using_real_robot:
      return None
    pyb = self._get_pybullet_client()
    # Disable rendering while loading to speed up.
    if self._env.rendering_enabled:
      pyb.configureDebugVisualizer(pyb.COV_ENABLE_RENDERING, 0)
    ref_col = [1, 1, 1, self._draw_ref_model_alpha]
    urdf_file = self._env.robot.GetURDFFile()
    ref_model = pyb.loadURDF(urdf_file, useFixedBase=True)
    pyb.changeDynamics(ref_model, -1, linearDamping=0, angularDamping=0)
    # Zeroed collision masks keep the ghost model from touching anything.
    pyb.setCollisionFilterGroupMask(
        ref_model, -1, collisionFilterGroup=0, collisionFilterMask=0)
    pyb.changeDynamics(
        ref_model,
        -1,
        activationState=pyb.ACTIVATION_STATE_SLEEP +
        pyb.ACTIVATION_STATE_ENABLE_SLEEPING +
        pyb.ACTIVATION_STATE_DISABLE_WAKEUP)
    pyb.changeVisualShape(ref_model, -1, rgbaColor=ref_col)
    num_joints = pyb.getNumJoints(ref_model)
    num_joints_sim = pyb.getNumJoints(self._env.robot.quadruped)
    assert (
        num_joints == num_joints_sim
    ), "ref model must have the same number of joints as the simulated model."
    # Apply the same no-collision, sleeping, translucent setup per link.
    for j in range(num_joints):
      pyb.setCollisionFilterGroupMask(
          ref_model, j, collisionFilterGroup=0, collisionFilterMask=0)
      pyb.changeDynamics(
          ref_model,
          j,
          activationState=pyb.ACTIVATION_STATE_SLEEP +
          pyb.ACTIVATION_STATE_ENABLE_SLEEPING +
          pyb.ACTIVATION_STATE_DISABLE_WAKEUP)
      pyb.changeVisualShape(ref_model, j, rgbaColor=ref_col)
    if self._env.rendering_enabled:
      pyb.configureDebugVisualizer(pyb.COV_ENABLE_RENDERING, 1)
    return ref_model
def _build_joint_data(self):
  """Precomputes per-joint offsets/sizes for indexing motion-frame arrays.

  For each joint j this records where its pose (and velocity) data starts
  inside a flat motion-frame array and how many values it occupies. Fixed
  joints report size 0 and inherit the running offset of their predecessor
  so cumulative indexing stays consistent. No-op on the real robot (there
  is no reference model to inspect).
  """
  if self._using_real_robot:
    return

  num_joints = self._get_num_joints()
  self._joint_pose_idx = np.zeros(num_joints, dtype=np.int32)
  self._joint_pose_size = np.zeros(num_joints, dtype=np.int32)
  self._joint_vel_idx = np.zeros(num_joints, dtype=np.int32)
  self._joint_vel_size = np.zeros(num_joints, dtype=np.int32)

  for j in range(num_joints):
    pyb = self._get_pybullet_client()
    j_info = pyb.getJointInfo(self._ref_model, j)
    j_state = pyb.getJointStateMultiDof(self._ref_model, j)

    # getJointInfo fields 3/4 hold the joint's position/velocity indices in
    # the full state vector; a negative value marks a joint with no state.
    j_pose_idx = j_info[3]
    j_vel_idx = j_info[4]
    j_pose_size = len(j_state[0])
    j_vel_size = len(j_state[1])

    if (j_pose_idx < 0):
      # Stateless (fixed) joint: carry forward the previous joint's offset.
      assert (j_vel_idx < 0)
      assert (j_pose_size == 0)
      assert (j_vel_size == 0)

      if (j == 0):
        j_pose_idx = 0
        j_vel_idx = 0
      else:
        j_pose_idx = self._joint_pose_idx[j - 1] + self._joint_pose_size[j -
                                                                         1]
        j_vel_idx = self._joint_vel_idx[j - 1] + self._joint_vel_size[j - 1]

    self._joint_pose_idx[j] = j_pose_idx
    self._joint_pose_size[j] = j_pose_size
    self._joint_vel_idx[j] = j_vel_idx
    self._joint_vel_size[j] = j_vel_size

  # Sanity check: the motion file's frame layout must match the model's.
  motion = self.get_active_motion()
  motion_frame_size = motion.get_frame_size()
  motion_frame_vel_size = motion.get_frame_vel_size()
  pose_size = self.get_pose_size()
  vel_size = self.get_vel_size()
  assert (motion_frame_size == pose_size)
  assert (motion_frame_vel_size == vel_size)

  return
def _reset_ref_motion(self):
  """Resets the reference motion.

  First randomly selects a new reference motion from the set of available
  motions and a random point along it, then shifts the motion's origin so
  its root lines up with the simulated robot: horizontal position exactly,
  and heading (yaw about z) only — height, roll and pitch are untouched.
  """
  self._active_motion_id = self._sample_ref_motion()
  # Identity offset [x, y, z, w] while computing the aligned pose below.
  self._origin_offset_rot = np.array([0, 0, 0, 1])
  self._origin_offset_pos.fill(0.0)

  self._reset_motion_time_offset()
  motion = self.get_active_motion()
  time = self._get_motion_time()

  ref_pose = self._calc_ref_pose(time)
  ref_root_pos = motion.get_frame_root_pos(ref_pose)
  ref_root_rot = motion.get_frame_root_rot(ref_pose)

  sim_root_pos = self._get_sim_base_position()
  sim_root_rot = self._get_sim_base_rotation()

  # move the root to the same position and rotation as simulated robot
  self._origin_offset_pos = sim_root_pos - ref_root_pos
  self._origin_offset_pos[2] = 0  # align horizontally only; keep height

  # Match heading only: rotate the motion about the vertical axis by the
  # heading difference between robot and reference.
  ref_heading = motion_util.calc_heading(ref_root_rot)
  sim_heading = motion_util.calc_heading(sim_root_rot)
  delta_heading = sim_heading - ref_heading
  self._origin_offset_rot = transformations.quaternion_about_axis(
      delta_heading, [0, 0, 1])

  # Recompute pose/velocity with the new origin offsets applied.
  self._ref_pose = self._calc_ref_pose(time)
  self._ref_vel = self._calc_ref_vel(time)
  self._update_ref_model()

  self._prev_motion_phase = motion.calc_phase(time)
  self._reset_clip_change_time()

  return
def _update_ref_motion(self):
  """Updates the reference motion and synchronizes the state of the
  reference model with the current motion frame.

  May also switch to a newly sampled motion clip when the clip-change timer
  fires, and re-anchors the motion origin whenever the motion wraps around
  to a new cycle (if cycle sync is enabled) or the clip changes.
  """
  time = self._get_motion_time()

  change_clip = self._check_change_clip()
  if change_clip:
    # Swap in a newly sampled clip at a freshly sampled time offset.
    new_motion_id = self._sample_ref_motion()
    self._change_ref_motion(new_motion_id)
    self._reset_clip_change_time()
    self._motion_time_offset = self._sample_time_offset()

  motion = self.get_active_motion()
  new_phase = motion.calc_phase(time)
  # A decreasing phase means the motion looped back to the start of a cycle.
  if (self._enable_cycle_sync and (new_phase < self._prev_motion_phase)) \
      or change_clip:
    # Root rotation is only re-anchored when the clip itself changed.
    self._sync_ref_origin(
        sync_root_position=True, sync_root_rotation=change_clip)

  self._update_ref_state()
  self._update_ref_model()
  self._prev_motion_phase = new_phase

  return
def _update_ref_state(self):
  """Refreshes the cached reference pose/velocity for the current time."""
  t = self._get_motion_time()
  self._ref_pose = self._calc_ref_pose(t)
  self._ref_vel = self._calc_ref_vel(t)
def _update_ref_model(self):
  """Writes the cached reference pose/velocity into the reference model."""
  self._set_state(self._ref_model, self._ref_pose, self._ref_vel)
def _sync_sim_model(self, perturb_state):
  """Snaps the simulated robot onto the current reference pose/velocity.

  Args:
    perturb_state: If True, perturbations are applied to the pose and
      velocity before they are written to the simulated robot.

  Raises:
    RuntimeError: When running on the real robot (cannot teleport state).
  """
  if self._using_real_robot:
    raise RuntimeError("Real robot cannot sync to reference motion.")

  target_pose, target_vel = self._ref_pose, self._ref_vel
  if perturb_state:
    target_pose, target_vel = self._apply_state_perturb(target_pose,
                                                        target_vel)

  self._set_state(self._env.robot.quadruped, target_pose, target_vel)
  self._env.robot.ReceiveObservation()
def _set_state(self, phys_model, pose, vel):
  """Sets the state of a character to the given pose and velocity.

  Args:
    phys_model: Handle (pybullet body id) of the character.
    pose: Flat pose array to be applied to the character.
    vel: Flat velocity array to be applied to the character.
  """
  # No simulated bodies exist when driving the real robot.
  if self._using_real_robot:
    return

  motion = self.get_active_motion()
  pyb = self._get_pybullet_client()

  # Root (base) state first.
  root_pos = motion.get_frame_root_pos(pose)
  root_rot = motion.get_frame_root_rot(pose)
  root_vel = motion.get_frame_root_vel(vel)
  root_ang_vel = motion.get_frame_root_ang_vel(vel)

  pyb.resetBasePositionAndOrientation(phys_model, root_pos, root_rot)
  pyb.resetBaseVelocity(phys_model, root_vel, root_ang_vel)

  # Then every joint with state, sliced out of the flat pose/velocity
  # arrays using the offsets precomputed in _build_joint_data.
  num_joints = self._get_num_joints()
  for j in range(num_joints):
    q_idx = self._get_joint_pose_idx(j)
    q_size = self._get_joint_pose_size(j)
    dq_idx = self._get_joint_vel_idx(j)
    dq_size = self._get_joint_vel_size(j)

    if (q_size > 0):
      # A joint with pose data must also have velocity data.
      assert (dq_size > 0)

      j_pose = pose[q_idx:(q_idx + q_size)]
      j_vel = vel[dq_idx:(dq_idx + dq_size)]
      pyb.resetJointStateMultiDof(phys_model, j, j_pose, j_vel)

  return
def _get_pybullet_client(self):
  """Returns the environment's pybullet client (simulation only)."""
  if not self._using_real_robot:
    return self._env.pybullet_client
  raise RuntimeError("Tried to access simulation when running real robot")
def _get_motion_time(self):
  """Returns the time since the start of the reference motion.

  Side effect: on the very first env step (env_step_counter == 0) this
  records an episode start offset so that deployment runs begin at time 0.
  The returned value can be negative during the warmup window (see
  _calc_ref_pose).
  """
  time = self._env.get_time_since_reset()
  # Needed to ensure that during deployment, the first timestep will be at
  # time = 0
  if self._env.env_step_counter == 0:
    self._episode_start_time_offset = -time

  time += self._motion_time_offset
  time += self._episode_start_time_offset

  if self._curr_episode_warmup:
    # if warmup is enabled, then apply a time offset to give the robot more
    # time to move to the reference motion
    time -= self._warmup_time

  return time
def _get_num_joints(self):
  """Returns the number of joints in the reference model."""
  return self._get_pybullet_client().getNumJoints(self._ref_model)
def _get_joint_pose_idx(self, j):
  """Returns the start index of joint j's pose data in a pose array."""
  return self._joint_pose_idx[j]
def _get_joint_vel_idx(self, j):
  """Returns the start index of joint j's velocity data in a velocity
  array."""
  return self._joint_vel_idx[j]
def _get_joint_pose_size(self, j):
  """Returns the number of pose values joint j occupies (0 or 1)."""
  size = self._joint_pose_size[j]
  assert size in (0, 1), "Only support 1D and 0D joints at the moment."
  return size
def _get_joint_vel_size(self, j):
  """Returns the number of velocity values joint j occupies (0 or 1)."""
  size = self._joint_vel_size[j]
  assert size in (0, 1), "Only support 1D and 0D joints at the moment."
  return size
def get_pose_size(self):
  """Returns the total length of a pose array."""
  motion = self.get_active_motion()
  return motion.get_frame_size()
def get_vel_size(self):
  """Returns the total length of a velocity array."""
  motion = self.get_active_motion()
  return motion.get_frame_vel_size()
def _get_sim_base_position(self):
  """Returns the simulated robot's base position as a numpy array."""
  pos = self._env.robot.GetBasePosition()
  return np.array(pos)
def _get_sim_base_rotation(self):
  """Returns the simulated robot's base orientation as a numpy array."""
  rot = self._env.robot.GetBaseOrientation()
  return np.array(rot)
def get_ref_base_position(self):
  """Returns the reference motion's current root position."""
  motion = self.get_active_motion()
  return motion.get_frame_root_pos(self._ref_pose)
def get_ref_base_rotation(self):
  """Returns the reference motion's current root rotation."""
  motion = self.get_active_motion()
  return motion.get_frame_root_rot(self._ref_pose)
def _calc_ref_pose(self, time, apply_origin_offset=True):
  """Calculates the reference pose for a given point in time.

  Args:
    time: Time elapsed since the start of the reference motion. Negative
      times inside the warmup window produce the warmup pose instead of a
      motion frame.
    apply_origin_offset: A flag for enabling the origin offset (translation
      and heading rotation computed at reset / clip changes) to be applied
      to the pose.

  Returns:
    An array containing the reference pose at the given point in time.
  """
  motion = self.get_active_motion()

  # During warmup (time in [-warmup_time, 0)) hold a fixed warmup pose to
  # give the robot time to move toward the motion's starting frame.
  enable_warmup_pose = self._curr_episode_warmup \
      and time >= -self._warmup_time and time < 0.0
  if enable_warmup_pose:
    pose = self._calc_ref_pose_warmup()
  else:
    pose = motion.calc_frame(time)

  if apply_origin_offset:
    # Rotate the root about the origin first, then translate; the result is
    # written back into the (mutated) pose array.
    root_pos = motion.get_frame_root_pos(pose)
    root_rot = motion.get_frame_root_rot(pose)
    root_rot = transformations.quaternion_multiply(self._origin_offset_rot,
                                                   root_rot)
    root_pos = pose3d.QuaternionRotatePoint(root_pos, self._origin_offset_rot)
    root_pos += self._origin_offset_pos
    motion.set_frame_root_rot(root_rot, pose)
    motion.set_frame_root_pos(root_pos, pose)

  return pose
def _calc_ref_vel(self, time):
"""Calculates the reference velocity for a given point in time.
Args:
time: Time elapsed since the start of the reference motion.
Returns:
An array containing the reference velocity at the given point in time.
"""
motion = self.get_active_motion()
enable_warmup_pose = self._curr_episode_warmup \
and time >= -self._warmup_time and time < 0.0
if enable_warmup_pose:
vel = self._calc_ref_vel_warmup()
else:
vel = motion.calc_frame_vel(time)
root_vel = motion.get_frame_root_vel(vel)
root_ang_vel = motion.get_frame_root_ang_vel(vel)
root_vel = pose3d.QuaternionRotatePoint(root_vel, self._origin_offset_rot)
root_ang_vel = pose3d.QuaternionRotatePoint(root_ang_vel,
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | true |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/logging_wrapper.py | motion_imitation/envs/env_wrappers/logging_wrapper.py | """Env wrapper that saves logs."""
import atexit
import os
import numpy as np
from phasespace import phasespace_robot_tracker
class LoggingWrapper(object):
  """Env wrapper that saves logs.

  Records per-step robot state (velocities, orientation, joint angles,
  actions, timestamps, plus optional motor temperatures and mocap data) and
  writes it to .npz files — either one file per episode or a single rolling
  file. A final log is flushed automatically at interpreter exit.
  """

  def __init__(self,
               env,
               output_dir,
               mocap_grpc_server=None,
               verbose=True,
               separate_episodes=False):
    """Constructor.

    Args:
      env: An instance (possibly wrapped) of LocomotionGymEnv.
      output_dir: Where to save logs.
      mocap_grpc_server: Hostname and port of the gRPC server outputting
        marker data protos (e.g. "localhost:12345"). If None, don't look
        for mocap data.
      verbose: If True, print a message every time a log is saved.
      separate_episodes: If True, save one log file per episode. If False,
        save all episodes as one log file.
    """
    if mocap_grpc_server:
      self._mocap_tracker = phasespace_robot_tracker.PhaseSpaceRobotTracker(
          server=mocap_grpc_server)
    else:
      self._mocap_tracker = None
    self._env = env
    self._robot = self._env.robot
    self._output_dir = output_dir
    os.makedirs(self._output_dir, exist_ok=True)
    self._verbose = verbose
    self._separate_episodes = separate_episodes
    self._clear_logs()
    self._episode_counter = 0
    # Flush any buffered step data when the process exits.
    atexit.register(self.log, verbose=True)

  def __getattr__(self, attr):
    # Delegate anything not defined on the wrapper to the wrapped env.
    return getattr(self._env, attr)

  def _clear_logs(self):
    # Reset all per-step buffers; called after every save.
    self._linear_vels = []
    self._rpys = []
    self._angular_vels = []
    self._timestamps = []
    self._input_actions = []
    self._processed_actions = []
    self._joint_angles = []
    self._motor_temperatures = []
    self._mocap_positions = []
    self._mocap_rpys = []

  def step(self, action):
    """Steps the wrapped env, recording robot state for this step."""
    self._input_actions.append(action)
    if self._mocap_tracker:
      self._mocap_tracker.update()
    obs, reward, done, info = self._env.step(action)
    self._processed_actions.append(self._robot.last_action)
    self._linear_vels.append(self._robot.GetBaseVelocity())
    self._rpys.append(self._robot.GetBaseRollPitchYaw())
    self._angular_vels.append(self._robot.GetBaseRollPitchYawRate())
    self._joint_angles.append(self._robot.GetMotorAngles())
    self._timestamps.append(self._robot.GetTimeSinceReset())
    # Motor temperatures only exist on some robot implementations.
    if hasattr(self._robot, "motor_temperatures"):
      self._motor_temperatures.append(self._robot.motor_temperatures)
    if self._mocap_tracker:
      self._mocap_positions.append(self._mocap_tracker.get_base_position())
      self._mocap_rpys.append(self._mocap_tracker.get_base_roll_pitch_yaw())
    return obs, reward, done, info

  def log(self, verbose):
    """Writes all buffered step data to an .npz file and clears buffers.

    Args:
      verbose: If True, print the path of the file that was written.
    """
    if self._separate_episodes:
      out_file = os.path.join(
          self._output_dir,
          "log_episode_{:07d}.npz".format(self._episode_counter))
    else:
      # Single rolling file: each save overwrites the previous one.
      out_file = os.path.join(self._output_dir, "log_all_episodes.npz")
    np.savez(
        out_file,
        input_actions=self._input_actions,
        processed_actions=self._processed_actions,
        timestamps=self._timestamps,
        linear_vels=self._linear_vels,
        rpys=self._rpys,
        angular_vels=self._angular_vels,
        joint_angles=self._joint_angles,
        motor_temperatures=self._motor_temperatures,
        mocap_positions=self._mocap_positions,
        mocap_rpys=self._mocap_rpys,
    )
    if verbose:
      print("logged to: {}".format(out_file))
    self._clear_logs()

  def reset(self, *args, **kwargs):
    """Saves the finished episode (when logging per-episode) and resets."""
    if self._separate_episodes:
      self.log(self._verbose)
      self._episode_counter += 1
    return self._env.reset(*args, **kwargs)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/__init__.py | motion_imitation/envs/env_wrappers/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/trajectory_generator_wrapper_env.py | motion_imitation/envs/env_wrappers/trajectory_generator_wrapper_env.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapped MinitaurGymEnv with a built-in controller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class TrajectoryGeneratorWrapperEnv(object):
  """A wrapped LocomotionGymEnv with a built-in trajectory generator."""

  def __init__(self, gym_env, trajectory_generator):
    """Initializes the wrapped env.

    Args:
      gym_env: An instance of LocomotionGymEnv.
      trajectory_generator: A trajectory_generator that can potentially
        modify the action and observation. Typical generators include the
        PMTG and openloop signals. Expected to have get_action and
        get_observation interfaces.

    Raises:
      ValueError: If the generator does not implement get_action and
        get_observation.
    """
    self._gym_env = gym_env
    if not hasattr(trajectory_generator, 'get_action') or not hasattr(
        trajectory_generator, 'get_observation'):
      raise ValueError(
          'The controller does not have the necessary interface(s) implemented.'
      )

    self._trajectory_generator = trajectory_generator

    # The trajectory generator can subsume the action/observation space.
    if hasattr(trajectory_generator, 'observation_space'):
      self.observation_space = self._trajectory_generator.observation_space
    if hasattr(trajectory_generator, 'action_space'):
      self.action_space = self._trajectory_generator.action_space

  def __getattr__(self, attr):
    # Delegate any attribute not found on the wrapper to the wrapped env.
    return getattr(self._gym_env, attr)

  def _modify_observation(self, observation):
    """Passes the raw observation through the trajectory generator."""
    return self._trajectory_generator.get_observation(observation)

  def reset(self, initial_motor_angles=None, reset_duration=0.0):
    """Resets the wrapped env and (if supported) the trajectory generator.

    Args:
      initial_motor_angles: Desired joint angles after reset; forwarded to
        the wrapped env.
      reset_duration: Time (seconds) to move motors to the initial values;
        forwarded to the wrapped env.

    Returns:
      The generator-modified initial observation.
    """
    # BUG FIX: the original used getattr() without a default, which raises
    # AttributeError for generators that legitimately have no reset().
    reset_fn = getattr(self._trajectory_generator, 'reset', None)
    if callable(reset_fn):
      reset_fn()
    observation = self._gym_env.reset(initial_motor_angles, reset_duration)
    return self._modify_observation(observation)

  def step(self, action):
    """Steps the wrapped environment.

    Args:
      action: Numpy array. The input action from an NN agent.

    Returns:
      The tuple containing the modified observation, the reward, the
      episode end indicator, and the info dict.

    Raises:
      ValueError: If the input action is None.
    """
    if action is None:
      raise ValueError('Action cannot be None')

    # Let the generator translate the NN action into a low-level action.
    new_action = self._trajectory_generator.get_action(
        self._gym_env.robot.GetTimeSinceReset(), action)

    original_observation, reward, done, info = self._gym_env.step(new_action)
    return self._modify_observation(original_observation), reward, done, info
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/imitation_wrapper_env.py | motion_imitation/envs/env_wrappers/imitation_wrapper_env.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper for motion imitation environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
class ImitationWrapperEnv(object):
  """An env used for training a policy with motion imitation.

  Appends target observations from the reference motion to the wrapped
  env's observations, and optionally grows the episode time limit over the
  course of training (a simple curriculum).
  """

  def __init__(self,
               gym_env,
               episode_length_start=1000,
               episode_length_end=1000,
               curriculum_steps=0,
               num_parallel_envs=1):
    """Initializes the wrapped env.

    Args:
      gym_env: An instance of LocomotionGymEnv.
      episode_length_start: Max episode steps at the start of the
        curriculum.
      episode_length_end: Max episode steps at the end of the curriculum
        (and the fixed limit when the curriculum is disabled).
      curriculum_steps: Total env steps over which the limit is annealed;
        0 disables the curriculum.
      num_parallel_envs: Number of parallel workers; the per-env curriculum
        length is divided by this.
    """
    self._gym_env = gym_env
    self.observation_space = self._build_observation_space()

    self._episode_length_start = episode_length_start
    self._episode_length_end = episode_length_end
    self._curriculum_steps = int(np.ceil(curriculum_steps / num_parallel_envs))
    self._total_step_count = 0

    if self._enable_curriculum():
      self._update_time_limit()
    else:
      self._max_episode_steps = episode_length_end

    self.seed()
    return

  def __getattr__(self, attr):
    # Delegate any attribute not found on the wrapper to the wrapped env.
    return getattr(self._gym_env, attr)

  def step(self, action):
    """Steps the wrapped environment.

    Args:
      action: Numpy array. The input action from an NN agent.

    Returns:
      The tuple containing the modified observation, the reward, the
      episode end indicator, and an info dict with termination / torque /
      metric data.

    Raises:
      ValueError if input action is None.
    """
    original_observation, reward, done, _ = self._gym_env.step(action)
    observation = self._modify_observation(original_observation)

    # `terminated` records the task's own termination separately from the
    # time-limit cutoff applied on the next line.
    terminated = done
    done |= (self.env_step_counter >= self._max_episode_steps)

    if not done:
      self._total_step_count += 1

    info = {
        "terminated":
            terminated,
        "max_torque":
            np.max(np.abs(self._gym_env._robot._observed_motor_torques)),
        "metrics":
            self._get_metrics()
    }
    return observation, reward, done, info

  def reset(self, initial_motor_angles=None, reset_duration=0.0):
    """Resets the robot's position in the world or rebuild the sim world.

    The simulation world will be rebuilt if self._hard_reset is True.

    Args:
      initial_motor_angles: A list of Floats. The desired joint angles after
        reset. If None, the robot will use its built-in value.
      reset_duration: Float. The time (in seconds) needed to rotate all
        motors to the desired initial values.

    Returns:
      A numpy array contains the initial observation after reset.
    """
    original_observation = self._gym_env.reset(initial_motor_angles, reset_duration)
    observation = self._modify_observation(original_observation)

    # The episode limit may have grown since the last reset.
    if self._enable_curriculum():
      self._update_time_limit()

    return observation

  def _modify_observation(self, original_observation):
    """Appends target observations from the reference motion to the
    observations.

    Args:
      original_observation: A numpy array containing the original
        observations.

    Returns:
      A numpy array containing the original observations concatenated with
      target observations from the reference motion.
    """
    # `_task` resolves through __getattr__ to the wrapped env's task.
    target_observation = self._task.build_target_obs()
    observation = np.concatenate([original_observation, target_observation], axis=-1)
    return observation

  def _build_observation_space(self):
    """Constructs the observation space, including target observations from
    the reference motion.

    Returns:
      Observation space representing the concatenation of the original
      observations and target observations.
    """
    obs_space0 = self._gym_env.observation_space
    low0 = obs_space0.low
    high0 = obs_space0.high

    task_low, task_high = self._task.get_target_obs_bounds()
    low = np.concatenate([low0, task_low], axis=-1)
    high = np.concatenate([high0, task_high], axis=-1)

    obs_space = gym.spaces.Box(low, high)
    return obs_space

  def _enable_curriculum(self):
    """Check if curriculum is enabled."""
    return self._curriculum_steps > 0

  def _update_time_limit(self):
    """Updates the current episode length depending on the number of
    environment steps taken so far (cubic interpolation from start to end).
    """
    t = float(self._total_step_count) / self._curriculum_steps
    t = np.clip(t, 0.0, 1.0)
    # Cubic easing: the limit grows slowly at first, then quickly.
    t = np.power(t, 3.0)
    new_steps = int((1.0 - t) * self._episode_length_start +
                    t * self._episode_length_end)
    self._max_episode_steps = new_steps
    return

  def _get_metrics(self):
    """Returns a dict of scalar training metrics with their aggregators."""
    x, y, _ = self._gym_env.last_base_position
    (forward, sideways, _), (_, _, yaw) = self._gym_env.robot.RelativeTransformSinceReset()
    yaw = np.rad2deg(yaw)
    # First element is value; second is aggregator function.
    return {
        # Aggregator finds the farthest value from the origin.
        "Position/Final_Robot_X": (x, lambda vec: max(vec, key=abs)),
        "Position/Final_Robot_Y": (y, lambda vec: max(vec, key=abs)),
        "Position/Robot_Travel_Forward": (forward, np.mean),
        "Position/Robot_Travel_Sideways": (sideways, np.mean),
        "Position/Robot_Travel_Yaw_Deg": (yaw, np.mean),
    }

  def set_task(self, new_task):
    """Replaces the task and rebuilds the observation space to match it."""
    self._gym_env.set_task(new_task)
    self.observation_space = self._build_observation_space()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/simple_openloop.py | motion_imitation/envs/env_wrappers/simple_openloop.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple openloop trajectory generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import attr
from gym import spaces
import numpy as np
from motion_imitation.robots import laikago_pose_utils
from motion_imitation.robots import minitaur_pose_utils
class MinitaurPoseOffsetGenerator(object):
  """A trajectory generator that returns a constant leg pose."""

  def __init__(self,
               init_swing=0,
               init_extension=2.0,
               init_pose=None,
               action_scale=1.0,
               action_limit=0.5):
    """Initializes the controller.

    Args:
      init_swing: the swing of the default pose offset
      init_extension: the extension of the default pose offset
      init_pose: the default pose offset, which is None by default. If not
        None, it will define the default pose offset while ignoring
        init_swing and init_extension.
      action_scale: changes the magnitudes of actions
      action_limit: clips actions
    """
    if init_pose is None:
      # Same swing/extension values for all four legs.
      self._pose = np.array(
          attr.astuple(
              minitaur_pose_utils.MinitaurPose(
                  swing_angle_0=init_swing,
                  swing_angle_1=init_swing,
                  swing_angle_2=init_swing,
                  swing_angle_3=init_swing,
                  extension_angle_0=init_extension,
                  extension_angle_1=init_extension,
                  extension_angle_2=init_extension,
                  extension_angle_3=init_extension)))
    else:  # Ignore init_swing and init_extension
      self._pose = np.array(init_pose)

    action_high = np.array([action_limit] * minitaur_pose_utils.NUM_MOTORS)
    self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)
    self._action_scale = action_scale

  def reset(self):
    # Stateless generator; nothing to reset.
    pass

  def get_action(self, current_time=None, input_action=None):
    """Computes the trajectory according to input time and action.

    Args:
      current_time: The time in gym env since reset (unused).
      input_action: A numpy array. The input leg pose from a NN controller.

    Returns:
      A numpy array. The desired motor angles.
    """
    del current_time
    # Offset the default leg pose by the scaled NN action, then convert
    # from leg space to motor angles.
    return minitaur_pose_utils.leg_pose_to_motor_angles(self._pose +
                                                        self._action_scale *
                                                        np.array(input_action))

  def get_observation(self, input_observation):
    """Get the trajectory generator's observation."""
    return input_observation
class LaikagoPoseOffsetGenerator(object):
"""A trajectory generator that return constant motor angles."""
def __init__(
self,
init_abduction=laikago_pose_utils.LAIKAGO_DEFAULT_ABDUCTION_ANGLE,
init_hip=laikago_pose_utils.LAIKAGO_DEFAULT_HIP_ANGLE,
init_knee=laikago_pose_utils.LAIKAGO_DEFAULT_KNEE_ANGLE,
action_limit=0.5,
):
"""Initializes the controller.
Args:
action_limit: a tuple of [limit_abduction, limit_hip, limit_knee]
"""
self._pose = np.array(
attr.astuple(
laikago_pose_utils.LaikagoPose(abduction_angle_0=init_abduction,
hip_angle_0=init_hip,
knee_angle_0=init_knee,
abduction_angle_1=init_abduction,
hip_angle_1=init_hip,
knee_angle_1=init_knee,
abduction_angle_2=init_abduction,
hip_angle_2=init_hip,
knee_angle_2=init_knee,
abduction_angle_3=init_abduction,
hip_angle_3=init_hip,
knee_angle_3=init_knee)))
action_high = np.array([action_limit] * 12)
self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)
def reset(self):
pass
def get_action(self, current_time=None, input_action=None):
"""Computes the trajectory according to input time and action.
Args:
current_time: The time in gym env since reset.
input_action: A numpy array. The input leg pose from a NN controller.
Returns:
A numpy array. The desired motor angles.
"""
del current_time
return self._pose + input_action
def get_observation(self, input_observation):
"""Get the trajectory generator's observation."""
return input_observation
class A1PoseOffsetGenerator(object):
  """A trajectory generator that offsets a constant A1 stance pose."""

  def __init__(self, action_limit=.5):
    """Initializes the controller.

    Args:
      action_limit: Scalar bound used symmetrically for the Box action
        space.
    """
    # FIX: the original had a duplicated assignment
    # (`self._pose = self._pose = ...`) and dead commented-out code.
    # Default A1 stance: (abduction, hip, knee) = (0, 0.9, -1.8) per leg.
    self._pose = np.array(
        [0., 0.9, -1.8, 0., 0.9, -1.8, 0., 0.9, -1.8, 0., 0.9, -1.8])
    self.action_space = spaces.Box(-action_limit, action_limit,
                                   dtype=np.float32)

  def reset(self):
    """The generator keeps no internal state, so reset is a no-op."""
    pass

  def get_action(self, current_time=None, input_action=None):
    """Computes the trajectory according to input time and action.

    Args:
      current_time: The time in gym env since reset (unused).
      input_action: A numpy array. The input leg pose from a NN controller.

    Returns:
      A numpy array. The desired motor angles (default pose + offset).
    """
    del current_time
    return self._pose + input_action

  def get_observation(self, input_observation):
    """Get the trajectory generator's observation."""
    return input_observation
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/env_wrappers/imitation_terminal_conditions.py | motion_imitation/envs/env_wrappers/imitation_terminal_conditions.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the terminal conditions for imitation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from utilities import pose3d
from utilities import motion_util
from pybullet_utils import transformations
def imitation_terminal_condition(env,
                                 dist_fail_threshold=1.0,
                                 rot_fail_threshold=0.5 * np.pi):
  """A terminal condition for motion imitation task.

  The episode ends when the motion clip is over, when any non-foot link
  touches the ground, or when the robot's root drifts too far (in position
  or rotation) from the reference motion's root.

  Args:
    env: An instance of MinitaurGymEnv
    dist_fail_threshold: Max distance the simulated character's root is
      allowed to drift from the reference motion before the episode
      terminates.
    rot_fail_threshold: Max rotational difference (radians) between the
      simulated character's root and the reference motion's root before
      the episode terminates.

  Returns:
    A boolean indicating if episode is over.
  """
  task = env.task
  motion_over = task.is_motion_over()

  foot_links = env.robot.GetFootLinkIDs()
  ground = env.get_ground()

  contact_fall = False
  # sometimes the robot can be initialized with some ground penetration
  # so do not check for contacts until after the first env step.
  if env.env_step_counter > 0:
    robot_ground_contacts = env.pybullet_client.getContactPoints(
        bodyA=env.robot.quadruped, bodyB=ground)

    for contact in robot_ground_contacts:
      # contact[3] is the link index on bodyA; any non-foot contact with
      # the ground counts as a fall.
      if contact[3] not in foot_links:
        contact_fall = True
        break

  root_pos_ref = task.get_ref_base_position()
  root_rot_ref = task.get_ref_base_rotation()
  root_pos_robot = env.robot.GetBasePosition()
  root_rot_robot = env.robot.GetBaseOrientation()

  # Squared-distance comparison avoids a sqrt.
  root_pos_diff = np.array(root_pos_ref) - np.array(root_pos_robot)
  root_pos_fail = (
      root_pos_diff.dot(root_pos_diff) >
      dist_fail_threshold * dist_fail_threshold)

  # Relative rotation ref * conj(robot), normalized and converted to an
  # axis-angle whose (normalized) angle measures the orientation error.
  root_rot_diff = transformations.quaternion_multiply(
      np.array(root_rot_ref),
      transformations.quaternion_conjugate(np.array(root_rot_robot)))
  root_rot_diff /= np.linalg.norm(root_rot_diff)

  _, root_rot_diff_angle = pose3d.QuaternionToAxisAngle(
      root_rot_diff)
  root_rot_diff_angle = motion_util.normalize_rotation_angle(
      root_rot_diff_angle)
  root_rot_fail = (np.abs(root_rot_diff_angle) > rot_fail_threshold)

  done = motion_over \
      or contact_fall \
      or root_pos_fail \
      or root_rot_fail

  return done
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/gym_envs/a1_gym_env.py | motion_imitation/envs/gym_envs/a1_gym_env.py | """Wrapper to make the a1 environment suitable for OpenAI gym."""
import gym
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
class A1GymEnv(gym.Env):
  """A1 environment that supports the gym interface."""
  metadata = {'render.modes': ['rgb_array']}

  def __init__(self,
               action_limit=(0.75, 0.75, 0.75),
               render=False,
               on_rack=False):
    """Builds the underlying locomotion env and mirrors its spaces.

    Args:
      action_limit: Per-joint-type action limits forwarded to the builder.
      render: Whether to enable rendering in the built env.
      on_rack: Whether the robot is suspended on a rack.
    """
    wrapped = env_builder.build_regular_env(
        a1.A1,
        motor_control_mode=robot_config.MotorControlMode.POSITION,
        enable_rendering=render,
        action_limit=action_limit,
        on_rack=on_rack)
    self._env = wrapped
    self.observation_space = wrapped.observation_space
    self.action_space = wrapped.action_space

  def step(self, action):
    """Forwards a step to the wrapped env."""
    return self._env.step(action)

  def reset(self):
    """Forwards a reset to the wrapped env."""
    return self._env.reset()

  def close(self):
    """Closes the wrapped env."""
    self._env.close()

  def render(self, mode):
    """Renders the wrapped env in the given mode."""
    return self._env.render(mode)

  def __getattr__(self, attr):
    """Delegates unknown attributes to the wrapped env."""
    return getattr(self._env, attr)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.