file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/version.py | Python | #!/usr/bin/env python3
# With no command line flag, this prints the nanobind version.
# With flags -w semver, this writes the new version to where it's needed.
import os
import re
import sys
# Parse the header file <nanobind/nanobind.h> and print the version.
def get_version(root):
    """Read NB_VERSION_{MAJOR,MINOR,PATCH,DEV} from <nanobind/nanobind.h>
    under *root* and print the semantic version.

    Prints 'X.Y.Z' for stable releases and 'X.Y.Z-devN' when
    NB_VERSION_DEV is present and positive.
    """
    major = ''
    minor = ''
    patch = ''
    dev = ''
    with open(os.path.join(root, "include/nanobind/nanobind.h"), 'r') as f:
        for line in f:
            if m := re.match(r'#define NB_VERSION_(.+)', line):
                rest = m.group(1)
                # The component names are mutually exclusive, so elif
                # avoids re-testing the remaining patterns on every hit.
                if m_major := re.match(r'MAJOR\s+([0-9]+)', rest):
                    major = m_major.group(1)
                elif m_minor := re.match(r'MINOR\s+([0-9]+)', rest):
                    minor = m_minor.group(1)
                elif m_patch := re.match(r'PATCH\s+([0-9]+)', rest):
                    patch = m_patch.group(1)
                elif m_dev := re.match(r'DEV\s+([0-9]+)', rest):
                    dev = m_dev.group(1)
    version_core = '.'.join([major, minor, patch])
    # Bug fix: a header without an NB_VERSION_DEV define used to crash on
    # int('') here; treat a missing define the same as DEV == 0.
    if dev and int(dev) > 0:
        print(version_core, '-dev', dev, sep='')
    else:
        print(version_core)
# Write the semantic version to nanobind.h, pyproject.toml, __init__.py,
# and docs/bazel.rst.
# The semver string must be either 'X.Y.Z' or 'X.Y.Z-devN', where X, Y, Z are
# non-negative integers and N is a positive integer.
def _sub_file(path, pattern, replacement):
    """Apply a single in-place regex substitution (first match only) to the
    file at *path*, rewriting it with the substituted contents."""
    with open(path, "r+") as f:
        contents = re.sub(pattern, replacement, f.read(), count=1)
        f.seek(0)
        f.truncate()
        f.write(contents)


def write_version(root, semver):
    """Parse *semver* and propagate it to every file that embeds the version.

    *semver* must be 'X.Y.Z' or 'X.Y.Z-devN' (X, Y, Z non-negative, N
    positive). Targets: include/nanobind/nanobind.h, pyproject.toml,
    src/__init__.py and — for stable releases only — docs/bazel.rst.
    On invalid input an error is printed to stderr and nothing is written.
    """
    try:
        beginning, middle, end = semver.split('.', maxsplit=2)
        major = int(beginning)
        minor = int(middle)
        # Bug fix: fullmatch rejects trailing garbage such as '1.2.3-dev4x',
        # which the previous unanchored re.match silently accepted.
        if m := re.fullmatch(r'([0-9]+)-dev([1-9][0-9]*)', end):
            patch = int(m.group(1))
            dev = int(m.group(2))
        else:
            patch = int(end)
            dev = 0
    except ValueError:
        # Raised both by the 3-way unpack (too few dots) and the int() calls;
        # the previous bare `except:` also swallowed KeyboardInterrupt etc.
        print("Invalid version: '", semver, "'", sep='', file=sys.stderr)
        print("Valid examples: '1.2.3' or '1.2.3-dev4'", file=sys.stderr)
        return
    # nanobind.h: one #define per version component.
    header = os.path.join(root, "include/nanobind/nanobind.h")
    for name, number in (('MAJOR', major), ('MINOR', minor),
                         ('PATCH', patch), ('DEV', dev)):
        _sub_file(header,
                  r'#define NB_VERSION_' + name + r'\s+[0-9]+',
                  '#define NB_VERSION_%s %d' % (name, number))
    # pyproject.toml
    _sub_file(os.path.join(root, "pyproject.toml"),
              r'version\s+=\s+"[^"]+"',
              'version = "' + semver + '"')
    # src/__init__.py
    _sub_file(os.path.join(root, "src/__init__.py"),
              r'__version__\s+=\s+"[^"]+"',
              '__version__ = "' + semver + '"')
    # docs/bazel.rst, but only if `semver` is not a dev release: the
    # documentation is scoped to the latest stable release.
    if "dev" not in semver:
        bazel = os.path.join(root, "docs/bazel.rst")
        _sub_file(bazel,
                  r"nanobind\s+v\d+(\.\d+)+",
                  "nanobind v" + semver)
        _sub_file(bazel,
                  r'"nanobind_bazel", version = "\d+(\.\d+)+"',
                  '"nanobind_bazel", version = "' + semver + '"')
def main():
    """CLI entry point.

    With no arguments, print the current nanobind version; with
    '-w SEMVER', write SEMVER to all version-bearing files; otherwise
    print a usage message to stderr.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    root = os.path.dirname(script_dir)
    argv = sys.argv
    if len(argv) == 1:
        get_version(root)
        return
    if len(argv) == 3 and argv[1] == '-w':
        write_version(root, argv[2])
        return
    print("Usage: ", argv[0], file=sys.stderr)
    print(" or: ", argv[0], "-w X.Y.Z", file=sys.stderr)
    print(" or: ", argv[0], "-w X.Y.Z-devN", file=sys.stderr)


if __name__ == '__main__':
    main()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/common.py | Python | import platform
import gc
import pytest
import threading
is_pypy = platform.python_implementation() == 'PyPy'
is_darwin = platform.system() == 'Darwin'
def collect() -> None:
    """Force a garbage-collection pass.

    PyPy's incremental GC may need several passes before finalizers run,
    so collect three times there; one pass suffices on CPython.
    """
    passes = 3 if is_pypy else 1
    for _ in range(passes):
        gc.collect()
skip_on_pypy = pytest.mark.skipif(
is_pypy, reason="This test currently fails/crashes PyPy")
xfail_on_pypy_darwin = pytest.mark.xfail(
is_pypy and is_darwin, reason="This test for some reason fails on PyPy/Darwin")
# Helper function to parallelize execution of a function. We intentionally
# don't use the Python threads pools here to have threads shut down / start
# between test cases.
def parallelize(func, n_threads):
    """Call *func* once from each of *n_threads* freshly created threads.

    A barrier makes all threads begin at the same moment (maximizing
    contention), and the list of per-thread return values is returned in
    thread-index order. Threads are created anew each call on purpose, so
    that thread startup/shutdown happens between test cases.
    """
    start_gate = threading.Barrier(n_threads)
    results = [None] * n_threads

    def run(index):
        start_gate.wait()  # wait until every worker is ready
        results[index] = func()

    threads = [threading.Thread(target=run, args=(k,))
               for k in range(n_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results
tests/conftest.py | Python | def pytest_addoption(parser):
parser.addoption('--enable-slow-tests',
action='store_true',
dest="enable-slow-tests",
default=False,
help="enable long-running tests")
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/inter_module.cpp | C++ | #include "inter_module.h"
// Build the canonical cross-module test instance (value == 123).
Shared create_shared() {
    Shared result;
    result.value = 123;
    return result;
}
// Report whether the wrapped value equals the caller's expectation.
bool check_shared(const Shared &shared, int expected) {
    return expected == shared.value;
}
// Bump the wrapped counter by one, in place.
void increment_shared(Shared &shared) {
    shared.value += 1;
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/inter_module.h | C/C++ Header | #include <nanobind/nb_defs.h>
#if defined(SHARED_BUILD)
# define EXPORT_SHARED NB_EXPORT
#else
# define EXPORT_SHARED NB_IMPORT
#endif
// A plain value type marked with EXPORT_SHARED so it is exported from the
// library built with SHARED_BUILD and imported everywhere else; used to
// exercise sharing one bound type across multiple extension modules.
struct EXPORT_SHARED Shared {
int value;
};
// Factory: returns a Shared (see inter_module.cpp; it holds 123).
extern EXPORT_SHARED Shared create_shared();
// True iff shared.value == expected.
extern EXPORT_SHARED bool check_shared(const Shared &shared, int expected);
// Increments shared.value in place.
extern EXPORT_SHARED void increment_shared(Shared &shared);
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/object_py.h | C/C++ Header | #include <nanobind/nanobind.h>
NAMESPACE_BEGIN(nanobind)
NAMESPACE_BEGIN(detail)
// Type caster translating nanobind's intrusive reference wrapper ref<T>
// to and from Python, by delegating to the caster of the pointee type T.
template <typename T> struct type_caster<ref<T>> {
using Caster = make_caster<T>;
static constexpr bool IsClass = true;
NB_TYPE_CASTER(ref<T>, Caster::Name)
// Python -> C++: cast the source object to a T*, then wrap it in ref<T>
// (Value is the ref<T> alias introduced by NB_TYPE_CASTER above).
bool from_python(handle src, uint8_t flags,
cleanup_list *cleanup) noexcept {
Caster caster;
if (!caster.from_python(src, flags, cleanup))
return false;
value = Value(caster.operator T *());
return true;
}
// C++ -> Python: hand the raw pointer held by the ref to the T caster.
static handle from_cpp(const ref<T> &value, rv_policy policy,
cleanup_list *cleanup) noexcept {
return Caster::from_cpp(value.get(), policy, cleanup);
}
};
NAMESPACE_END(detail)
NAMESPACE_END(nanobind)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/py_recursive_stub_test/__init__.py | Python | FOO = 123
from . import bar
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/py_recursive_stub_test/bar.py | Python | BAR=456
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/py_stub_test.py | Python | """Example module docstring."""
import sys
if sys.version_info < (3, 11, 0):
# Work around limitations in typing.overload in Python<3.11
import typing_extensions as typing
else:
import typing
# Ignore a type and a function from elsewhere. These shouldn't be included in
# the stub by default
from os import PathLike, getcwd
del sys
C = 123
T = typing.TypeVar("T")
def f1(a, b, c, /):
"""docstring"""
def f2(a=(3,(4,5)), /, b=(1,2), *, c=4):
"""docstring 2"""
def f3(*args, **kwargs):
pass
def f4() -> typing.Callable[[T], T]:
return lambda x: x
class AClass:
STATIC_VAR: int = 5
class NestedClass:
pass
def __init__(self, x):
pass
def method(self, x: str):
pass
@staticmethod
def static_method(x):
pass
@classmethod
def class_method(cls, x):
pass
@typing.overload
def overloaded(self, x: int) -> None:
"""docstr 1"""
@typing.overload
def overloaded(self, x: str) -> None:
"""docstr 2"""
def overloaded(self, x):
pass
@typing.overload
def overloaded_2(self, x: int) -> None: ...
@typing.overload
def overloaded_2(self, x: str) -> None: ...
def overloaded_2(self, x):
"docstr 3"
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_accessor.cpp | C++ | #include <nanobind/nanobind.h>
namespace nb = nanobind;
// Minimal record type with one bound read/write field.
struct A { int value; };
// Extension exercising in-place mutation (+=) through nanobind's four
// accessor flavors: string attribute, string item, integer list item,
// and object-keyed dict item. Each function must observe the mutation
// written back through the accessor.
NB_MODULE(test_accessor_ext, m) {
nb::class_<A>(m, "A")
.def(nb::init<>())
.def_rw("value", &A::value);
// obj.attr("value") += 1 must store the result back into the attribute.
m.def("test_str_attr_accessor_inplace_mutation", []() {
nb::object a_ = nb::module_::import_("test_accessor_ext").attr("A")();
a_.attr("value") += nb::int_(1);
return a_;
});
// d["a"] += 1 must write back through the string-keyed item accessor.
m.def("test_str_item_accessor_inplace_mutation", []() {
nb::dict d;
d["a"] = nb::int_(0);
d["a"] += nb::int_(1);
return d;
});
// l[0] += 1 must write back through the integer-indexed list accessor.
m.def("test_num_item_list_accessor_inplace_mutation", []() {
nb::list l;
l.append(nb::int_(0));
l[0] += nb::int_(1);
return l;
});
// d[key] += 1 must write back through the handle-keyed item accessor.
m.def("test_obj_item_accessor_inplace_mutation", []() {
nb::dict d;
nb::int_ key = nb::int_(0);
d[key] = nb::int_(0);
d[key] += nb::int_(1);
return d;
});
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_accessor.py | Python | import test_accessor_ext as t
def test_01_str_attr_inplace_mutation():
    """A C++ expression like ``obj.attr("foo") += ...`` must mutate the
    object in place (the result is written back to the attribute)."""
    result = t.test_str_attr_accessor_inplace_mutation()
    assert result.value == 1
def test_02_str_item_inplace_mutation():
    """Like test 01, but for C++ ``obj["foo"]`` (string-keyed item
    access) — the incremented value must land back in the dict."""
    result = t.test_str_item_accessor_inplace_mutation()
    assert result.keys() == {"a"}
    assert result["a"] == 1
def test_03_num_item_list_inplace_mutation():
    """Like test 01, but for C++ ``l[n]`` (integer index access) on an
    ``nb::list`` — the element must be updated in place."""
    result = t.test_num_item_list_accessor_inplace_mutation()
    assert len(result) == 1
    assert result[0] == 1
def test_04_obj_item_inplace_mutation():
    """Like test 01, but for C++ ``obj[h]`` (handle-keyed item access)
    — the incremented value must land back under the object key."""
    result = t.test_obj_item_accessor_inplace_mutation()
    assert len(result) == 1
    assert result.keys() == {0}
    assert result[0] == 1  # dict lookup
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_callbacks.cpp | C++ | // This is an example of using nb::call_policy to support binding an
// object that takes non-owning callbacks. Since the callbacks can't
// directly keep a Python object alive (they're trivially copyable), we
// maintain a sideband structure to manage the lifetimes.
#include <algorithm>
#include <unordered_set>
#include <vector>
#include <nanobind/nanobind.h>
#include <nanobind/stl/unordered_set.h>
namespace nb = nanobind;
// The callback type accepted by the object, which we assume we can't change.
// It's trivially copyable, so it can't directly keep a Python object alive.
// A C-style callback: opaque context pointer plus a plain function
// pointer. Trivially copyable, so it cannot own a Python reference —
// lifetime must be managed externally (see the registry machinery below).
struct callback {
void *context;
void (*func)(void *context, int arg);
// Invoke the stored function with the stored context.
void operator()(int arg) const { (*func)(context, arg); }
// Two callbacks are equal iff both pointer members match exactly.
bool operator==(const callback& other) const {
return context == other.context && func == other.func;
}
};
// An object that uses these callbacks, which we want to write bindings for
class publisher {
public:
void subscribe(callback cb) { cbs.push_back(cb); }
void unsubscribe(callback cb) {
cbs.erase(std::remove(cbs.begin(), cbs.end(), cb), cbs.end());
}
void emit(int arg) const { for (auto cb : cbs) cb(arg); }
private:
std::vector<callback> cbs;
};
// Caster bridging Python callables and the trivially-copyable `callback`.
// A Python callable is smuggled through the void* context; wrap_call
// re-borrows it and invokes it.
template <> struct nanobind::detail::type_caster<callback> {
// Trampoline: context is a borrowed PyObject* holding the callable.
static void wrap_call(void *context, int arg) {
borrow<callable>((PyObject *) context)(arg);
}
// Python -> C++: accept any callable; store its PyObject* as context.
// NOTE: the callback does not own a reference — the sideband registry
// in this file keeps the callable alive.
bool from_python(handle src, uint8_t, cleanup_list*) noexcept {
if (!isinstance<callable>(src)) return false;
value = {(void *) src.ptr(), &wrap_call};
return true;
}
// C++ -> Python: if the callback wraps a Python callable, hand back
// that original object; otherwise wrap the C++ callback in a new
// Python function (unless the policy forbids creating one).
static handle from_cpp(callback cb, rv_policy policy, cleanup_list*) noexcept {
if (cb.func == &wrap_call)
return handle((PyObject *) cb.context).inc_ref();
if (policy == rv_policy::none)
return handle();
return cpp_function(cb, policy).release();
}
NB_TYPE_CASTER(callback, const_name("Callable[[int], None]"))
};
nb::dict cb_registry() {
return nb::cast<nb::dict>(
nb::module_::import_("test_callbacks_ext").attr("registry"));
}
struct callback_data {
struct py_hash {
size_t operator()(const nb::object& obj) const { return nb::hash(obj); }
};
struct py_eq {
bool operator()(const nb::object& a, const nb::object& b) const {
return a.equal(b);
}
};
std::unordered_set<nb::object, py_hash, py_eq> subscribers;
};
callback_data& callbacks_for(nb::handle publisher) {
auto registry = cb_registry();
nb::weakref key(publisher, registry.attr("__delitem__"));
if (nb::handle value = PyDict_GetItem(registry.ptr(), key.ptr())) {
return nb::cast<callback_data&>(value);
}
nb::object new_data = nb::cast(callback_data{});
registry[key] = new_data;
return nb::cast<callback_data&>(new_data);
}
struct cb_policy_common {
using TwoArgs = std::integral_constant<size_t, 2>;
static void precall(PyObject **args, TwoArgs,
nb::detail::cleanup_list *cleanup) {
nb::handle self = args[0], cb = args[1];
auto& cbs = callbacks_for(self);
auto it = cbs.subscribers.find(nb::borrow(cb));
if (it != cbs.subscribers.end() && !it->is(cb)) {
// A callback is already subscribed that is
// equal-but-not-identical to the one passed in.
// Adjust args to refer to that one, to work around
// the fact that the C++ object does not understand py-equality.
args[1] = it->ptr();
// This ensures that the normalized callback won't be
// immediately destroyed if it's removed from the registry
// in the unsubscribe postcall hook. Such destruction could
// result in a use-after-free if you have other postcall hooks
// or keep_alives that try to inspect the function args.
// It's not strictly necessary if each arg is inspected by
// only one call policy or keep_alive.
cleanup->append(it->inc_ref().ptr());
}
}
};
struct subscribe_policy : cb_policy_common {
static void postcall(PyObject **args, TwoArgs, nb::handle) {
nb::handle self = args[0], cb = args[1];
callbacks_for(self).subscribers.insert(nb::borrow(cb));
}
};
struct unsubscribe_policy : cb_policy_common {
static void postcall(PyObject **args, TwoArgs, nb::handle) {
nb::handle self = args[0], cb = args[1];
callbacks_for(self).subscribers.erase(nb::borrow(cb));
}
};
NB_MODULE(test_callbacks_ext, m) {
m.attr("registry") = nb::dict();
nb::class_<callback_data>(m, "callback_data")
.def_ro("subscribers", &callback_data::subscribers);
nb::class_<publisher>(m, "publisher", nb::is_weak_referenceable())
.def(nb::init<>())
.def("subscribe", &publisher::subscribe,
nb::call_policy<subscribe_policy>())
.def("unsubscribe", &publisher::unsubscribe,
nb::call_policy<unsubscribe_policy>())
.def("emit", &publisher::emit);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_callbacks.py | Python | import test_callbacks_ext as t
import gc
def test_callbacks():
"""End-to-end check of the call_policy-based subscription bindings:
subscription bookkeeping, equal-but-not-identical callback
normalization, and weakref-keyed registry cleanup."""
pub1 = t.publisher()
pub2 = t.publisher()
record = []
def sub1(x):
record.append(x + 10)
def sub2(x):
record.append(x + 20)
pub1.subscribe(sub1)
pub2.subscribe(sub2)
for pub in (pub1, pub2):
pub.subscribe(record.append)
# Each publisher only notifies its own subscribers.
pub1.emit(1)
assert record == [11, 1]
del record[:]
pub2.emit(2)
assert record == [22, 2]
del record[:]
pub1_w, pub2_w = t.registry.keys() # weakrefs to pub1, pub2
assert pub1_w() is pub1
assert pub2_w() is pub2
# The registry mirrors the live subscriptions of each publisher.
assert t.registry[pub1_w].subscribers == {sub1, record.append}
assert t.registry[pub2_w].subscribers == {sub2, record.append}
# NB: this `record.append` is a different object than the one we subscribed
# above, so we're testing the normalization logic in unsubscribe_policy
pub1.unsubscribe(record.append)
assert t.registry[pub1_w].subscribers == {sub1}
pub1.emit(3)
assert record == [13]
del record[:]
# Drop the last strong references to pub1 (both the loop variable `pub`
# and `pub1` itself); the weakref must clear and the registry entry go.
del pub, pub1
gc.collect()
gc.collect()
assert pub1_w() is None
assert pub2_w() is pub2
assert t.registry.keys() == {pub2_w}
pub2.emit(4)
assert record == [24, 4]
del record[:]
# Same teardown for the second publisher empties the registry entirely.
del pub2
gc.collect()
gc.collect()
assert pub2_w() is None
assert not t.registry
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_chrono.cpp | C++ | /*
tests/test_chrono.cpp -- test conversions to/from std::chrono types
Ported from pybind11/tests/test_chrono.cpp
Copyright (c) 2016 Trent Houliston <trent@houliston.me> and
Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/stl/chrono.h>
// Holds system_clock time_points at five different tick resolutions
// (hours down to microseconds) to exercise round-tripping datetimes
// through duration-casted time_point fields.
struct different_resolutions {
using time_point_h = std::chrono::time_point<std::chrono::system_clock,
std::chrono::hours>;
using time_point_m = std::chrono::time_point<std::chrono::system_clock,
std::chrono::minutes>;
using time_point_s = std::chrono::time_point<std::chrono::system_clock,
std::chrono::seconds>;
using time_point_ms = std::chrono::time_point<std::chrono::system_clock,
std::chrono::milliseconds>;
using time_point_us = std::chrono::time_point<std::chrono::system_clock,
std::chrono::microseconds>;
time_point_h timestamp_h;
time_point_m timestamp_m;
time_point_s timestamp_s;
time_point_ms timestamp_ms;
time_point_us timestamp_us;
};
NB_MODULE(test_chrono_ext, m) {
using system_time = std::chrono::system_clock::time_point;
using steady_time = std::chrono::steady_clock::time_point;
using timespan = std::chrono::duration<int64_t, std::nano>;
using timestamp = std::chrono::time_point<std::chrono::system_clock, timespan>;
// test_chrono_system_clock
// Return the current time off the wall clock
m.def("test_chrono1", []() { return std::chrono::system_clock::now(); });
// test_chrono_system_clock_roundtrip
// Round trip the passed in system clock time
m.def("test_chrono2", [](system_time t) { return t; });
// test_chrono_duration_roundtrip
// Round trip the passed in duration
m.def("test_chrono3", [](std::chrono::system_clock::duration d) { return d; });
// test_chrono_duration_subtraction_equivalence
// Difference between two passed in time_points
m.def("test_chrono4", [](system_time a, system_time b) { return a - b; });
// test_chrono_steady_clock
// Return the current time off the steady_clock
m.def("test_chrono5", []() { return std::chrono::steady_clock::now(); });
// test_chrono_steady_clock_roundtrip
// Round trip a steady clock timepoint
m.def("test_chrono6", [](steady_time t) { return t; });
// test_floating_point_duration
// Roundtrip a duration in microseconds from a float argument
m.def("test_chrono7", [](std::chrono::microseconds t) { return t; });
// Float durations (pybind11 issue #719)
m.def("test_chrono_float_diff",
[](std::chrono::duration<float> a, std::chrono::duration<float> b) { return a - b; });
m.def("test_nano_timepoint",
[](timestamp start, timespan delta) -> timestamp { return start + delta; });
m.def("test_nano_timepoint_roundtrip",
[](timestamp start) { return start; });
m.def("test_nano_timepoint_diff",
[](timestamp start, timestamp end) -> timespan { return start - end; });
// Test different resolutions
nanobind::class_<different_resolutions>(m, "different_resolutions")
.def(nanobind::init<>())
.def_rw("timestamp_h", &different_resolutions::timestamp_h)
.def_rw("timestamp_m", &different_resolutions::timestamp_m)
.def_rw("timestamp_s", &different_resolutions::timestamp_s)
.def_rw("timestamp_ms", &different_resolutions::timestamp_ms)
.def_rw("timestamp_us", &different_resolutions::timestamp_us);
#if defined(Py_LIMITED_API) || defined(PYPY_VERSION)
m.attr("access_via_python") = true;
#else
m.attr("access_via_python") = false;
#endif
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_chrono.py | Python | # Ported from pybind11/tests/test_chrono.py
import test_chrono_ext as m
import time
import datetime
import sys
import pytest
def test_chrono_system_clock():
# Get the time from both c++ and datetime
date0 = datetime.datetime.today()
date1 = m.test_chrono1()
date2 = datetime.datetime.today()
# The returned value should be a datetime
assert isinstance(date1, datetime.datetime)
# The numbers should vary by a very small amount (time it took to execute)
diff_python = abs(date2 - date0)
diff = abs(date1 - date2)
# There should never be a days difference
assert diff.days == 0
# Since datetime.datetime.today() calls time.time(), and on some platforms
# that has 1 second accuracy, we compare this way
assert diff.seconds <= diff_python.seconds
def test_chrono_system_clock_roundtrip():
date1 = datetime.datetime.today()
# Roundtrip the time
date2 = m.test_chrono2(date1)
# The returned value should be a datetime
assert isinstance(date2, datetime.datetime)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff == datetime.timedelta(0)
def test_chrono_system_clock_roundtrip_date():
date1 = datetime.date.today()
# Roundtrip the time
datetime2 = m.test_chrono2(date1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
# Year, Month & Day should be the same after the round trip
assert date1 == date2
# There should be no time information
assert time2.hour == 0
assert time2.minute == 0
assert time2.second == 0
assert time2.microsecond == 0
SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif(
"sys.platform == 'win32'",
reason="TZ environment variable only supported on POSIX"
)
@pytest.mark.parametrize(
"time1",
[
datetime.datetime.today().time(),
datetime.time(0, 0, 0),
datetime.time(0, 0, 0, 1),
datetime.time(0, 28, 45, 109827),
datetime.time(0, 59, 59, 999999),
datetime.time(1, 0, 0),
datetime.time(5, 59, 59, 0),
datetime.time(5, 59, 59, 1),
],
)
@pytest.mark.parametrize(
"tz",
[
None,
pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN),
pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN),
pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN),
],
)
def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch):
if tz is not None:
monkeypatch.setenv("TZ", f"/usr/share/zoneinfo/{tz}")
# Roundtrip the time
datetime2 = m.test_chrono2(time1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# Hour, Minute, Second & Microsecond should be the same after the round trip
assert time1 == time2
# There should be no date information (i.e. date = python base date)
assert date2.year == 1970
assert date2.month == 1
assert date2.day == 1
def test_chrono_duration_roundtrip():
# Get the difference between two times (a timedelta)
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
# Make sure this is a timedelta
assert isinstance(diff, datetime.timedelta)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff == diff
# Negative timedelta roundtrip
diff = datetime.timedelta(microseconds=-1)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff == diff
def test_chrono_duration_subtraction_equivalence():
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff == diff
def test_chrono_duration_subtraction_equivalence_date():
date1 = datetime.date.today()
date2 = datetime.date.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff == diff
def test_chrono_steady_clock():
time1 = m.test_chrono5()
assert isinstance(time1, datetime.timedelta)
def test_chrono_steady_clock_roundtrip():
time1 = datetime.timedelta(days=10, seconds=10, microseconds=100)
time2 = m.test_chrono6(time1)
assert isinstance(time2, datetime.timedelta)
# They should be identical (no information lost on roundtrip)
assert time1 == time2
# Floating point conversion also works
assert m.test_chrono6(time1.total_seconds()) == time1
def test_floating_point_duration():
# Test using a floating point number in seconds
time = m.test_chrono7(35.525123)
assert isinstance(time, datetime.timedelta)
assert time.seconds == 35
assert 525122 <= time.microseconds <= 525123
diff = m.test_chrono_float_diff(43.789012, 1.123456)
assert diff.seconds == 42
assert 665556 <= diff.microseconds <= 665557
def test_nano_timepoint():
time = datetime.datetime.now()
time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60))
assert time1 == time + datetime.timedelta(seconds=60)
def test_chrono_different_resolutions():
resolutions = m.different_resolutions()
time = datetime.datetime.now()
resolutions.timestamp_h = time
resolutions.timestamp_m = time
resolutions.timestamp_s = time
resolutions.timestamp_ms = time
resolutions.timestamp_us = time
assert time == resolutions.timestamp_us
time = time.replace(microsecond=(time.microsecond // 1000) * 1000)
assert time == resolutions.timestamp_ms
time = time.replace(microsecond=0)
assert time == resolutions.timestamp_s
time = time.replace(second=0)
assert time == resolutions.timestamp_m
time = time.replace(minute=0)
assert time == resolutions.timestamp_h
# Tests below this point are new in nanobind
def test_chrono_misc():
from datetime import datetime, timedelta
advance_datetime = m.test_nano_timepoint
difference_between_datetimes = m.test_nano_timepoint_diff
roundtrip_datetime = m.test_nano_timepoint_roundtrip
d1 = datetime(2023, 4, 5, 12, 0, 0, 0)
d2 = datetime(2023, 4, 5, 12, 30, 0, 123)
# datetime -> time_point and duration -> timedelta conversion
assert difference_between_datetimes(d1, d2) == d1 - d2
assert difference_between_datetimes(d2, d1) == d2 - d1
# date -> time_point conversion
assert difference_between_datetimes(d2, d1.date()) == timedelta(
hours=12, minutes=30, microseconds=123
)
# time -> time_point conversion
assert difference_between_datetimes(d2.time(), d1.time()) == timedelta(
minutes=30, microseconds=123
)
assert roundtrip_datetime(d1.time()) == datetime(1970, 1, 1, 12, 0, 0)
for td in (
timedelta(seconds=5),
timedelta(microseconds=123),
timedelta(days=1, seconds=10),
timedelta(seconds=-5),
timedelta(microseconds=-123),
timedelta(days=-1, seconds=-10),
):
# timedelta -> duration conversion
assert advance_datetime(d1, td) == d1 + td
# float -> duration conversion
assert advance_datetime(d1, td.total_seconds()) == d1 + td
# time_point -> datetime conversion
assert roundtrip_datetime(d1) == d1
assert roundtrip_datetime(d2) == d2
@pytest.mark.parametrize(
"test_type,roundtrip_name",
[
(datetime.timedelta, 'test_chrono7'),
(datetime.datetime, 'test_nano_timepoint_roundtrip'),
]
)
def test_chrono_invalid(test_type, roundtrip_name):
roundtrip = getattr(m, roundtrip_name)
# Can't pass None or an integer where a duration or timepoint is expected
with pytest.raises(TypeError, match="incompatible function arguments"):
roundtrip(None)
with pytest.raises(TypeError, match="incompatible function arguments"):
roundtrip(42)
# Can't pass a duration where a timepoint is expected, or vice versa
with pytest.raises(TypeError, match="incompatible function arguments"):
if test_type is datetime.datetime:
roundtrip(datetime.timedelta(seconds=5))
else:
roundtrip(datetime.datetime.now())
# On the limited API we access timedelta/datetime objects via
# regular attribute access; test that invalid results are handled
# reasonably. On the full API we use Python's <datetime.h> header
# so we'll always access the true C-level datetime slot and can't
# be fooled by tricks like this. PyPy uses normal attribute access
# and works like the limited API in this respect.
class fake_type(test_type):
@property
def seconds(self):
return self.override_value
@property
def second(self):
return self.override_value
if test_type is datetime.datetime:
fake_val = fake_type.fromtimestamp(time.time())
replace_overridden = lambda s: fake_val.replace(second=s)
else:
fake_val = fake_type(days=1, seconds=10, microseconds=123456)
replace_overridden = lambda s: fake_type(
days=1, seconds=s, microseconds=123456
)
for fake_result, errtype in (
("hi", "TypeError"),
(0, None),
(2**64, "Python int too large to convert to C long"),
(2**32, "OverflowError"),
):
fake_val.override_value = fake_result
if not m.access_via_python:
assert roundtrip(fake_val) == fake_val
elif errtype is None:
assert roundtrip(fake_val) == replace_overridden(fake_result)
elif test_type is datetime.timedelta and sys.implementation.name == "pypy":
# pypy's cpyext module converts timedelta to a C structure
# before the nanobind function even gets called, producing
# a different exception than the one we're testing below.
# datetime still works as it doesn't have its attributes
# converted but instead is implemented with Python
# attribute accesses.
pass
else:
from test.support import catch_unraisable_exception
with catch_unraisable_exception() as cm:
with pytest.raises(TypeError, match="incompatible function arguments"):
roundtrip(fake_val)
assert cm.unraisable is not None
assert errtype in repr(cm.unraisable.exc_value)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_classes.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/trampoline.h>
#include <nanobind/operators.h>
#include <nanobind/stl/optional.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/pair.h>
#include <nanobind/stl/shared_ptr.h>
#include <nanobind/stl/tuple.h>
#include <nanobind/stl/unique_ptr.h>
#include <map>
#include <memory>
#include <cstring>
#include <vector>
#include <nanobind/stl/detail/traits.h>
#include "inter_module.h"
#include "test_classes.h"
namespace nb = nanobind;
using namespace nb::literals;
static int default_constructed = 0, value_constructed = 0, copy_constructed = 0,
move_constructed = 0, copy_assigned = 0, move_assigned = 0,
destructed = 0, pickled = 0, unpickled = 0;
struct Struct;
std::unique_ptr<Struct> struct_tmp;
static std::vector<int> struct_destructed;
// Instrumented value type: every special member function increments one of
// the file-level counters (default_constructed, copy_constructed, ...), so
// the Python tests can assert exactly which constructors/assignments ran.
struct Struct {
int i = 5;
Struct() { default_constructed++; }
Struct(int i) : i(i) { value_constructed++; }
Struct(const Struct &s) : i(s.i) { copy_constructed++; }
// Moved-from instances are reset to 0 so tests can detect moves.
Struct(Struct &&s) noexcept : i(s.i) { s.i = 0; move_constructed++; }
Struct &operator=(const Struct &s) { i = s.i; copy_assigned++; return *this; }
Struct &operator=(Struct &&s) noexcept { i = s.i; s.i = 0; move_assigned++; return *this; }
// Destruction order is recorded only while the interpreter is alive.
~Struct() { destructed++; if (nb::is_alive()) struct_destructed.push_back(i); }
int value() const { return i; }
int value_plus(int j, int k, int l, int m, int n, int o, int p) const {
return i + j + k + l + m + n + o + p;
}
// Pickle support hooks; the counters track how often they are invoked.
int getstate() const { ++pickled; return i; }
void set_value(int value) { i = value; }
void setstate(int value) { unpickled++; i = value; }
// Overloaded statics to exercise static-method overload dispatch.
static int static_test(int) { return 1; }
static int static_test(float) { return 2; }
// Factories for the different return-value policies under test; the
// copy/reference variants hand out the file-level struct_tmp instance.
static Struct* create_take() { return new Struct(10); }
static Struct create_move() { return Struct(11); }
static Struct* create_copy() { return struct_tmp.get(); }
static Struct* create_reference() { return struct_tmp.get(); }
Struct &self() { return *this; }
};
struct PairStruct {
Struct s1;
Struct s2;
};
// Test case for issue #1280
struct OptionalNoneTest {
    // Sum of the three arguments; an absent middle argument counts as zero.
    int compute(int i, std::optional<int> j, int k) const {
        int middle = j.has_value() ? *j : 0;
        return i + middle + k;
    }
};
// 1 KiB POD used to exercise allocation of large instances (test07_big).
struct Big {
    char data[1024];
    Big() { memset(data, 0xff, sizeof(data)); }
};
// Over-aligned (128-byte) type; the constructor checks the alignment that the
// binding layer's allocator must honor before filling the payload.
struct alignas(128) BigAligned {
    char data[128];
    BigAligned() {
        if (((uintptr_t) data) % 128 != 0)
            throw std::runtime_error("data is not aligned!");
        memset(data, 0xff, sizeof(data));
    }
};
// Abstract polymorphic base class, bound with the PyAnimal trampoline defined
// inside NB_MODULE below; what() is pure virtual.
struct Animal {
    virtual ~Animal() = default;
    virtual std::string name() const { return "Animal"; }
    virtual std::string what() const = 0;
    virtual void void_ret() { }
};
// Static data member exposed via def_rw_static; StaticProperties2 checks that
// the static property is shared with (and visible through) subclasses.
struct StaticProperties {
    static int value;
};
struct StaticProperties2 : StaticProperties { };
int StaticProperties::value = 23;
// Dummy Py_sq_length type slot installed on ClassWithLen below; makes
// len(obj) return the constant 123 (checked by test21_type_callback).
static Py_ssize_t sq_length_dummy(PyObject *) {
    return 123;
}
// Holds a shared_ptr to another Wrapper; combined with the tp_traverse /
// tp_clear slots below to test cyclic garbage-collection support.
struct Wrapper {
    std::shared_ptr<Wrapper> value;
};

// Subclasses bound with nb::is_weak_referenceable() / nb::dynamic_attr()
struct StructWithWeakrefs : Struct { };
struct StructWithWeakrefsAndDynamicAttrs : Struct { };
// Interning factory: at most one live UniqueInt exists per integer value.
// 'instances' holds weak references, so an entry expires as soon as all
// shared_ptr owners (including Python-side ones) are gone.
struct UniqueInt {
    static std::map<int, std::weak_ptr<UniqueInt>> instances;

    // Return the shared instance for 'val', creating a fresh one if the
    // previous instance expired. Each call bumps the lookup counter.
    static std::shared_ptr<UniqueInt> make(int val) {
        std::weak_ptr<UniqueInt> &slot = instances[val];
        std::shared_ptr<UniqueInt> result = slot.lock();
        if (!result) {
            result = std::shared_ptr<UniqueInt>(new UniqueInt(val));
            slot = result;
        }
        result->nlook++;
        return result;
    }

    int value() const { return val; }
    int lookups() const { return nlook; }

private:
    UniqueInt(int v) : val(v) {}
    int val;       // interned value
    int nlook = 0; // how many times make() handed out this instance
};

std::map<int, std::weak_ptr<UniqueInt>> UniqueInt::instances;
// Py_tp_traverse slot for Wrapper: reports the references a Wrapper instance
// holds to Python's cyclic garbage collector.
int wrapper_tp_traverse(PyObject *self, visitproc visit, void *arg) {
    // We must traverse the implicit dependency of an object on its associated type object.
    Py_VISIT(Py_TYPE(self));

    // The tp_traverse method may be called after __new__ but before or during
    // __init__, before the C++ constructor has been called. We must not inspect
    // the C++ state before the constructor completes.
    if (!nb::inst_ready(self)) {
        return 0;
    }

    Wrapper *w = nb::inst_ptr<Wrapper>(self);

    // If c->value corresponds to an associated CPython object, return it
    nb::handle value = nb::find(w->value);

    // Inform the Python GC about it
    Py_VISIT(value.ptr());

    return 0;
}
// Py_tp_clear slot for Wrapper: drops the held reference so the Python GC can
// break reference cycles involving Wrapper instances.
int wrapper_tp_clear(PyObject *self) {
    Wrapper *w = nb::inst_ptr<Wrapper>(self);
    w->value.reset();
    return 0;
}
// Extra type slots installed on the Wrapper binding (zero-terminated array,
// passed to nb::type_slots() inside NB_MODULE below)
PyType_Slot wrapper_slots[] = {
    { Py_tp_traverse, (void *) wrapper_tp_traverse },
    { Py_tp_clear, (void *) wrapper_tp_clear },
    { 0, 0 }
};
// Module entry point: registers all bindings exercised by tests/test_classes.py.
NB_MODULE(test_classes_ext, m) {
    // Backing instance for Struct.create_copy()/create_reference()
    struct_tmp = std::make_unique<Struct>(12);

    auto cls = nb::class_<Struct>(m, "Struct", "Some documentation")
        .def(nb::init<>())
        .def(nb::init<int>())
        .def("value", &Struct::value)
        .def("value_plus", &Struct::value_plus)
        .def("set_value", &Struct::set_value, "value"_a)
        .def("self", &Struct::self, nb::rv_policy::none)
        .def("none", [](Struct &) -> const Struct * { return nullptr; })
        .def("__getstate__", &Struct::getstate)
        .def("__setstate__", &Struct::setstate)
        .def_static("static_test", nb::overload_cast<int>(&Struct::static_test))
        .def_static("static_test", nb::overload_cast<float>(&Struct::static_test))
        .def_static("create_move", &Struct::create_move)
        .def_static("create_reference", &Struct::create_reference,
                    nb::rv_policy::reference)
        .def_static("create_copy", &Struct::create_copy,
                    nb::rv_policy::copy)
        .def_static("create_take", &Struct::create_take);

    if (!nb::type<Struct>().is(cls))
        nb::detail::raise("type lookup failed!");

    nb::class_<PairStruct>(m, "PairStruct")
        .def(nb::init<>())
        .def_rw("s1", &PairStruct::s1, "A documented property")
        .def_rw("s2", &PairStruct::s2);

    // Test case for issue #1280
    nb::class_<OptionalNoneTest>(m, "OptionalNoneTest")
        .def(nb::init<>())
        .def("compute", &OptionalNoneTest::compute,
             "i"_a, "j"_a = nb::none(), "k"_a = 0);

    // Expose the lifetime counters as a dict snapshot
    m.def("stats", []{
        nb::dict d;
        d["default_constructed"] = default_constructed;
        d["value_constructed"] = value_constructed;
        d["copy_constructed"] = copy_constructed;
        d["move_constructed"] = move_constructed;
        d["copy_assigned"] = copy_assigned;
        d["move_assigned"] = move_assigned;
        d["destructed"] = destructed;
        d["pickled"] = pickled;
        d["unpickled"] = unpickled;
        return d;
    });

    m.def("reset", []() {
        default_constructed = 0;
        value_constructed = 0;
        copy_constructed = 0;
        move_constructed = 0;
        copy_assigned = 0;
        move_assigned = 0;
        destructed = 0;
        pickled = 0;
        unpickled = 0;
    });

    // test06_big
    nb::class_<Big>(m, "Big", "A class\nwith a multi-line\ndocstring..")
        .def(nb::init<>());
    nb::class_<BigAligned>(m, "BigAligned")
        .def(nb::init<>());

    // test09_trampoline
    // test10_trampoline_failures
    struct PyAnimal : Animal {
        NB_TRAMPOLINE(Animal, 3);

        PyAnimal() {
            default_constructed++;
        }

        ~PyAnimal() override {
            destructed++;
        }

        std::string name() const override {
            NB_OVERRIDE(name);
        }

        std::string what() const override {
            NB_OVERRIDE_PURE(what);
        }

        void void_ret() override {
            NB_OVERRIDE(void_ret);
        }
    };

    struct Dog : Animal {
        Dog(const std::string &s) : s(s) { }
        std::string name() const override { return "Dog"; }
        std::string what() const override { return s; }
        std::string s;
    };

    struct PyDog : Dog {
        NB_TRAMPOLINE(Dog, 2);

        PyDog(const std::string &s) : Dog(s) { }

        std::string name() const override {
            NB_OVERRIDE(name);
        }

        std::string what() const override {
            NB_OVERRIDE(what);
        }
    };

    struct Cat : Animal {
        Cat(const std::string &s) : s(s) { }
        std::string name() const override { return "Cat"; }
        std::string what() const override { return s; }
        std::string s;
    };

    struct SiameseCat : Cat { };
    struct Foo { };

    auto animal = nb::class_<Animal, PyAnimal>(m, "Animal")
        .def(nb::init<>(), "A constructor")
        .def("name", &Animal::name, "A method")
        .def("what", &Animal::what);

    nb::class_<Dog, Animal, PyDog>(m, "Dog")
        .def(nb::init<const std::string &>());

    // Cat declares its base via the 'animal' handle rather than a template arg
    nb::class_<Cat>(m, "Cat", animal)
        .def(nb::init<const std::string &>());

    nb::class_<SiameseCat, Cat> sc(m, "SiameseCat");
    (void) sc;

    m.def("go", [](Animal *a) {
        return a->name() + " says " + a->what();
    });

    m.def("animal_passthrough", [](Animal *a) { return a; }, nb::rv_policy::none);
    m.def("dog_passthrough", [](Dog *d) { return d; }, nb::rv_policy::none);
    m.def("void_ret", [](Animal *a) { a->void_ret(); });

    m.def("call_function", [](nb::handle h) {
        return h(1, 2, "hello", true, 4);
    });

    m.def("call_method", [](nb::handle h) {
        return h.attr("f")(1, 2, "hello", true, 4);
    });

    // test11_large_pointers
    nb::class_<Foo>(m, "Foo");
    m.def("i2p", [](uintptr_t x) { return (Foo *) x; }, nb::rv_policy::reference);
    m.def("p2i", [](Foo *x) { return (uintptr_t) x; }, "x"_a = nullptr);

    // test12_implicitly_convertible
    struct A { int a; };
    struct B { int b; };
    struct B2 : B { B2(int i) : B{i} { } };
    struct C { int c; };
    struct D {
        D(const A &a) : value(a.a + 10) { }
        D(const B *b) : value(b->b + 100) { }
        D(C c) : value(c.c + 1000) { }
        D(int d) : value(d + 10000) { }
        D(float) : value(0) { throw std::runtime_error("Fail!"); }
        D(std::nullptr_t) : value(0) {}
        // notice dangling access:
        ~D() { static_cast<volatile int&>(value) = -100; }
        int value;
    };

    nb::class_<A>(m, "A")
        .def(nb::init<int>());
    nb::class_<B>(m, "B")
        .def(nb::init<int>());
    nb::class_<B2, B>(m, "B2")
        .def(nb::init<int>());
    nb::class_<C>(m, "C")
        .def(nb::init<int>());
    nb::class_<D>(m, "D")
        .def(nb::init_implicit<const A &>())
        .def(nb::init_implicit<const B *>())
        .def(nb::init_implicit<int>())
        .def(nb::init_implicit<float>())
        .def_rw("value", &D::value);

    m.def("get_d", [](const D &d) { return d.value; });

    m.def("get_optional_d", [](std::optional<const D*> arg) {
        return arg ? arg.value()->value : -1;
    }, nb::arg().none());

    // Probe the four nb::cast<> flavors; -1 marks a failed conversion
    m.def("get_d_via_cast", [](nb::object obj) {
        int by_val = -1, by_ptr = -1, by_opt_val = -1, by_opt_ptr = -1;
        try {
            by_val = nb::cast<D>(obj).value;
        } catch (const nb::cast_error&) {}
        try {
            by_ptr = nb::cast<D*>(obj)->value;
        } catch (const nb::cast_error&) {}
        try {
            by_opt_val = nb::cast<std::optional<D>>(obj)->value;
        } catch (const nb::cast_error&) {}
        try {
            by_opt_ptr = nb::cast<std::optional<D*>>(obj).value()->value;
        } catch (const nb::cast_error&) {}
        return nb::make_tuple(by_val, by_ptr, by_opt_val, by_opt_ptr);
    });

    // Same four flavors via nb::try_cast
    m.def("get_d_via_try_cast", [](nb::object obj) {
        int by_val = -1, by_ptr = -1, by_opt_val = -1, by_opt_ptr = -1;
        if (D val(nullptr); nb::try_cast(obj, val))
            by_val = val.value;
        if (D* ptr; nb::try_cast(obj, ptr))
            by_ptr = ptr->value;
        if (std::optional<D> opt_val; nb::try_cast(obj, opt_val))
            by_opt_val = opt_val->value;
        if (std::optional<D*> opt_ptr; nb::try_cast(obj, opt_ptr))
            by_opt_ptr = opt_ptr.value()->value;
        return nb::make_tuple(by_val, by_ptr, by_opt_val, by_opt_ptr);
    });

    struct Int {
        int i;
        Int operator+(Int o) const { return {i + o.i}; }
        Int operator-(float j) const { return {int(i - j)}; }
        bool operator==(Int o) const { return i == o.i; }
        Int &operator+=(Int o) {
            i += o.i;
            return *this;
        }
    };

    // test13_operators
    nb::class_<Int>(m, "Int")
        .def(nb::init<int>())
        .def(nb::self + nb::self)
        .def(nb::self += nb::self)
        .def(nb::self - float())
        .def("__repr__", [](const Int &i) { return std::to_string(i.i); });

    // test15: Test nb::keep_alive feature
    m.def(
        "keep_alive_arg", [](nb::handle, nb::handle ret) { return ret; },
        nb::keep_alive<0, 1>());
    m.def(
        "keep_alive_ret", [](nb::handle, nb::handle ret) { return ret; },
        nb::keep_alive<1, 0>());

    // test17_name_qualname_module()
    m.def("f", []{});
    struct MyClass { struct NestedClass { }; };
    nb::class_<MyClass> mcls(m, "MyClass");
    nb::class_<MyClass::NestedClass> ncls(mcls, "NestedClass");
    mcls.def(nb::init<>());
    mcls.def("f", [](MyClass&){});
    ncls.def("f", [](MyClass::NestedClass&){});

    // test18_static_properties
    nb::class_<StaticProperties>(m, "StaticProperties")
        .def_rw_static("value", &StaticProperties::value, "Static property docstring")
        .def_static("get", []{ return StaticProperties::value; } );
    nb::class_<StaticProperties2, StaticProperties>(m, "StaticProperties2");

    // test19_supplement
    struct ClassWithSupplement { };
    struct Supplement {
        uint8_t data[0xFF];
    };

    auto scls = nb::class_<ClassWithSupplement>(m, "ClassWithSupplement", nb::supplement<Supplement>())
        .def(nb::init<>());

    // Fill the supplement with a recognizable 0..0xFE pattern
    Supplement &supplement = nb::type_supplement<Supplement>(scls);
    for (uint8_t i = 0; i < 0xFF; ++i)
        supplement.data[i] = i;

    m.def("check_supplement", [](nb::handle h) {
        if (nb::isinstance<ClassWithSupplement>(h)) {
            Supplement &s2 = nb::type_supplement<Supplement>(h.type());
            for (uint16_t i = 0; i < 0xFF; ++i) {
                if (s2.data[i] != i)
                    return false;
            }
            return true;
        }
        return false;
    });

    // test20_type_callback
    PyType_Slot slots[] {
        { Py_sq_length, (void *) sq_length_dummy },
        { 0, nullptr }
    };

    struct ClassWithLen { };
    nb::class_<ClassWithLen>(m, "ClassWithLen", nb::type_slots(slots))
        .def(nb::init<>());

    // test21_low_level
    m.def("test_lowlevel", []() {
        nb::handle py_type = nb::type<Struct>();
        if (!(nb::type_check(py_type) &&
              nb::type_size(py_type) == sizeof(Struct) &&
              nb::type_align(py_type) == alignof(Struct) &&
              nb::type_info(py_type) == typeid(Struct)))
            throw std::runtime_error("Internal error!");

        nb::object py_inst = nb::inst_alloc(py_type);
        if (!(nb::inst_check(py_inst) && py_inst.type().is(py_type) &&
              !nb::inst_ready(py_inst)))
            throw std::runtime_error("Internal error! (2)");

        // Get a C++ pointer to the uninitialized instance data
        Struct *ptr = nb::inst_ptr<Struct>(py_inst);

        // Perform an in-place construction of the C++ object
        new (ptr) Struct(123);
        nb::inst_mark_ready(py_inst);
        if (!nb::inst_ready(py_inst))
            throw std::runtime_error("Internal error! (3)");

        nb::object py_inst_2 = nb::inst_alloc(py_type);
        if (nb::inst_ready(py_inst_2))
            throw std::runtime_error("Internal error! (4)");
        nb::inst_copy(py_inst_2, py_inst);
        if (!nb::inst_ready(py_inst_2))
            throw std::runtime_error("Internal error! (5)");

        nb::inst_destruct(py_inst);
        if (nb::inst_ready(py_inst))
            throw std::runtime_error("Internal error! (6)");

        nb::inst_move(py_inst, py_inst_2);
        if (!nb::inst_ready(py_inst))
            throw std::runtime_error("Internal error! (7)");

        nb::handle py_type_pair = nb::type<PairStruct>();
        PairStruct *ps = new PairStruct{Struct(123), Struct(456)};
        nb::object py_inst_3 = nb::inst_take_ownership(py_type_pair, ps);
        if (!(nb::inst_check(py_inst_3) && py_inst_3.type().is(py_type_pair) &&
              nb::inst_state(py_inst_3) == std::make_pair(true, true)))
            throw std::runtime_error("Internal error! (8)");

        nb::object py_inst_4 = nb::inst_reference(py_type, &ps->s1, py_inst_3);
        if (!(nb::inst_check(py_inst_4) && py_inst_4.type().is(py_type) &&
              nb::inst_state(py_inst_4) == std::make_pair(true, false)))
            throw std::runtime_error("Internal error! (9)");

        return nb::make_tuple(py_inst, py_inst_2, py_inst_3, py_inst_4);
    });

    // test22_handle_t
    m.def("test_handle_t", [](nb::handle_t<Struct> h) { return borrow(h); });

    // test23_type_object_t
    m.def("test_type_object_t", [](nb::type_object_t<Struct> h) -> nb::object { return h; });

    // test24_none_arg
    m.def("none_0", [](Struct *s) { return s == nullptr; });
    m.def("none_1", [](Struct *s) { return s == nullptr; }, nb::arg());
    m.def("none_2", [](Struct *s) { return s == nullptr; }, nb::arg("arg"));
    m.def("none_3", [](Struct *s) { return s == nullptr; }, nb::arg().none());
    m.def("none_4", [](Struct *s) { return s == nullptr; }, nb::arg("arg").none());

    // test25_is_final
    struct FinalType { };
    nb::class_<FinalType>(m, "FinalType", nb::is_final())
        .def(nb::init<>());

    // test26_dynamic_attr
    struct StructWithAttr : Struct { };
    nb::class_<StructWithAttr, Struct>(m, "StructWithAttr", nb::dynamic_attr())
        .def(nb::init<int>());

    nb::class_<Wrapper>(m, "Wrapper", nb::type_slots(wrapper_slots))
        .def(nb::init<>())
        .def_rw("value", &Wrapper::value);

    // The following isn't tested on the Python side, we just want to make sure it compiles
    struct NonCopyable {
        NonCopyable() = default;
        NonCopyable(const NonCopyable&) = delete;
    };

    using NonCopyableVec = std::vector<NonCopyable>;
    nb::class_<NonCopyableVec>(m, "NonCopyableVec");

    struct PrivateNonCopyable {
        static PrivateNonCopyable &get_instance() {
            static PrivateNonCopyable i;
            return i;
        }
        int get_int() { return 42; }
    private:
        PrivateNonCopyable() {}
        PrivateNonCopyable(const PrivateNonCopyable&) = delete;
        PrivateNonCopyable &operator=(const PrivateNonCopyable&) = delete;
    };

    // #1249 this didn't compile previously
    struct my_call_guard {};

    nb::class_<PrivateNonCopyable>(m, "PrivateNonCopyable")
        .def_static("get_instance", &PrivateNonCopyable::get_instance, nb::call_guard<my_call_guard>(), nb::rv_policy::reference)
        .def("get_int", &PrivateNonCopyable::get_int);

    m.def("is_int_1", [](nb::handle h) { return nb::isinstance<int>(h); });
    m.def("is_int_2", [](nb::handle h) { return nb::isinstance<nb::int_>(h); });
    m.def("is_struct", [](nb::handle h) { return nb::isinstance<Struct>(h); });

    struct Base { ~Base() = default; };
    struct PolymorphicBase { virtual ~PolymorphicBase() = default; };
    struct Subclass : Base { };
    struct PolymorphicSubclass : PolymorphicBase { };
    struct AnotherSubclass : Base { };
    struct AnotherPolymorphicSubclass : PolymorphicBase { };

    nb::class_<Base> (m, "Base");
    nb::class_<Subclass> (m, "Subclass");
    nb::class_<PolymorphicBase> (m, "PolymorphicBase");
    nb::class_<PolymorphicSubclass> (m, "PolymorphicSubclass");

    // Non-polymorphic factories cannot recover the derived type;
    // polymorphic ones can (via RTTI)
    m.def("polymorphic_factory", []() { return (PolymorphicBase *) new PolymorphicSubclass(); });
    m.def("polymorphic_factory_2", []() { return (PolymorphicBase *) new AnotherPolymorphicSubclass(); });
    m.def("factory", []() { return (Base *) new Subclass(); });
    m.def("factory_2", []() { return (Base *) new AnotherSubclass(); });

    m.def("check_shared", [](Shared *) { });

    m.def("try_cast_1", [](nb::handle h) {
        Struct s;
        bool rv = nb::try_cast<Struct>(h, s);
        return std::make_pair(rv, std::move(s));
    }, nb::arg().none());

    m.def("try_cast_2", [](nb::handle h) {
        Struct s;
        Struct &s2 = s;
        bool rv = nb::try_cast<Struct &>(h, s2);
        return std::make_pair(rv, std::move(s2));
    }, nb::arg().none());

    m.def("try_cast_3", [](nb::handle h) {
        Struct *sp = nullptr;
        bool rv = nb::try_cast<Struct *>(h, sp);
        return std::make_pair(rv, sp);
    }, nb::arg().none(), nb::rv_policy::none);

    m.def("try_cast_4", [](nb::handle h) {
        int i = 0;
        bool rv = nb::try_cast<int>(h, i);
        return std::make_pair(rv, i);
    });

#if !defined(Py_LIMITED_API)
    m.def("test_slots", []() {
        nb::object wrapper_tp = nb::module_::import_("test_classes_ext").attr("Wrapper");
        return nb::make_tuple(
            nb::type_get_slot(wrapper_tp, Py_tp_traverse) == (void *) wrapper_tp_traverse,
            nb::type_get_slot(&PyLong_Type, Py_tp_init) == (void *) PyLong_Type.tp_init,
            nb::type_get_slot(&PyLong_Type, Py_nb_add) == (void *) PyLong_Type.tp_as_number->nb_add
        );
    });
#endif

    // Used by test41_implicit_conversion_keep_alive
    struct IncrementingStruct : Struct {
        IncrementingStruct(const Struct &s) : Struct(s) {
            i++;
        }
    };

    nb::class_<IncrementingStruct, Struct>(m, "IncrementingStruct")
        .def(nb::init_implicit<const Struct &>());

    m.def("get_destructed", []() {
        nb::list out;
        for (int i : struct_destructed)
            out.append(i);
        struct_destructed.clear();
        return out;
    });

    m.def(
        "get_incrementing_struct_value",
        [](IncrementingStruct &s) { return new Struct(s.i + 100); },
        nb::keep_alive<0, 1>());

    nb::class_<StructWithWeakrefs, Struct>(m, "StructWithWeakrefs", nb::is_weak_referenceable())
        .def(nb::init<int>());
    nb::class_<StructWithWeakrefsAndDynamicAttrs, Struct>(m, "StructWithWeakrefsAndDynamicAttrs",
                                                          nb::is_weak_referenceable(), nb::dynamic_attr())
        .def(nb::init<int>());

    // test50_weakref_with_slots_subclass
    struct StructWithWeakrefsOnly : Struct { };
    nb::class_<StructWithWeakrefsOnly, Struct>(m, "StructWithWeakrefsOnly", nb::is_weak_referenceable())
        .def(nb::init<int>());

    union Union {
        int i;
        float f;
    };

    nb::class_<Union>(m, "Union")
        .def(nb::init<>())
        .def_rw("i", &Union::i)
        .def_rw("f", &Union::f);

    struct HiddenBase {
        int value = 10;
        int vget() const { return value; }
        void vset(int v) { value = v; }
        int get_answer() const { return value * 10; }
    };

    struct BoundDerived : HiddenBase {
        virtual int polymorphic() { return value; }
    };

    // Members inherited from the (unbound) HiddenBase are exposed on BoundDerived
    nb::class_<BoundDerived>(m, "BoundDerived")
        .def(nb::init<>())
        .def_rw("value", &BoundDerived::value)
        .def_prop_rw("prop", &BoundDerived::vget, &BoundDerived::vset)
        .def("get_answer", &BoundDerived::get_answer)
        .def("polymorphic", &BoundDerived::polymorphic);

    nb::class_<UniqueInt>(m, "UniqueInt")
        .def(nb::new_(&UniqueInt::make))
        .def(nb::new_([](std::string s) {
            return UniqueInt::make(std::atoi(s.c_str()));
        }), "s"_a)
        .def("value", &UniqueInt::value)
        .def("lookups", &UniqueInt::lookups);

    // issue #786
    struct NewNone {};
    struct NewDflt { int value; };
    struct NewStarPosOnly { size_t value; };
    struct NewStar { size_t value; };

    nb::class_<NewNone>(m, "NewNone")
        .def(nb::new_([]() { return NewNone(); }));

    nb::class_<NewDflt>(m, "NewDflt")
        .def(nb::new_([](int value) { return NewDflt{value}; }),
             "value"_a = 42)
        .def_ro("value", &NewDflt::value);

    nb::class_<NewStarPosOnly>(m, "NewStarPosOnly")
        .def(nb::new_([](nb::args a, int value) {
            return NewStarPosOnly{nb::len(a) + value};
        }),
        "args"_a, "value"_a = 42)
        .def_ro("value", &NewStarPosOnly::value);

    nb::class_<NewStar>(m, "NewStar")
        .def(nb::new_([](nb::args a, int value, nb::kwargs k) {
            return NewStar{nb::len(a) + value + 10 * nb::len(k)};
        }),
        "args"_a, "value"_a = 42, "kwargs"_a)
        .def_ro("value", &NewStar::value);

    // issue #750
    PyCFunctionWithKeywords dummy_init = [](PyObject *, PyObject *,
                                            PyObject *) -> PyObject * {
        PyErr_SetString(PyExc_RuntimeError, "This should never be called!");
        return nullptr;
    };

    PyType_Slot init_slots[] {
        // the presence of this slot enables normal object construction via __init__ and __new__
        // instead of an optimized codepath within nanobind that skips these. That in turn
        // makes it possible to intercept calls and implement custom logic.
        { Py_tp_init, (void *) dummy_init },
        { 0, nullptr }
    };

    struct MonkeyPatchable {
        int value = 123;
    };

    nb::class_<MonkeyPatchable>(m, "MonkeyPatchable", nb::type_slots(init_slots))
        .def(nb::init<>())
        .def_static("custom_init", [](nb::handle_t<MonkeyPatchable> h) {
            if (nb::inst_ready(h))
                nb::raise_type_error("Input is already initialized!");
            MonkeyPatchable *p = nb::inst_ptr<MonkeyPatchable>(h);
            new (p) MonkeyPatchable{456};
            nb::inst_mark_ready(h);
        })
        .def_rw("value", &MonkeyPatchable::value);

    struct StaticPropertyOverride {};
    struct StaticPropertyOverride2 : public StaticPropertyOverride {};

    nb::class_<StaticPropertyOverride>(m, "StaticPropertyOverride")
        .def_prop_ro_static("x", [](nb::handle /*unused*/) { return 42; });
    nb::class_<StaticPropertyOverride2, StaticPropertyOverride>(m, "StaticPropertyOverride2")
        .def_prop_ro_static("x", [](nb::handle /*unused*/) { return 43; });

    // nanobind::detail::trampoline's constructor must be constexpr otherwise
    // the trampoline will not compile under MSVC
    struct ConstexprClass {
        constexpr ConstexprClass(int i) : something(i) {}
        virtual ~ConstexprClass() = default;
        virtual int getInt() const {
            return 1;
        };
        int something;
    };

    struct PyConstexprClass : ConstexprClass {
        NB_TRAMPOLINE(ConstexprClass, 1);

        int getInt() const override {
            NB_OVERRIDE(getInt);
        }
    };

    auto constexpr_class = nb::class_<ConstexprClass, PyConstexprClass>(m, "ConstexprClass")
        .def(nb::init<int>())
        .def("getInt", &ConstexprClass::getInt);

    m.def("constexpr_call_getInt", [](ConstexprClass *c) {
        return c->getInt();
    });

    auto never_destruct_class = nb::class_<NeverDestruct>(m, "NeverDestruct", nb::never_destruct())
        .def_static("make_ref", &NeverDestruct::make, nb::rv_policy::reference)
        .def("var", &NeverDestruct::var)
        .def("set_var", &NeverDestruct::set_var);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_classes.h | C/C++ Header | #pragma once
#include <memory>
// Singleton whose destructor must never be instantiated by the binding layer
// (bound with nb::never_destruct() in test_classes.cpp).
class NeverDestruct {
public:
    // Returns the singleton; bound with rv_policy::reference.
    static NeverDestruct& make();
    NeverDestruct(const NeverDestruct&) = delete;
    NeverDestruct& operator=(const NeverDestruct&) = delete;
    int var() const;
    void set_var(int i);
private:
    NeverDestruct();
    // incomplete type error if nanobind tries to instantiate the destructor
    struct NDImpl;
    std::unique_ptr<NDImpl> impl;
};
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_classes.py | Python | import sys
import test_classes_ext as t
import pytest
from common import skip_on_pypy, collect
def optional(arg: str, /) -> str:
    """Render an optional-type annotation string in the interpreter's style.

    Python >= 3.10 spells it ``X | None``; older versions use
    ``typing.Optional[X]``.
    """
    if sys.version_info >= (3, 10):
        return f"{arg} | None"
    return f"typing.Optional[{arg}]"
@pytest.fixture
def clean():
    """Run a GC pass and zero the extension's lifetime counters before a test."""
    collect()
    t.reset()
def assert_stats(**kwargs):
    """Check t.stats() against the expected counter values.

    Any counter not named in ``kwargs`` must be zero.
    """
    collect()
    for k, v in t.stats().items():
        if v != kwargs.get(k, 0):
            raise Exception(f"Mismatch for key {k}: {t.stats()}")
def test01_signature():
    """Docstrings generated for bound functions contain the full signatures."""
    assert t.Struct.__init__.__doc__ == (
        "__init__(self) -> None\n" "__init__(self, arg: int, /) -> None"
    )

    assert t.Struct.value.__doc__ == "value(self) -> int"
    assert t.Struct.create_move.__doc__ == "create_move() -> test_classes_ext.Struct"
    assert t.Struct.set_value.__doc__ == "set_value(self, value: int) -> None"
    assert t.Struct().set_value.__doc__ == "set_value(self, value: int) -> None"
    assert t.Struct.__doc__ == "Some documentation"
    assert t.Struct.static_test.__doc__ == (
        "static_test(arg: int, /) -> int\n" "static_test(arg: float, /) -> int"
    )
def test02_static_overload():
    """Static-method overloads dispatch on the argument type (int vs. float)."""
    assert t.Struct.static_test(1) == 1
    assert t.Struct.static_test(1.0) == 2
def test03_instantiate(clean):
    """Both init overloads work, and lifetime counters match construction."""
    s1: t.Struct = t.Struct()
    assert s1.value() == 5
    assert s1.value_plus(1, 2, 3, 4, 5, 6, 7) == 33
    s2 = t.Struct(10)
    assert s2.value() == 10
    del s1
    del s2
    assert_stats(default_constructed=1, value_constructed=1, destructed=2)
def test04_double_init():
    """Calling __init__ twice must warn and raise instead of re-initializing."""
    s = t.Struct()
    with pytest.warns(
        RuntimeWarning,
        match="nanobind: attempted to initialize an already-initialized instance of type",
    ):
        with pytest.raises(TypeError):
            s.__init__(3)
def test05_rv_policy(clean):
    """Each return-value policy (none/take/move/reference/copy) yields the
    expected constructor/destructor counts."""
    s = t.Struct()
    assert s.self() is s
    assert s.none() is None
    del s
    assert_stats(default_constructed=1, destructed=1)

    # ------ take_ownership
    t.reset()
    assert t.Struct.create_take().value() == 10
    assert_stats(value_constructed=1, destructed=1)

    # ------ move
    t.reset()
    assert t.Struct.create_move().value() == 11
    assert_stats(value_constructed=1, move_constructed=1, destructed=2)

    # ------ reference (no C++ activity at all)
    t.reset()
    assert t.Struct.create_reference().value() == 12
    assert_stats()

    # ------ copy
    t.reset()
    assert t.Struct.create_copy().value() == 12
    assert_stats(copy_constructed=1, destructed=1)
def test06_reference_internal(clean):
    """Members obtained from def_rw keep the owning PairStruct alive."""
    s = t.PairStruct()
    s1 = s.s1
    s2 = s.s2
    del s
    assert_stats(default_constructed=2)
    assert s2.value() == 5
    del s2
    # Parent is still alive through s1, so nothing is destructed yet
    assert_stats(default_constructed=2)
    assert s1.value() == 5
    del s1
    assert_stats(default_constructed=2, destructed=2)

    # ---- same pattern, different deletion order
    s = t.PairStruct()
    s1 = s.s1
    del s1
    del s
def test07_big():
    """Smoke test: allocate many large and over-aligned instances."""
    big_instances = [t.Big() for _ in range(1024)]
    aligned_instances = [t.BigAligned() for _ in range(1024)]
def test08_inheritance():
    """Bound subclasses behave polymorphically and respect isinstance checks."""
    dog = t.Dog("woof")
    cat = t.Cat("meow")
    assert dog.name() == "Dog"
    assert cat.name() == "Cat"
    assert dog.what() == "woof"
    assert cat.what() == "meow"
    assert isinstance(dog, t.Animal) and isinstance(dog, t.Dog)
    assert isinstance(cat, t.Animal) and isinstance(cat, t.Cat)
    assert t.go(dog) == "Dog says woof"
    assert t.go(cat) == "Cat says meow"
    assert t.animal_passthrough(dog) is dog
    assert t.animal_passthrough(cat) is cat
    assert t.dog_passthrough(dog) is dog
    # A Cat is not a Dog: the Dog* parameter must reject it
    with pytest.raises(TypeError):
        t.dog_passthrough(cat)
def test09_method_vectorcall():
    """Calling back into Python functions/methods forwards args correctly."""
    out = []

    def f(a, b, c, d, e):
        out.append((a, b, c, d, e))

    class MyClass:
        def f(self, a, b, c, d, e):
            self.out = (a, b, c, d, e)

    t.call_function(f)

    i = MyClass()
    t.call_method(i)
    assert out == [(1, 2, "hello", True, 4)]
    assert i.out == (1, 2, "hello", True, 4)
def test10_trampoline(clean):
    """Python subclasses of bound types dispatch through the trampoline."""
    for _ in range(10):

        class Dachshund(t.Animal):
            def __init__(self):
                super().__init__()

            def name(self):
                return "Dachshund"

            def what(self):
                return "yap"

        d = Dachshund()
        for _ in range(10):
            assert t.go(d) == "Dachshund says yap"
        assert t.animal_passthrough(d) is d

    a = 0

    class GenericAnimal(t.Animal):
        def what(self):
            return "goo"

        def void_ret(self):
            nonlocal a
            a += 1

        def name(self):
            # Exercise calling the C++ base implementation via super()
            return "Generic" + super().name()

    ga = GenericAnimal()
    assert t.go(ga) == "GenericAnimal says goo"
    assert t.void_ret(ga) is None
    assert a == 1
    assert t.animal_passthrough(ga) is ga

    del ga
    del d

    assert_stats(default_constructed=11, destructed=11)

    class GenericDog(t.Dog):
        pass

    d = GenericDog("GenericDog")
    assert t.dog_passthrough(d) is d
    assert t.animal_passthrough(d) is d
def test11_trampoline_failures():
    """Missing pure-virtual overrides and skipped __init__ fail gracefully."""

    class Incomplete(t.Animal):
        def __init__(self):
            super().__init__()

        def void_ret(self):
            raise TypeError("propagating an exception")

    d = Incomplete()
    # what() is pure virtual and not overridden in Python
    with pytest.raises(RuntimeError) as excinfo:
        t.go(d)
    assert (
        "test_classes.Incomplete::what()'): tried to call a pure virtual function!"
        in str(excinfo.value)
    )

    # Exceptions raised in overrides propagate back through C++
    with pytest.raises(TypeError) as excinfo:
        t.void_ret(d)
    assert "propagating an exception" in str(excinfo.value)

    class Incomplete2(t.Animal):
        def __init__(self):
            pass  # Missing call to super().__init__()

        def name(self):
            return "a"

        def what(self):
            return "b"

    with pytest.warns(
        RuntimeWarning,
        match="nanobind: attempted to access an uninitialized instance of type",
    ):
        with pytest.raises(TypeError) as excinfo:
            t.go(Incomplete2())
        assert "incompatible function arguments" in str(excinfo.value)
def test12_large_pointers():
    """Pointer values survive a round trip through Python, including values
    near the top of the address space."""
    import struct

    for i in range(1, 10):
        c = t.i2p(i)
        assert isinstance(c, t.Foo)
        assert t.p2i(c) == i

    # Largest representable pointer-sized value on this platform
    large = (1 << (struct.calcsize("P") * 8)) - 1

    for i in range(large - 10, large):
        c = t.i2p(i)
        assert isinstance(c, t.Foo)
        assert t.p2i(c) == i
def test13_implicitly_convertible():
    """Implicit conversions declared with init_implicit apply to by-value and
    optional arguments, but never to raw pointers (which would dangle)."""
    assert t.get_d.__doc__ == "get_d(arg: test_classes_ext.D, /) -> int"
    a = t.A(1)
    b = t.B(2)
    b2 = t.B2(3)
    c = t.C(4)
    i = 5

    # C has no implicit conversion to D registered
    with pytest.raises(TypeError) as excinfo:
        t.get_d(c)
    assert str(excinfo.value) == (
        "get_d(): incompatible function arguments. The following argument types are supported:\n"
        "    1. get_d(arg: test_classes_ext.D, /) -> int\n"
        "\n"
        "Invoked with types: test_classes_ext.C"
    )
    with pytest.raises(TypeError):
        t.get_optional_d(c)

    for obj, expected in ((a, 11), (b, 102), (b2, 103), (i, 10005)):
        assert t.get_d(obj) == expected
        assert t.get_optional_d(obj) == expected
        # The -1's here are because nb::cast() won't implicit-convert to a
        # pointer because it would dangle
        assert t.get_d_via_cast(obj) == (expected, -1, expected, -1)
        assert t.get_d_via_try_cast(obj) == (expected, -1, expected, -1)

    d = t.D(5)
    assert t.get_d(d) == 10005
    assert t.get_optional_d(d) == 10005
    assert t.get_d_via_cast(d) == (10005, 10005, 10005, 10005)
    assert t.get_d_via_try_cast(d) == (10005, 10005, 10005, 10005)

    assert t.get_optional_d(None) == -1
    assert t.get_d_via_cast(c) == (-1, -1, -1, -1)
    assert t.get_d_via_try_cast(c) == (-1, -1, -1, -1)
def test14_operators():
    """Bound operators (+, +=, -) behave like their C++ counterparts."""
    a = t.Int(1)
    b = t.Int(2)
    assert repr(a + b) == "3"
    # Int - Int is not bound (only Int - float)
    with pytest.raises(TypeError) as excinfo:
        assert repr(a - b) == "3"
    assert "unsupported operand type" in str(excinfo.value)
    assert repr(a - 2) == "-1"
    # In-place addition must mutate the existing object, not rebind
    a_before = id(a)
    a += b
    assert id(a) == a_before
    assert repr(a) == "3"
    assert repr(b) == "2"
    assert a.__add__("test") is NotImplemented
def test15_keep_alive_nbtype(clean):
    """keep_alive ties the lifetime of one nanobind instance to another."""
    t.reset()
    s = t.Struct()
    a = t.Dog("Rufus")
    assert t.keep_alive_arg(s, a) is a
    assert t.keep_alive_arg(s, a) is a
    del s
    # s survives as long as a is alive
    assert_stats(default_constructed=1)
    del a
    assert_stats(default_constructed=1, destructed=1)

    t.reset()
    s = t.Struct()
    a = t.Dog("Rufus")
    assert t.keep_alive_ret(a, s) is s
    assert t.keep_alive_ret(a, s) is s
    del a
    assert_stats(default_constructed=1)
    del s
    assert_stats(default_constructed=1, destructed=1)
def test16_keep_alive_custom(clean):
    """keep_alive also works with plain Python objects via weak references."""
    constructed = 0
    destructed = 0

    class Struct:
        def __init__(self):
            nonlocal constructed
            constructed += 1

        def __del__(self):
            nonlocal destructed
            destructed += 1

    class Struct2:
        def __init__(self):
            pass

    s = Struct()
    a = Struct2()
    assert t.keep_alive_arg(s, a) is a
    del s
    collect()
    assert constructed == 1 and destructed == 0
    del a
    collect()
    assert constructed == 1 and destructed == 1

    s = Struct()
    a = Struct2()
    assert t.keep_alive_ret(a, s) is s
    del a
    collect()
    assert constructed == 2 and destructed == 1
    del s
    collect()
    assert constructed == 2 and destructed == 2

    # 'int' instances cannot be weakly referenced -> keep_alive must fail
    with pytest.raises(RuntimeError) as excinfo:
        s = Struct()
        x = 5
        t.keep_alive_ret(x, s)
    assert "nanobind::detail::keep_alive(): could not create a weak reference!" in str(
        excinfo.value
    )
def f():
    """Plain module-level function; baseline for __name__/__qualname__ checks."""
    pass


class MyClass:
    """Pure-Python counterpart of the bound t.MyClass (with nested class)."""

    def f(self):
        pass

    class NestedClass:
        def f(self):
            pass
def test17_name_qualname_module():
    """Bound functions/classes expose the same __name__/__qualname__/__module__
    metadata as their pure-Python counterparts."""
    # First, check what CPython does
    assert f.__module__ == "test_classes"
    assert f.__name__ == "f"
    assert f.__qualname__ == "f"

    assert MyClass.__name__ == "MyClass"
    assert MyClass.__qualname__ == "MyClass"
    assert MyClass.__module__ == "test_classes"

    assert MyClass.f.__name__ == "f"
    assert MyClass.f.__qualname__ == "MyClass.f"
    assert MyClass.f.__module__ == "test_classes"
    assert MyClass().f.__name__ == "f"
    assert MyClass().f.__qualname__ == "MyClass.f"
    assert MyClass().f.__module__ == "test_classes"

    assert MyClass.NestedClass.__name__ == "NestedClass"
    assert MyClass.NestedClass.__qualname__ == "MyClass.NestedClass"
    assert MyClass.NestedClass.__module__ == "test_classes"

    assert MyClass.NestedClass.f.__name__ == "f"
    assert MyClass.NestedClass.f.__qualname__ == "MyClass.NestedClass.f"
    assert MyClass.NestedClass.f.__module__ == "test_classes"

    # Now, check the extension module
    assert t.f.__module__ == "test_classes_ext"
    assert t.f.__name__ == "f"
    assert t.f.__qualname__ == "f"
    assert type(t.f).__module__ == "nanobind"
    assert type(t.f).__name__ == "nb_func"
    assert type(t.f).__qualname__ == "nb_func"

    assert t.MyClass.__name__ == "MyClass"
    assert t.MyClass.__qualname__ == "MyClass"
    assert t.MyClass.__module__ == "test_classes_ext"

    assert t.MyClass.f.__name__ == "f"
    assert t.MyClass.f.__qualname__ == "MyClass.f"
    assert t.MyClass.f.__module__ == "test_classes_ext"
    assert t.MyClass().f.__name__ == "f"
    assert t.MyClass().f.__qualname__ == "MyClass.f"
    assert t.MyClass().f.__module__ == "test_classes_ext"
    assert type(t.MyClass.f).__module__ == "nanobind"
    assert type(t.MyClass.f).__name__ == "nb_method"
    assert type(t.MyClass.f).__qualname__ == "nb_method"

    assert t.MyClass.NestedClass.__name__ == "NestedClass"
    assert t.MyClass.NestedClass.__qualname__ == "MyClass.NestedClass"
    assert t.MyClass.NestedClass.__module__ == "test_classes_ext"

    assert t.MyClass.NestedClass.f.__name__ == "f"
    assert t.MyClass.NestedClass.f.__qualname__ == "MyClass.NestedClass.f"
    assert t.MyClass.NestedClass.f.__module__ == "test_classes_ext"
def test18_static_properties():
    # Static properties read/write through the class object; the value set via
    # StaticProperties is observable through StaticProperties2 and vice versa,
    # so both classes expose the same underlying static.
    assert t.StaticProperties.value == 23
    t.StaticProperties.value += 1
    assert t.StaticProperties.value == 24
    assert t.StaticProperties.get() == 24
    assert t.StaticProperties2.get() == 24
    t.StaticProperties2.value = 50
    assert t.StaticProperties2.get() == 50
    assert t.StaticProperties.get() == 50


@skip_on_pypy
def test19_static_properties_doc():
    # The static property docstring must survive into pydoc output.
    import pydoc
    assert "Static property docstring" in pydoc.render_doc(t.StaticProperties2)


def test20_supplement():
    # Only instances of the supplemented type pass the supplement check.
    c = t.ClassWithSupplement()
    assert t.check_supplement(c)
    assert not t.check_supplement(t.Struct())


def test21_type_callback():
    # ClassWithLen installed a __len__ slot via a type callback on the C++ side.
    o = t.ClassWithLen()
    assert len(o) == 123
def test22_low_level(clean):
    # Instances produced through the low-level instance API behave like
    # ordinary bound objects (values, aliasing, destruction counts).
    s1, s2, s3, s4 = t.test_lowlevel()
    assert s1.value() == 123 and s2.value() == 0 and s4.value() == 123
    assert s3.s1.value() == 123 and s3.s2.value() == 456
    # s3.s1 aliases s4 (same Python object).
    assert s3.s1 is s4
    del s1
    del s2
    del s3
    collect()
    # s4 remains valid after the other references are gone.
    assert s4.value() == 123
    del s4
    assert_stats(
        value_constructed=3, copy_constructed=1, move_constructed=1, destructed=5
    )


def test23_handle_t(clean):
    # The handle-typed parameter renders as the concrete type in the signature
    # and rejects unrelated argument types.
    assert (
        t.test_handle_t.__doc__
        == "test_handle_t(arg: test_classes_ext.Struct, /) -> object"
    )

    s = t.test_handle_t(t.Struct(5))
    assert s.value() == 5
    del s

    with pytest.raises(TypeError) as excinfo:
        t.test_handle_t("test")
    assert "incompatible function argument" in str(excinfo.value)

    # Only the one Struct(5) was created and destroyed (no copies).
    assert_stats(value_constructed=1, destructed=1)
def test24_type_object_t(clean):
    # A type-object parameter accepts the class itself, not instances of the
    # class and not unrelated type objects.
    assert (
        t.test_type_object_t.__doc__
        == "test_type_object_t(arg: type[test_classes_ext.Struct], /) -> object"
    )
    assert t.test_type_object_t(t.Struct) is t.Struct
    with pytest.raises(TypeError):
        t.test_type_object_t(t.Struct())
    with pytest.raises(TypeError):
        t.test_type_object_t(int)


def test25_none_arg():
    # none_0..none_2 reject None; none_3/none_4 accept it and advertise an
    # Optional annotation in their signatures.
    with pytest.raises(TypeError):
        t.none_0(None)
    with pytest.raises(TypeError):
        t.none_1(None)
    with pytest.raises(TypeError):
        t.none_2(arg=None)
    assert t.none_3(None) is True
    assert t.none_4(arg=None) is True
    assert t.none_0.__doc__ == "none_0(arg: test_classes_ext.Struct, /) -> bool"
    assert t.none_1.__doc__ == "none_1(arg: test_classes_ext.Struct) -> bool"
    assert t.none_2.__doc__ == "none_2(arg: test_classes_ext.Struct) -> bool"
    opt_struct = optional("test_classes_ext.Struct")
    assert t.none_3.__doc__ == f"none_3(arg: {opt_struct}) -> bool"
    assert t.none_4.__doc__ == f"none_4(arg: {opt_struct}) -> bool"


def test26_is_final():
    # Subclassing a final bound type must fail with a descriptive TypeError.
    with pytest.raises(TypeError) as excinfo:
        class MyType(t.FinalType):
            pass
    assert "The type 'test_classes_ext.FinalType' prohibits subclassing!" in str(
        excinfo.value
    )
def test27_dynamic_attr(clean):
    # Build 100 instances and wire them into reference cycles through dynamic
    # attributes (prev/next/self); all must still be reclaimed afterwards.
    l = [None] * 100
    for i in range(100):
        l[i] = t.StructWithAttr(i)

    # Create a big reference cycle..
    for i in range(100):
        l[i].prev = l[i - 1]
        l[i].next = l[i + 1 if i < 99 else 0]
        l[i].t = t.StructWithAttr
        l[i].self = l[i]

    for i in range(100):
        assert l[i].value() == i
        assert l[i].self.value() == i
        assert l[i].prev.value() == (i - 1 if i > 0 else 99)
        assert l[i].next.value() == (i + 1 if i < 99 else 0)

    del l
    # Every instance must have been destroyed despite the cycles.
    assert_stats(value_constructed=100, destructed=100)


def test28_copy_rvp():
    # rv_policy::copy must hand back a distinct object, not the referenced one.
    a = t.Struct.create_reference()
    b = t.Struct.create_copy()
    assert a is not b


def test29_pydoc():
    # The module docstring is visible through pydoc.
    import pydoc
    assert "Some documentation" in pydoc.render_doc(t)
def test30_property_assignment_instance():
    # Assigning a Struct to a struct-valued property stores a copy: the
    # assigned Python objects stay independent and keep their values.
    s = t.PairStruct()
    s1 = t.Struct(123)
    s2 = t.Struct(456)
    s.s1 = s1
    s.s2 = s2
    assert s2 is not s.s2 and s1 is not s.s1
    assert s.s1.value() == 123
    assert s.s2.value() == 456
    assert s1.value() == 123
    assert s2.value() == 456


# cpyext reference cycles are not supported, see
# https://foss.heptapod.net/pypy/pypy/-/issues/3849
@skip_on_pypy()
def test31_cycle():
    # A self-referential Wrapper must be collectable by the GC.
    a = t.Wrapper()
    a.value = a
    del a
    collect()


def test32_type_checks():
    # Exercise the bound isinstance-style helpers with an int and a Struct.
    v1 = 5
    v2 = t.Struct()
    assert t.is_int_1(v1) and not t.is_int_1(v2)
    assert t.is_int_2(v1) and not t.is_int_2(v2)
    assert not t.is_struct(v1) and t.is_struct(v2)


def test33_polymorphic_downcast():
    # Non-polymorphic factories return the declared base type; polymorphic
    # ones downcast to the most derived registered type.
    assert isinstance(t.factory(), t.Base)
    assert isinstance(t.factory_2(), t.Base)
    assert isinstance(t.polymorphic_factory(), t.PolymorphicSubclass)
    assert isinstance(t.polymorphic_factory_2(), t.PolymorphicBase)
def test34_trampoline_optimization():
    # A plain C++ Dog (d1) keeps dispatching name() natively even while the
    # Python-level class attribute is monkeypatched; a Python subclass
    # instance (d2) keeps using its Python override.
    class Rufus(t.Dog):
        def __init__(self):
            super().__init__("woof")

        def name(self):
            return "Rufus"

    # Run the sequence twice to check the behavior is stable across patching.
    for i in range(2):
        d1 = t.Dog("woof")
        d2 = Rufus()

        if i == 0:
            assert t.go(d1) == "Dog says woof"
            assert t.go(d2) == "Rufus says woof"

        old = t.Dog.name
        try:
            t.Dog.name = lambda self: "Max"
            assert t.go(d1) == "Dog says woof"
            assert t.go(d2) == "Rufus says woof"
        finally:
            # Always restore the original method for subsequent tests.
            t.Dog.name = old


def test35_method_introspection():
    # Bound methods expose the usual introspection surface.
    obj = t.Struct(5)
    m = obj.value
    assert m() == m.__call__() == 5
    assert hash(m) == m.__hash__()
    assert repr(m) == m.__repr__()
    assert "bound_method" in repr(m)
    assert m.__self__ is obj
    assert m.__func__ is t.Struct.value
    # attributes not defined by nb_bound_method are forwarded to nb_method:
    assert m.__name__ == "value"
    assert m.__qualname__ == "Struct.value"
    assert m.__module__ == t.__name__
    assert m.__doc__ == t.Struct.value.__doc__ == "value(self) -> int"
def test38_pickle(clean):
    # Round-trip a Struct through pickle; the pickled/unpickled counters on
    # the C++ side must each tick exactly once.
    import pickle
    s = t.Struct(123)
    s2 = pickle.dumps(s, protocol=pickle.HIGHEST_PROTOCOL)
    s3 = pickle.loads(s2)
    assert s.value() == s3.value()
    del s, s3
    assert_stats(value_constructed=1, pickled=1, unpickled=1, destructed=2)


def test39_try_cast(clean):
    # Exercise try_cast in its value, pointer, and scalar flavors, checking
    # exact constructor/destructor counts after each call.
    s = t.Struct(123)
    assert_stats(value_constructed=1)
    t.reset()

    # Value cast: success copy-assigns into a default-constructed Struct.
    rv, s2 = t.try_cast_1(s)
    assert rv is True and s2 is not s and s.value() == 123 and s2.value() == 123
    del s2
    assert_stats(
        default_constructed=1, move_constructed=2, copy_assigned=1, destructed=3
    )

    t.reset()
    # Value cast failure still returns the default-constructed Struct (value 5).
    rv, s2 = t.try_cast_1(None)
    assert rv is False and s2 is not s and s2.value() == 5
    del s2
    assert_stats(
        default_constructed=1, move_constructed=2, copy_assigned=0, destructed=3
    )

    t.reset()
    rv, s2 = t.try_cast_2(s)
    assert rv is True and s2 is not s and s.value() == 123 and s2.value() == 123
    del s2
    assert_stats(
        default_constructed=1, move_constructed=2, copy_assigned=1, destructed=3
    )

    t.reset()
    rv, s2 = t.try_cast_2(None)
    assert rv is False and s2 is not s and s2.value() == 5
    del s2
    assert_stats(
        default_constructed=1, move_constructed=2, copy_assigned=0, destructed=3
    )

    t.reset()
    # Pointer cast: success returns the identical Python object, no copies.
    rv, s2 = t.try_cast_3(s)
    assert rv is True and s2 is s and s.value() == 123
    del s2
    assert_stats()

    t.reset()
    # None converts to a null pointer, which counts as a successful cast.
    rv, s2 = t.try_cast_3(None)
    assert rv is True and s2 is None
    del s2
    assert_stats(
        default_constructed=0, move_constructed=0, copy_assigned=0, destructed=0
    )

    t.reset()
    rv, s2 = t.try_cast_2(1)
    assert rv is False
    del s2
    assert_stats(default_constructed=1, move_constructed=2, destructed=3)

    t.reset()
    rv, s2 = t.try_cast_3(1)
    assert rv is False and s2 is None
    del s2
    assert_stats()

    t.reset()
    # try_cast_4 targets int: a Struct fails, a Python int succeeds.
    rv, s2 = t.try_cast_4(s)
    assert rv is False and s2 == 0
    rv, s2 = t.try_cast_4(123)
    assert rv is True and s2 == 123
    del s, s2
    assert_stats(destructed=1)
def test40_slots():
    # The slots test is only compiled into some builds; skip when absent.
    if not hasattr(t, "test_slots"):
        pytest.skip()
    assert t.test_slots() == (True, True, True)


def test41_implicit_conversion_keep_alive():
    # Check that keep_alive references implicitly constructed arguments
    # as opposed to the original function arguments
    collect()
    t.get_destructed()
    a = t.Struct(5)
    b = t.get_incrementing_struct_value(a)
    d1 = t.get_destructed()
    assert b.value() == 106
    del a
    collect()
    d2 = t.get_destructed()
    collect()
    del b
    collect()
    d3 = t.get_destructed()
    # `a` (value 5) dies as soon as it is dropped; the implicitly converted
    # temporary (value 6) and the result (106) only die together with `b`.
    assert d1 == []
    assert d2 == [5]
    assert d3 == [106, 6]
def test42_weak_references():
    """Bound types declared weak-referenceable support ``weakref.ref``.

    Covers both the plain weakref-only type and the variant that also
    carries dynamic attributes; in each case the referent must become
    unreachable (weakref returns None) after deletion and collection.
    """
    # Fix: dropped an unused `import time` that served no purpose here.
    import weakref
    import gc

    o = t.StructWithWeakrefs(42)
    w = weakref.ref(o)
    assert w() is o
    del o
    # Two collections to make sure deferred finalization has run.
    gc.collect()
    gc.collect()
    assert w() is None

    p = t.StructWithWeakrefsAndDynamicAttrs(43)
    p.a_dynamic_attr = 101
    w = weakref.ref(p)
    assert w() is p
    assert w().a_dynamic_attr == 101
    del p
    gc.collect()
    gc.collect()
    assert w() is None
def test43_union():
    # Exercise getters/setters of the bound union's members.
    u = t.Union()
    u.i = 42
    assert u.i == 42
    u.f = 2.125
    assert u.f == 2.125


def test44_dynamic_attr_has_dict():
    # A dynamic-attribute type starts with an empty __dict__ that reflects
    # attributes added later.
    s = t.StructWithAttr(5)
    assert s.__dict__ == {}
    s.a_dynamic_attr = 101
    assert s.__dict__ == {"a_dynamic_attr": 101}


def test45_hidden_base():
    # Members inherited from an unbound intermediate base are exposed
    # directly on the bound derived type; value and prop share state.
    s = t.BoundDerived()
    assert s.value == 10
    s.value = 5
    assert s.prop == 5
    s.prop = 20
    assert s.value == 20
    assert s.get_answer() == 200
    assert s.polymorphic() == 20
def test46_custom_new():
    # UniqueInt uses a custom __new__ (nb::new_()) to intern instances:
    # constructing the same value twice yields the identical Python object,
    # and lookups() counts how often the cached instance was reused.
    import gc
    u1 = t.UniqueInt(10)
    assert u1.value() == 10 and u1.lookups() == 1
    u2 = t.UniqueInt(10)
    assert u1 is u2
    assert u1.lookups() == 2

    # test alternate constructor (accepts the value as a string, too)
    assert t.UniqueInt("10") is u1
    assert t.UniqueInt(s="10") is u1
    assert u1.lookups() == 4

    u3 = t.UniqueInt(20)
    assert u1 is not u3
    assert u3.value() == 20 and u3.lookups() == 1

    del u1
    assert u2.lookups() == 4
    assert u2 is t.UniqueInt(10)
    assert u2.lookups() == 5
    del u2
    gc.collect()
    gc.collect()
    # After all references died, a fresh object is created from scratch.
    u4 = t.UniqueInt(10)
    assert u4.value() == 10 and u4.lookups() == 1

    # As if unpickling: __new__ without __init__ yields an uninitialized
    # instance whose methods warn and raise.
    empty = t.UniqueInt.__new__(t.UniqueInt)
    with pytest.warns(RuntimeWarning, match="access an uninitialized instance"):
        with pytest.raises(TypeError):
            empty.value()

    # Make sure pickle support doesn't allow no-args construction by mistake
    with pytest.raises(TypeError):
        t.UniqueInt()
    with pytest.raises(RuntimeError):
        t.UniqueInt.__new__(int)

    # Make sure we do allow no-args construction for types that declare
    # such a __new__
    t.NewNone()
    assert t.NewDflt().value == 42
    assert t.NewDflt(10).value == 10
    assert t.NewStarPosOnly().value == 42
    assert t.NewStarPosOnly("hi").value == 43
    assert t.NewStarPosOnly(value=10).value == 10
    assert t.NewStarPosOnly("hi", "lo", value=10).value == 12
    assert t.NewStar().value == 42
    assert t.NewStar("hi").value == 43
    assert t.NewStar(value=10).value == 10
    assert t.NewStar("hi", "lo", value=10).value == 12
    assert t.NewStar(value=10, other="blah").value == 20

    # Make sure a Python class that derives from a C++ class that uses
    # nb::new_() can be instantiated producing the correct Python type
    class FancyInt(t.UniqueInt):
        @staticmethod
        def the_answer():
            return 42

        @property
        def value_as_string(self):
            return str(self.value())

    f1 = FancyInt(10)
    f2 = FancyInt(20)
    # The derived-type wrapping doesn't preserve Python identity...
    assert f1 is not FancyInt(10)
    # ... but does preserve C++ identity
    assert f1.lookups() == u4.lookups() == 3  # u4, f1, and anonymous
    assert f1.the_answer() == f2.the_answer() == 42
    assert f1.value_as_string == "10"
    assert f2.value_as_string == "20"


def test47_inconstructible():
    # Types bound without any constructor raise a descriptive TypeError.
    with pytest.raises(TypeError, match="no constructor defined"):
        t.Foo()
def test48_monkeypatchable():  # renamed: fixed "monekypatchable" typo
    """Verify that ``__init__`` of a bound type can be monkeypatched.

    Regression test for issue 750. Also restores the original ``__init__``
    afterwards so the patch cannot leak into unrelated tests (the original
    version left the class permanently patched).
    """
    q = t.MonkeyPatchable()
    assert q.value == 123

    def my_init(self):
        t.MonkeyPatchable.custom_init(self)

    orig_init = t.MonkeyPatchable.__init__
    t.MonkeyPatchable.__init__ = my_init
    try:
        q = t.MonkeyPatchable()
        assert q.value == 456
    finally:
        t.MonkeyPatchable.__init__ = orig_init
def test49_static_property_override():
    # A subclass can override an inherited static property with its own value.
    assert t.StaticPropertyOverride.x == 42
    assert t.StaticPropertyOverride2.x == 43


def test50_weakref_with_slots_subclass():
    """
    Test that Python subclasses work correctly with nb::is_weak_referenceable()
    base classes. The nb::is_weak_referenceable() flag causes nanobind to
    install tp_traverse/tp_clear callbacks. When Python subclasses add their
    own instance dictionaries (e.g., via managed dicts on Python 3.12+),
    subtype_traverse calls our tp_traverse. We must only traverse dicts/weaklists
    created by nanobind, not those added by Python.

    Regression test for issue #1201.
    """
    import gc

    # Create a Python subclass with __slots__
    class SubClass(t.StructWithWeakrefsOnly):
        __slots__ = 'hello',

    # Create a sub-subclass without __slots__ (which should get a __dict__)
    class SubSubClass(SubClass):
        pass

    # This should not crash
    x = SubSubClass(42)
    x.bye = 'blah'
    assert x.value() == 42
    assert x.bye == 'blah'

    # Trigger GC to ensure inst_traverse doesn't crash
    gc.collect()
    gc.collect()

    # Clean up
    del x
    gc.collect()


def test51_constexpr_trampoline():
    # A Python override of a constexpr-declared virtual is dispatched via the
    # trampoline when called from C++.
    class PyConstexprClass(t.ConstexprClass):
        def getInt(self):
            return 42

    c = PyConstexprClass(4)
    assert t.constexpr_call_getInt(c) == 42


def test52_noncopyable():
    # A non-copyable singleton can still be returned and used from Python.
    assert t.PrivateNonCopyable.get_instance().get_int() == 42


def test53_never_destruct():
    # Basic read/write access on the shared NeverDestruct instance.
    r = t.NeverDestruct.make_ref()
    r.set_var(5)
    assert r.var() == 5
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_classes_extra.cpp | C++ | #include "test_classes.h"
// Private implementation ("pimpl") record holding NeverDestruct's state.
struct NeverDestruct::NDImpl{
    int var = 0;
};

// Allocates the implementation record on construction.
NeverDestruct::NeverDestruct() {
    impl = std::make_unique<NeverDestruct::NDImpl>();
}

// Returns the stored value.
int NeverDestruct::var() const {
    return impl->var;
}

// Updates the stored value.
void NeverDestruct::set_var(int i) {
    impl->var = i;
}

// Returns a reference to a function-local static instance, constructed
// lazily on first use and shared by all callers.
NeverDestruct& NeverDestruct::make() {
    static NeverDestruct nd;
    return nd;
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_eigen.cpp | C++ | #include <nanobind/stl/complex.h>
#include <nanobind/eigen/dense.h>
#include <nanobind/eigen/sparse.h>
#include <nanobind/trampoline.h>
#include <iostream>
namespace nb = nanobind;
using namespace nb::literals;
// Bindings exercised by tests/test_eigen.py: dense and sparse Eigen type
// casters, Map/Ref/DRef parameter forms, implicit-conversion behavior
// (.noconvert()), return-value policies, and trampolined virtuals.
NB_MODULE(test_eigen_ext, m) {
    // --- Fixed-size int32 vectors/arrays; second argument never converts ---
    m.def("addV3i",
          [](const Eigen::Vector3i &a,
             const Eigen::Vector3i &b) -> Eigen::Vector3i { return a + b; },
          "a"_a, "b"_a.noconvert());
    m.def("addR3i",
          [](const Eigen::RowVector3i &a,
             const Eigen::RowVector3i &b) -> Eigen::RowVector3i { return a + b; },
          "a"_a, "b"_a.noconvert());
    m.def("addRefCnstV3i",
          [](const Eigen::Ref<const Eigen::Vector3i> &a,
             const Eigen::Ref<const Eigen::Vector3i> &b) -> Eigen::Vector3i { return a + b; },
          "a"_a, "b"_a.noconvert());
    m.def("addRefCnstR3i",
          [](const Eigen::Ref<const Eigen::RowVector3i>& a,
             const Eigen::Ref<const Eigen::RowVector3i>& b) -> Eigen::RowVector3i { return a + b; },
          "a"_a, "b"_a.noconvert());
    m.def("addA3i",
          [](const Eigen::Array3i &a,
             const Eigen::Array3i &b) -> Eigen::Array3i { return a + b; },
          "a"_a, "b"_a.noconvert());
    // Returns the unevaluated Eigen expression; the caster must evaluate it.
    m.def("addA3i_retExpr",
          [](const Eigen::Array3i &a,
             const Eigen::Array3i &b) { return a + b; },
          "a"_a, "b"_a.noconvert());

    // --- Dynamically sized vector ---
    m.def("addVXi",
          [](const Eigen::VectorXi &a,
             const Eigen::VectorXi &b) -> Eigen::VectorXi { return a + b; });

    // --- uint32 matrices in every fixed/dynamic x row/column-major combo ---
    using Matrix4uC = Eigen::Matrix<uint32_t, 4, 4, Eigen::ColMajor>;
    using Matrix4uR = Eigen::Matrix<uint32_t, 4, 4, Eigen::RowMajor>;
    using MatrixXuC = Eigen::Matrix<uint32_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>;
    using MatrixXuR = Eigen::Matrix<uint32_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

    m.def("addM4uCC",
          [](const Matrix4uC &a,
             const Matrix4uC &b) -> Matrix4uC { return a + b; });
    m.def("addMXuCC",
          [](const MatrixXuC &a,
             const MatrixXuC &b) -> MatrixXuC { return a + b; });
    m.def("addMXuCC_nc",
          [](const MatrixXuC &a,
             const MatrixXuC &b) -> MatrixXuC { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());
    m.def("addM4uRR",
          [](const Matrix4uR &a,
             const Matrix4uR &b) -> Matrix4uR { return a + b; });
    m.def("addMXuRR",
          [](const MatrixXuR &a,
             const MatrixXuR &b) -> MatrixXuR { return a + b; });
    m.def("addMXuRR_nc",
          [](const MatrixXuR &a,
             const MatrixXuR &b) -> MatrixXuR { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());
    m.def("addM4uCR",
          [](const Matrix4uC &a,
             const Matrix4uR &b) -> Matrix4uC { return a + b; });
    m.def("addMXuCR",
          [](const MatrixXuC &a,
             const MatrixXuR &b) -> MatrixXuC { return a + b; });
    m.def("addM4uRC",
          [](const Matrix4uR &a,
             const Matrix4uC &b) -> Matrix4uR { return a + b; });
    m.def("addMXuRC",
          [](const MatrixXuR &a,
             const MatrixXuC &b) -> MatrixXuR { return a + b; });

    // --- Eigen::Map parameters (require exactly matching memory layout) ---
    m.def("addMapMXuCC",
          [](const Eigen::Map<MatrixXuC>& a,
             const Eigen::Map<MatrixXuC>& b) -> MatrixXuC { return a + b; });
    m.def("addMapCnstMXuCC",
          [](const Eigen::Map<const MatrixXuC>& a,
             const Eigen::Map<const MatrixXuC>& b) -> MatrixXuC { return a + b; });
    m.def("addMapMXuRR",
          [](const Eigen::Map<MatrixXuR>& a,
             const Eigen::Map<MatrixXuR>& b) -> MatrixXuC { return a + b; });
    m.def("addMapCnstMXuRR",
          [](const Eigen::Map<const MatrixXuR>& a,
             const Eigen::Map<const MatrixXuR>& b) -> MatrixXuC { return a + b; });

    // --- Eigen::Ref parameters (const variants may copy-convert) ---
    m.def("addRefMXuCC",
          [](const Eigen::Ref<MatrixXuC>& a,
             const Eigen::Ref<MatrixXuC>& b) -> MatrixXuC { return a + b; });
    m.def("addRefCnstMXuCC",
          [](const Eigen::Ref<const MatrixXuC>& a,
             const Eigen::Ref<const MatrixXuC>& b) -> MatrixXuC { return a + b; });
    m.def("addRefCnstMXuCC_nc",
          [](const Eigen::Ref<const MatrixXuC>& a,
             const Eigen::Ref<const MatrixXuC>& b) -> MatrixXuC { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());
    m.def("addRefMXuRR",
          [](const Eigen::Ref<MatrixXuR>& a,
             const Eigen::Ref<MatrixXuR>& b) -> MatrixXuC { return a + b; });
    m.def("addRefCnstMXuRR",
          [](const Eigen::Ref<const MatrixXuR>& a,
             const Eigen::Ref<const MatrixXuR>& b) -> MatrixXuC { return a + b; });
    m.def("addRefCnstMXuRR_nc",
          [](const Eigen::Ref<const MatrixXuR>& a,
             const Eigen::Ref<const MatrixXuR>& b) -> MatrixXuC { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());

    // --- nb::DRef (dynamic-stride Ref): accepts arbitrary strides in place ---
    m.def("addDRefMXuCC_nc",
          [](const nb::DRef<MatrixXuC> &a,
             const nb::DRef<MatrixXuC> &b) -> MatrixXuC { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());
    m.def("addDRefMXuRR_nc",
          [](const nb::DRef<MatrixXuR>& a,
             const nb::DRef<MatrixXuR>& b) -> MatrixXuC { return a + b; },
          "a"_a.noconvert(), "b"_a.noconvert());
    // Mutates the caller's array in place (doubles every element).
    m.def("mutate_DRefMXuC", [](nb::DRef<MatrixXuC> a) { a *= 2; }, nb::arg().noconvert());

    // Writable Refs: in-place update must be visible on the Python side.
    m.def("updateRefV3i", [](Eigen::Ref<Eigen::Vector3i> a) { a[2] = 123; });
    m.def("updateRefV3i_nc", [](Eigen::Ref<Eigen::Vector3i> a) { a[2] = 123; }, nb::arg().noconvert());
    m.def("updateRefVXi", [](Eigen::Ref<Eigen::VectorXi> a) { a[2] = 123; });
    m.def("updateRefVXi_nc", [](Eigen::Ref<Eigen::VectorXi> a) { a[2] = 123; }, nb::arg().noconvert());

    // --- Sparse matrices (scipy.sparse csr/csc interop) ---
    using SparseMatrixR = Eigen::SparseMatrix<float, Eigen::RowMajor>;
    using SparseMatrixC = Eigen::SparseMatrix<float>;
    // Reference dense matrix; captured by value in the lambdas below.
    Eigen::MatrixXf mat(5, 6);
    mat <<
         0,  3,  0,  0,  0, 11,
        22,  0,  0,  0, 17, 11,
         7,  5,  0,  1,  0, 11,
         0,  0,  0,  0,  0, 11,
         0,  0, 14,  0,  8, 11;
    m.def("sparse_r", [mat]() -> SparseMatrixR {
        return Eigen::SparseView<Eigen::MatrixXf>(mat);
    });
    m.def("sparse_c", [mat]() -> SparseMatrixC {
        return Eigen::SparseView<Eigen::MatrixXf>(mat);
    });
    m.def("sparse_copy_r", [](const SparseMatrixR &m) -> SparseMatrixR { return m; });
    m.def("sparse_copy_c", [](const SparseMatrixC &m) -> SparseMatrixC { return m; });
    // Returning an uncompressed sparse matrix must raise on the Python side.
    m.def("sparse_r_uncompressed", []() -> SparseMatrixR {
        SparseMatrixR m(2,2);
        m.coeffRef(0,0) = 1.0f;
        assert(!m.isCompressed());
        return m.markAsRValue();
    });
    // This function doesn't appear to be called in tests/test_eigen.py
    m.def("sparse_complex", [](Eigen::SparseMatrix<std::complex<double>> x) -> Eigen::SparseMatrix<std::complex<double>> { return x; });
    m.def("sparse_complex_map_c", [](Eigen::Map<Eigen::SparseMatrix<std::complex<double>>> x) { return x; });
    // Sparse maps returned by reference share buffers with the Python input.
    m.def("sparse_map_c", [](const Eigen::Map<const SparseMatrixC> &c) { return c; }, nb::rv_policy::reference);
    m.def("sparse_map_r", [](const Eigen::Map<const SparseMatrixR> &r) { return r; }, nb::rv_policy::reference);
    // Zeroing through a mutable sparse map must be visible in the caller's data.
    m.def("sparse_update_map_to_zero_c", [](nb::object obj) {
        Eigen::Map<SparseMatrixC> c = nb::cast<Eigen::Map<SparseMatrixC>>(obj);
        for (int i = 0; i < c.nonZeros(); ++i) { c.valuePtr()[i] = 0; }
    });
    m.def("sparse_update_map_to_zero_r", [](nb::object obj) {
        Eigen::Map<SparseMatrixR> r = nb::cast<Eigen::Map<SparseMatrixR>>(obj);
        for (int i = 0; i < r.nonZeros(); ++i) { r.valuePtr()[i] = 0; }
    });

    /// issue #166: Eigen default arguments need NumPy at binding time
    using Matrix1d = Eigen::Matrix<double,1,1>;
    try {
        m.def(
            "default_arg", [](Matrix1d a, Matrix1d b) -> Matrix1d { return a + b; },
            "a"_a = Matrix1d::Zero(), "b"_a = Matrix1d::Zero());
    } catch (...) {
        // Ignore (NumPy not installed, etc.)
    }

    // Maps into a C++-owned buffer; reference_internal keeps the Buffer alive.
    struct Buffer {
        uint32_t x[30] { };

        using Map = Eigen::Map<Eigen::Array<uint32_t, 10, 3>>;
        using DMap = Eigen::Map<Eigen::Array<uint32_t, Eigen::Dynamic, Eigen::Dynamic>>;

        Map map() { return Map(x); }
        DMap dmap() { return DMap(x, 10, 3); }
    };

    nb::class_<Buffer>(m, "Buffer")
        .def(nb::init<>())
        .def("map", &Buffer::map, nb::rv_policy::reference_internal)
        .def("dmap", &Buffer::dmap, nb::rv_policy::reference_internal);

    // Eigen member exposed as read-only ref, read-only copy, and read-write.
    struct ClassWithEigenMember {
        Eigen::MatrixXd member = Eigen::Matrix2d::Ones();
        const Eigen::MatrixXd &get_member_ref() { return member; }
        const Eigen::MatrixXd get_member_copy() { return member; }
    };

    nb::class_<ClassWithEigenMember>(m, "ClassWithEigenMember")
        .def(nb::init<>())
        .def_prop_ro("member_ro_ref", &ClassWithEigenMember::get_member_ref)
        .def_prop_ro("member_ro_copy", &ClassWithEigenMember::get_member_copy)
        .def_rw("member", &ClassWithEigenMember::member);

    // nb::cast<> into the various Map/Ref forms; failures raise bad_cast.
    m.def("castToMapVXi", [](nb::object obj) {
        return nb::cast<Eigen::Map<Eigen::VectorXi>>(obj);
    });
    m.def("castToMapCnstVXi", [](nb::object obj) {
        return nb::cast<Eigen::Map<const Eigen::VectorXi>>(obj);
    });
    m.def("castToRefVXi", [](nb::object obj) -> Eigen::VectorXi {
        return nb::cast<Eigen::Ref<Eigen::VectorXi>>(obj);
    });
    m.def("castToRefCnstVXi", [](nb::object obj) -> Eigen::VectorXi {
        return nb::cast<Eigen::Ref<const Eigen::VectorXi>>(obj);
    });
    m.def("castToDRefCnstVXi", [](nb::object obj) -> Eigen::VectorXi {
        return nb::cast<nb::DRef<const Eigen::VectorXi>>(obj);
    });
    m.def("castToRef03CnstVXi", [](nb::object obj) -> Eigen::VectorXi {
        return nb::cast<Eigen::Ref<const Eigen::VectorXi, Eigen::Unaligned, Eigen::InnerStride<3>>>(obj);
    });

    // Virtuals taking/returning Eigen types, overridable from Python via a
    // trampoline; modifyRef* check write-through (or its rejection for const).
    struct Base {
        virtual ~Base() = default;
        virtual void modRefData(Eigen::Ref<Eigen::VectorXd>) {}
        virtual void modRefDataConst(Eigen::Ref<const Eigen::VectorXd>) {}
        virtual Eigen::VectorXd returnVecXd() { return { 1, 2 }; }
    };

    struct PyBase : Base {
        NB_TRAMPOLINE(Base, 3);

        void modRefData(Eigen::Ref<Eigen::VectorXd> a) override {
            NB_OVERRIDE_PURE(modRefData, a);
        }

        void modRefDataConst(Eigen::Ref<const Eigen::VectorXd> a) override {
            NB_OVERRIDE_PURE(modRefDataConst, a);
        }

        Eigen::VectorXd returnVecXd() override {
            NB_OVERRIDE_PURE(returnVecXd);
        }
    };

    nb::class_<Base, PyBase>(m, "Base")
        .def(nb::init<>())
        .def("modRefData", &Base::modRefData)
        .def("modRefDataConst", &Base::modRefDataConst);

    m.def("modifyRef", [](Base* base) {
        Eigen::Vector2d input(1.0, 2.0);
        base->modRefData(input);
        return input;
    });

    m.def("modifyRefConst", [](Base* base) {
        Eigen::Vector2d input(1.0, 2.0);
        base->modRefDataConst(input);
        return input;
    });

    m.def("returnVecXd", [](Base* base) {
        return base->returnVecXd();
    });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_eigen.py | Python | import pytest
import gc
import itertools
import re
import sys
# Import the test dependencies; when NumPy or the Eigen extension module is
# unavailable, turn the `needs_numpy_and_eigen` decorator into a skip marker
# so every decorated test is skipped instead of erroring.
try:
    import numpy as np
    from numpy.testing import assert_array_equal
    import test_eigen_ext as t

    def needs_numpy_and_eigen(x):
        # Dependencies available: decorator is a no-op pass-through.
        return x
# Fix: catch only ImportError instead of a bare `except:`, which also
# swallowed SystemExit/KeyboardInterrupt and hid unrelated errors.
except ImportError:
    needs_numpy_and_eigen = pytest.mark.skip(reason="NumPy and Eigen are required")
@needs_numpy_and_eigen
def test01_vector_fixed():
    # Fixed-size vector/array add bindings: implicit dtype conversion is
    # allowed for the first argument only (the second is .noconvert()),
    # and wrong sizes are rejected.
    a = np.array([1, 2, 3], dtype=np.int32)
    b = np.array([0, 1, 2], dtype=np.int32)
    c = np.array([1, 3, 5], dtype=np.int32)
    x = np.array([1, 3, 5, 6], dtype=np.int32)  # wrong length on purpose
    af = np.float32(a)
    bf = np.float32(b)

    assert_array_equal(t.addV3i(a, b), c)
    assert_array_equal(t.addR3i(a, b), c)
    assert_array_equal(t.addRefCnstV3i(a, b), c)
    assert_array_equal(t.addRefCnstR3i(a, b), c)
    assert_array_equal(t.addA3i(a, b), c)
    assert_array_equal(t.addA3i_retExpr(a, b), c)

    # Implicit conversion supported for first argument
    assert_array_equal(t.addV3i(af, b), c)
    assert_array_equal(t.addR3i(af, b), c)
    assert_array_equal(t.addRefCnstV3i(af, b), c)
    assert_array_equal(t.addRefCnstR3i(af, b), c)
    assert_array_equal(t.addA3i(af, b), c)

    # But not the second one
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addV3i(a, bf)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addR3i(a, bf)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addRefCnstV3i(a, bf)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addRefCnstR3i(a, bf)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addA3i(a, bf)

    # Catch size errors
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addV3i(x, b)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addR3i(x, b)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addRefCnstV3i(x, b)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.addA3i(x, b)


@needs_numpy_and_eigen
def test02_vector_dynamic():
    a = np.array([1, 2, 3], dtype=np.int32)
    b = np.array([0, 1, 2], dtype=np.int32)
    c = np.array([1, 3, 5], dtype=np.int32)
    x = np.arange(10000, dtype=np.int32)
    af = np.float32(a)

    # Check call with dynamically sized arrays
    assert_array_equal(t.addVXi(a, b), c)

    # Implicit conversion
    assert_array_equal(t.addVXi(af, b), c)

    # Try with a big array. This will move the result to avoid a copy
    assert_array_equal(t.addVXi(x, x), 2*x)


@needs_numpy_and_eigen
def test03_update_map():
    # Writable Eigen::Ref parameters: in-place writes (a[2] = 123) must be
    # visible in the caller's array; float inputs are rejected (a converted
    # copy could not write back).
    a = np.array([1, 2, 3], dtype=np.int32)
    b = np.array([1, 2, 123], dtype=np.int32)

    c = a.copy()
    t.updateRefV3i(c)
    assert_array_equal(c, b)

    c = a.copy()
    t.updateRefV3i_nc(c)
    assert_array_equal(c, b)

    c = a.copy()
    t.updateRefVXi(c)
    assert_array_equal(c, b)

    c = a.copy()
    t.updateRefVXi_nc(c)
    assert_array_equal(c, b)

    c = np.float32(a)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.updateRefV3i(c)

    c = np.float32(a)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.updateRefV3i_nc(c)

    c = np.float32(a)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.updateRefVXi(c)

    c = np.float32(a)
    with pytest.raises(TypeError, match='incompatible function arguments'):
        t.updateRefVXi_nc(c)


@needs_numpy_and_eigen
def test04_matrix():
    # All four C/F-contiguous and strided-view layouts, through every
    # row/column-major binding combination.
    A = np.vander((1, 2, 3, 4,))
    At = A.T
    assert A.flags['C_CONTIGUOUS']
    assert At.flags['F_CONTIGUOUS']
    # Av/Avt are negative-stride views into a larger array.
    base = np.zeros((A.shape[0] * 2, A.shape[1] * 2), A.dtype)
    base[::2, ::2] = A
    Av = base[-2::-2, -2::-2]
    assert Av.base is base
    Avt = Av.T
    assert Avt.base is base
    matrices = A, At, Av, Avt
    for addM in (t.addM4uCC, t.addM4uRR, t.addM4uCR, t.addM4uRC,
                 t.addMXuCC, t.addMXuRR, t.addMXuCR, t.addMXuRC):
        for left, right in itertools.product(matrices, matrices):
            assert_array_equal(addM(left, right), left + right)
@needs_numpy_and_eigen
@pytest.mark.parametrize("rowStart", (0, 1))
@pytest.mark.parametrize("colStart", (0, 2))
@pytest.mark.parametrize("rowStep", (1, 2, -2))
@pytest.mark.parametrize("colStep", (1, 3, -3))
@pytest.mark.parametrize("transpose", (False, True))
def test05_matrix_large_nonsymm(rowStart, colStart, rowStep, colStep, transpose):
    # Sliced/transposed views of a large matrix: Map requires exact
    # contiguity, Ref requires a contiguous inner stride, const Ref accepts
    # anything (by copying if needed), and DRef accepts any strides in place.
    A = np.uint32(np.vander(np.arange(80)))
    if rowStep < 0:
        rowStart = -rowStart - 1
    if colStep < 0:
        colStart = -colStart - 1
    A = A[rowStart::rowStep, colStart::colStep]
    if transpose:
        A = A.T
    A2 = A + A
    assert_array_equal(t.addMXuCC(A, A), A2)
    assert_array_equal(t.addMXuRR(A, A), A2)
    assert_array_equal(t.addMXuCR(A, A), A2)
    assert_array_equal(t.addMXuRC(A, A), A2)
    assert_array_equal(t.addDRefMXuCC_nc(A, A), A2)
    assert_array_equal(t.addDRefMXuRR_nc(A, A), A2)

    # Row-major Map only works on C-contiguous input.
    if A.flags['C_CONTIGUOUS']:
        assert_array_equal(t.addMapMXuRR(A, A), A2)
        assert_array_equal(t.addMapCnstMXuRR(A, A), A2)
    else:
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addMapMXuRR(A, A)
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addMapCnstMXuRR(A, A)
    # const Ref accepts any layout and even a different dtype view...
    assert_array_equal(t.addRefCnstMXuRR(A, A), A2)
    assert_array_equal(t.addRefCnstMXuRR(A.view(np.int32), A), A2)
    assert_array_equal(t.addRefCnstMXuRR_nc(A, A), A2)
    # ...unless conversion is disabled.
    with pytest.raises(TypeError, match="incompatible function arguments"):
        t.addRefCnstMXuRR_nc(A.view(np.int32), A)
    # Mutable row-major Ref needs a unit inner (column) stride.
    if A.strides[1] == A.itemsize:
        assert_array_equal(t.addRefMXuRR(A, A), A2)
    else:
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addRefMXuRR(A, A)

    # Column-major Map only works on F-contiguous input.
    if A.flags['F_CONTIGUOUS']:
        assert_array_equal(t.addMapMXuCC(A, A), A2)
        assert_array_equal(t.addMapCnstMXuCC(A, A), A2)
    else:
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addMapMXuCC(A, A)
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addMapCnstMXuCC(A, A)
    assert_array_equal(t.addRefCnstMXuCC(A, A), A2)
    assert_array_equal(t.addRefCnstMXuCC(A.view(np.int32), A), A2)
    assert_array_equal(t.addRefCnstMXuCC_nc(A, A), A2)
    with pytest.raises(TypeError, match="incompatible function arguments"):
        t.addRefCnstMXuCC_nc(A.view(np.int32), A)
    # Mutable column-major Ref needs a unit inner (row) stride.
    if A.strides[0] == A.itemsize:
        assert_array_equal(t.addRefMXuCC(A, A), A2)
    else:
        with pytest.raises(TypeError, match="incompatible function arguments"):
            t.addRefMXuCC(A, A)

    A = np.ascontiguousarray(A)
    assert A.flags['C_CONTIGUOUS']
    assert_array_equal(t.addMXuRR_nc(A, A), A2)

    A = np.asfortranarray(A)
    assert A.flags['F_CONTIGUOUS']
    assert_array_equal(t.addMXuCC_nc(A, A), A2)


@needs_numpy_and_eigen
def test06_map():
    # Maps returned with reference_internal stay valid and share memory even
    # after the Python-side Buffer reference is dropped.
    b = t.Buffer()
    m = b.map()
    dm = b.dmap()
    for i in range(10):
        for j in range(3):
            m[i, j] = i*3+j
    for i in range(10):
        for j in range(3):
            assert dm[i, j] == i*3+j
    del dm
    del b
    gc.collect()
    gc.collect()
    # `m` keeps the underlying Buffer alive.
    for i in range(10):
        for j in range(3):
            assert m[i, j] == i*3+j


@needs_numpy_and_eigen
def test07_mutate_arg():
    # mutate_DRefMXuC doubles the argument in place.
    A = np.uint32(np.vander(np.arange(10)))
    A2 = A.copy()
    t.mutate_DRefMXuC(A)
    assert_array_equal(A, 2*A2)
def create_spmat_unsorted():
    """Build a 5x3 CSC matrix whose per-column row indices are unsorted.

    Used to verify that the sparse caster copes with SciPy matrices that
    do not have sorted indices.
    """
    import scipy.sparse as sparse

    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    # Row indices, deliberately NOT ascending within each column; a properly
    # sorted CSC layout would use [0, 1, 2, 3, 4].
    row_indices = np.array([0, 2, 1, 4, 3])
    # Column pointers: where each column's entries begin in values/row_indices.
    col_pointers = np.array([0, 2, 3, 5])

    spmat = sparse.csc_matrix((values, row_indices, col_pointers), shape=(5, 3))

    # Sanity check: SciPy must still consider the indices unsorted.
    assert not spmat.has_sorted_indices
    return spmat
@needs_numpy_and_eigen
def test08_sparse():
    # Sparse round-trips: row-major Eigen -> csr_matrix, column-major -> csc,
    # including major-order conversion and unsorted-index inputs.
    pytest.importorskip("scipy")
    import scipy.sparse

    # no isinstance here because we want strict type equivalence
    assert type(t.sparse_r()) is scipy.sparse.csr_matrix
    assert type(t.sparse_c()) is scipy.sparse.csc_matrix
    assert type(t.sparse_copy_r(t.sparse_r())) is scipy.sparse.csr_matrix
    assert type(t.sparse_copy_c(t.sparse_c())) is scipy.sparse.csc_matrix
    assert type(t.sparse_copy_r(t.sparse_c())) is scipy.sparse.csr_matrix
    assert type(t.sparse_copy_c(t.sparse_r())) is scipy.sparse.csc_matrix

    def assert_sparse_equal_ref(sparse_mat):
        # Reference dense values; must match `mat` in test_eigen.cpp.
        ref = np.array(
            [
                [0.0, 3, 0, 0, 0, 11],
                [22, 0, 0, 0, 17, 11],
                [7, 5, 0, 1, 0, 11],
                [0, 0, 0, 0, 0, 11],
                [0, 0, 14, 0, 8, 11],
            ]
        )
        assert_array_equal(sparse_mat.toarray(), ref)

    assert_sparse_equal_ref(t.sparse_r())
    assert_sparse_equal_ref(t.sparse_c())
    assert_sparse_equal_ref(t.sparse_copy_r(t.sparse_r()))
    assert_sparse_equal_ref(t.sparse_copy_c(t.sparse_c()))
    assert_sparse_equal_ref(t.sparse_copy_r(t.sparse_c()))
    assert_sparse_equal_ref(t.sparse_copy_c(t.sparse_r()))

    # construct scipy matrix with unsorted indices
    assert type(t.sparse_copy_c(create_spmat_unsorted())) is scipy.sparse.csc_matrix
    mat_unsort = create_spmat_unsorted()
    assert_array_equal(t.sparse_copy_c(mat_unsort).toarray(), create_spmat_unsorted().toarray())


@needs_numpy_and_eigen
def test09_sparse_failures():
    # Error paths of the sparse caster. NOTE(review): this test deliberately
    # sabotages scipy.sparse and sys.path and restores them at the end; a
    # failure partway through can leak the sabotage into other tests.
    sp = pytest.importorskip("scipy.sparse")

    # Uncompressed matrices cannot be returned.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "nanobind: unable to return an Eigen sparse matrix that is not in a compressed format. Please call `.makeCompressed()` before returning the value on the C++ end."
        ),
    ):
        t.sparse_r_uncompressed()

    # A broken csr_matrix attribute surfaces as a TypeError...
    csr_matrix = sp.csr_matrix
    sp.csr_matrix = None
    with pytest.raises(TypeError, match=re.escape("'NoneType' object is not callable")):
        t.sparse_r()

    # ...a missing one as an AttributeError...
    del sp.csr_matrix
    with pytest.raises(
        AttributeError,
        match=re.escape("'scipy.sparse' has no attribute 'csr_matrix'"),
    ):
        t.sparse_r()

    # ...and an unimportable scipy as ModuleNotFoundError.
    sys_path = sys.path
    sys.path = []
    del sys.modules["scipy"]
    with pytest.raises(ModuleNotFoundError, match=re.escape("No module named 'scipy'")):
        t.sparse_r()

    # undo sabotage of the module
    sys.path = sys_path
    sp.csr_matrix = csr_matrix
@needs_numpy_and_eigen
def test10_eigen_scalar_default():
x = t.default_arg()
assert x==0
@needs_numpy_and_eigen
def test11_prop():
for j in range(3):
c = t.ClassWithEigenMember()
ref = np.ones((2, 2))
if j == 0:
c.member = ref
for i in range(2):
member = c.member
if j == 2 and i == 0:
member[0, 0] = 10
ref[0, 0] = 10
assert_array_equal(member, ref)
del member
gc.collect()
gc.collect()
member = c.member
assert_array_equal(c.member_ro_ref, ref)
assert_array_equal(c.member_ro_copy, ref)
del c
gc.collect()
gc.collect()
assert_array_equal(member, ref)
@needs_numpy_and_eigen
def test12_cast():
    """Casting to Eigen Map/Ref types: contiguous int32 works, strided or
    float inputs are rejected (or accepted only by the const Ref variants,
    which may convert)."""
    vec = np.arange(1000, dtype=np.int32)
    vec2 = vec[::2]           # non-contiguous view
    vecf = np.float32(vec)    # wrong dtype
    assert_array_equal(t.castToMapVXi(vec), vec)
    assert_array_equal(t.castToMapCnstVXi(vec), vec)
    assert_array_equal(t.castToRefVXi(vec), vec)
    assert_array_equal(t.castToRefCnstVXi(vec), vec)
    assert t.castToMapVXi(vec).flags.writeable
    assert not t.castToMapCnstVXi(vec).flags.writeable
    assert_array_equal(t.castToDRefCnstVXi(vec), vec)
    for v in vec2, vecf:
        # Mutable Map/Ref cannot bind to strided or mistyped input
        with pytest.raises(RuntimeError, match="bad[_ ]cast"):
            t.castToMapVXi(v)
        with pytest.raises(RuntimeError, match="bad[_ ]cast"):
            t.castToRefVXi(v)
        assert_array_equal(t.castToRefCnstVXi(v), v)
    assert_array_equal(t.castToDRefCnstVXi(vec2), vec2)
    with pytest.raises(RuntimeError, match="bad[_ ]cast"):
        t.castToDRefCnstVXi(vecf)
    for v in vec, vec2, vecf:
        # Ref with a fixed inner stride of 3 rejects all of these inputs
        with pytest.raises(RuntimeError, match='bad[_ ]cast'):
            t.castToRef03CnstVXi(v)
@needs_numpy_and_eigen
def test13_mutate_python():
    """Mutating Eigen arguments from a Python subclass overriding C++ virtuals.

    Mutation through a non-const reference is observed by C++; attempting it
    through a const reference raises, and a 'pass' (None) return from a method
    declared to return VectorXd fails the return cast.
    """
    class Derived(t.Base):
        def modRefData(self, input):
            input[0] = 3.0
        def modRefDataConst(self, input):
            # Writing to the (read-only) view raises ValueError in modifyRefConst
            input[0] = 3.0
        def returnVecXd(self):
            pass
    vecRef = np.array([3.0, 2.0])
    der = Derived()
    assert_array_equal(t.modifyRef(der), vecRef)
    with pytest.raises(ValueError):
        t.modifyRefConst(der)
    with pytest.raises(RuntimeError, match="bad[_ ]cast"):
        t.returnVecXd(der)
@needs_numpy_and_eigen
def test14_single_element():
    # A 1x1 matrix must be accepted by a binding taking dynamic Eigen matrices.
    mat = np.array([[1]], dtype=np.uint32)
    assert mat.ndim == 2
    assert mat.shape == (1, 1)
    t.addMXuCC(mat, mat)
@needs_numpy_and_eigen
def test15_sparse_map():
    """Check that the sparse *map* casters alias (rather than copy) SciPy data.

    Fix: removed stray C-style trailing semicolons after two statements.
    """
    scipy = pytest.importorskip("scipy")
    def assert_same_array(a, b):
        # Same shape and same underlying data pointer => zero-copy view
        assert a.shape == b.shape
        assert a.__array_interface__['data'] == b.__array_interface__['data']
    def assert_same_sparse_array(a, b):
        # A sparse matrix aliases iff all three of its buffers alias
        assert_same_array(a.data, b.data)
        assert_same_array(a.indices, b.indices)
        assert_same_array(a.indptr, b.indptr)
    c1 = scipy.sparse.csc_matrix([[1, 0], [0, 1]], dtype=np.float32)
    c2 = t.sparse_map_c(c1)
    assert_same_sparse_array(c1, c2)
    r1 = scipy.sparse.csr_matrix([[1, 0], [0, 1]], dtype=np.float32)
    r2 = t.sparse_map_r(r1)
    assert_same_sparse_array(r1, r2)
    # Implicit CSR <-> CSC conversion is not permitted by the map type caster
    with pytest.raises(TypeError):
        t.sparse_map_c(r1)
    with pytest.raises(TypeError):
        t.sparse_map_r(c1)
    # In-place mutation through the map must be visible on the Python side
    assert c1.sum() != 0
    t.sparse_update_map_to_zero_c(c1)
    assert c1.sum() == 0
    assert r1.sum() != 0
    t.sparse_update_map_to_zero_r(r1)
    assert r1.sum() == 0
    # Implicit type conversion is not permitted by the map type caster
    c1 = scipy.sparse.csc_matrix([[1, 0], [0, 1]], dtype=np.float64)
    r1 = scipy.sparse.csr_matrix([[1, 0], [0, 1]], dtype=np.float64)
    with pytest.raises(TypeError):
        t.sparse_map_c(c1)
    with pytest.raises(TypeError):
        t.sparse_map_r(r1)
@needs_numpy_and_eigen
def test16_sparse_complex():
    # Round-trip a complex-valued CSC matrix through a by-value binding
    scipy = pytest.importorskip("scipy")
    c1 = scipy.sparse.csc_matrix([[1j+2, 0], [-3j, 1]], dtype=np.complex128)
    c2 = t.sparse_complex(c1)
    assert np.array_equal(c1.todense(), c2.todense())
@needs_numpy_and_eigen
def test17_sparse_map_complex():
    # Same as test16, but through the zero-copy sparse map caster
    scipy = pytest.importorskip("scipy")
    c1 = scipy.sparse.csc_matrix([[1j+2, 0], [-3j, 1]], dtype=np.complex128)
    c2 = t.sparse_complex_map_c(c1)
    assert np.array_equal(c1.todense(), c2.todense())
@needs_numpy_and_eigen
def test18_zero_size_vec():
    # Test for stride issues after numpy 2.4, when using
    # zero-size arrays. (NOTE(review): the original comment was cut off here;
    # presumably NumPy >= 2.4 reports different strides for empty arrays — confirm.)
    a = np.ones((0, 2), dtype=np.uint32, order='C')
    b = np.ones((0, 2), dtype=np.uint32, order='C')
    print(a.strides)
    print(b.strides)
    assert_array_equal(t.addRefCnstMXuCC(a, b), a + b)
    assert_array_equal(t.addRefCnstMXuCC_nc(a, b), a + b)
    assert_array_equal(t.addMapCnstMXuCC(a, b), a + b)
    # Zero-length vectors must also pass the Ref/Map casters
    c = np.zeros(0, dtype=np.int32)
    assert_array_equal(t.castToRefVXi(c), c)
    assert_array_equal(t.castToMapCnstVXi(c), c)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_enum.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/operators.h>
#include <nanobind/stl/string.h>
namespace nb = nanobind;
// Unsigned enumeration; 'C' wraps around to the maximal 32-bit value
enum class Enum : uint32_t { A, B, C = (uint32_t) -1 };
// Bit-flag style enumeration (bound with nb::is_flag() below)
enum class Flag : uint32_t { A = 1, B = 2, C = 4};
// 64-bit flag enumeration used to exercise overflow protection
enum class UnsignedFlag : uint64_t {
    A = 1 << 0,
    B = 1 << 1,
    All = (uint64_t) -1,
};
// Signed enumeration; 'C' is negative
enum class SEnum : int32_t { A, B, C = (int32_t) -1 };
// Old-style (unscoped) enumeration
enum ClassicEnum { Item1, Item2 };
// Helper exposing an enum-valued read-only property (test for issue #39)
struct EnumProperty { Enum get_enum() { return Enum::A; } };
// Enum bound as an opaque class instead of via nb::enum_
enum class OpaqueEnum { X, Y };
NB_MAKE_OPAQUE(OpaqueEnum)
// Enum with members named 'name' and 'value' to test stubgen (issue #1246)
enum class Item { name, value, extra };
NB_MODULE(test_enum_ext, m) {
    // Plain unsigned enum with per-value docstrings
    nb::enum_<Enum>(m, "Enum", "enum-level docstring")
        .value("A", Enum::A, "Value A")
        .value("B", Enum::B, "Value B")
        .value("C", Enum::C, "Value C");

    // Flag enum: members are combinable with bitwise operators
    nb::enum_<Flag>(m, "Flag", "enum-level docstring", nb::is_flag())
        .value("A", Flag::A, "Value A")
        .value("B", Flag::B, "Value B")
        .value("C", Flag::C, "Value C")
        .export_values();

    // 64-bit flag enum (overflow-protection test)
    nb::enum_<UnsignedFlag>(m, "UnsignedFlag", nb::is_flag())
        .value("A", UnsignedFlag::A, "Value A")
        .value("B", UnsignedFlag::B, "Value B")
        .value("All", UnsignedFlag::All, "All values");

    // Arithmetic enum: supports +, -, shifts, comparisons with numbers, ...
    nb::enum_<SEnum>(m, "SEnum", nb::is_arithmetic())
        .value("A", SEnum::A)
        .value("B", SEnum::B)
        .value("C", SEnum::C);

    auto ce = nb::enum_<ClassicEnum>(m, "ClassicEnum")
        .value("Item1", ClassicEnum::Item1)
        .value("Item2", ClassicEnum::Item2)
        .export_values();

    // Methods, properties, and static methods can be attached to enum types
    ce.def("get_value", [](ClassicEnum &x) { return (int) x; })
      .def_prop_ro("my_value", [](ClassicEnum &x) { return (int) x; })
      .def("foo", [](ClassicEnum x) { return x; })
      .def_static("bar", [](ClassicEnum x) { return x; });

    // Conversions enum <-> integer; '.noconvert()' disables implicit int input
    m.def("from_enum", [](Enum value) { return (uint32_t) value; }, nb::arg().noconvert());
    m.def("to_enum", [](uint32_t value) { return (Enum) value; });
    m.def("from_enum", [](Flag value) { return (uint32_t) value; }, nb::arg().noconvert());
    m.def("to_flag", [](uint32_t value) { return (Flag) value; });
    m.def("from_enum", [](SEnum value) { return (int32_t) value; }, nb::arg().noconvert());
    m.def("to_unsigned_flag", [](uint64_t value) { return (UnsignedFlag) value; });
    m.def("from_enum", [](UnsignedFlag value) { return (uint64_t) value; }, nb::arg().noconvert());

    m.def("from_enum_implicit", [](Enum value) { return (uint32_t) value; });
    m.def("from_enum_default_0", [](Enum value) { return (uint32_t) value; }, nb::arg("value") = Enum::A);
    m.def("from_enum_implicit", [](Flag value) { return (uint32_t) value; });
    m.def("from_enum_default_0", [](Flag value) { return (uint32_t) value; }, nb::arg("value") = Enum::A);
    m.def("from_enum_default_1", [](SEnum value) { return (uint32_t) value; }, nb::arg("value") = SEnum::A);

    // test for issue #39
    nb::class_<EnumProperty>(m, "EnumProperty")
        .def(nb::init<>())
        .def_prop_ro("read_enum", &EnumProperty::get_enum);

    // Opaque enum bound as a regular class, constructible from a string
    auto oe = nb::class_<OpaqueEnum>(m, "OpaqueEnum")
        .def_prop_ro_static("X", [](nb::object&){return OpaqueEnum::X;})
        .def_prop_ro_static("Y", [](nb::object&){return OpaqueEnum::Y;})
        .def(nb::init<>())
        .def("__init__", [](OpaqueEnum* p, std::string v){
            if (v == "X") new (p) OpaqueEnum{OpaqueEnum::X};
            else if (v == "Y") new (p) OpaqueEnum{OpaqueEnum::Y};
            else throw std::runtime_error(v);
        })
        .def(nb::self == nb::self);
    nb::implicitly_convertible<std::string, OpaqueEnum>();

    // Enum with members named 'name' and 'value' (issue #1246)
    nb::enum_<Item>(m, "Item")
        .value("name", Item::name)
        .value("value", Item::value)
        .value("extra", Item::extra);
    m.def("item_to_int", [](Item i) { return (int) i; }, nb::arg("item") = Item::name);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_enum.py | Python | import test_enum_ext as t
import pytest
def test01_unsigned_enum():
    """Basic behavior of an unsigned enum: repr/str, names, docstrings,
    values, lookup by value, identity of members, and int conversions."""
    assert repr(t.Enum.A) == 'Enum.A'
    assert str(t.Enum.A) == 'Enum.A'
    assert repr(t.Enum.B) == 'Enum.B'
    assert str(t.Enum.B) == 'Enum.B'
    assert repr(t.Enum.C) == 'Enum.C'
    assert str(t.Enum.C) == 'Enum.C'
    assert t.Enum.A.name == 'A'
    assert t.Enum.B.name == 'B'
    assert t.Enum.C.name == 'C'
    assert t.Enum.A.__name__ == 'A'
    assert t.Enum.B.__name__ == 'B'
    assert t.Enum.C.__name__ == 'C'
    assert t.Enum.__doc__ == 'enum-level docstring'
    assert t.Enum.A.__doc__ == 'Value A'
    assert t.Enum.B.__doc__ == 'Value B'
    assert t.Enum.C.__doc__ == 'Value C'
    assert t.Enum.A.value == 0
    assert t.Enum.B.value == 1
    assert t.Enum.C.value == 0xffffffff
    # Construction by value returns the singleton member
    assert t.Enum(0) is t.Enum.A
    assert t.Enum(1) is t.Enum.B
    assert t.Enum(0xffffffff) is t.Enum.C
    assert t.Enum(t.Enum.A) is t.Enum.A
    assert t.Enum(t.Enum.B) is t.Enum.B
    assert t.Enum(t.Enum.C) == t.Enum.C
    assert t.from_enum(t.Enum.A) == 0
    assert t.from_enum(t.Enum.B) == 1
    assert t.from_enum(t.Enum.C) == 0xffffffff
    assert t.to_enum(0).__name__ == 'A'
    assert t.to_enum(0) == t.Enum.A
    assert t.to_enum(1) == t.Enum.B
    assert t.to_enum(0xffffffff) == t.Enum.C
    # Out-of-range values are rejected in both directions
    with pytest.raises(ValueError) as excinfo:
        t.to_enum(5)
    assert '5 is not a valid Enum' in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        t.Enum(123)
    assert '123 is not a valid Enum' in str(excinfo.value)
    # Implicit int -> enum conversion works only for valid values
    assert t.from_enum_implicit(0) == 0
    with pytest.raises(TypeError):
        t.from_enum_implicit(123)
def test02_signed_enum():
    """Signed enum: negative values round-trip correctly."""
    assert repr(t.SEnum.A) == 'SEnum.A'
    assert repr(t.SEnum.B) == 'SEnum.B'
    assert repr(t.SEnum.C) == 'SEnum.C'
    assert str(t.SEnum.A) == 'SEnum.A'
    assert str(t.SEnum.B) == 'SEnum.B'
    assert str(t.SEnum.C) == 'SEnum.C'
    assert int(t.SEnum.A) == 0
    assert int(t.SEnum.B) == 1
    assert int(t.SEnum.C) == -1
    assert t.SEnum.A.value == 0
    assert t.SEnum.B.value == 1
    assert t.SEnum.C.value == -1
    assert t.SEnum(0) is t.SEnum.A
    assert t.SEnum(1) is t.SEnum.B
    assert t.SEnum(-1) is t.SEnum.C
    assert t.from_enum(t.SEnum.A) == 0
    assert t.from_enum(t.SEnum.B) == 1
    assert t.from_enum(t.SEnum.C) == -1
def test03_enum_arithmetic():
    """Arithmetic is available only on nb::is_arithmetic() enums (SEnum),
    and only with numeric operands."""
    assert t.SEnum.B + 2 == 3
    assert t.SEnum.B + 2.5 == 3.5
    assert 2 + t.SEnum.B == 3
    assert 2.5 + t.SEnum.B == 3.5
    assert t.SEnum.B >> t.SEnum.B == 0
    assert t.SEnum.B << t.SEnum.B == 2
    assert -t.SEnum.B == -1 and -t.SEnum.C == 1
    assert t.SEnum.B & t.SEnum.B == 1
    assert t.SEnum.B & ~t.SEnum.B == 0
    # Non-arithmetic enums and non-numeric operands are rejected
    with pytest.raises(TypeError, match="unsupported operand type"):
        t.Enum.B + 2
    with pytest.raises(TypeError, match="unsupported operand type"):
        t.SEnum.B - "1"
    with pytest.raises(TypeError, match="unsupported operand type"):
        t.SEnum.B >> 1.0
def test04_enum_export():
    # export_values() re-exports members at module scope
    assert t.Item1 is t.ClassicEnum.Item1 and t.Item1.value == 0
    assert t.Item2 is t.ClassicEnum.Item2 and t.Item2.value == 1
# test for issue #39
def test05_enum_property():
    # A C++ property returning an enum yields a proper enum member
    w = t.EnumProperty()
    assert w.read_enum == t.Enum.A
    assert str(w.read_enum) == 'Enum.A'
def test08_enum_comparisons():
    """Comparisons of an arithmetic enum member against ints, floats, and
    unrelated objects must mirror comparisons of its integer value."""
    assert int(t.SEnum.B) == 1
    for enum in (t.SEnum,):
        value = getattr(enum, "B")
        assert value != str(int(value))
        assert value != int(value) + 0.4
        assert value < int(value) + 0.4
        # All six comparison operators agree with plain-int semantics
        for i in (0, 0.5, 1, 1.5, 2):
            assert (value == i) == (int(value) == i)
            assert (value != i) == (int(value) != i)
            assert (value < i) == (int(value) < i)
            assert (value <= i) == (int(value) <= i)
            assert (value >= i) == (int(value) >= i)
            assert (value > i) == (int(value) > i)
            assert (i == value) == (i == int(value))
            assert (i != value) == (i != int(value))
            assert (i < value) == (i < int(value))
            assert (i <= value) == (i <= int(value))
            assert (i >= value) == (i >= int(value))
            assert (i > value) == (i > int(value))
        # Equality with unrelated objects is False; ordering raises
        for unrelated in (None, "hello", "1"):
            assert value != unrelated and unrelated != value
            assert not (value == unrelated) and not (unrelated == value)
            with pytest.raises(TypeError):
                value < unrelated
            with pytest.raises(TypeError):
                unrelated < value
    # different enum types never compare equal ...
    assert t.Enum.B != t.SEnum.B and t.SEnum.B != t.Enum.B
    assert not (t.Enum.B == t.SEnum.B) and not (t.SEnum.B == t.Enum.B)
    assert t.Enum.B != t.SEnum.C and t.SEnum.C != t.Enum.B
def test06_enum_flag():
    """Flag enums: bitwise combination, repr of combined flags, flag mask,
    and 64-bit unsigned flag values."""
    # repr / str tests
    assert repr(t.Flag.A) == 'Flag.A'
    assert str(t.Flag.A) == 'Flag.A'
    assert repr(t.Flag.B) == 'Flag.B'
    assert str(t.Flag.B) == 'Flag.B'
    assert repr(t.Flag.C) == 'Flag.C'
    assert str(t.Flag.C) == 'Flag.C'
    # Member order in combined reprs varies across Python versions
    assert repr(t.Flag.A | t.Flag.B) in ['Flag.A|B', 'Flag.B|A']
    assert str(t.Flag.A | t.Flag.B) in ['Flag.A|B', 'Flag.B|A']
    assert repr(t.Flag.A | t.Flag.B | t.Flag.C) in ['Flag.A|B|C', 'Flag.C|B|A']
    assert str(t.Flag.A | t.Flag.B | t.Flag.C) in ['Flag.A|B|C', 'Flag.C|B|A']
    # Flag membership tests
    assert (t.Flag(1) | t.Flag(2)).value == 3
    assert (t.Flag(3) & t.Flag(1)).value == 1
    assert (t.Flag(3) ^ t.Flag(1)).value == 2
    assert (t.Flag(3) == (t.Flag.A | t.Flag.B))
    # ensure the flag mask is set correctly by enum_append in Python 3.11+
    if hasattr(t.Flag, "_flag_mask_"):
        assert t.Flag._flag_mask_ == 7
    assert (t.from_enum(t.Flag.A | t.Flag.C) == 5)
    assert (t.from_enum_implicit(t.Flag(1) | t.Flag(4)) == 5)
    # unsigned flag tests to verify correct type casting behavior
    # (in particular, overflow protection in enum_from_python.)
    assert (t.UnsignedFlag(1) | t.UnsignedFlag(2)).value == 3
    assert t.UnsignedFlag.A.value == 1
    assert t.UnsignedFlag.B.value == 2
    assert t.UnsignedFlag.All.value == 0xffffffffffffffff
    assert t.UnsignedFlag(t.UnsignedFlag.A) is t.UnsignedFlag.A
    assert t.UnsignedFlag(t.UnsignedFlag.B) is t.UnsignedFlag.B
    assert t.UnsignedFlag(t.UnsignedFlag.All) is t.UnsignedFlag.All
    assert t.from_enum(t.UnsignedFlag.A) == 1
    assert t.from_enum(t.UnsignedFlag.B) == 2
    assert t.from_enum(t.UnsignedFlag.All) == 0xffffffffffffffff
    assert t.to_flag(1) == t.Flag.A
    assert t.to_flag(2) == t.Flag.B
    assert t.to_flag(4) == t.Flag.C
    assert t.to_flag(5) == (t.Flag.A | t.Flag.C)
def test09_enum_methods():
    # Methods, properties and static methods attached to enum types work
    assert t.Item1.my_value == 0 and t.Item2.my_value == 1
    assert t.Item1.get_value() == 0 and t.Item2.get_value() == 1
    assert t.Item1.foo() == t.Item1
    assert t.ClassicEnum.bar(t.Item1) == t.Item1
def test10_enum_opaque():
    # Opaque enum is a regular class, constructible from a string
    assert t.OpaqueEnum.X == t.OpaqueEnum("X") and t.OpaqueEnum.Y == t.OpaqueEnum("Y")
def test11_enum_name_value_members():
    # Test for issue #1246: enums with members named 'name' or 'value'.
    # When an enum has members named 'name' or 'value', accessing .name/.value
    # returns the enum member instead of the attribute. Use _name_/_value_
    # (the underscore-protected aliases) to reach the real attributes.
    assert t.Item.name._value_ == 0
    assert t.Item.value._value_ == 1
    assert t.Item.extra._value_ == 2
    assert t.Item.name._name_ == 'name'
    assert t.Item.value._name_ == 'value'
    assert t.Item.extra._name_ == 'extra'
    # Such members must still be usable as arguments and as defaults
    assert t.item_to_int(t.Item.name) == 0
    assert t.item_to_int(t.Item.value) == 1
    assert t.item_to_int(t.Item.extra) == 2
    assert t.item_to_int() == 0  # default is Item.name
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_eval.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/eval.h>
#include <nanobind/stl/pair.h>
namespace nb = nanobind;
NB_MODULE(test_eval_ext, m) {
    // Globals of the __main__ module, captured once and shared by the lambdas
    auto global = nb::dict(nb::module_::import_("__main__").attr("__dict__"));

    // Execute multi-statement Python snippets with explicit global/local dicts
    m.def("test_eval_statements", [global]() {
        auto local = nb::dict();
        local["call_test"] = nb::cpp_function([&]() -> int { return 42; });

        // Regular string literal
        nb::exec("message = 'Hello World!'\n"
                 "x = call_test()",
                 global,
                 local);

        // Multi-line raw string literal
        nb::exec(R"(
if x == 42:
    print(message)
else:
    raise RuntimeError
)",
                 global,
                 local);

        auto x = nb::cast<int>(local["x"]);
        return x == 42;
    });

    // Evaluate a single expression and cast the result
    m.def("test_eval", [global]() {
        auto local = nb::dict();
        local["x"] = nb::int_(42);
        auto x = nb::eval("x", global, local);
        return nb::cast<int>(x) == 42;
    });

    // eval_single_statement mode: executes one statement, returns None
    m.def("test_eval_single_statement", []() {
        auto local = nb::dict();
        local["call_test"] = nb::cpp_function([&]() -> int { return 42; });
        auto result = nb::eval<nb::eval_single_statement>("x = call_test()", nb::dict(), local);
        auto x = nb::cast<int>(local["x"]);
        return result.is_none() && x == 42;
    });

    // Syntax errors surface as nb::python_error
    m.def("test_eval_failure", []() {
        try {
            nb::eval("nonsense code ...");
        } catch (nb::python_error &) {
            return true;
        }
        return false;
    });

    // test_eval_closure: exec'd 'def' closes over globals, not locals
    m.def("test_eval_closure", []() {
        nb::dict global;
        global["closure_value"] = 42;
        nb::dict local;
        local["closure_value"] = 0;
        nb::exec(R"(
local_value = closure_value

def func_global():
    return closure_value

def func_local():
    return local_value
)",
                 global,
                 local);
        return std::make_pair(global, local);
    });

    // nb::globals() reflects the Python caller's global dictionary
    m.def("globals_contains_a", []() {
        return nb::globals().contains("a");
    });
    m.def("globals_add_b", []() {
        auto globals = nb::globals();
        globals["b"] = 123;
        return globals;
    });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_eval.py | Python | import os
import pytest
import test_eval_ext as m
def test_evals(capsys):
    """Drive the C++-side eval/exec helpers; the exec'd snippet prints."""
    assert m.test_eval_statements()
    # The snippet executed in C++ prints 'Hello World!' on success
    captured = capsys.readouterr()
    assert captured.out == "Hello World!\n"
    assert m.test_eval()
    assert m.test_eval_single_statement()
    assert m.test_eval_failure()
def test_eval_closure():
    """Names assigned by exec land in 'local', while functions defined by
    exec resolve free variables against 'global' — so func_local fails."""
    global_, local = m.test_eval_closure()
    assert global_["closure_value"] == 42
    assert local["closure_value"] == 0
    assert "local_value" not in global_
    assert local["local_value"] == 0
    assert "func_global" not in global_
    assert local["func_global"]() == 42
    assert "func_local" not in global_
    # local_value only exists in 'local', which the function cannot see
    with pytest.raises(NameError):
        local["func_local"]()
# Module-level variable observed by the extension through nb::globals()
a = 1
def test_read_globals():
    # nb::globals() sees this module's globals (which contain 'a')
    assert m.globals_contains_a()
def test_write_globals():
    # Writes through nb::globals() are visible in this module's namespace
    assert "b" not in globals()
    m.globals_add_b()
    assert globals()["b"] == 123
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_exception.cpp | C++ | #include <nanobind/nanobind.h>
namespace nb = nanobind;
// Handled by nanobind's default translator (std::exception -> RuntimeError)
class MyError1 : public std::exception {
public:
    virtual const char *what() const noexcept { return "MyError1"; }
};
// Translated to IndexError by the custom translator registered below
class MyError2 : public std::exception {
public:
    virtual const char *what() const noexcept { return "MyError2"; }
};
// Exposed to Python as its own exception type via nb::exception<>
class MyError3 : public std::exception {
public:
    virtual const char *what() const noexcept { return "MyError3"; }
};
NB_MODULE(test_exception_ext, m) {
    // Standard C++ exceptions, converted by nanobind's built-in translator
    m.def("raise_generic", [] { throw std::exception(); });
    m.def("raise_bad_alloc", [] { throw std::bad_alloc(); });
    m.def("raise_runtime_error", [] { throw std::runtime_error("a runtime error"); });
    m.def("raise_domain_error", [] { throw std::domain_error("a domain error"); });
    m.def("raise_invalid_argument", [] { throw std::invalid_argument("an invalid argument error"); });
    m.def("raise_length_error", [] { throw std::length_error("a length error"); });
    m.def("raise_out_of_range", [] { throw std::out_of_range("an out of range error"); });
    m.def("raise_range_error", [] { throw std::range_error("a range error"); });
    m.def("raise_overflow_error", [] { throw std::overflow_error("an overflow error"); });
    // nanobind's built-in Python-exception wrapper types
    m.def("raise_index_error", [] { throw nb::index_error("an index error"); });
    m.def("raise_key_error", [] { throw nb::key_error("a key error"); });
    m.def("raise_value_error", [] { throw nb::value_error("a value error"); });
    m.def("raise_type_error", [] { throw nb::type_error("a type error"); });
    m.def("raise_import_error", [] { throw nb::import_error("an import error"); });
    m.def("raise_attribute_error", [] { throw nb::attribute_error("an attribute error"); });
    m.def("raise_stop_iteration", [] { throw nb::stop_iteration("a stop iteration error"); });
    m.def("raise_my_error_1", [] { throw MyError1(); });

    // Custom translator: convert MyError2 into a Python IndexError
    nb::register_exception_translator(
        [](const std::exception_ptr &p, void * /* unused */) {
            try {
                std::rethrow_exception(p);
            } catch (const MyError2 &e) {
                PyErr_SetString(PyExc_IndexError, e.what());
            }
        });
    m.def("raise_my_error_2", [] { throw MyError2(); });

    // Register MyError3 as a new Python exception type on the module
    nb::exception<MyError3>(m, "MyError3");
    m.def("raise_my_error_3", [] { throw MyError3(); });

    // Chain a Python exception from a callback under a RuntimeError cause
    m.def("raise_nested", [](nb::callable c) {
        int arg = 123;
        try {
            c(arg);
        } catch (nb::python_error &e) {
            nb::raise_from(e, PyExc_RuntimeError, "Call with value %i failed", arg);
        }
    }
    );
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_exception.py | Python | import test_exception_ext as t
import pytest
# Each test below triggers a C++ exception in the extension module and checks
# the translated Python exception type and (where applicable) its message.
def test01_base():
    with pytest.raises(RuntimeError):
        assert t.raise_generic()
def test02_bad_alloc():
    with pytest.raises(MemoryError):
        assert t.raise_bad_alloc()
def test03_runtime_error():
    with pytest.raises(RuntimeError) as excinfo:
        assert t.raise_runtime_error()
    assert str(excinfo.value) == 'a runtime error'
def test04_domain_error():
    with pytest.raises(ValueError) as excinfo:
        assert t.raise_domain_error()
    assert str(excinfo.value) == 'a domain error'
def test05_invalid_argument():
    with pytest.raises(ValueError) as excinfo:
        assert t.raise_invalid_argument()
    assert str(excinfo.value) == 'an invalid argument error'
def test06_length():
    with pytest.raises(ValueError) as excinfo:
        assert t.raise_length_error()
    assert str(excinfo.value) == 'a length error'
def test07_out_of_range():
    with pytest.raises(IndexError) as excinfo:
        assert t.raise_out_of_range()
    assert str(excinfo.value) == 'an out of range error'
def test08_range_error():
    with pytest.raises(ValueError) as excinfo:
        assert t.raise_range_error()
    assert str(excinfo.value) == 'a range error'
def test09_overflow_error():
    with pytest.raises(OverflowError) as excinfo:
        assert t.raise_overflow_error()
    assert str(excinfo.value) == 'an overflow error'
def test10_index_error():
    with pytest.raises(IndexError) as excinfo:
        assert t.raise_index_error()
    assert str(excinfo.value) == 'an index error'
def test11_key_error():
    with pytest.raises(KeyError) as excinfo:
        assert t.raise_key_error()
    # KeyError's str() wraps the message in quotes
    assert str(excinfo.value) == "'a key error'"
def test12_value_error():
    with pytest.raises(ValueError) as excinfo:
        assert t.raise_value_error()
    assert str(excinfo.value) == 'a value error'
def test13_type_error():
    with pytest.raises(TypeError) as excinfo:
        assert t.raise_type_error()
    assert str(excinfo.value) == 'a type error'
def test14_import_error():
    with pytest.raises(ImportError) as excinfo:
        assert t.raise_import_error()
    assert str(excinfo.value) == 'an import error'
def test15_attribute_error():
    with pytest.raises(AttributeError) as excinfo:
        assert t.raise_attribute_error()
    assert str(excinfo.value) == 'an attribute error'
def test16_stop_iteration():
    with pytest.raises(StopIteration) as excinfo:
        assert t.raise_stop_iteration()
    assert str(excinfo.value) == 'a stop iteration error'
def test17_raise_my_error_1():
    # Default translation of a custom std::exception subclass
    with pytest.raises(RuntimeError) as excinfo:
        assert t.raise_my_error_1()
    assert str(excinfo.value) == 'MyError1'
def test18_raise_my_error_2():
    # Custom translator maps MyError2 to IndexError
    with pytest.raises(IndexError) as excinfo:
        assert t.raise_my_error_2()
    assert str(excinfo.value) == 'MyError2'
def test20_nested():
def foo(arg):
return arg / 0
with pytest.raises(RuntimeError) as excinfo:
t.raise_nested(foo)
assert str(excinfo.value) == 'Call with value 123 failed'
assert str(excinfo.value.__cause__) == 'division by zero'
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_functions.cpp | C++ | #include <string.h>
#include <nanobind/nanobind.h>
#include <nanobind/stl/function.h>
#include <nanobind/stl/pair.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/vector.h>
namespace nb = nanobind;
using namespace nb::literals;
// Set to 1 by my_call_guard's constructor and 2 by its destructor; lets the
// tests observe that nb::call_guard<> brackets the bound function call.
int call_guard_value = 0;
struct my_call_guard {
    my_call_guard() { call_guard_value = 1; }
    ~my_call_guard() { call_guard_value = 2; }
};
// Example call policy for use with nb::call_policy<>. Each call will add
// an entry to `calls` containing the arguments tuple and return value.
// The return value will be recorded as "<unfinished>" if the function
// did not return (still executing or threw an exception) and as
// "<return conversion failed>" if the function returned something that we
// couldn't convert to a Python object.
// Additional features to test particular interactions:
// - the precall hook will throw if any arguments are not strings
// - any argument equal to "swapfrom" will be replaced by a temporary
//   string object equal to "swapto", which will be destroyed at end of call
// - the postcall hook will throw if any argument equals "postthrow"
struct example_policy {
    // Log of (arguments tuple, return value) pairs, one entry per call
    static inline std::vector<std::pair<nb::tuple, nb::object>> calls;
    static void precall(PyObject **args, size_t nargs,
                        nb::detail::cleanup_list *cleanup) {
        PyObject* tup = PyTuple_New(nargs);
        for (size_t i = 0; i < nargs; ++i) {
            if (!PyUnicode_CheckExact(args[i])) {
                Py_DECREF(tup);
                throw std::runtime_error("expected only strings");
            }
            if (0 == PyUnicode_CompareWithASCIIString(args[i], "swapfrom")) {
                nb::object replacement = nb::cast("swapto");
                args[i] = replacement.ptr();
                // The cleanup list keeps the replacement alive until call end
                cleanup->append(replacement.release().ptr());
            }
            // PyTuple_SetItem steals a reference, hence the preceding INCREF
            Py_INCREF(args[i]);
            PyTuple_SetItem(tup, i, args[i]);
        }
        calls.emplace_back(nb::steal<nb::tuple>(tup), nb::cast("<unfinished>"));
    }
    static void postcall(PyObject **args, size_t nargs, nb::handle ret) {
        if (!ret.is_valid()) {
            calls.back().second = nb::cast("<return conversion failed>");
        } else {
            calls.back().second = nb::borrow(ret);
        }
        for (size_t i = 0; i < nargs; ++i) {
            if (0 == PyUnicode_CompareWithASCIIString(args[i], "postthrow")) {
                throw std::runtime_error("postcall exception");
            }
        }
    }
};
// Wrapper for an unsigned long parsed from a Python string
struct numeric_string {
    unsigned long number;
};
// Custom type caster: accepts only Python str objects consisting entirely of
// decimal digits; conversion back to Python is deliberately unsupported.
template <> struct nb::detail::type_caster<numeric_string> {
    NB_TYPE_CASTER(numeric_string, const_name("str"))
    bool from_python(handle h, uint8_t flags, cleanup_list* cleanup) noexcept {
        make_caster<const char*> str_caster;
        if (!str_caster.from_python(h, flags, cleanup))
            return false;
        const char* str = str_caster.operator cast_t<const char*>();
        if (!str)
            return false;
        char* endp;
        value.number = strtoul(str, &endp, 10);
        // Succeed only if the string was non-empty and fully consumed
        return *str && !*endp;
    }
    static handle from_cpp(numeric_string, rv_policy, handle) noexcept {
        return nullptr;
    }
};
// Free function with a 'noexcept' specifier; used to test binding of pointers
// to noexcept functions (whose type differs from plain function pointers).
int test_31(int i) noexcept { return i; }
NB_MODULE(test_functions_ext, m) {
m.doc() = "function testcase";
// Function without inputs/outputs
m.def("test_01", []() { });
// Simple binary function (via function pointer)
auto test_02 = [](int up, int down) -> int { return up - down; };
m.def("test_02", (int (*)(int, int)) test_02, "up"_a = 8, "down"_a = 1);
// Simple binary function with capture object
int i = 42;
m.def("test_03", [i](int j, int k) -> int { return i + j - k; });
// Large capture object requiring separate storage
uint64_t k = 10, l = 11, m_ = 12, n = 13, o = 14;
m.def("test_04", [k, l, m_, n, o]() -> int { return (int) (k + l + m_ + n + o); });
// Overload chain with two docstrings
m.def("test_05", [](int) -> int { return 1; }, "doc_1");
nb::object first_overload = m.attr("test_05");
m.def("test_05", [](float) -> int { return 2; }, "doc_2");
#if !defined(PYPY_VERSION) && !defined(Py_GIL_DISABLED)
// Make sure we don't leak the previous member of the overload chain
// (pypy's refcounts are bogus and will not help us with this check)
if (first_overload.ptr()->ob_refcnt != 1) {
throw std::runtime_error("Overload was leaked!");
}
#endif
first_overload.reset();
// Test an overload chain that always repeats the same docstring
m.def("test_05b", [](int) -> int { return 1; }, "doc_1");
m.def("test_05b", [](float) -> int { return 2; }, "doc_1");
// Test an overload chain with an empty docstring
m.def("test_05c", [](int) -> int { return 1; }, "doc_1");
m.def("test_05c", [](float) -> int { return 2; }, "");
/// Function raising an exception
m.def("test_06", []() { throw std::runtime_error("oops!"); });
/// Function taking some positional/keyword args and nb::[kw]args
m.def("test_07", [](int, int, nb::args args, nb::kwargs kwargs) {
return std::make_pair(args.size(), kwargs.size());
});
/// As above, but with nb::arg annotations
m.def("test_07", [](int, int, nb::args args, nb::kwargs kwargs) {
return std::make_pair(args.size(), kwargs.size());
}, "a"_a, "b"_a, "myargs"_a, "mykwargs"_a);
/// Function with eight arguments
m.def("test_simple",
[](int i0, int i1, int i2, int i3, int i4, int i5, int i6, int i7) {
return i0 + i1 + i2 + i3 + i4 + i5 + i6 - i7;
});
/// Test successful/unsuccessful tuple conversion, with rich output types
m.def("test_tuple", []() -> nb::typed<nb::tuple, std::string, int> {
return nb::make_tuple("Hello", 123); });
m.def("test_bad_tuple", []() -> nb::typed<nb::object, std::pair<std::string, nb::object>> {
struct Foo{}; return nb::make_tuple("Hello", Foo()); });
/// Perform a Python function call from C++
m.def("test_call_1", [](nb::typed<nb::object, std::function<int(int)>> o) {
return o(1);
});
m.def("test_call_2", [](nb::typed<nb::callable, void(int, int)> o) {
return o(1, 2);
});
/// Test expansion of args/kwargs-style arguments
m.def("test_call_extra", [](nb::typed<nb::callable, void(...)> o,
nb::args args, nb::kwargs kwargs) {
return o(1, 2, *args, **kwargs, "extra"_a = 5);
});
/// Test list manipulation
m.def("test_list", [](nb::list l) {
int result = 0;
for (size_t i = 0; i < l.size(); ++i)
result += nb::cast<int>(l[i]);
l[2] = 123;
l.append(result);
});
/// Test tuple manipulation
m.def("test_tuple", [](nb::typed<nb::tuple, int, nb::ellipsis> l) {
int result = 0;
for (size_t i = 0; i < l.size(); ++i)
result += nb::cast<int>(l[i]);
return result;
});
/// Test call_guard feature
m.def("test_call_guard_wrapper_rvalue_ref", [](int&& i) { return i; },
nb::call_guard<my_call_guard>());
m.def("test_call_guard", []() {
return call_guard_value;
}, nb::call_guard<my_call_guard>());
m.def("call_guard_value", []() { return call_guard_value; });
m.def("test_release_gil", []() -> bool {
#if defined(Py_LIMITED_API)
return false;
#else
return PyGILState_Check();
#endif
}, nb::call_guard<nb::gil_scoped_release>());
m.def("test_print", []{
nb::print("Test 1");
nb::print("Test 2"_s);
});
m.def("test_iter", [](nb::object in) {
nb::list l;
for (nb::handle h : in)
l.append(h);
return l;
});
m.def("test_iter_tuple", [](nb::tuple in) {
nb::list l;
for (nb::handle h : in)
l.append(h);
return l;
});
m.def("test_iter_list", [](nb::list in) {
nb::list l;
for (nb::handle h : in)
l.append(h);
return l;
});
// Overload chain with a raw docstring that has precedence
m.def("test_08", [](int) -> int { return 1; }, "first docstring");
m.def("test_08", [](float) -> int { return 2; },
nb::sig("def test_08(x: typing.Annotated[float, 'foo']) -> int"),
"another docstring");
// Manual type check
m.def("test_09", [](nb::type_object t) -> bool { return t.is(&PyBool_Type); });
// nb::dict iterator
m.def("test_10", [](nb::dict d) {
nb::dict result;
for (auto [k, v] : d)
result[k] = v;
return result;
});
m.def("test_10_contains", [](nb::dict d) {
return d.contains("foo"_s);
});
// Test implicit conversion of various types
m.def("test_11_sl", [](signed long x) { return x; });
m.def("test_11_ul", [](unsigned long x) { return x; });
m.def("test_11_sll", [](signed long long x) { return x; });
m.def("test_11_ull", [](unsigned long long x) { return x; });
// Test string caster
m.def("test_12", [](const char *c) { return nb::str(c); });
m.def("test_13", []() -> const char * { return "test"; });
m.def("test_14", [](nb::object o) -> const char * { return nb::cast<const char *>(o); });
// Test bytes type
m.def("test_15", [](nb::bytes o) -> const char * { return o.c_str(); });
m.def("test_15_d", [](nb::bytes o) { return nb::bytes(o.data(), o.size()); });
m.def("test_16", [](const char *c) { return nb::bytes(c); });
m.def("test_17", [](nb::bytes c) { return c.size(); });
m.def("test_18", [](const char *c, int size) { return nb::bytes(c, size); });
// Test int type
m.def("test_19", [](nb::int_ i) { return i + nb::int_(123); });
m.def("test_20", [](nb::str s) { return nb::int_(s) + nb::int_(123); });
m.def("test_21", [](nb::int_ i) { return (int) i; });
m.def("test_21_f", [](nb::float_ f) { return nb::int_(f); });
m.def("test_21_g", []() { return nb::int_(1.5); });
m.def("test_21_h", []() { return nb::int_(1e50); });
// Test floating-point
m.def("test_21_dnc", [](double d) { return d + 1.0; }, nb::arg().noconvert());
m.def("test_21_fnc", [](float f) { return f + 1.0f; }, nb::arg().noconvert());
// Test capsule wrapper
m.def("test_22", []() -> void * { return (void*) 1; });
m.def("test_23", []() -> void * { return nullptr; });
m.def("test_24", [](void *p) { return (uintptr_t) p; }, "p"_a.none());
// Test slice
m.def("test_25", [](nb::slice s) { return s; });
m.def("test_26", []() { return nb::slice(4); });
m.def("test_27", []() {
nb::slice s(2, 10);
auto tpl = s.compute(7);
if (tpl.get<0>() != 2) return nb::slice(400); // fail
auto [start, stop, step, slice_length] = tpl;
if (start != 2) return nb::slice(401); // fail
if (stop != 7) return nb::slice(402); // fail
if (step != 1) return nb::slice(403); // fail
if (slice_length != 5) return nb::slice(404); // fail
return s;
});
m.def("test_28", []() { return nb::slice(5, -5, -2); });
// Test ellipsis
m.def("test_29", [](nb::ellipsis) { return nb::ellipsis(); });
// Traceback test
m.def("test_30", [](nb::callable f) -> std::string {
nb::gil_scoped_release g;
try {
nb::gil_scoped_acquire g2;
f();
} catch (const nb::python_error &e) {
return e.what();
}
return "Unknown";
});
m.def("test_31", &test_31);
m.def("test_32", [](int i) noexcept { return i; });
m.def("identity_i8", [](int8_t i) { return i; });
m.def("identity_u8", [](uint8_t i) { return i; });
m.def("identity_i16", [](int16_t i) { return i; });
m.def("identity_u16", [](uint16_t i) { return i; });
m.def("identity_i32", [](int32_t i) { return i; });
m.def("identity_u32", [](uint32_t i) { return i; });
m.def("identity_i64", [](int64_t i) { return i; });
m.def("identity_u64", [](uint64_t i) { return i; });
m.attr("test_33") = nb::cpp_function([](nb::object self, int y) {
return nb::cast<int>(self.attr("x")) + y;
}, nb::is_method());
m.attr("test_34") = nb::cpp_function([](nb::object self, int y) {
return nb::cast<int>(self.attr("x")) * y;
}, nb::arg("y"), nb::is_method());
m.def("test_35", []() {
const char *name = "Foo";
auto callback = [=]() {
return "Test {}"_s.format(name);
};
return nb::cpp_function(callback);
});
m.def("test_cast_char", [](nb::handle h) {
return nb::cast<char>(h);
});
m.def("test_cast_str", [](nb::handle h) {
return nb::cast<const char *>(h);
});
m.def("test_set", []() {
nb::set s;
s.add("123");
s.add(123);
return s;
});
m.def("test_set_contains", [](nb::set s, nb::handle h) { return s.contains(h); });
m.def("test_frozenset", []() {
return nb::frozenset(nb::make_tuple("123", 123));
});
m.def("test_frozenset_contains", [](nb::frozenset s, nb::handle h) {
return s.contains(h);
});
m.def("test_memoryview", []() { return nb::memoryview(nb::bytes("123456")); });
m.def("test_bad_memview", []() { return nb::memoryview(nb::int_(0)); });
m.def("test_del_list", [](nb::list l) { nb::del(l[2]); });
m.def("test_del_dict", [](nb::dict l) { nb::del(l["a"]); });
static int imut = 10;
static const int iconst = 100;
m.def("test_ptr_return", []() { return std::make_pair(&imut, &iconst); });
// These are caught at compile time, uncomment and rebuild to verify:
// No nb::arg annotations:
//m.def("bad_args1", [](nb::args, int) {});
// kw_only in wrong place (1):
//m.def("bad_args2", [](nb::args, int) {}, nb::kw_only(), "args"_a, "i"_a);
// kw_only in wrong place (2):
//m.def("bad_args3", [](nb::args, int) {}, "args"_a, "i"_a, nb::kw_only());
// kw_only in wrong place (3):
//m.def("bad_args4", [](int, nb::kwargs) {}, "i"_a, "kwargs"_a, nb::kw_only());
// kw_only specified twice:
//m.def("bad_args5", [](int, int) {}, nb::kw_only(), "i"_a, nb::kw_only(), "j"_a);
m.def("test_args_kwonly",
[](int i, double j, nb::args args, int z) {
return nb::make_tuple(i, j, args, z);
}, "i"_a, "j"_a, "args"_a, "z"_a);
m.def("test_args_kwonly_kwargs",
[](int i, double j, nb::args args, int z, nb::kwargs kwargs) {
return nb::make_tuple(i, j, args, z, kwargs);
}, "i"_a, "j"_a, "args"_a, nb::kw_only(), "z"_a, "kwargs"_a);
m.def("test_kwonly_kwargs",
[](int i, double j, nb::kwargs kwargs) {
return nb::make_tuple(i, j, kwargs);
}, "i"_a, nb::kw_only(), "j"_a, "kwargs"_a);
m.def("test_kw_only_all",
[](int i, int j) { return nb::make_tuple(i, j); },
nb::kw_only(), "i"_a, "j"_a);
m.def("test_kw_only_some",
[](int i, int j, int k) { return nb::make_tuple(i, j, k); },
nb::arg(), nb::kw_only(), "j"_a, "k"_a);
m.def("test_kw_only_with_defaults",
[](int i, int j, int k, int z) { return nb::make_tuple(i, j, k, z); },
nb::arg() = 3, "j"_a = 4, nb::kw_only(), "k"_a = 5, "z"_a);
m.def("test_kw_only_mixed",
[](int i, int j) { return nb::make_tuple(i, j); },
"i"_a, nb::kw_only(), "j"_a);
struct kw_only_methods {
kw_only_methods(int _v) : v(_v) {}
int v;
};
nb::class_<kw_only_methods>(m, "kw_only_methods")
.def(nb::init<int>(), nb::kw_only(), "v"_a)
.def_rw("v", &kw_only_methods::v)
.def("method_2k",
[](kw_only_methods&, int i, int j) { return nb::make_tuple(i, j); },
nb::kw_only(), "i"_a = 1, "j"_a = 2)
.def("method_1p1k",
[](kw_only_methods&, int i, int j) { return nb::make_tuple(i, j); },
"i"_a = 1, nb::kw_only(), "j"_a = 2);
m.def("test_any", [](nb::any a) { return a; } );
m.def("test_wrappers_list", []{
nb::list l1, l2;
l1.append(1);
l2.append(2);
l1.extend(l2);
bool b = nb::len(l1) == 2 && nb::len(l2) == 1 &&
l1[0].equal(nb::int_(1)) && l1[1].equal(nb::int_(2));
l1.clear();
return b && nb::len(l1) == 0;
});
m.def("test_wrappers_dict", []{
nb::dict d1, d2;
d1["a"] = 1;
d2["b"] = 2;
d1.update(d2);
bool b = nb::len(d1) == 2 && nb::len(d2) == 1 &&
d1["a"].equal(nb::int_(1)) &&
d1["b"].equal(nb::int_(2));
d1.clear();
return b && nb::len(d1) == 0;
});
m.def("test_wrappers_set", []{
nb::set s;
s.add("a");
s.add("b");
bool b = nb::len(s) == 2 && s.contains("a") && s.contains("b");
b &= s.discard("a");
b &= !s.discard("q");
b &= !s.contains("a") && s.contains("b");
s.clear();
b &= s.size() == 0;
return b;
});
m.def("hash_it", [](nb::handle h) { return nb::hash(h); });
m.def("isinstance_", [](nb::handle inst, nb::handle cls) {
return nb::isinstance(inst, cls);
});
// Test bytearray type
m.def("test_bytearray_new", []() { return nb::bytearray(); });
m.def("test_bytearray_new", [](const char *c, int size) { return nb::bytearray(c, size); });
m.def("test_bytearray_copy", [](nb::bytearray o) { return nb::bytearray(o.c_str(), o.size()); });
m.def("test_bytearray_c_str", [](nb::bytearray o) -> const char * { return o.c_str(); });
m.def("test_bytearray_size", [](nb::bytearray o) { return o.size(); });
m.def("test_bytearray_resize", [](nb::bytearray c, int size) { return c.resize(size); });
// Test call_policy feature
m.def("test_call_policy",
[](const char* s, numeric_string n) -> const char* {
if (0 == strcmp(s, "returnfail")) {
return "not utf8 \xff";
}
if (n.number > strlen(s)) {
throw std::runtime_error("offset too large");
}
return s + n.number;
},
nb::call_policy<example_policy>());
m.def("call_policy_record",
[]() {
auto ret = std::move(example_policy::calls);
return ret;
});
m.def("abi_tag", [](){ return nb::detail::abi_tag(); });
// Test the nb::fallback type
m.def("test_fallback_1", [](double){ return 0; });
m.def("test_fallback_1", [](nb::handle){ return 1; });
m.def("test_fallback_2", [](double) { return 0; });
m.def("test_fallback_2", [](nb::fallback){ return 1; });
m.def("test_get_dict_default", [](nb::dict l) { return l.get("key", nb::int_(123)); });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_functions.py | Python | import test_functions_ext as t
import pytest
import sys
import re
# Reference counting behavior changed on 3.14a7+
py_3_14a7_or_newer = sys.version_info >= (3, 14, 0, 'alpha', 7)
def fail_fn():
    """Helper callback that always raises (passed to t.test_30 in test29_traceback)."""
    message = "Foo"
    raise RuntimeError(message)
def test01_capture():
    """Bound lambdas with capture objects of various sizes are callable."""
    # Functions with and without capture object of different sizes
    assert t.test_01() is None
    assert t.test_02(5, 3) == 2
    assert t.test_03(5, 3) == 44
    assert t.test_04() == 60
    assert t.test_simple(0, 1, 2, 3, 4, 5, 6, 7) == 14
def test02_default_args():
    """Defaults declared on the C++ side (up=8, down=1) are applied."""
    # Default arguments
    assert t.test_02() == 7
    assert t.test_02(7) == 6
def test03_kwargs():
    """Keyword arguments work, including with non-interned keyword names.

    The original asserted ``x is not "down"`` — an identity comparison
    against a literal, which raises SyntaxWarning on CPython >= 3.8 and
    depends on compiler interning.  Compare against ``sys.intern("down")``
    instead, which is the exact property the test needs: the constructed
    key must NOT be the interned string nanobind's fast path would match.
    """
    # Basic use of keyword arguments
    assert t.test_02(3, 5) == -2
    assert t.test_02(3, down=5) == -2
    assert t.test_02(down=5, up=3) == -2
    # Make sure non-interned keyword names work also
    i_cant_believe_its_not_down = "".join("down")
    assert i_cant_believe_its_not_down is not sys.intern("down")
    assert t.test_02(**{i_cant_believe_its_not_down: 5, "up": 3}) == -2
    assert t.test_02(**{i_cant_believe_its_not_down: 5}) == 3
    with pytest.raises(TypeError):
        t.test_02(unexpected=27)
    with pytest.raises(TypeError):
        t.test_02(**{i_cant_believe_its_not_down: None})
def test04_overloads():
    """Overload resolution: exact int picks overload 1, float picks 2."""
    assert t.test_05(0) == 1
    assert t.test_05(0.0) == 2
def test05_signature():
    """Generated __doc__ strings: signatures plus per-overload docstrings."""
    assert t.test_01.__doc__ == "test_01() -> None"
    assert t.test_02.__doc__ == "test_02(up: int = 8, down: int = 1) -> int"
    # Both overloads documented -> enumerated "Overloaded function." block
    assert t.test_05.__doc__ == (
        "test_05(arg: int, /) -> int\n"
        "test_05(arg: float, /) -> int\n"
        "\n"
        "Overloaded function.\n"
        "\n"
        "1. ``test_05(arg: int, /) -> int``\n"
        "\n"
        "doc_1\n"
        "\n"
        "2. ``test_05(arg: float, /) -> int``\n"
        "\n"
        "doc_2"
    )
    # Only the first overload documented -> single shared docstring
    assert t.test_05b.__doc__ == (
        "test_05b(arg: int, /) -> int\n"
        "test_05b(arg: float, /) -> int\n"
        "\n"
        "doc_1"
    )
    # Only one overload documented but enumeration still produced
    assert t.test_05c.__doc__ == (
        "test_05c(arg: int, /) -> int\n"
        "test_05c(arg: float, /) -> int\n"
        "\n"
        "Overloaded function.\n"
        "\n"
        "1. ``test_05c(arg: int, /) -> int``\n"
        "\n"
        "doc_1\n"
        "\n"
        "2. ``test_05c(arg: float, /) -> int``\n"
    )
    # Undocumented overloads -> just the two signature lines
    assert t.test_07.__doc__ == (
        "test_07(arg0: int, arg1: int, /, *args, **kwargs) -> tuple[int, int]\n"
        "test_07(a: int, b: int, *myargs, **mykwargs) -> tuple[int, int]"
    )
def test06_signature_error():
    """A failed overload dispatch lists every candidate in the TypeError."""
    with pytest.raises(TypeError) as excinfo:
        t.test_05("x", y=4)
    assert str(excinfo.value) == (
        "test_05(): incompatible function arguments. The "
        "following argument types are supported:\n"
        " 1. test_05(arg: int, /) -> int\n"
        " 2. test_05(arg: float, /) -> int\n\n"
        "Invoked with types: str, kwargs = { y: int }"
    )
def test07_raises():
    """C++ exceptions propagate to Python with their message intact."""
    with pytest.raises(RuntimeError) as excinfo:
        t.test_06()
    assert str(excinfo.value) == "oops!"
def test08_args_kwargs():
    """test_07 returns (len(args), len(kwargs)) across both overloads."""
    assert t.test_07(1, 2) == (0, 0)
    assert t.test_07(a=1, b=2) == (0, 0)
    assert t.test_07(a=1, b=2, c=3) == (0, 1)
    assert t.test_07(1, 2, 3, c=4) == (1, 1)
    assert t.test_07(1, 2, 3, 4, c=5, d=5) == (2, 2)
def test09_maketuple():
    """nb::make_tuple construction, plus the failure path (bad_cast)."""
    assert t.test_tuple() == ("Hello", 123)
    with pytest.raises(RuntimeError) as excinfo:
        assert t.test_bad_tuple()
    value = str(excinfo.value)
    # libstdc++ and libc++ spell std::bad_cast::what() differently
    assert value == "std::bad_cast" or value == "bad cast"
def test10_cpp_call_simple():
    """C++ calls back into a Python callable with fixed arity (1, 2)."""
    result = []
    def my_callable(a, b):
        result.append((a, b))
    t.test_call_2(my_callable)
    assert result == [(1, 2)]
    # Wrong arity: the Python-side TypeError propagates through C++
    with pytest.raises(TypeError) as excinfo:
        t.test_call_1(my_callable)
    assert "my_callable() missing 1 required positional argument: 'b'" in str(
        excinfo.value
    )
    assert result == [(1, 2)]
def test11_call_complex():
    """C++ forwards extra positional and keyword args to the callback."""
    result = []
    def my_callable(*args, **kwargs):
        result.append((args, kwargs))
    t.test_call_extra(my_callable)
    assert result == [((1, 2), {"extra": 5})]
    result.clear()
    t.test_call_extra(my_callable, 5, 6, hello="world")
    assert result == [((1, 2, 5, 6), {"extra": 5, "hello": "world"})]
def test12_list_tuple_manipulation():
    """Lists are mutated in place from C++; tuples stay untouched."""
    li = [1, 5, 6, 7]
    t.test_list(li)
    assert li == [1, 5, 123, 7, 19]
    tu = (1, 5, 6, 7)
    assert t.test_tuple(tu) == 19
    assert tu == (1, 5, 6, 7)
def test13_call_guard():
    """nb::call_guard<> wrappers run around each bound-function call."""
    assert t.call_guard_value() == 0
    assert t.test_call_guard() == 1
    assert t.call_guard_value() == 2
    assert t.test_call_guard_wrapper_rvalue_ref(1) == 1
    assert not t.test_release_gil()
def test14_print(capsys):
    """nb::print writes to Python's sys.stdout (captured by pytest)."""
    t.test_print()
    captured = capsys.readouterr()
    assert captured.out == "Test 1\nTest 2\n"
def test15_iter():
    """Generic nb::iter over tuples of increasing length."""
    assert t.test_iter(()) == []
    assert t.test_iter((1,)) == [1]
    assert t.test_iter((1, 2)) == [1, 2]
    assert t.test_iter((1, 2, 3)) == [1, 2, 3]
def test16_iter_tuple():
    """Typed iteration over nb::tuple."""
    assert t.test_iter_tuple(()) == []
    assert t.test_iter_tuple((1,)) == [1]
    assert t.test_iter_tuple((1, 2)) == [1, 2]
    assert t.test_iter_tuple((1, 2, 3)) == [1, 2, 3]
def test17_iter_tuple():
    # NOTE(review): misnomer — despite the name this exercises
    # test_iter_list on Python lists, not tuples.
    """Typed iteration over nb::list."""
    assert t.test_iter_list([]) == []
    assert t.test_iter_list([1]) == [1]
    assert t.test_iter_list([1, 2]) == [1, 2]
    assert t.test_iter_list([1, 2, 3]) == [1, 2, 3]
def test18_raw_doc():
    """Combined docstring of test_08, whose second overload uses nb::sig."""
    assert (
        t.test_08.__doc__
        == """test_08(arg: int, /) -> int
test_08(x: typing.Annotated[float, 'foo']) -> int
Overloaded function.
1. ``test_08(arg: int, /) -> int``
first docstring
2. ``test_08(x: typing.Annotated[float, 'foo']) -> int``
another docstring"""
    )
def test19_type_check_manual():
    """nb::type_object accepts type objects only; instances are rejected."""
    assert t.test_09.__doc__ == "test_09(arg: type, /) -> bool"
    assert t.test_09(bool) is True
    assert t.test_09(int) is False
    with pytest.raises(TypeError) as excinfo:
        assert t.test_09(True)
    assert "incompatible function arguments" in str(excinfo.value)
def test20_dict_iterator():
    """Round trip through the C++-side nb::dict iterator (test_10)."""
    assert t.test_10({}) == {}
    assert t.test_10({1: 2}) == {1: 2}
    assert t.test_10({1: 2, 3: 4}) == {1: 2, 3: 4}
    assert t.test_10({1: 2, 3: 4, "a": "b"}) == {1: 2, 3: 4, "a": "b"}
def test21_numpy_overloads():
    """NumPy scalar types take part in implicit argument conversion."""
    try:
        import numpy as np
    except ImportError:
        pytest.skip("numpy is missing")
    assert t.test_05(np.int32(0)) == 1
    assert t.test_05(np.float64(0.1)) == 2
    assert t.test_05(np.float64(0.0)) == 2
    assert t.test_11_sl(np.int32(5)) == 5
    assert t.test_11_ul(np.int32(5)) == 5
    assert t.test_11_sll(np.int32(5)) == 5
    assert t.test_11_ull(np.int32(5)) == 5
    # noconvert() rejects anything that isn't exactly a Python float
    with pytest.raises(TypeError) as excinfo:
        t.test_21_dnc(np.float64(21.0)) # Python type is not exactly float
    assert "incompatible function arguments" in str(excinfo.value)
    assert t.test_21_dnc(float(np.float64(21.0))) == 22.0
    assert t.test_21_dnc(float(np.float32(21.0))) == 22.0
    assert t.test_21_fnc(float(np.float32(21.0))) == 22.0
    with pytest.raises(TypeError) as excinfo:
        t.test_21_fnc(float(np.float64(21.1))) # Inexact narrowing to float32
    assert "incompatible function arguments" in str(excinfo.value)
    assert t.test_21_fnc(float(np.float32(21.1))) == np.float32(22.1)
def test22_string_return():
    """const char* caster round trips in both directions."""
    assert t.test_12("hello") == "hello"
    assert t.test_13() == "test"
    assert t.test_14("abc") == "abc"
def test23_byte_return():
    """nb::bytes construction, size, and embedded-NUL preservation."""
    assert t.test_15(b"abc") == "abc"
    assert t.test_15_d(b"abc\x00def\x00ghi") == b"abc\x00def\x00ghi"
    assert t.test_16("hello") == b"hello"
    assert t.test_17(b"four") == 4
    assert t.test_17(b"\x00\x00\x00\x00") == 4
    assert t.test_18("hello world", 5) == b"hello"
def test24_pydoc():
    """pydoc can render the module without choking on generated docs."""
    import pydoc
    assert "test_05(arg: int, /)" in pydoc.render_doc(t)
def test25_int():
    """nb::int_ arithmetic, conversion from str/float, and truncation."""
    assert t.test_19(5) == 128
    assert t.test_20("5") == 128
    assert t.test_21(5) == 5
    assert t.test_21_f(5.1) == int(5.1)
    assert t.test_21_f(1e50) == int(1e50)
    assert type(t.test_21_f(0.5)) is int
    assert t.test_21_g() == int(1.5)
    assert type(t.test_21_g()) is int
    assert t.test_21_h() == int(1e50)
    assert type(t.test_21_h()) is int
    assert t.test_19.__doc__ == "test_19(arg: int, /) -> object"
def test26_capsule():
    """void* return values become capsules; None maps to nullptr."""
    p = t.test_22()
    assert "capsule" in str(p) and "nb_handle" in str(p)
    assert t.test_24(p) == 1
    p = t.test_23()
    assert p is None
    assert t.test_24(p) == 0
def test27_slice():
    """nb::slice passthrough, construction, and compute() results."""
    s = slice(1, 10, 2)
    assert t.test_25(s) is s
    assert t.test_25.__doc__ == "test_25(arg: slice, /) -> slice"
    assert t.test_26() == slice(4)
    assert t.test_27() == slice(2, 10)
    assert t.test_28() == slice(5, -5, -2)
def test28_ellipsis():
    """nb::ellipsis maps to Python's ``...`` singleton."""
    assert t.test_29(...) is ...
    assert (
        t.test_29.__doc__ == "test_29(arg: types.EllipsisType, /) -> types.EllipsisType"
    )
def test29_traceback():
    """python_error::what() contains a full formatted traceback."""
    result = t.test_30(fail_fn)
    regexp = r'Traceback \(most recent call last\):\n.*\n File "[^"]*", line [0-9]*, in fail_fn\n.*RuntimeError: Foo'
    matches = re.findall(regexp, result, re.MULTILINE | re.DOTALL)
    assert len(matches) == 1
def test30_noexcept():
    """Functions declared noexcept are bindable and callable."""
    assert t.test_31(123) == 123
    assert t.test_32(123) == 123
@pytest.mark.parametrize(
    "func_name",
    [
        "identity_i8",
        "identity_u8",
        "identity_i16",
        "identity_u16",
        "identity_i32",
        "identity_u32",
        "identity_i64",
        "identity_u64",
    ],
)
def test31_range(func_name):
    """Integer casters accept exactly the target C type's value range.

    Out-of-range inputs must raise TypeError; in-range inputs round trip
    unchanged.
    """
    func = getattr(t, func_name)
    # Probe values straddling every relevant power-of-two boundary,
    # up to 128 bits (always out of range), plus their negations below.
    values = [
        0,
        -1,
        1,
        2**7,
        2**7 - 1,
        2**8,
        2**8 - 1,
        2**15,
        2**15 - 1,
        2**16,
        2**16 - 1,
        2**29,
        2**29 - 1,
        2**30,
        2**30 - 1,
        2**31,
        2**31 - 1,
        2**32,
        2**32 - 1,
        2**63,
        2**63 - 1,
        2**64,
        2**64 - 1,
        2**127,
        2**127 - 1,
        2**128,
        2**128 - 1,
    ]
    values += [-value for value in values]
    # Derive the valid range from the name: strip the 9-char 'identity_'
    # prefix, leaving e.g. 'u8' or 'i64'
    suffix = func.__name__[9:]
    if suffix[0] == "u":
        range_min = 0
        range_max = 2 ** int(suffix[1:]) - 1
    else:
        range_min = -(2 ** (int(suffix[1:]) - 1))
        range_max = -range_min - 1
    for value in values:
        if value < range_min or value > range_max:
            with pytest.raises(TypeError):
                value_out = func(value)
        else:
            value_out = func(value)
            assert value_out == value
def test33_method_on_non_nanobind_class():
    """nb::is_method() functions can be attached to plain Python classes."""
    class AClass:
        def __init__(self):
            self.x = 42
    AClass.simple_method = t.test_33
    AClass.complex_method = t.test_34
    a = AClass()
    # self.x (42) + y and self.x * y respectively
    assert a.simple_method(7) == 49
    assert a.complex_method(y=2) == 84
def test34_module_docstring():
    """The NB_MODULE docstring is exposed as __doc__."""
    assert t.__doc__ == "function testcase"
def test35_return_capture():
    """A returned nb::cpp_function keeps its captured state alive."""
    x = t.test_35()
    assert x() == "Test Foo"
def test36_test_char():
    """nb::cast<char> accepts exactly one-character strings (incl. NUL)."""
    assert t.test_cast_char("c") == "c"
    assert t.test_cast_char("\x00") == "\x00"
    with pytest.raises(TypeError):
        assert t.test_cast_char("abc")
    with pytest.raises(TypeError):
        assert t.test_cast_char("")
    # Non-str input fails inside nb::cast -> RuntimeError, not TypeError
    with pytest.raises(RuntimeError):
        assert t.test_cast_char(123)
def test37_test_str():
    """nb::cast<const char *> accepts strings of any length."""
    assert t.test_cast_str("c") == "c"
    assert t.test_cast_str("abc") == "abc"
    with pytest.raises(RuntimeError):
        assert t.test_cast_str(123)
def test38_set():
    """nb::set creation, membership, and contains() from C++."""
    x = t.test_set()
    assert isinstance(x, set)
    assert len(x) == 2
    assert 123 in x and "123" in x
    assert t.test_set_contains(x, 123)
    assert t.test_set_contains(x, "123")
    assert not t.test_set_contains(x, "1234")
    assert not t.test_set_contains(x, 1234)
def test39_del():
    """nb::del on list items and dict keys, plus the error paths."""
    l = [0, 1, 2, 3, 4]
    t.test_del_list(l)
    assert l == [0, 1, 3, 4]
    l = {"a": 0, "b": 1}
    t.test_del_dict(l)
    assert l == {"b": 1}
    with pytest.raises(IndexError):
        t.test_del_list([])
    with pytest.raises(KeyError):
        t.test_del_dict({})
def test40_nb_signature():
    """__nb_signature__: (signature, docstring, default-value tuple) triples.

    Defaults appear as \\0, \\1, ... placeholders in the signature string.
    """
    assert t.test_01.__nb_signature__ == ((r"def test_01() -> None", None, None),)
    assert t.test_02.__nb_signature__ == (
        (r"def test_02(up: int = \0, down: int = \1) -> int", None, (8, 1)),
    )
    assert t.test_05.__nb_signature__ == (
        (r"def test_05(arg: int, /) -> int", "doc_1", None),
        (r"def test_05(arg: float, /) -> int", "doc_2", None),
    )
    assert t.test_07.__nb_signature__ == (
        (
            r"def test_07(arg0: int, arg1: int, /, *args, **kwargs) -> tuple[int, int]",
            None,
            None,
        ),
        (
            r"def test_07(a: int, b: int, *myargs, **mykwargs) -> tuple[int, int]",
            None,
            None,
        ),
    )
def test41_kw_only():
    """nb::kw_only() across many signature shapes, incl. methods."""
    # (i, j, *args, z)
    assert t.test_args_kwonly(2, 2.5, z=22) == (2, 2.5, (), 22)
    assert t.test_args_kwonly(2, 2.5, "a", "b", z=22) == (2, 2.5, ("a", "b"), 22)
    assert t.test_args_kwonly(z=22, i=4, j=16) == (4, 16.0, (), 22)
    assert (
        t.test_args_kwonly.__doc__
        == "test_args_kwonly(i: int, j: float, *args, z: int) -> tuple"
    )
    with pytest.raises(TypeError):
        t.test_args_kwonly(2, 2.5, 22) # missing z= keyword
    # (i, j, *args, z, **kwargs)
    assert t.test_args_kwonly_kwargs(i=1, k=4, j=10, z=-1, y=9) == (
        1, 10, (), -1, {"k": 4, "y": 9}
    )
    assert t.test_args_kwonly_kwargs(1, 2, 3, 4, z=11, y=12) == (
        1, 2, (3, 4), 11, {"y": 12}
    )
    with pytest.raises(TypeError):
        t.test_args_kwonly_kwargs(1, 2, 3, 4, 5)
    assert (
        t.test_args_kwonly_kwargs.__doc__
        == "test_args_kwonly_kwargs(i: int, j: float, *args, z: int, **kwargs) -> tuple"
    )
    # (i, *, j, **kwargs)
    assert t.test_kwonly_kwargs(j=2, i=1) == (1, 2, {})
    assert t.test_kwonly_kwargs(j=2, i=1, z=10) == (1, 2, {"z": 10})
    assert t.test_kwonly_kwargs(1, j=2) == (1, 2, {})
    assert t.test_kwonly_kwargs(1, j=2, z=10) == (1, 2, {"z": 10})
    with pytest.raises(TypeError):
        t.test_kwonly_kwargs(1, 2)
    with pytest.raises(TypeError):
        t.test_kwonly_kwargs(1, 2, j=3)
    with pytest.raises(TypeError):
        t.test_kwonly_kwargs(1, 2, z=10)
    assert (
        t.test_kwonly_kwargs.__doc__
        == "test_kwonly_kwargs(i: int, *, j: float, **kwargs) -> tuple"
    )
    # (*, i, j)
    assert t.test_kw_only_all(i=1, j=2) == (1, 2)
    assert t.test_kw_only_all(j=1, i=2) == (2, 1)
    with pytest.raises(TypeError):
        t.test_kw_only_all(i=1)
    with pytest.raises(TypeError):
        t.test_kw_only_all(1, 2)
    assert (
        t.test_kw_only_all.__doc__
        == "test_kw_only_all(*, i: int, j: int) -> tuple"
    )
    # (__arg0, *, j, k)
    assert t.test_kw_only_some(1, k=3, j=2) == (1, 2, 3)
    assert (
        t.test_kw_only_some.__doc__
        == "test_kw_only_some(arg0: int, *, j: int, k: int) -> tuple"
    )
    # (__arg0=3, j=4, *, k=5, z)
    assert t.test_kw_only_with_defaults(z=8) == (3, 4, 5, 8)
    assert t.test_kw_only_with_defaults(2, z=8) == (2, 4, 5, 8)
    assert t.test_kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
    assert t.test_kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)
    with pytest.raises(TypeError):
        t.test_kw_only_with_defaults(2, 7, 8, z=9)
    assert (
        t.test_kw_only_with_defaults.__doc__
        == "test_kw_only_with_defaults(arg0: int = 3, j: int = 4, *, k: int = 5, z: int) -> tuple"
    )
    # (i, *, j)
    assert t.test_kw_only_mixed(1, j=2) == (1, 2)
    assert t.test_kw_only_mixed(j=2, i=3) == (3, 2)
    assert t.test_kw_only_mixed(i=2, j=3) == (2, 3)
    with pytest.raises(TypeError):
        t.test_kw_only_mixed(i=1)
    with pytest.raises(TypeError):
        t.test_kw_only_mixed(1, i=2)
    assert (
        t.test_kw_only_mixed.__doc__
        == "test_kw_only_mixed(i: int, *, j: int) -> tuple"
    )
    # Constructor with a keyword-only parameter
    with pytest.raises(TypeError):
        t.kw_only_methods(42)
    val = t.kw_only_methods(v=42)
    assert val.v == 42
    # (self, *, i, j)
    assert val.method_2k() == (1, 2)
    assert val.method_2k(i=3) == (3, 2)
    assert val.method_2k(j=4) == (1, 4)
    assert val.method_2k(i=3, j=4) == (3, 4)
    assert val.method_2k(j=3, i=4) == (4, 3)
    with pytest.raises(TypeError):
        val.method_2k(1)
    with pytest.raises(TypeError):
        val.method_2k(1, j=2)
    assert (
        t.kw_only_methods.method_2k.__doc__
        == "method_2k(self, *, i: int = 1, j: int = 2) -> tuple"
    )
    # (self, i, *, j)
    assert val.method_1p1k() == (1, 2)
    assert val.method_1p1k(i=3) == (3, 2)
    assert val.method_1p1k(j=4) == (1, 4)
    assert val.method_1p1k(i=3, j=4) == (3, 4)
    assert val.method_1p1k(j=3, i=4) == (4, 3)
    assert val.method_1p1k(3) == (3, 2)
    assert val.method_1p1k(3, j=4) == (3, 4)
    with pytest.raises(TypeError):
        val.method_2k(1, 2)
    assert (
        t.kw_only_methods.method_1p1k.__doc__
        == "method_1p1k(self, i: int = 1, *, j: int = 2) -> tuple"
    )
# NOTE(review): test numbers 41-43 are reused below (test41_any,
# test42_wrappers_list, test43_wrappers_set duplicate earlier numbers);
# function names remain unique, so pytest still collects all of them.
def test42_ptr_return():
    """Returning pointers to int dereferences them (mutable and const)."""
    assert t.test_ptr_return() == (10, 100)
def test41_any():
    """nb::any passes objects through untouched and types as typing.Any."""
    s = "hello"
    assert t.test_any(s) is s
    assert t.test_any.__doc__ == "test_any(arg: typing.Any, /) -> typing.Any"
def test42_wrappers_list():
    """C++-side nb::list API exercise (append/extend/clear)."""
    assert t.test_wrappers_list()
def test43_wrappers_dict():
    """C++-side nb::dict API exercise (update/clear)."""
    assert t.test_wrappers_dict()
def test43_wrappers_set():
    """C++-side nb::set API exercise (add/discard/clear)."""
    assert t.test_wrappers_set()
def test44_hash():
    """nb::hash must agree with Python's built-in hash().

    (Also drops the stray trailing semicolon from the original assert.)
    """
    value = (1, 2, 3)
    assert t.hash_it(value) == hash(value)
def test45_new():
    """nb::bytearray construction: empty and from (data, size)."""
    assert t.test_bytearray_new() == bytearray()
    assert t.test_bytearray_new("\x00\x01\x02\x03", 4) == bytearray(
        b"\x00\x01\x02\x03"
    )
    assert t.test_bytearray_new("", 0) == bytearray()
def test46_copy():
    """Copies made from c_str()/size() are independent of the source."""
    o = bytearray(b"\x00\x01\x02\x03")
    c = t.test_bytearray_copy(o)
    assert c == o
    o.clear()
    assert c != o
def test47_c_str():
    """nb::bytearray::c_str() exposes the buffer as a C string."""
    o = bytearray(b"Hello, world!")
    assert t.test_bytearray_c_str(o) == "Hello, world!"
def test48_size():
    """nb::bytearray::size() matches len()."""
    o = bytearray(b"Hello, world!")
    assert t.test_bytearray_size(o) == len(o)
def test49_resize():
    """nb::bytearray::resize() changes the length in place."""
    o = bytearray(b"\x00\x01\x02\x03")
    assert len(o) == 4
    t.test_bytearray_resize(o, 8)
    assert len(o) == 8
def test50_call_policy():
    """nb::call_policy precall/postcall hooks and their refcount behavior.

    Each scenario funnels through `case`, which checks both the recorded
    (args, return) pair and — on CPython — that no references leak.
    """
    def case(arg1, arg2, expect_ret): # type: (str, str, str | None) -> str
        # getrefcount is CPython-only; skip refcount checks elsewhere
        if hasattr(sys, "getrefcount"):
            refs_before = (sys.getrefcount(arg1), sys.getrefcount(arg2))
        ret = None
        try:
            ret = t.test_call_policy(arg1, arg2)
            assert ret == expect_ret
            return ret
        finally:
            if expect_ret is None:
                # precall failed -> nothing should have been recorded
                assert t.call_policy_record() == []
            else:
                (((arg1r, arg2r), recorded_ret),) = t.call_policy_record()
                assert recorded_ret == expect_ret
                assert ret is None or ret is recorded_ret
                assert recorded_ret is not expect_ret
                if hasattr(sys, "getrefcount"):
                    # Make sure no reference leak occurred: should be
                    # one in getrefcount args, one or two in locals,
                    # zero or one in the pending-return-value slot.
                    # We have to decompose this to avoid getting confused
                    # by transient additional references added by pytest's
                    # assertion rewriting.
                    ret_refs = sys.getrefcount(recorded_ret)
                    expected_refs = 2 + 2 * (ret is not None)
                    # On Python 3.14a7, an optimization was introduced where
                    # stack-based function calling no longer acquires a reference
                    if py_3_14a7_or_newer:
                        assert ret_refs == expected_refs - 1 or ret_refs == expected_refs
                    else:
                        assert ret_refs == expected_refs
                for (passed, recorded) in ((arg1, arg1r), (arg2, arg2r)):
                    if passed == "swapfrom":
                        # precall replaced this argument with "swapto"
                        assert recorded == "swapto"
                        if hasattr(sys, "getrefcount"):
                            recorded_refs = sys.getrefcount(recorded)
                            # recorded, arg1r, unnamed tuple, getrefcount arg
                            if py_3_14a7_or_newer:
                                assert recorded_refs == 3 or recorded_refs == 4
                            else:
                                assert recorded_refs == 4
                    else:
                        assert passed is recorded
                del passed, recorded, arg1r, arg2r
        if hasattr(sys, "getrefcount"):
            refs_after = (sys.getrefcount(arg1), sys.getrefcount(arg2))
            if not py_3_14a7_or_newer or ret is not None:
                assert refs_before == refs_after
    # precall throws exception
    with pytest.raises(RuntimeError, match="expected only strings"):
        case(12345, "0", None)
    # conversion of args fails
    with pytest.raises(TypeError):
        case("string", "xxx", "<unfinished>")
    # function throws exception
    with pytest.raises(RuntimeError, match="offset too large"):
        case("abc", "4", "<unfinished>")
    # conversion of return value fails
    with pytest.raises(UnicodeDecodeError):
        case("returnfail", "4", "<return conversion failed>")
    # postcall throws exception
    with pytest.raises(RuntimeError, match="postcall exception"):
        case("postthrow", "4", "throw")
    # normal call
    case("example", "1", "xample")
    # precall modifies args
    case("swapfrom", "0", "swapto")
    with pytest.raises(TypeError):
        case("swapfrom", "xxx", "<unfinished>")
    with pytest.raises(RuntimeError, match="offset too large"):
        case("swapfrom", "10", "<unfinished>")
def test51_isinstance():
    """nb::isinstance mirrors Python's isinstance, incl. TypeError on non-type."""
    assert t.isinstance_(3, int)
    assert not t.isinstance_(3, bool)
    with pytest.raises(TypeError):
        t.isinstance_(3, 7)
def test52_frozenset():
    """nb::frozenset creation and contains()."""
    x = t.test_frozenset()
    assert isinstance(x, frozenset)
    assert len(x) == 2
    assert 123 in x and "123" in x
    assert t.test_frozenset_contains(x, 123)
    assert t.test_frozenset_contains(x, "123")
    assert not t.test_frozenset_contains(x, "1234")
    assert not t.test_frozenset_contains(x, 1234)
def test53_fallback():
    """nb::handle catches everything; nb::fallback only non-convertible args."""
    assert t.test_fallback_1(3.0) == 0
    assert t.test_fallback_1(3) == 1
    assert t.test_fallback_1('3') == 1
    assert t.test_fallback_2(3.0) == 0
    assert t.test_fallback_2(3) == 0
    assert t.test_fallback_2('3') == 1
def test54_dict_default():
    """nb::dict::get with a default value (returned when the key is absent)."""
    assert t.test_get_dict_default({'key': 100}) == 100
    assert t.test_get_dict_default({'key2': 100}) == 123
def test_55_memoryview():
    """nb::memoryview from bytes; non-buffer objects raise TypeError."""
    memview = t.test_memoryview()
    assert isinstance(memview, memoryview)
    assert bytes(memview[0:3]) == b'123'
    with pytest.raises(TypeError):
        t.test_bad_memview()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_holders.cpp | C++ | #if defined(__GNUC__)
// warning: '..' declared with greater visibility than the type of its field '..'
# pragma GCC diagnostic ignored "-Wattributes"
#endif
#include <nanobind/stl/shared_ptr.h>
#include <nanobind/stl/unique_ptr.h>
#include <nanobind/stl/pair.h>
#include <nanobind/stl/vector.h>
namespace nb = nanobind;
static int created = 0;
static int deleted = 0;
// Guinea-pig type: bumps the file-global created/deleted counters so the
// tests can observe construction and destruction through smart-pointer
// holders.
struct Example {
    int value; // payload observed from Python
    Example(int value) : value(value) { created++; }
    ~Example() { deleted++; }
    // Raw-pointer factory: ownership is transferred to the caller
    static Example *make(int value) { return new Example(value); }
    // shared_ptr factory
    static std::shared_ptr<Example> make_shared(int value) {
        return std::make_shared<Example>(value);
    }
};
// Holder wrappers used to round-trip smart pointers through member fields.
struct SharedWrapper { std::shared_ptr<Example> value; };
struct UniqueWrapper { std::unique_ptr<Example> value; };
// unique_ptr with nanobind's deleter, which can hand ownership back to Python
struct UniqueWrapper2 { std::unique_ptr<Example, nb::deleter<Example>> value; };
// Variant of Example deriving from std::enable_shared_from_this, used to
// test nanobind's shared_from_this integration (see has_shared_from_this_v
// static_asserts below).
struct ExampleST : std::enable_shared_from_this<ExampleST> {
    int value;
    ExampleST(int value) : value(value) { created++; }
    ~ExampleST() { deleted++; }
    static ExampleST *make(int value) { return new ExampleST(value); }
    static std::shared_ptr<ExampleST> make_shared(int value) {
        return std::make_shared<ExampleST>(value);
    }
};
// shared_ptr holder around ExampleST with a raw-pointer accessor, bound
// with different return-value policies in the module below.
struct SharedWrapperST {
    std::shared_ptr<ExampleST> value;
    ExampleST* get() const { return value.get(); }
};
static_assert(nb::detail::has_shared_from_this_v<ExampleST>);
static_assert(!nb::detail::has_shared_from_this_v<Example>);
// Non-polymorphic hierarchy (no vtable): the dynamic type is encoded in
// the 'kind' tag and recovered via the type_hook<Pet> specialization below.
enum class PetKind { Cat, Dog };
struct Pet { const PetKind kind; };
struct Dog : Pet { Dog() : Pet{PetKind::Dog} { } };
struct Cat : Pet { Cat() : Pet{PetKind::Cat} { } };
namespace nanobind::detail {
    // Custom downcast hook: lets nanobind report the most derived type of
    // a Pet even though the hierarchy has no virtual functions (it cannot
    // use RTTI on the instance itself).
    template <> struct type_hook<Pet> {
        static const std::type_info *get(Pet *p) {
            if (p) {
                switch (p->kind) {
                    case PetKind::Dog: return &typeid(Dog);
                    case PetKind::Cat: return &typeid(Cat);
                }
            }
            // Null pointer: fall back to the declared base type
            return &typeid(Pet);
        }
    };
} // namespace nanobind::detail
NB_MODULE(test_holders_ext, m) {
nb::class_<Example>(m, "Example")
.def(nb::init<int>())
.def_rw("value", &Example::value)
.def_static("make", &Example::make)
.def_static("make_shared", &Example::make_shared);
// ------- shared_ptr -------
nb::class_<SharedWrapper>(m, "SharedWrapper")
.def(nb::init<std::shared_ptr<Example>>())
.def_rw("ptr", &SharedWrapper::value)
.def_prop_rw("value",
[](SharedWrapper &t) { return t.value->value; },
[](SharedWrapper &t, int value) { t.value->value = value; });
m.def("query_shared_1", [](Example *shared) { return shared->value; });
m.def("query_shared_2",
[](std::shared_ptr<Example> &shared) { return shared->value; });
m.def("query_shared_3",
[](std::shared_ptr<const Example> &shared) { return shared->value; });
m.def("passthrough",
[](std::shared_ptr<Example> shared) { return shared; });
m.def("passthrough_2",
[](std::shared_ptr<const Example> shared) { return shared; });
// ------- enable_shared_from_this -------
nb::class_<ExampleST>(m, "ExampleST")
.def(nb::init<int>())
.def("has_shared_from_this", [](ExampleST& self) {
return !self.weak_from_this().expired();
})
.def("shared_from_this", [](ExampleST& self) {
return self.shared_from_this();
})
.def("use_count", [](ExampleST& self) {
return self.weak_from_this().use_count();
})
.def_rw("value", &ExampleST::value)
.def_static("make", &ExampleST::make)
.def_static("make_shared", &ExampleST::make_shared);
struct DerivedST : ExampleST {
using ExampleST::ExampleST;
};
static_assert(nb::detail::has_shared_from_this_v<DerivedST>);
nb::class_<DerivedST, ExampleST>(m, "DerivedST")
.def(nb::init<int>())
.def_static("make", [](int v) {
return static_cast<DerivedST*>(ExampleST::make(v));
})
.def_static("make_shared", [](int v) {
return std::static_pointer_cast<DerivedST>(ExampleST::make_shared(v));
});
nb::class_<SharedWrapperST>(m, "SharedWrapperST")
.def(nb::init<std::shared_ptr<ExampleST>>())
.def_static("from_existing", [](ExampleST *obj) {
return SharedWrapperST{obj->shared_from_this()};
})
.def_static("from_wrapper", [](SharedWrapperST& w) {
return SharedWrapperST{w.value};
})
.def("use_count", [](SharedWrapperST& self) {
return self.value.use_count();
})
.def("same_owner", [](SharedWrapperST& self, ExampleST& other) {
auto self_s = self.value;
auto other_s = other.shared_from_this();
return !self_s.owner_before(other_s) &&
!other_s.owner_before(self_s);
})
.def("get_own", &SharedWrapperST::get)
.def("get_ref", &SharedWrapperST::get, nb::rv_policy::reference)
.def_rw("ptr", &SharedWrapperST::value)
.def_prop_rw("value",
[](SharedWrapperST &t) { return t.value->value; },
[](SharedWrapperST &t, int value) { t.value->value = value; });
m.def("owns_cpp", [](nb::handle h) { return nb::inst_state(h).second; });
m.def("same_owner", [](const SharedWrapperST& a,
const SharedWrapperST& b) {
return !a.value.owner_before(b.value) && !b.value.owner_before(a.value);
});
// ------- unique_ptr -------
m.def("unique_from_cpp",
[](int val) { return std::make_unique<const Example>(val); },
nb::arg() = 1);
m.def("unique_from_cpp_2", []() {
return std::unique_ptr<Example, nb::deleter<Example>>(new Example(2));
});
nb::class_<UniqueWrapper>(m, "UniqueWrapper")
.def(nb::init<std::unique_ptr<Example>>())
.def("get", [](UniqueWrapper *uw) { return std::move(uw->value); });
nb::class_<UniqueWrapper2>(m, "UniqueWrapper2")
.def(nb::init<std::unique_ptr<Example, nb::deleter<Example>>>())
.def("get", [](UniqueWrapper2 *uw) { return std::move(uw->value); });
m.def("passthrough_unique",
[](std::unique_ptr<Example> unique) { return unique; },
nb::arg().none());
m.def("passthrough_unique_2",
[](std::unique_ptr<Example, nb::deleter<Example>> unique) { return unique; });
m.def("passthrough_unique_pairs",
[](std::vector<std::pair<std::unique_ptr<Example>,
std::unique_ptr<Example>>> v,
bool clear) {
if (clear)
v.clear();
return v;
}, nb::arg("v"), nb::arg("clear") = false);
m.def("stats", []{ return std::make_pair(created, deleted); });
m.def("reset", []{ created = deleted = 0; });
struct Base { ~Base() = default; };
struct PolymorphicBase { virtual ~PolymorphicBase() = default; };
struct Subclass : Base { };
struct PolymorphicSubclass : PolymorphicBase { };
struct AnotherSubclass : Base { };
struct AnotherPolymorphicSubclass : PolymorphicBase { };
nb::class_<Base> (m, "Base");
nb::class_<Subclass> (m, "Subclass");
nb::class_<PolymorphicBase> (m, "PolymorphicBase");
nb::class_<PolymorphicSubclass> (m, "PolymorphicSubclass");
m.def("u_polymorphic_factory", []() { return std::unique_ptr<PolymorphicBase>(new PolymorphicSubclass()); });
m.def("u_polymorphic_factory_2", []() { return std::unique_ptr<PolymorphicBase>(new AnotherPolymorphicSubclass()); });
m.def("u_factory", []() { return std::unique_ptr<Base>(new Subclass()); });
m.def("u_factory_2", []() { return std::unique_ptr<Base>(new AnotherSubclass()); });
m.def("s_polymorphic_factory", []() { return std::shared_ptr<PolymorphicBase>(new PolymorphicSubclass()); });
m.def("s_polymorphic_factory_2", []() { return std::shared_ptr<PolymorphicBase>(new AnotherPolymorphicSubclass()); });
m.def("s_factory", []() { return std::shared_ptr<Base>(new Subclass()); });
m.def("s_factory_2", []() { return std::shared_ptr<Base>(new AnotherSubclass()); });
nb::class_<Pet>(m, "Pet");
nb::class_<Dog>(m, "Dog");
nb::class_<Cat>(m, "Cat");
nb::enum_<PetKind>(m, "PetKind")
.value("Cat", PetKind::Cat)
.value("Dog", PetKind::Dog);
m.def("make_pet", [](PetKind kind) -> Pet* {
switch (kind) {
case PetKind::Dog:
return new Dog();
case PetKind::Cat:
return new Cat();
default:
throw std::runtime_error("Internal error");
}
});
m.def("make_pet_u", [](PetKind kind) -> std::unique_ptr<Pet> {
switch (kind) {
case PetKind::Dog:
return std::make_unique<Dog>();
case PetKind::Cat:
return std::make_unique<Cat>();
default:
throw std::runtime_error("Internal error");
}
});
m.def("make_pet_s", [](PetKind kind) -> std::shared_ptr<Pet> {
switch (kind) {
case PetKind::Dog:
return std::make_shared<Dog>();
case PetKind::Cat:
return std::make_shared<Cat>();
default:
throw std::runtime_error("Internal error");
}
});
struct ExampleWrapper {
Example value{5};
std::shared_ptr<Example> value_nullable;
};
nb::class_<ExampleWrapper>(m, "ExampleWrapper")
.def(nb::init<>())
.def_rw("value", &ExampleWrapper::value)
.def_rw("value_nullable", &ExampleWrapper::value_nullable, nb::arg().none());
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_holders.py | Python | import sys
import test_holders_ext as t
import pytest
from common import collect
# Reference counting behavior changed on 3.14a7+
py_3_14a7_or_newer = sys.version_info >= (3, 14, 0, 'alpha', 7)
@pytest.fixture
def clean():
collect()
t.reset()
# ------------------------------------------------------------------
def test01_create(clean):
e = t.Example(123)
assert e.value == 123
assert t.query_shared_1(e) == 123
assert t.query_shared_2(e) == 123
assert t.query_shared_3(e) == 123
del e
collect()
assert t.stats() == (1, 1)
def test02_sharedptr_from_python(clean):
e = t.Example(234)
w = t.SharedWrapper(e)
assert w.ptr is e
del e
collect()
assert t.stats() == (1, 0)
del w
collect()
assert t.stats() == (1, 1)
w = t.SharedWrapper(t.Example(234))
collect()
assert t.stats() == (2, 1)
w.ptr = t.Example(0)
collect()
assert t.stats() == (3, 2)
del w
collect()
assert t.stats() == (3, 3)
def test03_sharedptr_from_cpp(clean):
e = t.Example.make(5)
assert t.passthrough(e) is e
assert t.passthrough_2(e) is e
assert t.query_shared_1(e) == 5
assert t.query_shared_2(e) == 5
assert t.query_shared_3(e) == 5
w = t.SharedWrapper(e)
assert e is not w.value
assert w.value == 5
w.value = 6
assert e.value == 6
del w, e
e = t.Example.make_shared(6)
assert t.query_shared_1(e) == 6
assert t.query_shared_2(e) == 6
assert t.query_shared_3(e) == 6
assert t.passthrough(e) is e
assert t.passthrough_2(e) is e
w = t.SharedWrapper(e)
assert e is not w.value
assert w.value == 6
del w, e
collect()
assert t.stats() == (2, 2)
# ------------------------------------------------------------------
def test04_uniqueptr_from_cpp(clean):
a = t.unique_from_cpp()
b = t.unique_from_cpp_2()
assert a.value == 1
assert b.value == 2
del a, b
collect()
assert t.stats() == (2, 2)
def test05a_uniqueptr_from_cpp(clean):
# Test ownership exchange when the object has been created on the C++ side
a = t.unique_from_cpp()
b = t.unique_from_cpp_2()
wa = t.UniqueWrapper(a)
wb = t.UniqueWrapper(b)
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError) as excinfo:
assert a.value == 1
assert 'incompatible function arguments' in str(excinfo.value)
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError) as excinfo:
assert b.value == 2
assert 'incompatible function arguments' in str(excinfo.value)
del a, b
del wa, wb
collect()
assert t.stats() == (2, 2)
t.reset()
a = t.unique_from_cpp()
b = t.unique_from_cpp_2()
wa = t.UniqueWrapper(a)
wb = t.UniqueWrapper(b)
a2 = wa.get()
b2 = wb.get()
assert a2.value == 1 and b2.value == 2
assert a2 is a and b2 is b
assert a.value == 1 and b.value == 2
collect()
assert t.stats() == (2, 0)
del a, b, a2, b2
collect()
assert t.stats() == (2, 2)
def test05b_uniqueptr_list(clean):
t.reset()
# Test ownership exchange as part of a larger data structure
k = t.unique_from_cpp(1)
v = t.unique_from_cpp(2)
res = t.passthrough_unique_pairs([(k, v)])
assert res == [(k, v)]
assert k.value == 1 and v.value == 2
res = t.passthrough_unique_pairs([(k, v)], clear=True)
assert res == []
for obj in (k, v):
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError) as excinfo:
obj.value
collect()
assert t.stats() == (2, 2)
def test05c_uniqueptr_structure_duplicate(clean):
t.reset()
# Test ownership exchange that fails partway through
# (can't take ownership from k twice)
k = t.unique_from_cpp(3)
with pytest.warns(RuntimeWarning, match=r'nanobind::detail::nb_relinquish_ownership()'):
with pytest.raises(TypeError):
t.passthrough_unique_pairs([(k, k)])
# Ownership passes back to Python
assert k.value == 3
del k
collect()
assert t.stats() == (1, 1)
def test05d_uniqueptr_reinit(clean):
x = t.unique_from_cpp()
assert x.value == 1
w = t.UniqueWrapper(x)
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError):
x.value
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError):
x.__init__(3)
y = w.get()
assert y is x and y.value == 1
def test06_uniqueptr_from_py(clean):
# Test ownership exchange when the object has been created on the Python side
a = t.Example(1)
with pytest.warns(RuntimeWarning, match=r'nanobind::detail::nb_relinquish_ownership()'):
with pytest.raises(TypeError) as excinfo:
wa = t.UniqueWrapper(a)
wa = t.UniqueWrapper2(a)
with pytest.warns(RuntimeWarning, match='nanobind: attempted to access a relinquished instance of type \'test_holders_ext.Example\'!'):
with pytest.raises(TypeError) as excinfo:
assert a.value == 1
assert 'incompatible function arguments' in str(excinfo.value)
a2 = wa.get()
assert a2.value == 1 and a is a2
del a, a2
collect()
assert t.stats() == (1, 1)
# Test that ownership exchange as part of a larger data structure fails
# gracefully rather than crashing
k = t.Example(1)
v = t.Example(2)
with pytest.warns(RuntimeWarning, match=r'nanobind::detail::nb_relinquish_ownership()'):
with pytest.raises(TypeError) as excinfo:
t.passthrough_unique_pairs([(k, v)])
assert k.value == 1 and v.value == 2
# Test the case where the key relinquishes ownership successfully and
# then the value can't do
v = t.unique_from_cpp(3)
with pytest.warns(RuntimeWarning, match=r'nanobind::detail::nb_relinquish_ownership()'):
with pytest.raises(TypeError) as excinfo:
t.passthrough_unique_pairs([(k, v)])
assert k.value == 1 and v.value == 3
del k, v
collect()
assert t.stats() == (4, 4)
def test07_uniqueptr_passthrough(clean):
assert t.passthrough_unique(None) is None
assert t.passthrough_unique(t.unique_from_cpp()).value == 1
assert t.passthrough_unique(t.unique_from_cpp_2()).value == 2
assert t.passthrough_unique_2(t.unique_from_cpp()).value == 1
assert t.passthrough_unique_2(t.unique_from_cpp_2()).value == 2
collect()
assert t.stats() == (4, 4)
t.reset()
with pytest.warns(RuntimeWarning, match=r'nanobind::detail::nb_relinquish_ownership()'):
with pytest.raises(TypeError):
assert t.passthrough_unique(t.Example(1)).value == 1
assert t.passthrough_unique_2(t.Example(1)).value == 1
collect()
assert t.stats() == (2, 2)
def test07_polymorphic_downcast_unique():
assert isinstance(t.u_factory(), t.Base)
assert isinstance(t.u_factory_2(), t.Base)
assert isinstance(t.u_polymorphic_factory(), t.PolymorphicSubclass)
assert isinstance(t.u_polymorphic_factory_2(), t.PolymorphicBase)
def test08_polymorphic_downcast_shared():
assert isinstance(t.s_factory(), t.Base)
assert isinstance(t.s_factory_2(), t.Base)
assert isinstance(t.s_polymorphic_factory(), t.PolymorphicSubclass)
assert isinstance(t.s_polymorphic_factory_2(), t.PolymorphicBase)
def test09_tag_based():
assert isinstance(t.make_pet(t.PetKind.Dog), t.Dog)
assert isinstance(t.make_pet(t.PetKind.Cat), t.Cat)
def test09_tag_based_unique():
assert isinstance(t.make_pet_u(t.PetKind.Dog), t.Dog)
assert isinstance(t.make_pet_u(t.PetKind.Cat), t.Cat)
def test09_tag_based_shared():
assert isinstance(t.make_pet_s(t.PetKind.Dog), t.Dog)
assert isinstance(t.make_pet_s(t.PetKind.Cat), t.Cat)
def check_shared_from_this_py_owned(ty, factory, value):
e = ty(value)
# Creating from Python does not enable shared_from_this
assert e.value == value
assert not e.has_shared_from_this()
assert t.owns_cpp(e)
# Passing to C++ as a shared_ptr does
w = t.SharedWrapperST(e)
assert e.has_shared_from_this()
assert w.ptr is e
# Execute shared_from_this on the C++ side
w2 = t.SharedWrapperST.from_existing(e)
assert e.use_count() == 2
assert w.value == w2.value == e.value == value
assert t.same_owner(w, w2)
# Returning a raw pointer from C++ locates the existing instance
assert w2.get_own() is w2.get_ref() is e
assert t.owns_cpp(e)
if hasattr(sys, "getrefcount"):
# One reference is held by the C++ shared_ptr, one by our
# locals dict, and one by the arg to getrefcount
rc = sys.getrefcount(e)
# On Python 3.14a7, an optimization was introduced where
# stack-based function calling no longer acquires a reference
if py_3_14a7_or_newer:
assert rc == 2 or rc == 3
else:
assert rc == 3
# Dropping the Python object does not actually destroy it, because
# the shared_ptr holds a reference. There is still a PyObject* at
# the same address.
prev_id = id(e)
del e
collect()
assert t.stats() == (1, 0)
assert id(w.get_ref()) == prev_id
assert t.owns_cpp(w.get_ref())
assert type(w.get_ref()) is ty
# Dropping the wrappers' shared_ptrs drops the PyObject reference and
# the object is finally GC'ed
del w, w2
collect()
assert t.stats() == (1, 1)
def test10_shared_from_this_create_in_python(clean):
check_shared_from_this_py_owned(t.ExampleST, t.ExampleST, 42)
# Subclass in C++
t.reset()
check_shared_from_this_py_owned(t.DerivedST, t.DerivedST, 30)
# Subclass in Python
class SubST(t.ExampleST):
pass
t.reset()
check_shared_from_this_py_owned(SubST, SubST, 20)
def test11_shared_from_this_create_raw_in_cpp(clean):
# Creating a raw pointer from C++ does not enable shared_from_this;
# although the object is held by pointer rather than value, the logical
# ownership transfers to Python and the behavior is equivalent to test10.
# Once we get a shared_ptr it owns a reference to the Python object.
check_shared_from_this_py_owned(t.ExampleST, t.ExampleST.make, 10)
# Subclass in C++
t.reset()
check_shared_from_this_py_owned(t.DerivedST, t.DerivedST.make, 5)
def test12_shared_from_this_create_shared_in_cpp(clean):
# Creating a shared_ptr from C++ enables shared_from_this. Now the
# shared_ptr does not keep the Python object alive; it's directly
# owning the ExampleST object on the C++ side.
e = t.ExampleST.make_shared(10)
assert e.value == 10
assert e.has_shared_from_this()
assert e.shared_from_this() is e # same instance
assert e.use_count() == 1
assert not t.owns_cpp(e)
if hasattr(sys, "getrefcount"):
# One reference is held by our locals dict and one by the
# arg to getrefcount
rc = sys.getrefcount(e)
# On Python 3.14a7, an optimization was introduced where
# stack-based function calling no longer acquires a reference
if py_3_14a7_or_newer:
assert rc == 1 or rc == 2
else:
assert rc == 2
w = t.SharedWrapperST.from_existing(e)
assert w.ptr is e
# One shared_ptr whose lifetime is tied to e. And one inside the wrapper
assert e.use_count() == 2
# Drop the Python object; C++ object still remains owned by the wrapper
del e
collect()
assert t.stats() == (1, 0)
assert w.use_count() == 1
# Get a new Python object reference; it will share ownership of the
# same underlying C++ object
e2 = w.get_own()
assert not t.owns_cpp(e2)
assert w.ptr is e2
assert w.use_count() == 2
del e2
collect()
assert t.stats() == (1, 0)
assert w.use_count() == 1
# Get a new C++-side reference
w2 = t.SharedWrapperST.from_wrapper(w)
assert w2.use_count() == 2
assert t.same_owner(w, w2)
# Get another one by roundtripping through Python.
# The nanobind conversion to shared_ptr<ExampleST> should use the
# existing shared_from_this shared_ptr
w3 = t.SharedWrapperST(w.ptr)
collect() # on pypy the w.ptr temporary can stay alive
assert w3.use_count() == 3
assert t.same_owner(w2, w3)
# Destroy everything
assert t.stats() == (1, 0)
del w, w2, w3
collect()
assert t.stats() == (1, 1)
def test13_shared_from_this_create_derived_in_cpp(clean):
# This tests that keep_shared_from_this_alive is inherited by
# derived classes properly
# Pass shared_ptr<T> to Python
e = t.DerivedST.make_shared(20)
assert type(e) is t.DerivedST
assert e.value == 20
assert e.has_shared_from_this()
assert not t.owns_cpp(e)
assert e.use_count() == 1
# Pass it back to C++
w = t.SharedWrapperST(e)
assert e.use_count() == w.use_count() == 2
del e
collect()
assert t.stats() == (1, 0)
assert w.use_count() == 1
# Pass it back to Python as a raw pointer
e = w.get_own()
# ExampleST is not polymorphic, so the derived-class identity is
# lost once the Python instance is destroyed
assert type(e) is t.ExampleST
assert not t.owns_cpp(e)
assert w.use_count() == 2
assert w.get_own() is e
del e, w
collect()
assert t.stats() == (1, 1)
def test14_nullable_properties():
a = t.ExampleWrapper()
assert a.value is not None
assert a.value.value == 5
assert a.value_nullable is None
a.value = t.Example(123)
a.value_nullable = t.Example(456)
assert a.value.value == 123
assert a.value_nullable.value == 456
a.value_nullable = None
assert a.value_nullable is None
with pytest.raises(TypeError):
a.value = None
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_inter_module.py | Python | import test_inter_module_1_ext as t1
import test_inter_module_2_ext as t2
import test_classes_ext as t3
import pytest
from common import xfail_on_pypy_darwin
try:
from concurrent import interpreters # Added in Python 3.14
def needs_interpreters(x):
return x
except:
needs_interpreters = pytest.mark.skip(reason="interpreters required")
@xfail_on_pypy_darwin
def test01_inter_module():
s = t1.create_shared()
assert t2.check_shared(s, 123)
t2.increment_shared(s)
assert t2.check_shared(s, 124)
with pytest.raises(TypeError) as excinfo:
assert t3.check_shared(s)
assert 'incompatible function arguments' in str(excinfo.value)
@xfail_on_pypy_darwin
def test02_reload_module():
s1 = t1.create_shared()
s2 = t1.create_shared()
assert s2 is not s1
assert type(s2) is type(s1)
t2.increment_shared(s2)
import importlib
new_t1 = importlib.reload(t1)
assert new_t1 is t1
s3 = new_t1.create_shared()
assert type(s3) is type(s1)
new_t2 = importlib.reload(t2)
assert new_t2 is t2
s4 = new_t1.create_shared()
assert type(s4) is type(s1)
assert new_t2.check_shared(s2, 124)
@xfail_on_pypy_darwin
def test03_reimport_module():
s1 = t1.create_shared()
s2 = t1.create_shared()
t2.increment_shared(s2)
import sys
del sys.modules['test_inter_module_1_ext']
import test_inter_module_1_ext as new_t1
assert new_t1 is not t1
s3 = new_t1.create_shared()
assert type(s3) is type(s1)
del sys.modules['test_inter_module_2_ext']
with pytest.warns(RuntimeWarning, match="'Shared' was already registered"):
import test_inter_module_2_ext as new_t2
assert new_t2 is not t2
s4 = new_t1.create_shared()
assert type(s4) is type(s1)
assert new_t2.check_shared(s2, 124)
def run():
import sys
if 'tests' not in sys.path[0]:
import os
builddir = sys.path[0]
sys.path.insert(0, os.path.join(builddir, 'tests', 'Release'))
sys.path.insert(0, os.path.join(builddir, 'tests', 'Debug'))
sys.path.insert(0, os.path.join(builddir, 'tests'))
import test_inter_module_1_ext as new_t1
import test_inter_module_2_ext as new_t2
success = True
s = new_t1.create_shared()
success &= new_t2.check_shared(s, 123)
new_t2.increment_shared(s)
success &= new_t2.check_shared(s, 124)
return success
@needs_interpreters
def test04_subinterpreters():
assert run()
interp = interpreters.create()
with pytest.raises(interpreters.ExecutionFailed) as excinfo:
assert interp.call(run)
assert 'does not support loading in subinterpreters' in str(excinfo.value)
interp.close()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_inter_module_1.cpp | C++ | #include <nanobind/nanobind.h>
#include "inter_module.h"
namespace nb = nanobind;
NB_MODULE(test_inter_module_1_ext, m) {
m.def("create_shared", &create_shared);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_inter_module_2.cpp | C++ | #include <nanobind/nanobind.h>
#include "inter_module.h"
namespace nb = nanobind;
NB_MODULE(test_inter_module_2_ext, m) {
nb::class_<Shared>(m, "Shared");
m.def("check_shared", &check_shared);
m.def("increment_shared", &increment_shared);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_intrusive.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/stl/pair.h>
#include <nanobind/trampoline.h>
#include <nanobind/intrusive/counter.h>
#include <nanobind/intrusive/ref.h>
namespace nb = nanobind;
using namespace nb::literals;
static int test_constructed = 0;
static int test_destructed = 0;
class Test : public nb::intrusive_base {
public:
Test() { test_constructed++; }
virtual ~Test() { test_destructed++; }
virtual int value(int i) const { return 123 + i; }
static Test *create_raw() { return new Test(); }
static nb::ref<Test> create_ref() { return new Test(); }
};
class PyTest : Test {
NB_TRAMPOLINE(Test, 1);
virtual int value(int i) const {
NB_OVERRIDE(value, i);
}
};
NB_MODULE(test_intrusive_ext, m) {
nb::intrusive_init(
[](PyObject *o) noexcept {
nb::gil_scoped_acquire guard;
Py_INCREF(o);
},
[](PyObject *o) noexcept {
nb::gil_scoped_acquire guard;
Py_DECREF(o);
});
nb::class_<nb::intrusive_base>(
m, "intrusive_base",
nb::intrusive_ptr<nb::intrusive_base>(
[](nb::intrusive_base *o, PyObject *po) noexcept { o->set_self_py(po); }));
nb::class_<Test, nb::intrusive_base, PyTest>(m, "Test")
.def(nb::init<>())
.def("value", &Test::value)
.def_static("create_raw", &Test::create_raw)
.def_static("create_ref", &Test::create_ref);
m.def("reset", [] {
test_constructed = 0;
test_destructed = 0;
});
m.def("stats", []() -> std::pair<int, int> {
return { test_constructed, test_destructed };
});
m.def("get_value_1", [](Test *o) { nb::ref<Test> x(o); return x->value(1); });
m.def("get_value_2", [](nb::ref<Test> x) { return x->value(2); });
m.def("get_value_3", [](const nb::ref<Test> &x) { return x->value(3); });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_intrusive.py | Python | import test_intrusive_ext as t
import pytest
from common import collect
@pytest.fixture
def clean():
collect()
t.reset()
def test01_construct(clean):
o = t.Test()
assert o.value(0) == 123
assert t.get_value_1(o) == 124
assert t.get_value_2(o) == 125
assert t.get_value_3(o) == 126
del o
collect()
assert t.stats() == (1, 1)
def test02_factory(clean):
o = t.Test.create_raw()
assert o.value(0) == 123
assert t.get_value_1(o) == 124
assert t.get_value_2(o) == 125
assert t.get_value_3(o) == 126
del o
collect()
assert t.stats() == (1, 1)
def test03_factory_ref(clean):
o = t.Test.create_ref()
assert o.value(0) == 123
assert t.get_value_1(o) == 124
assert t.get_value_2(o) == 125
assert t.get_value_3(o) == 126
del o
collect()
assert t.stats() == (1, 1)
def test04_subclass(clean):
class MyTest(t.Test):
def __init__(self, x):
super().__init__()
self.x = x
def value(self, value):
return self.x * value
o = MyTest(456)
assert o.value(0) == 0
assert t.get_value_1(o) == 456
assert t.get_value_2(o) == 456*2
assert t.get_value_3(o) == 456*3
del o
collect()
assert t.stats() == (1, 1)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_intrusive_impl.cpp | C++ | #include <nanobind/intrusive/counter.inl>
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_issue.cpp | C++ | #include <nanobind/stl/shared_ptr.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/vector.h>
#include <unordered_map>
namespace nb = nanobind;
using namespace nb::literals;
NB_MODULE(test_issue_ext, m) {
// ------------------------------------
// issue #279: dynamic_attr broken
// ------------------------------------
struct Component {
virtual ~Component() = default;
};
struct Param : Component { };
struct Model : Component {
void add_param(const std::string &name, std::shared_ptr<Param> p) {
params_[name] = std::move(p);
}
std::shared_ptr<Param> get_param(const std::string &name) {
return params_.find(name) != params_.end() ? params_[name] : nullptr;
}
std::unordered_map<std::string, std::shared_ptr<Param>> params_;
};
struct ModelA : Model {
ModelA() {
add_param("a", std::make_shared<Param>());
add_param("b", std::make_shared<Param>());
}
};
nb::class_<Component>(m, "Component");
nb::class_<Param, Component>(m, "ParamBase");
nb::class_<Model, Component>(m, "Model", nb::dynamic_attr()).def(nb::init<>{})
.def("_get_param", &Model::get_param, "name"_a)
.def("_add_param", &Model::add_param, "name"_a, "p"_a);
nb::class_<ModelA, Model>(m, "ModelA").def(nb::init<>{});
/// Issue #307: move constructor unexpectedly called
struct Example { std::string text; };
nb::class_<Example>(m, "Example")
.def(nb::init<const std::string&>())
.def("__repr__",
[](const Example& e) {
return std::string("Example(\"") + e.text + "\")";
});
m.def("process",
[](const std::vector<Example>& v) {
return v.size();
}, nb::arg("v"));
// pull/602: stack corruption when binding a copy constructor with a named argument.
struct Empty {};
nb::class_<Empty>(m, "Empty")
.def(nb::init<const Empty&>(), nb::arg("original"));
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_issue.py | Python | import test_issue_ext as m
import pytest
# Issue #279: dynamic_attr broken
@pytest.mark.parametrize("variant", [1, 2])
def test01_issue_279(variant):
def _get_parameter(self: m.Model, key: str):
p = self._get_param(key)
if p is not None: # cache it for fast access later
setattr(self, key, p)
return p
raise AttributeError(f"'key' not found in {self}")
m.Model.__getattr__ = _get_parameter
if variant == 2:
def _print_model(self):
return f"{self.__class__.__qualname__}()"
m.Model.__str__ = _print_model
class Top(m.Model):
def __init__(self):
super().__init__()
self.model_a = m.ModelA()
top = Top()
str(top.model_a)
str(top.model_a.a)
# Issue #307: move constructor unexpectedly called
def test02_issue_307():
l = [m.Example("A"), m.Example("B")]
assert str(l) == '[Example("A"), Example("B")]'
m.process(l)
assert str(l) == '[Example("A"), Example("B")]'
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_jax.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/ndarray.h>
namespace nb = nanobind;
int destruct_count = 0;
NB_MODULE(test_jax_ext, m) {
m.def("destruct_count", []() { return destruct_count; });
m.def("ret_jax", []() {
struct alignas(64) Buf {
float f[8];
};
Buf *buf = new Buf({ 1, 2, 3, 4, 5, 6, 7, 8 });
size_t shape[2] = { 2, 4 };
nb::capsule deleter(buf, [](void *p) noexcept {
destruct_count++;
delete (Buf *) p;
});
return nb::ndarray<nb::jax, float, nb::shape<2, 4>>(buf->f, 2, shape,
deleter);
});
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_jax.py | Python | import test_ndarray_ext as t
import test_jax_ext as tj
import pytest
import warnings
import importlib
from common import collect
try:
import jax.numpy as jnp
def needs_jax(x):
return x
except:
needs_jax = pytest.mark.skip(reason="JAX is required")
@needs_jax
def test01_constrain_order():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
c = jnp.zeros((3, 5))
except:
pytest.skip('jax is missing')
z = jnp.zeros((3, 5, 4, 6))
assert t.check_order(z) == 'C'
@needs_jax
def test02_implicit_conversion():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
c = jnp.zeros((3, 5))
except:
pytest.skip('jax is missing')
t.implicit(jnp.zeros((2, 2), dtype=jnp.int32))
t.implicit(jnp.zeros((2, 2, 10), dtype=jnp.float32)[:, :, 4])
t.implicit(jnp.zeros((2, 2, 10), dtype=jnp.int32)[:, :, 4])
t.implicit(jnp.zeros((2, 2, 10), dtype=jnp.bool_)[:, :, 4])
with pytest.raises(TypeError) as excinfo:
t.noimplicit(jnp.zeros((2, 2), dtype=jnp.int32))
with pytest.raises(TypeError) as excinfo:
t.noimplicit(jnp.zeros((2, 2), dtype=jnp.uint8))
@needs_jax
def test03_return_jax():
collect()
dc = tj.destruct_count()
x = tj.ret_jax()
assert x.shape == (2, 4)
assert jnp.all(x == jnp.array([[1,2,3,4], [5,6,7,8]], dtype=jnp.float32))
del x
collect()
assert tj.destruct_count() - dc == 1
@needs_jax
def test04_check():
assert t.check(jnp.zeros((1)))
@needs_jax
def test05_passthrough():
a = tj.ret_jax()
b = t.passthrough(a)
assert a is b
a = jnp.array([1, 2, 3])
b = t.passthrough(a)
assert a is b
a = None
with pytest.raises(TypeError) as excinfo:
b = t.passthrough(a)
assert 'incompatible function arguments' in str(excinfo.value)
b = t.passthrough_arg_none(a)
assert a is b
@needs_jax
def test06_ro_array():
if (not hasattr(jnp, '__array_api_version__')
or jnp.__array_api_version__ < '2024'):
pytest.skip('jax version is too old')
a = jnp.array([1, 2], dtype=jnp.float32) # JAX arrays are immutable.
assert t.accept_ro(a) == 1
# If the next line fails, delete it, update the array_api_version above,
# and uncomment the three lines below.
assert t.accept_rw(a) == 1
# with pytest.raises(TypeError) as excinfo:
# t.accept_rw(a)
# assert 'incompatible function arguments' in str(excinfo.value)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_make_iterator.cpp | C++ | #include <nanobind/make_iterator.h>
#include <nanobind/stl/unordered_map.h>
#include <nanobind/stl/string.h>
namespace nb = nanobind;
NB_MODULE(test_make_iterator_ext, m) {
struct StringMap {
std::unordered_map<std::string, std::string> map;
decltype(map.cbegin()) begin() const { return map.cbegin(); }
decltype(map.cend()) end() const { return map.cend(); }
};
nb::class_<StringMap>(m, "StringMap")
.def(nb::init<>())
.def(nb::init<std::unordered_map<std::string, std::string>>())
.def("__iter__",
[](const StringMap &map) {
return nb::make_key_iterator(nb::type<StringMap>(),
"key_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>())
.def("items",
[](const StringMap &map) {
return nb::make_iterator(nb::type<StringMap>(),
"item_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>())
.def("items_l",
[](const StringMap &map) {
// Make sure iterators don't dangle even if passed as lvalue
auto begin = map.begin(), end = map.end();
return nb::make_iterator(nb::type<StringMap>(),
"item_iterator_l",
begin, end);
}, nb::keep_alive<0, 1>())
.def("values", [](const StringMap &map) {
return nb::make_value_iterator(nb::type<StringMap>(),
"value_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>());
nb::handle mod = m;
m.def("iterator_passthrough", [mod](nb::iterator s) -> nb::iterator {
return nb::make_iterator(mod, "pt_iterator", std::begin(s), std::end(s));
});
// test of map where dereferencing the iterator returns a value,
// not a reference
struct IdentityMap {
struct iterator {
int val;
std::pair<int, int> operator*() const { return {val, val}; }
iterator& operator++() { ++val; return *this; }
bool operator==(const iterator& other) const {
return val == other.val;
}
};
iterator begin() const { return iterator{0}; }
iterator end() const { return iterator{10}; }
};
nb::class_<IdentityMap>(m, "IdentityMap")
.def(nb::init<>())
.def("__iter__",
[](const IdentityMap &map) {
return nb::make_key_iterator(nb::type<IdentityMap>(),
"key_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>())
.def("items",
[](const IdentityMap &map) {
return nb::make_iterator(nb::type<IdentityMap>(),
"item_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>())
.def("items_l",
[](const IdentityMap &map) {
auto begin = map.begin(), end = map.end();
return nb::make_iterator(nb::type<IdentityMap>(),
"item_iterator_l",
begin, end);
}, nb::keep_alive<0, 1>())
.def("values", [](const IdentityMap &map) {
return nb::make_value_iterator(nb::type<IdentityMap>(),
"value_iterator",
map.begin(),
map.end());
}, nb::keep_alive<0, 1>());
nb::list all;
all.append("iterator_passthrough");
all.append("StringMap");
all.append("IdentityMap");
m.attr("__all__") = all;
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_make_iterator.py | Python | import test_make_iterator_ext as t
from common import parallelize
data = [
{},
{ 'a' : 'b' },
{ str(i) : chr(i) for i in range(1000) }
]
def test01_key_iterator():
for d in data:
m = t.StringMap(d)
assert sorted(list(m)) == sorted(list(d))
def test02_value_iterator():
types = []
for d in data:
m = t.StringMap(d)
types.append(type(m.values()))
assert sorted(list(m.values())) == sorted(list(d.values()))
assert types[0] is types[1] and types[1] is types[2]
def test03_items_iterator():
for d in data:
m = t.StringMap(d)
assert sorted(list(m.items())) == sorted(list(d.items()))
assert sorted(list(m.items_l())) == sorted(list(d.items()))
def test03_items_iterator_parallel(n_threads=8):
parallelize(test03_items_iterator, n_threads=n_threads)
def test04_passthrough_iterator():
for d in data:
m = t.StringMap(d)
assert list(t.iterator_passthrough(m.values())) == list(m.values())
def test05_iterator_returning_temporary():
im = t.IdentityMap()
assert list(im) == list(range(10))
assert list(im.values()) == list(range(10))
assert list(im.items()) == list(zip(range(10), range(10)))
assert list(im.items_l()) == list(zip(range(10), range(10)))
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_ndarray.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/ndarray.h>
#include <nanobind/stl/pair.h>
#include <algorithm>
#include <complex>
#include <vector>
namespace nb = nanobind;
using namespace nb::literals;
// Incremented by every capsule deleter / destructor below; queried from
// Python via destruct_count() to verify that ndarray storage is reclaimed.
int destruct_count = 0;

// Static storage returned by reference from the ret_numpy_const_ref* /
// ret_infer_* / cast bindings below (never freed, hence nb::handle() owner).
static float f_global[] { 1, 2, 3, 4, 5, 6, 7, 8 };
static int i_global[] { 1, 2, 3, 4, 5, 6, 7, 8 };
#if defined(__aarch64__) || defined(__AVX512FP16__)
// Register a dtype mapping for the compiler-provided _Float16 type so that
// half-precision ndarrays can be created on platforms that support it.
namespace nanobind::detail {
    template <> struct dtype_traits<_Float16> {
        static constexpr dlpack::dtype value {
            (uint8_t) dlpack::dtype_code::Float, // type code
            16, // size in bits
            1 // lanes (simd)
        };
        static constexpr auto name = const_name("float16");
    };
}
#endif
// Shared checker behind the check_rw_*/check_ro_* bindings below. Verifies at
// compile time that the ndarray's ReadOnly flag and the const-ness of
// data(), view data pointers, and element accessors all equal 'expect_ro',
// and at runtime that every view aliases the array's own storage. When the
// array is writable, elements are mutated in place so the Python caller can
// observe the writes; element (3) is expected to hold 3.14159 on entry.
template<bool expect_ro, bool is_shaped, typename... Ts>
bool check_ro(const nb::ndarray<Ts...>& a) { // Pytest passes five doubles
    static_assert(std::remove_reference_t<decltype(a)>::ReadOnly == expect_ro);
    static_assert(std::is_const_v<std::remove_pointer_t<decltype(a.data())>>
                  == expect_ro);
    // A mutable view is only const if the array itself is read-only ...
    auto vd = a.template view<double, nb::ndim<1>>();
    static_assert(std::is_const_v<std::remove_pointer_t<decltype(vd.data())>>
                  == expect_ro);
    static_assert(std::is_const_v<std::remove_reference_t<decltype(vd(0))>>
                  == expect_ro);
    // ... while a 'const double' view is always const.
    auto vcd = a.template view<const double, nb::ndim<1>>();
    static_assert(std::is_const_v<std::remove_pointer_t<decltype(vcd.data())>>);
    static_assert(std::is_const_v<std::remove_reference_t<decltype(vcd(0))>>);
    bool pass = vd.data() == a.data() && vcd.data() == a.data();
    if constexpr (!expect_ro) {
        // Write through the mutable view, read back through the const one
        vd(1) = 1.414214;
        pass &= vcd(1) == 1.414214;
    }
    if constexpr (is_shaped) {
        // When the ndarray type carries dtype/ndim info, the default view()
        // and direct operator() must obey the same const rules.
        static_assert(std::is_const_v<std::remove_reference_t<decltype(a(0))>>
                      == expect_ro);
        auto v = a.view();
        static_assert(std::is_const_v<std::remove_pointer_t<decltype(v.data())>>
                      == expect_ro);
        static_assert(std::is_const_v<std::remove_reference_t<decltype(v(0))>>
                      == expect_ro);
        pass &= v.data() == a.data();
        if constexpr (!expect_ro) {
            a(2) = 2.718282;
            v(4) = 16.0;
        }
    }
    pass &= vcd(3) == 3.14159;
    return pass;
}
// Extension module exercised by tests/test_ndarray.py. Each binding probes
// one aspect of nb::ndarray: metadata access, dtype/shape/order/device
// constraints, read-only handling, ownership/return-value policies,
// framework interop (NumPy/PyTorch/memoryview/array-API), and views.
NB_MODULE(test_ndarray_ext, m) {
    // --- Metadata accessors ('.none()' variants also accept Python None) ---
    m.def("get_is_valid", [](const nb::ndarray<nb::ro> &t) {
        return t.is_valid();
    }, "array"_a.noconvert().none());
    m.def("get_shape", [](const nb::ndarray<nb::ro> &t) {
        nb::list l;
        for (size_t i = 0; i < t.ndim(); ++i)
            l.append(t.shape(i));
        return l;
    }, "array"_a.noconvert());
    m.def("get_size", [](const nb::ndarray<> &t) {
        return t.size();
    }, "array"_a.noconvert().none());
    m.def("get_itemsize", [](const nb::ndarray<> &t) {
        return t.itemsize();
    }, "array"_a.noconvert().none());
    m.def("get_nbytes", [](const nb::ndarray<> &t) {
        return t.nbytes();
    }, "array"_a.noconvert().none());
    m.def("get_stride", [](const nb::ndarray<> &t, size_t i) {
        return t.stride(i);
    }, "array"_a.noconvert(), "i"_a);
    // shape_ptr()/stride_ptr() must agree with shape(i)/stride(i)
    m.def("check_shape_ptr", [](const nb::ndarray<> &t) {
        std::vector<int64_t> shape(t.ndim());
        std::copy(t.shape_ptr(), t.shape_ptr() + t.ndim(), shape.begin());
        for (size_t i = 0; i < t.ndim(); ++i)
            if (shape[i] != (int64_t) t.shape(i))
                return false;
        return true;
    });
    m.def("check_stride_ptr", [](const nb::ndarray<> &t) {
        std::vector<int64_t> stride(t.ndim());
        std::copy(t.stride_ptr(), t.stride_ptr() + t.ndim(), stride.begin());
        for (size_t i = 0; i < t.ndim(); ++i)
            if (stride[i] != (int64_t) t.stride(i))
                return false;
        return true;
    });
    // --- Runtime dtype queries ---
    m.def("check_float", [](const nb::ndarray<> &t) {
        return t.dtype() == nb::dtype<float>();
    });
    m.def("check_bool", [](const nb::ndarray<> &t) {
        return t.dtype() == nb::dtype<bool>();
    });
    // --- Compile-time dtype constraints (noconvert: no implicit casts) ---
    m.def("pass_float32", [](const nb::ndarray<float> &) { }, "array"_a.noconvert());
    m.def("pass_float32_const", [](const nb::ndarray<const float> &) { }, "array"_a.noconvert());
    m.def("pass_complex64", [](const nb::ndarray<std::complex<float>> &) { }, "array"_a.noconvert());
    m.def("pass_complex64_const", [](nb::ndarray<const std::complex<float>>) { }, "array"_a.noconvert());
    m.def("pass_uint32", [](const nb::ndarray<uint32_t> &) { }, "array"_a.noconvert());
    m.def("pass_bool", [](const nb::ndarray<bool> &) { }, "array"_a.noconvert());
    // Shape constraints: -1 entries match any extent
    m.def("pass_float32_shaped",
          [](const nb::ndarray<float, nb::shape<3, -1, 4>> &) {}, "array"_a.noconvert());
    m.def("pass_float32_shaped_ordered",
          [](const nb::ndarray<float, nb::c_contig,
                               nb::shape<-1, -1, 4>> &) {}, "array"_a.noconvert());
    // --- Read-only/writable semantics for all parameter passing styles ---
    m.def("check_rw_by_value",
          [](nb::ndarray<> a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/false>(a);
          });
    m.def("check_ro_by_value_ro",
          [](nb::ndarray<nb::ro> a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/false>(a);
          });
    m.def("check_rw_by_value_float64",
          [](nb::ndarray<double, nb::ndim<1>> a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/true>(a);
          });
    m.def("check_ro_by_value_const_float64",
          [](nb::ndarray<const double, nb::ndim<1>> a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/true>(a);
          });
    m.def("check_rw_by_const_ref",
          [](const nb::ndarray<>& a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/false>(a);
          });
    m.def("check_ro_by_const_ref_ro",
          [](const nb::ndarray<nb::ro>& a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/false>(a);
          });
    m.def("check_rw_by_const_ref_float64",
          [](nb::ndarray<double, nb::ndim<1>> a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/true>(a);
          });
    m.def("check_ro_by_const_ref_const_float64",
          [](const nb::ndarray<const double, nb::ndim<1>>& a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/true>(a);
          });
    m.def("check_rw_by_rvalue_ref",
          [](nb::ndarray<>&& a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/false>(a);
          });
    m.def("check_ro_by_rvalue_ref_ro",
          [](nb::ndarray<nb::ro>&& a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/false>(a);
          });
    m.def("check_rw_by_rvalue_ref_float64",
          [](nb::ndarray<double, nb::ndim<1>>&& a) {
              return check_ro</*expect_ro=*/false, /*is_shaped=*/true>(a);
          });
    m.def("check_ro_by_rvalue_ref_const_float64",
          [](nb::ndarray<const double, nb::ndim<1>>&& a) {
              return check_ro</*expect_ro=*/true, /*is_shaped=*/true>(a);
          });
    // --- Overload resolution based on memory order and device ---
    m.def("check_order", [](nb::ndarray<nb::c_contig>) -> char { return 'C'; });
    m.def("check_order", [](nb::ndarray<nb::f_contig>) -> char { return 'F'; });
    m.def("check_order", [](nb::ndarray<>) -> char { return '?'; });
    m.def("make_contig", [](nb::ndarray<nb::c_contig> a) { return a; });
    m.def("check_device", [](nb::ndarray<nb::device::cpu>) -> const char * { return "cpu"; });
    m.def("check_device", [](nb::ndarray<nb::device::cuda>) -> const char * { return "cuda"; });
    // --- Mutating arrays in place from C++ ---
    m.def("initialize",
          [](nb::ndarray<unsigned char, nb::shape<10>, nb::device::cpu> &t) {
              for (size_t i = 0; i < 10; ++i)
                  t(i) = (unsigned char) i;
          });
    m.def("initialize",
          [](nb::ndarray<float, nb::shape<10>, nb::device::cpu> &t) {
              for (size_t i = 0; i < 10; ++i)
                  t(i) = (float) i;
          });
    m.def("initialize",
          [](nb::ndarray<float, nb::shape<10, -1>, nb::device::cpu> &t) {
              int k = 0;
              for (size_t i = 0; i < 10; ++i)
                  for (size_t j = 0; j < t.shape(1); ++j)
                      t(i, j) = (float) k++;
          });
    // Same signature with/without implicit conversions enabled
    m.def(
        "noimplicit",
        [](nb::ndarray<float, nb::c_contig, nb::shape<2, 2>>) { return 0; },
        "array"_a.noconvert());
    m.def(
        "implicit",
        [](nb::ndarray<float, nb::c_contig, nb::shape<2, 2>>) { return 0; },
        "array"_a);
    // Debug helper: dump all metadata of an arbitrary ndarray to stdout
    m.def("inspect_ndarray", [](const nb::ndarray<>& ndarray) {
        printf("Tensor data pointer : %p\n", ndarray.data());
        printf("Tensor dimension : %zu\n", ndarray.ndim());
        for (size_t i = 0; i < ndarray.ndim(); ++i) {
            printf("Tensor dimension [%zu] : %zu\n", i, ndarray.shape(i));
            printf("Tensor stride [%zu] : %zu\n", i, (size_t) ndarray.stride(i));
        }
        printf("Tensor is on CPU? %i\n", ndarray.device_type() == nb::device::cpu::value);
        printf("Device ID = %u\n", ndarray.device_id());
        printf("Tensor dtype check: int16=%i, uint32=%i, float32=%i complex64=%i\n",
               ndarray.dtype() == nb::dtype<int16_t>(),
               ndarray.dtype() == nb::dtype<uint32_t>(),
               ndarray.dtype() == nb::dtype<float>(),
               ndarray.dtype() == nb::dtype<std::complex<float>>()
        );
    });
    m.def("process", [](nb::ndarray<uint8_t, nb::shape<-1, -1, 3>,
                        nb::c_contig, nb::device::cpu> ndarray) {
        // Double brightness of the MxNx3 RGB image
        for (size_t y = 0; y < ndarray.shape(0); ++y)
            for (size_t x = 0; x < ndarray.shape(1); ++x)
                for (size_t ch = 0; ch < 3; ++ch)
                    ndarray(y, x, ch) = (uint8_t) std::min(255, ndarray(y, x, ch) * 2);
    });
    m.def("destruct_count", []() { return destruct_count; });
    // --- Returning ndarrays: ownership via capsule deleters ---
    m.def("return_no_framework", []() {
        float *f = new float[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        // Deleter runs when the last consumer releases the array
        nb::capsule deleter(f, [](void *data) noexcept {
            destruct_count++;
            delete[] (float *) data;
        });
        return nb::ndarray<float, nb::shape<2, 4>>(f, 2, shape, deleter);
    });
    m.def("passthrough", [](nb::ndarray<> a) { return a; }, nb::rv_policy::none);
    m.def("passthrough_copy", [](nb::ndarray<> a) { return a; }, nb::rv_policy::copy);
    m.def("passthrough_arg_none", [](nb::ndarray<> a) { return a; },
          nb::arg().none(), nb::rv_policy::none);
    m.def("ret_numpy", []() {
        float *f = new float[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        nb::capsule deleter(f, [](void *data) noexcept {
            destruct_count++;
            delete[] (float *) data;
        });
        return nb::ndarray<nb::numpy, float, nb::shape<2, 4>>(f, 2, shape,
                                                              deleter);
    });
    // Static storage (f_global) returned by reference; owner is an empty handle
    m.def("ret_numpy_const_ref", []() {
        size_t shape[2] = { 2, 4 };
        return nb::ndarray<nb::numpy, const float, nb::shape<2, 4>, nb::c_contig>(f_global, 2, shape, nb::handle());
    }, nb::rv_policy::reference);
    m.def("ret_numpy_const_ref_f", []() {
        size_t shape[2] = { 2, 4 };
        return nb::ndarray<nb::numpy, const float, nb::shape<2, 4>, nb::f_contig>(f_global, 2, shape, nb::handle());
    }, nb::rv_policy::reference);
    m.def("ret_numpy_const", []() {
        return nb::ndarray<nb::numpy, const float, nb::shape<2, 4>>(f_global, { 2, 4 }, nb::handle());
    });
    m.def("ret_pytorch", []() {
        float *f = new float[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        nb::capsule deleter(f, [](void *data) noexcept {
            destruct_count++;
            delete[] (float *) data;
        });
        return nb::ndarray<nb::pytorch, float, nb::shape<2, 4>>(f, 2, shape,
                                                                deleter);
    });
    m.def("ret_memview", []() {
        double *d = new double[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        nb::capsule deleter(d, [](void *data) noexcept {
            destruct_count++;
            delete[] (double *) data;
        });
        return nb::ndarray<nb::memview, double, nb::shape<2, 4>>(d, 2, shape,
                                                                 deleter);
    });
    m.def("ret_array_api", []() {
        double *d = new double[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        nb::capsule deleter(d, [](void *data) noexcept {
            destruct_count++;
            delete[] (double *) data;
        });
        return nb::ndarray<nb::array_api, double, nb::shape<2, 4>>(d, 2, shape,
                                                                   deleter);
    });
    // Rank-0 (scalar) array
    m.def("ret_array_scalar", []() {
        float* f = new float{ 1.0f };
        nb::capsule deleter(f, [](void* data) noexcept {
            destruct_count++;
            delete (float *) data;
        });
        return nb::ndarray<nb::numpy, float>(f, 0, nullptr, deleter);
    });
    // Accept-only bindings used for degenerate (size-0/1 axis) shape tests
    m.def("noop_3d_c_contig",
          [](nb::ndarray<float, nb::ndim<3>, nb::c_contig>) { return; });
    m.def("noop_2d_f_contig",
          [](nb::ndarray<float, nb::ndim<2>, nb::f_contig>) { return; });
    m.def("accept_rw", [](nb::ndarray<float, nb::shape<2>> a) { return a(0); });
    m.def("accept_ro", [](nb::ndarray<const float, nb::shape<2>> a) { return a(0); });
    m.def("check", [](nb::handle h) { return nb::ndarray_check(h); });
    // A (2, 1) array is simultaneously C- and F-contiguous
    m.def("accept_np_both_true_contig_a",
          [](nb::ndarray<float, nb::numpy, nb::shape<2, 1>, nb::any_contig> a) { return a(0, 0); });
    m.def("accept_np_both_true_contig_c",
          [](nb::ndarray<float, nb::numpy, nb::shape<2, 1>, nb::c_contig> a) { return a(0, 0); });
    m.def("accept_np_both_true_contig_f",
          [](nb::ndarray<float, nb::numpy, nb::shape<2, 1>, nb::f_contig> a) { return a(0, 0); });
    // Returns views into instance-owned storage; f1 has no owner, f2 owns via
    // 'this', f3 takes an explicit owner argument. The destructor bumps
    // destruct_count so Python can observe instance lifetime.
    struct Cls {
        auto f1() { return nb::ndarray<nb::numpy, float>(data, { 10 }, nb::handle()); }
        auto f2() { return nb::ndarray<nb::numpy, float>(data, { 10 }, nb::cast(this, nb::rv_policy::none)); }
        auto f3(nb::handle owner) { return nb::ndarray<nb::numpy, float>(data, { 10 }, owner); }
        ~Cls() {
            destruct_count++;
        }
        float data[10] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    };
    nb::class_<Cls>(m, "Cls")
        .def(nb::init<>())
        .def("f1", &Cls::f1)
        .def("f2", &Cls::f2)
        .def("f1_ri", &Cls::f1, nb::rv_policy::reference_internal)
        .def("f2_ri", &Cls::f2, nb::rv_policy::reference_internal)
        .def("f3_ri", &Cls::f3, nb::rv_policy::reference_internal);
    // --- View-based element access (nb::ndarray<..>::view()) ---
    m.def("fill_view_1", [](nb::ndarray<> x) {
        if (x.ndim() == 2 && x.dtype() == nb::dtype<float>()) {
            auto v = x.view<float, nb::ndim<2>>();
            for (size_t i = 0; i < v.shape(0); i++)
                for (size_t j = 0; j < v.shape(1); j++)
                    v(i, j) *= 2;
        }
    }, "x"_a.noconvert());
    m.def("fill_view_2", [](nb::ndarray<float, nb::ndim<2>, nb::device::cpu> x) {
        auto v = x.view();
        for (size_t i = 0; i < v.shape(0); ++i)
            for (size_t j = 0; j < v.shape(1); ++j)
                v(i, j) = (float) (i * 10 + j);
    }, "x"_a.noconvert());
    m.def("fill_view_3", [](nb::ndarray<float, nb::shape<3, 4>, nb::c_contig, nb::device::cpu> x) {
        auto v = x.view();
        for (size_t i = 0; i < v.shape(0); ++i)
            for (size_t j = 0; j < v.shape(1); ++j)
                v(i, j) = (float) (i * 10 + j);
    }, "x"_a.noconvert());
    m.def("fill_view_4", [](nb::ndarray<float, nb::shape<3, 4>, nb::f_contig, nb::device::cpu> x) {
        auto v = x.view();
        for (size_t i = 0; i < v.shape(0); ++i)
            for (size_t j = 0; j < v.shape(1); ++j)
                v(i, j) = (float) (i * 10 + j);
    }, "x"_a.noconvert());
    m.def("fill_view_5", [](nb::ndarray<std::complex<float>, nb::shape<2, 2>, nb::c_contig, nb::device::cpu> x) {
        auto v = x.view();
        for (size_t i = 0; i < v.shape(0); ++i)
            for (size_t j = 0; j < v.shape(1); ++j)
                v(i, j) *= std::complex<float>(-1.0f, 2.0f);
    }, "x"_a.noconvert());
    // Reinterpret the 2x2 contiguous array as a flat shape-4 view
    m.def("fill_view_6", [](nb::ndarray<std::complex<float>, nb::shape<2, 2>, nb::c_contig, nb::device::cpu> x) {
        auto v = x.view<nb::shape<4>>();
        for (size_t i = 0; i < v.shape(0); ++i)
            v(i) = -v(i);
    }, "x"_a.noconvert());
#if defined(__aarch64__) || defined(__AVX512FP16__)
    // Half precision (uses the dtype_traits<_Float16> specialization above)
    m.def("ret_numpy_half", []() {
        _Float16 *f = new _Float16[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t shape[2] = { 2, 4 };
        nb::capsule deleter(f, [](void *data) noexcept {
            destruct_count++;
            delete[] (_Float16*) data;
        });
        return nb::ndarray<nb::numpy, _Float16, nb::shape<2, 4>>(f, 2, shape,
                                                                 deleter);
    });
#endif
    // Converting between differently-typed ndarray instances
    m.def("cast", [](bool b) -> nb::ndarray<nb::numpy> {
        using Ret = nb::ndarray<nb::numpy>;
        if (b)
            return Ret(nb::ndarray<nb::numpy, float, nb::shape<>>(f_global, 0, nullptr, nb::handle()));
        else
            return Ret(nb::ndarray<nb::numpy, int, nb::shape<>>(i_global, 0, nullptr, nb::handle()));
    });
    // issue #365
    m.def("set_item",
          [](nb::ndarray<double, nb::ndim<1>, nb::c_contig> data, uint32_t) {
              data(0) = 123;
          });
    m.def("set_item",
          [](nb::ndarray<std::complex<double>, nb::ndim<1>, nb::c_contig> data, uint32_t) {
              data(0) = 123;
          });
    // issue #709
    m.def("test_implicit_conversion",
          [](nb::ndarray<nb::ro, nb::c_contig, nb::device::cpu> arg) {
              return arg;
          },
          nb::arg());
    // Shape/strides inferred from the static array type of f_global
    m.def("ret_infer_c",
          []() { return nb::ndarray<float, nb::shape<2, 4>, nb::numpy, nb::c_contig>(f_global); });
    m.def("ret_infer_f",
          []() { return nb::ndarray<float, nb::shape<2, 4>, nb::numpy, nb::f_contig>(f_global); });
    // Explicit .cast() with a chosen rv_policy, vs. policy set on the binding
    using Array = nb::ndarray<float, nb::numpy, nb::shape<4, 4>, nb::f_contig>;
    struct Matrix4f {
        float m[4][4];
        Array data() { return Array(m); }
        auto data_ref() { return Array(m).cast(nb::rv_policy::reference_internal, nb::find(this)); }
        auto data_copy() { return Array(m).cast(nb::rv_policy::copy); }
    };
    nb::class_<Matrix4f>(m, "Matrix4f")
        .def(nb::init<>())
        .def("data", &Matrix4f::data, nb::rv_policy::reference_internal)
        .def("data_ref", &Matrix4f::data_ref)
        .def("data_copy", &Matrix4f::data_copy);
    // Returning arrays backed by stack storage (must be copied on cast)
    using Vector3f = nb::ndarray<float, nb::numpy, nb::shape<3>>;
    m.def("ret_from_stack_1", []() {
        float f[] { 1, 2, 3 };
        return nb::cast(Vector3f(f));
    });
    m.def("ret_from_stack_2", []() {
        float f[] { 1, 2, 3 };
        return Vector3f(f).cast();
    });
    // Fix issue reported in discussion #930
    struct Wrapper {
        nb::ndarray<float> value;
        static int tp_traverse(PyObject* self, visitproc visit, void* arg) {
            Wrapper* w = nb::inst_ptr<Wrapper>(self);
            nb::handle value = nb::find(w->value);
            Py_VISIT(value.ptr());
            Py_VISIT(Py_TYPE(self));
            return 0;
        }
        static int tp_clear(PyObject* self) {
            Wrapper* w = nb::inst_ptr<Wrapper>(self);
            w->value = {};
            return 0;
        }
    };
    PyType_Slot wrapper_slots[] = {
        {Py_tp_traverse, (void*)Wrapper::tp_traverse},
        {Py_tp_clear, (void*)Wrapper::tp_clear},
        {0, 0},
    };
    nb::class_<Wrapper>(m, "Wrapper", nb::type_slots(wrapper_slots))
        .def(nb::init<nb::ndarray<float>>())
        .def_rw("value", &Wrapper::value);
    // Example from docs/ndarray.rst in section "Array libraries"
    class MyArray {
        double* d;
    public:
        MyArray() { d = new double[5] { 0.0, 1.0, 2.0, 3.0, 4.0 }; }
        ~MyArray() { delete[] d; }
        double* data() const { return d; }
        void mutate() { for (int i = 0; i < 5; ++i) d[i] += 0.5; }
    };
    nb::class_<MyArray>(m, "MyArray")
        .def(nb::init<>())
        .def("mutate", &MyArray::mutate)
        .def("__dlpack__", [](nb::pointer_and_handle<MyArray> self,
                              nb::kwargs kwargs) {
            using array_api_t = nb::ndarray<nb::array_api, double>;
            nb::object aa = nb::cast(array_api_t(self.p->data(), {5}),
                                     nb::rv_policy::reference_internal,
                                     self.h);
            return aa.attr("__dlpack__")(**kwargs);
        })
        .def("__dlpack_device__", [](nb::handle /*self*/) {
            return std::make_pair(nb::device::cpu::value, 0);
        })
        .def("array_api", [](const MyArray& self) {
            return nb::ndarray<nb::array_api, double>(self.data(), {5});
        }, nb::rv_policy::reference_internal);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_ndarray.py | Python | import test_ndarray_ext as t
import pytest
import warnings
import importlib
from common import collect, skip_on_pypy
# Optional-dependency guards: each needs_* name is an identity decorator when
# the corresponding array library imports successfully, and a pytest skip
# marker otherwise. 'except Exception' (rather than a bare 'except') is used
# so that KeyboardInterrupt/SystemExit still propagate, while broken installs
# (e.g. a torch build raising OSError on import) still lead to a skip.
try:
    import numpy as np
    def needs_numpy(x):
        return x
except Exception:
    needs_numpy = pytest.mark.skip(reason="NumPy is required")

try:
    import torch
    def needs_torch(x):
        return x
except Exception:
    needs_torch = pytest.mark.skip(reason="PyTorch is required")

try:
    import cupy as cp
    def needs_cupy(x):
        return x
except Exception:
    needs_cupy = pytest.mark.skip(reason="CuPy is required")
@needs_numpy
def test01_metadata():
    """Metadata accessors work for NumPy arrays, DLPack capsules, and None."""
    a = np.zeros(shape=())
    assert t.get_shape(a) == []
    if hasattr(a, '__dlpack__'):
        b = a.__dlpack__()
        assert t.get_shape(b) == []
    else:
        b = None
    with pytest.raises(TypeError) as excinfo:
        # Capsule can only be consumed once
        assert t.get_shape(b) == []
    assert 'incompatible function arguments' in str(excinfo.value)
    a = np.zeros(shape=(3, 4, 5), dtype=np.float64)
    assert t.get_is_valid(a)
    assert t.get_shape(a) == [3, 4, 5]
    assert t.get_size(a) == 60
    assert t.get_nbytes(a) == 60*8
    assert t.get_itemsize(a) == 8
    assert t.check_shape_ptr(a)
    assert t.check_stride_ptr(a)
    if hasattr(a, '__dlpack__'):
        assert t.get_shape(a.__dlpack__()) == [3, 4, 5]
    assert not t.check_float(np.array([1], dtype=np.bool_)) and \
           not t.check_float(np.array([1], dtype=np.uint32)) and \
               t.check_float(np.array([1], dtype=np.float32))
    assert not t.check_bool(np.array([1], dtype=np.uint32)) and \
           not t.check_bool(np.array([1], dtype=np.float32)) and \
               t.check_bool(np.array([1], dtype=np.bool_))
    # None (accepted via .none()) reports an invalid array with zero extents
    assert not t.get_is_valid(None)
    assert t.get_size(None) == 0
    assert t.get_nbytes(None) == 0
    assert t.get_itemsize(None) == 0
def test02_docstr():
    """Synthesized signatures render dtype/shape/order/device constraints."""
    expected = [
        (t.pass_uint32, "pass_uint32(array: ndarray[dtype=uint32]) -> None"),
        (t.get_shape, "get_shape(array: ndarray[writable=False]) -> list"),
        (t.pass_float32, "pass_float32(array: ndarray[dtype=float32]) -> None"),
        (t.pass_complex64, "pass_complex64(array: ndarray[dtype=complex64]) -> None"),
        (t.pass_bool, "pass_bool(array: ndarray[dtype=bool]) -> None"),
        (t.pass_float32_shaped,
         "pass_float32_shaped(array: ndarray[dtype=float32, shape=(3, *, 4)]) -> None"),
        (t.pass_float32_shaped_ordered,
         "pass_float32_shaped_ordered(array: ndarray[dtype=float32, shape=(*, *, 4), order='C']) -> None"),
        (t.check_device,
         "check_device(arg: ndarray[device='cpu'], /) -> str\n"
         "check_device(arg: ndarray[device='cuda'], /) -> str"),
    ]
    for func, doc in expected:
        assert func.__doc__ == doc
@needs_numpy
def test03_constrain_dtype():
    """dtype-constrained bindings accept matching arrays and reject others."""
    a_u32 = np.array([1], dtype=np.uint32)
    a_f32 = np.array([1], dtype=np.float32)
    a_cf64 = np.array([1+1j], dtype=np.complex64)
    a_bool = np.array([1], dtype=np.bool_)
    t.pass_uint32(a_u32)
    t.pass_float32(a_f32)
    t.pass_complex64(a_cf64)
    t.pass_complex64_const(a_cf64)
    t.pass_bool(a_bool)
    # Read-only arrays are fine for the 'const' parameter variants
    a_f32_const = a_f32.copy()
    a_f32_const.flags.writeable = False
    t.pass_float32_const(a_f32_const)
    a_cf64_const = a_cf64.copy()
    a_cf64_const.flags.writeable = False
    t.pass_complex64_const(a_cf64_const)
    # Mismatched dtypes fail (no implicit conversion: bindings use noconvert)
    with pytest.raises(TypeError) as excinfo:
        t.pass_uint32(a_f32)
    assert 'incompatible function arguments' in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        t.pass_float32(a_u32)
    assert 'incompatible function arguments' in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        t.pass_complex64(a_u32)
    assert 'incompatible function arguments' in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        t.pass_bool(a_u32)
    assert 'incompatible function arguments' in str(excinfo.value)
@needs_numpy
def test04_constrain_shape():
    """shape<3, -1, 4> accepts matching shapes (any middle extent, including
    zero) and rejects wrong rank or wrong fixed extents."""
    for good in ((3, 0, 4), (3, 5, 4)):
        t.pass_float32_shaped(np.zeros(good, dtype=np.float32))
    for bad in ((3, 5), (2, 5, 4), (3, 5, 6), (3, 5, 4, 6)):
        with pytest.raises(TypeError):
            t.pass_float32_shaped(np.zeros(bad, dtype=np.float32))
def test05_bytes():
    """bytearray/bytes objects are accepted as 1D uint8 ndarrays."""
    a = bytearray(range(10))
    assert t.get_is_valid(a)
    assert t.get_shape(a) == [10]
    assert t.get_size(a) == 10
    assert t.get_nbytes(a) == 10
    assert t.get_itemsize(a) == 1
    assert t.check_order(a) == 'C'
    b = b'hello' # immutable
    assert t.get_is_valid(b)
    assert t.get_shape(b) == [5]
@needs_numpy
def test06_constrain_order_numpy():
    """check_order() reports 'C'/'F' for contiguous arrays, '?' for slices."""
    for order in ('C', 'F'):
        contiguous = np.zeros((3, 5, 4, 6), order=order)
        assert t.check_order(contiguous) == order
        # Slicing along axis 1 breaks contiguity in either layout
        assert t.check_order(contiguous[:, 2, :, :]) == '?'
@needs_torch
@pytest.mark.filterwarnings
def test07_constrain_order_pytorch():
    """Order/device detection works for PyTorch tensors as well."""
    try:
        c = torch.zeros(3, 5)
        c.__dlpack__()
    except:
        pytest.skip('pytorch is missing')
    # .t().contiguous().t() yields a column-major ('F' order) tensor
    f = c.t().contiguous().t()
    assert t.check_order(c) == 'C'
    assert t.check_order(f) == 'F'
    assert t.check_order(c[:, 2:5]) == '?'
    assert t.check_order(f[1:3, :]) == '?'
    assert t.check_device(torch.zeros(3, 5)) == 'cpu'
    if torch.cuda.is_available():
        assert t.check_device(torch.zeros(3, 5, device='cuda')) == 'cuda'
def test08_write_bytes_from_cpp():
    """initialize() writes into a mutable bytearray but rejects bytes."""
    buf = bytearray(10)
    t.initialize(buf)
    assert buf == bytearray(range(10))
    immutable = b'helloHello' # ten immutable bytes
    with pytest.raises(TypeError) as excinfo:
        t.initialize(immutable)
    assert 'incompatible function arguments' in str(excinfo.value)
@needs_numpy
def test09_write_numpy_from_cpp():
    """The 1D and 2D initialize() overloads fill float32 arrays in place."""
    vec = np.zeros(10, dtype=np.float32)
    t.initialize(vec)
    expected_vec = np.arange(10, dtype=np.float32)
    assert np.all(vec == expected_vec)
    mat = np.zeros((10, 3), dtype=np.float32)
    t.initialize(mat)
    expected_mat = np.arange(30, dtype=np.float32).reshape(10, 3)
    assert np.all(mat == expected_mat)
@needs_numpy
def test10_implicit_conversion():
    """implicit() converts dtype/layout as needed; noimplicit() rejects."""
    t.implicit(np.zeros((2, 2), dtype=np.uint32))
    t.implicit(np.zeros((2, 2, 10), dtype=np.float32)[:, :, 4])
    t.implicit(np.zeros((2, 2, 10), dtype=np.uint32)[:, :, 4])
    t.implicit(np.zeros((2, 2, 10), dtype=np.bool_)[:, :, 4])
    with pytest.raises(TypeError) as excinfo:
        t.noimplicit(np.zeros((2, 2), dtype=np.bool_))
    with pytest.raises(TypeError) as excinfo:
        t.noimplicit(np.zeros((2, 2), dtype=np.uint32))
    with pytest.raises(TypeError) as excinfo:
        t.noimplicit(np.zeros((2, 2, 10), dtype=np.float32)[:, :, 4])
@needs_torch
def test11_implicit_conversion_pytorch():
    """Same implicit/noimplicit behavior with PyTorch tensors."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            c = torch.zeros(3, 5)
            c.__dlpack__()
        except:
            pytest.skip('pytorch is missing')
        t.implicit(torch.zeros(2, 2, dtype=torch.int32))
        t.implicit(torch.zeros(2, 2, 10, dtype=torch.float32)[:, :, 4])
        t.implicit(torch.zeros(2, 2, 10, dtype=torch.int32)[:, :, 4])
        with pytest.raises(TypeError) as excinfo:
            t.noimplicit(torch.zeros(2, 2, dtype=torch.int32))
        with pytest.raises(TypeError) as excinfo:
            t.noimplicit(torch.zeros(2, 2, 10, dtype=torch.float32)[:, :, 4])
@needs_numpy
def test12_process_image():
    """process() doubles every channel of an RGB image in place."""
    img = np.arange(120, dtype=np.ubyte).reshape(8, 5, 3)
    t.process(img)
    doubled = np.arange(0, 240, 2, dtype=np.ubyte).reshape(8, 5, 3)
    assert np.all(img == doubled)
def test13_destroy_capsule():
    """An unconsumed DLPack capsule frees its storage when collected."""
    collect()
    dc = t.destruct_count()
    capsule = t.return_no_framework()
    assert 'dltensor' in repr(capsule)
    assert 'versioned' not in repr(capsule)
    assert t.destruct_count() == dc
    del capsule
    collect()
    # The capsule's deleter must have run exactly once
    assert t.destruct_count() - dc == 1
@needs_numpy
def test14_consume_numpy():
    """NumPy consumes a capsule we produced; storage freed with the array."""
    collect()
    # Minimal object exposing __dlpack__, as required by np.from_dlpack
    class wrapper:
        def __init__(self, value):
            self.value = value
        def __dlpack__(self):
            return self.value
    dc = t.destruct_count()
    capsule = t.return_no_framework()
    if hasattr(np, '_from_dlpack'):
        x = np._from_dlpack(wrapper(capsule))
    elif hasattr(np, 'from_dlpack'):
        x = np.from_dlpack(wrapper(capsule))
    else:
        pytest.skip('your version of numpy is too old')
    del capsule
    collect()
    assert x.shape == (2, 4)
    assert np.all(x == [[1, 2, 3, 4], [5, 6, 7, 8]])
    # Ownership moved into 'x'; nothing freed until the array dies
    assert t.destruct_count() == dc
    del x
    collect()
    assert t.destruct_count() - dc == 1
@needs_numpy
def test15_passthrough_numpy():
    """passthrough() returns the identical object; None requires .none()."""
    a = t.ret_numpy()
    b = t.passthrough(a)
    assert a is b
    a = np.array([1, 2, 3])
    b = t.passthrough(a)
    assert a is b
    a = None
    with pytest.raises(TypeError) as excinfo:
        b = t.passthrough(a)
    assert 'incompatible function arguments' in str(excinfo.value)
    b = t.passthrough_arg_none(a)
    assert a is b
@needs_torch
def test16_passthrough_torch():
    """Same passthrough identity semantics with PyTorch tensors."""
    a = t.ret_pytorch()
    b = t.passthrough(a)
    assert a is b
    a = torch.tensor([1, 2, 3])
    b = t.passthrough(a)
    assert a is b
    a = None
    with pytest.raises(TypeError) as excinfo:
        b = t.passthrough(a)
    assert 'incompatible function arguments' in str(excinfo.value)
    b = t.passthrough_arg_none(a)
    assert a is b
@needs_numpy
def test17_return_numpy():
    """ret_numpy() transfers ownership; the deleter runs when the array dies."""
    collect()
    dc = t.destruct_count()
    x = t.ret_numpy()
    assert x.shape == (2, 4)
    assert x.flags.writeable
    assert np.all(x == [[1, 2, 3, 4], [5, 6, 7, 8]])
    del x
    collect()
    assert t.destruct_count() - dc == 1
@needs_torch
def test18_return_pytorch():
    """ret_pytorch() transfers ownership; deleter runs when the tensor dies."""
    try:
        c = torch.zeros(3, 5)
    except:
        pytest.skip('pytorch is missing')
    collect()
    dc = t.destruct_count()
    x = t.ret_pytorch()
    assert x.shape == (2, 4)
    assert torch.all(x == torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]))
    del x
    collect()
    assert t.destruct_count() - dc == 1
@skip_on_pypy
def test19_return_memview():
    """ret_memview() yields a 2D memoryview that frees its buffer on release."""
    collect()
    dc = t.destruct_count()
    x = t.ret_memview()
    assert isinstance(x, memoryview)
    assert x.itemsize == 8
    assert x.ndim == 2
    assert x.shape == (2, 4)
    assert x.strides == (32, 8) # in bytes
    assert x.tolist() == [[1, 2, 3, 4], [5, 6, 7, 8]]
    del x
    collect()
    assert t.destruct_count() - dc == 1
@needs_numpy
def test20_return_array_api():
    """Array-API wrapper: __dlpack__ versioning, buffer protocol, NumPy import."""
    collect()
    dc = t.destruct_count()
    obj = t.ret_array_api()
    assert obj.__dlpack_device__() == (1, 0) # (type == CPU, id == 0)
    # Unversioned capsule unless the caller requests max_version >= (1, 0)
    capsule = obj.__dlpack__()
    assert 'dltensor' in repr(capsule)
    assert 'versioned' not in repr(capsule)
    capsule = obj.__dlpack__(max_version=None)
    assert 'dltensor' in repr(capsule)
    assert 'versioned' not in repr(capsule)
    capsule = obj.__dlpack__(max_version=(0, 0)) # (major == 0, minor == 0)
    assert 'dltensor' in repr(capsule)
    assert 'versioned' not in repr(capsule)
    capsule = obj.__dlpack__(max_version=(1, 0)) # (major == 1, minor == 0)
    assert 'dltensor_versioned' in repr(capsule)
    with pytest.raises(TypeError) as excinfo:
        capsule = obj.__dlpack__(0)
    assert 'does not accept positional arguments' in str(excinfo.value)
    # The last live capsule keeps the storage alive after 'obj' is gone
    del obj
    collect()
    assert t.destruct_count() == dc
    del capsule
    collect()
    assert t.destruct_count() - dc == 1
    dc += 1
    obj = t.ret_array_api() # obj also supports the buffer protocol
    mv = memoryview(obj)
    assert mv.tolist() == [[1, 2, 3, 4], [5, 6, 7, 8]]
    del obj
    collect()
    assert t.destruct_count() == dc
    del mv
    collect()
    assert t.destruct_count() - dc == 1
    dc += 1
    if (hasattr(np, '__array_api_version__') and
        np.__array_api_version__ >= '2024'):
        obj = t.ret_array_api()
        x = np.from_dlpack(obj)
        del obj
        collect()
        assert t.destruct_count() == dc
        assert x.shape == (2, 4)
        assert x.flags.writeable
        assert np.all(x == [[1, 2, 3, 4], [5, 6, 7, 8]])
        del x
        collect()
        assert t.destruct_count() - dc == 1
@needs_numpy
def test21_return_array_scalar():
    """A rank-0 ndarray round-trips as a NumPy scalar array and is freed."""
    collect()
    dc = t.destruct_count()
    x = t.ret_array_scalar()
    assert np.array_equal(x, np.array(1))
    del x
    collect()
    assert t.destruct_count() - dc == 1
# See PR #162
@needs_torch
def test22_single_and_empty_dimension_pytorch():
    """3D c-contig bindings accept tensors with size-0 or size-1 axes."""
    shapes = [
        (1, 100, 1025),
        (100, 1, 1025),
        (0, 100, 1025),
        (100, 0, 1025),
        (100, 1025, 0),
        (100, 0, 0),
        (0, 0, 0),
    ]
    for shape in shapes:
        t.noop_3d_c_contig(torch.ones(shape, dtype=torch.float32))
# See PR #162
@needs_numpy
def test23_single_and_empty_dimension_numpy():
    """3D c-contig bindings accept NumPy arrays with size-0 or size-1 axes."""
    shapes = [
        (1, 100, 1025),
        (100, 1, 1025),
        (0, 100, 1025),
        (100, 0, 1025),
        (100, 1025, 0),
        (100, 0, 0),
        (0, 0, 0),
    ]
    for shape in shapes:
        t.noop_3d_c_contig(np.ones(shape, dtype=np.float32))
# See PR #162
@needs_torch
def test24_single_and_empty_dimension_fortran_order_pytorch():
    """2D f-contig bindings accept F-order tensors with size-0/size-1 axes."""
    for shape in ((0, 100), (100, 0), (1, 100), (100, 1)):
        # This idiom creates a pytorch 2D tensor in column major ('F') ordering
        tensor = torch.ones(shape, dtype=torch.float32).t().contiguous().t()
        t.noop_2d_f_contig(tensor)
@needs_numpy
def test25_ro_array():
    """Read-only arrays pass const bindings but are rejected by mutable ones."""
    a = np.array([1, 2], dtype=np.float32)
    assert t.accept_ro(a) == 1
    assert t.accept_rw(a) == 1
    a.setflags(write=False)
    assert t.accept_ro(a) == 1
    with pytest.raises(TypeError) as excinfo:
        t.accept_rw(a)
    assert 'incompatible function arguments' in str(excinfo.value)
@needs_numpy
def test26_return_ro():
    """Const ndarrays come back as non-writable NumPy arrays (C and F order)."""
    x = t.ret_numpy_const_ref()
    y = t.ret_numpy_const_ref_f()
    assert t.ret_numpy_const_ref.__doc__ == 'ret_numpy_const_ref() -> numpy.ndarray[dtype=float32, shape=(2, 4), order=\'C\', writable=False]'
    assert t.ret_numpy_const_ref_f.__doc__ == 'ret_numpy_const_ref_f() -> numpy.ndarray[dtype=float32, shape=(2, 4), order=\'F\', writable=False]'
    assert x.shape == (2, 4)
    assert y.shape == (2, 4)
    assert not x.flags.writeable
    assert not y.flags.writeable
    # Same buffer, interpreted row-major (x) vs. column-major (y)
    assert np.all(x == [[1, 2, 3, 4], [5, 6, 7, 8]])
    assert np.all(y == [[1, 3, 5, 7], [2, 4, 6, 8]])
    with pytest.raises(ValueError) as excinfo:
        x[0,0] =1
    assert 'read-only' in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        y[0,0] =1
    assert 'read-only' in str(excinfo.value)
def test27_python_array():
    """array.array and memoryview work with the read-only/writable checkers;
    writable checkers mutate elements 1/2/4 in place (see check_ro in C++)."""
    import array
    a = array.array('d', [0, 0, 0, 3.14159, 0])
    assert t.check(a)
    assert t.check_rw_by_value(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_value_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_value_ro(a)
    assert t.check_ro_by_value_const_float64(a)
    # Reset the mutated slots and repeat via a memoryview of the same buffer
    a[1] = 0.1
    a[2] = 0.2
    a[4] = 0.4
    mv = memoryview(a)
    assert t.check(mv)
    assert t.check_rw_by_value(mv)
    assert a[1] == 1.414214
    assert t.check_rw_by_value_float64(mv)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_value_ro(mv)
    assert t.check_ro_by_value_const_float64(mv)
    x = t.passthrough(a)
    assert x is a
def test28_check_bytearray():
    """ndarray_check recognizes bytearrays and memoryviews over them."""
    buf = bytearray(b'xyz')
    assert t.check(buf)
    view = memoryview(buf)
    assert t.check(view)
@needs_numpy
def test29_check_numpy():
    """ndarray_check recognizes NumPy arrays."""
    arr = np.zeros(1)
    assert t.check(arr)
@needs_torch
def test30_check_torch():
    """ndarray_check recognizes PyTorch tensors."""
    tensor = torch.zeros((1))
    assert t.check(tensor)
@needs_numpy
def test31_rv_policy():
    """rv policies: 'reference' shares storage, default copies per call,
    'none' passes through identically, 'copy' duplicates the buffer."""
    def p(a):
        # Address of the underlying data buffer
        return a.__array_interface__['data']
    x1 = t.ret_numpy_const_ref()
    x2 = t.ret_numpy_const_ref()
    y1 = t.ret_numpy_const()
    y2 = t.ret_numpy_const()
    z1 = t.passthrough(y1)
    z2 = t.passthrough(y2)
    q1 = t.passthrough_copy(y1)
    q2 = t.passthrough_copy(y2)
    assert p(x1) == p(x2)
    assert p(y1) != p(y2)
    assert z1 is y1
    assert z2 is y2
    assert q1 is not y1
    assert q2 is not y2
    assert p(q1) != p(y1)
    assert p(q2) != p(y2)
@needs_numpy
def test32_reference_internal():
    # reference_internal policy: views returned by Cls methods keep the
    # owning instance alive; destruction is observed (via destruct_count)
    # only after the last view has been released.
    collect()
    dc = t.destruct_count()
    c = t.Cls()
    v1_a = c.f1()
    v1_b = c.f1()
    v2_a = c.f2()
    v2_b = c.f2()
    del c
    assert np.all(v1_a == np.arange(10, dtype=np.float32))
    assert np.all(v1_b == np.arange(10, dtype=np.float32))
    # f1 views have independent storage: increments don't interfere
    v1_a += 1
    v1_b += 2
    assert np.all(v1_a == np.arange(10, dtype=np.float32) + 1)
    assert np.all(v1_b == np.arange(10, dtype=np.float32) + 2)
    del v1_a
    del v1_b
    assert np.all(v2_a == np.arange(10, dtype=np.float32))
    assert np.all(v2_b == np.arange(10, dtype=np.float32))
    # f2 views share storage: both increments are visible in both views
    v2_a += 1
    v2_b += 2
    assert np.all(v2_a == np.arange(10, dtype=np.float32) + 3)
    assert np.all(v2_b == np.arange(10, dtype=np.float32) + 3)
    del v2_a
    collect()
    assert t.destruct_count() == dc
    del v2_b
    collect()
    dc += 1
    assert t.destruct_count() == dc
    # Same lifetime checks for the *_ri variants
    for i in range(2):
        c2 = t.Cls()
        if i == 0:
            v3_a = c2.f1_ri()
            v3_b = c2.f1_ri()
        else:
            v3_a = c2.f2_ri()
            v3_b = c2.f2_ri()
        del c2
        assert np.all(v3_a == np.arange(10, dtype=np.float32))
        assert np.all(v3_b == np.arange(10, dtype=np.float32))
        v3_a += 1
        v3_b += 2
        assert np.all(v3_a == np.arange(10, dtype=np.float32) + 3)
        assert np.all(v3_b == np.arange(10, dtype=np.float32) + 3)
        del v3_a
        collect()
        assert t.destruct_count() == dc
        del v3_b
        collect()
        dc += 1
        assert t.destruct_count() == dc
    # reference_internal can't be applied when the ndarray already has an owner
    c3 = t.Cls()
    c3_t = (c3,)
    with pytest.raises(RuntimeError) as excinfo:
        c3.f3_ri(c3_t)
    msg = 'nanobind::detail::ndarray_export(): reference_internal policy cannot be applied (ndarray already has an owner)'
    assert msg in str(excinfo.value)
@needs_numpy
def test33_force_contig_numpy():
    """make_contig returns its argument unchanged when it is already
    contiguous and a matching copy otherwise."""
    mat = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert t.make_contig(mat) is mat
    transposed = mat.T
    contig = t.make_contig(transposed)
    assert contig is not transposed
    assert np.all(contig == transposed)
@needs_torch
@pytest.mark.filterwarnings
def test34_force_contig_pytorch():
    # NOTE(review): pytest.mark.filterwarnings is applied without a filter
    # string, which makes it a no-op -- confirm whether an "ignore" argument
    # was intended here.
    # make_contig: identity for contiguous tensors, copy for transposed ones.
    a = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    b = t.make_contig(a)
    assert b is a
    a = a.T
    b = t.make_contig(a)
    assert b is not a
    assert torch.all(b == a)
@needs_numpy
def test35_view():
    # 1: fill_view_1 doubles a float32 array in place; a float64 input is
    # converted to a temporary, so the write does not propagate back.
    x1 = np.array([[1,2],[3,4]], dtype=np.float32)
    x2 = np.array([[1,2],[3,4]], dtype=np.float64)
    assert np.allclose(x1, x2)
    t.fill_view_1(x1)
    assert np.allclose(x1, x2*2)
    t.fill_view_1(x2)
    assert np.allclose(x1, x2*2)
    # 2: the fill_view_2/3/4 variants must produce identical results for
    # both C- and F-ordered inputs.
    x1 = np.zeros((3, 4), dtype=np.float32, order='C')
    x2 = np.zeros((3, 4), dtype=np.float32, order='F')
    t.fill_view_2(x1)
    t.fill_view_2(x2)
    x3 = np.zeros((3, 4), dtype=np.float32, order='C')
    t.fill_view_3(x3)
    x4 = np.zeros((3, 4), dtype=np.float32, order='F')
    t.fill_view_4(x4)
    assert np.all(x1 == x2) and np.all(x2 == x3) and np.all(x3 == x4)
    # 3: complex views -- reinterpreting as float32 and complex-aware fills
    x1 = np.array([[1+2j, 3+4j], [5+6j, 7+8j]], dtype=np.complex64)
    x2 = x1 * 2
    t.fill_view_1(x1.view(np.float32))
    assert np.allclose(x1, x2)
    x2 = x1 * (-1+2j)
    t.fill_view_5(x1)
    assert np.allclose(x1, x2)
    x2 = -x2
    t.fill_view_6(x1)
    assert np.allclose(x1, x2)
@needs_numpy
def test36_half():
    """float16 return support (optional feature of the test extension)."""
    if not hasattr(t, 'ret_numpy_half'):
        pytest.skip('half precision test is missing')
    half = t.ret_numpy_half()
    assert half.dtype == np.float16
    assert half.shape == (2, 4)
    assert np.all(half == [[1, 2, 3, 4], [5, 6, 7, 8]])
@needs_numpy
def test37_cast():
    """cast() yields 0-d arrays; the flag selects int32 vs float32 dtype."""
    as_int = t.cast(False)
    as_float = t.cast(True)
    assert as_int.ndim == 0 and as_float.ndim == 0
    assert as_int.dtype == np.int32 and as_float.dtype == np.float32
    assert as_int == 1 and as_float == 1
@needs_numpy
def test38_complex_decompose():
    """real/imag of a complex64 array decompose into float32 components."""
    z = np.array([1 + 2j, 3 + 4j, 5 + 6j], dtype=np.complex64)
    assert np.all(z.real == np.array([1, 3, 5], dtype=np.float32))
    assert np.all(z.imag == np.array([2, 4, 6], dtype=np.float32))
@needs_numpy
@pytest.mark.parametrize("variant", [1, 2])
def test_uint32_complex_do_not_convert(variant):
    """set_item with a Python int or np.uint32 index must write in place
    instead of converting the complex array."""
    arg = 1 if variant == 1 else np.uint32(1)
    data = np.array([1.0 + 2.0j, 3.0 + 4.0j])
    t.set_item(data, arg)
    expected = np.array([123, 3.0 + 4.0j])
    assert np.all(data == expected)
@needs_numpy
def test40_check_generic():
    """Objects exposing only __dlpack__ (no buffer protocol) pass t.check()."""
    class DLPackWrapper:
        def __init__(self, o):
            self.o = o

        def __dlpack__(self):
            return self.o.__dlpack__()

    assert t.check(DLPackWrapper(np.zeros((1))))
@needs_numpy
def test41_noninteger_stride():
    """Strides of sliced/viewed arrays are reported in elements.

    A complex64 view of a float32 slice is only representable when the
    parent stride (counted in float32 elements) is even; an odd stride
    must be rejected with a TypeError.
    """
    a = np.array([[1, 2, 3, 4, 0, 0], [5, 6, 7, 8, 0, 0]], dtype=np.float32)
    s = a[:, 0:4]  # slice
    t.pass_float32(s)
    assert t.get_stride(s, 0) == 6
    assert t.get_stride(s, 1) == 1
    try:
        v = s.view(np.complex64)
    except Exception:  # was a bare 'except:'; don't swallow KeyboardInterrupt/SystemExit
        pytest.skip('your version of numpy is too old')
    t.pass_complex64(v)
    # Stride 6 in float32 units becomes 3 in complex64 units
    assert t.get_stride(v, 0) == 3
    assert t.get_stride(v, 1) == 1
    # Odd float32 stride (5): not expressible as a complex64 stride
    a = np.array([[1, 2, 3, 4, 0], [5, 6, 7, 8, 0]], dtype=np.float32)
    s = a[:, 0:4]  # slice
    t.pass_float32(s)
    assert t.get_stride(s, 0) == 5
    assert t.get_stride(s, 1) == 1
    v = s.view(np.complex64)
    with pytest.raises(TypeError) as excinfo:
        t.pass_complex64(v)
    assert 'incompatible function arguments' in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        t.get_stride(v, 0)
    assert 'incompatible function arguments' in str(excinfo.value)
@needs_numpy
def test42_const_qualifiers_numpy():
    # const correctness of ndarray parameters taken by value, const lvalue
    # reference, and rvalue reference. The check_rw_* functions write known
    # sentinel values in place; ro variants must also accept frozen arrays.
    # --- pass by value ---
    a = np.array([0, 0, 0, 3.14159, 0], dtype=np.float64)
    assert t.check_rw_by_value(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_value_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_value_ro(a)
    assert t.check_ro_by_value_const_float64(a)
    a.setflags(write=False)
    assert t.check_ro_by_value_ro(a)
    assert t.check_ro_by_value_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
    # --- pass by const lvalue reference ---
    a = np.array([0, 0, 0, 3.14159, 0], dtype=np.float64)
    assert t.check_rw_by_const_ref(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_const_ref_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_const_ref_ro(a)
    assert t.check_ro_by_const_ref_const_float64(a)
    a.setflags(write=False)
    assert t.check_ro_by_const_ref_ro(a)
    assert t.check_ro_by_const_ref_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
    # --- pass by rvalue reference ---
    a = np.array([0, 0, 0, 3.14159, 0], dtype=np.float64)
    assert t.check_rw_by_rvalue_ref(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_rvalue_ref_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_rvalue_ref_ro(a)
    assert t.check_ro_by_rvalue_ref_const_float64(a)
    a.setflags(write=False)
    assert t.check_ro_by_rvalue_ref_ro(a)
    assert t.check_ro_by_rvalue_ref_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
@needs_torch
def test43_const_qualifiers_pytorch():
    # torch counterpart of test42 (no setflags equivalent, so no frozen
    # array phase). Same sentinel-writing check_rw_* functions.
    # --- pass by value ---
    a = torch.tensor([0, 0, 0, 3.14159, 0], dtype=torch.float64)
    assert t.check_rw_by_value(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_value_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_value_ro(a)
    assert t.check_ro_by_value_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
    # --- pass by const lvalue reference ---
    a = torch.tensor([0, 0, 0, 3.14159, 0], dtype=torch.float64)
    assert t.check_rw_by_const_ref(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_const_ref_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_const_ref_ro(a)
    assert t.check_ro_by_const_ref_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
    # --- pass by rvalue reference ---
    a = torch.tensor([0, 0, 0, 3.14159, 0], dtype=torch.float64)
    assert t.check_rw_by_rvalue_ref(a)
    assert a[1] == 1.414214
    assert t.check_rw_by_rvalue_ref_float64(a)
    assert a[2] == 2.718282
    assert a[4] == 16.0
    assert t.check_ro_by_rvalue_ref_ro(a)
    assert t.check_ro_by_rvalue_ref_const_float64(a)
    assert a[0] == 0.0
    assert a[3] == 3.14159
@needs_cupy
@pytest.mark.filterwarnings
def test44_constrain_order_cupy():
    """Memory-order detection ('C'/'F'/'?') and device detection for cupy.

    NOTE(review): the filterwarnings mark carries no filter string and is
    therefore a no-op — confirm whether "ignore" was intended.
    """
    try:
        c = cp.zeros((3, 5))
        c.__dlpack__()
    except Exception:  # was a bare 'except:'; cupy may be installed but unusable
        pytest.skip('cupy is missing')
    f = cp.asarray(c, order="F")
    assert t.check_order(c) == 'C'
    assert t.check_order(f) == 'F'
    # Slices along the non-contiguous axis are neither C nor F ordered
    assert t.check_order(c[:, 2:5]) == '?'
    assert t.check_order(f[1:3, :]) == '?'
    assert t.check_device(cp.zeros((3, 5))) == 'cuda'
@needs_cupy
def test45_implicit_conversion_cupy():
    """Implicit dtype/layout conversion is accepted by t.implicit() and
    rejected by t.noimplicit() for cupy inputs."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            cp.zeros((3, 5))
        except Exception:  # was a bare 'except:'; don't swallow KeyboardInterrupt
            pytest.skip('cupy is missing')
        t.implicit(cp.zeros((2, 2), dtype=cp.int32))
        t.implicit(cp.zeros((2, 2, 10), dtype=cp.float32)[:, :, 4])
        t.implicit(cp.zeros((2, 2, 10), dtype=cp.int32)[:, :, 4])
        t.implicit(cp.zeros((2, 2, 10), dtype=cp.bool_)[:, :, 4])
        # noimplicit must reject types that would require a conversion
        with pytest.raises(TypeError):
            t.noimplicit(cp.zeros((2, 2), dtype=cp.int32))
        with pytest.raises(TypeError):
            t.noimplicit(cp.zeros((2, 2), dtype=cp.uint8))
@needs_numpy
def test46_implicit_conversion_contiguous_complex():
    """Regression test for issue #709: implicit conversion must yield a
    C-contiguous result for float and complex inputs alike."""
    c_f32 = np.random.rand(10, 10)
    c_c64 = c_f32.astype(np.complex64)
    assert c_f32.flags['C_CONTIGUOUS']
    assert c_c64.flags['C_CONTIGUOUS']

    def roundtrip(x):
        y = t.test_implicit_conversion(x)
        assert np.all(x == y)
        assert y.flags['C_CONTIGUOUS']

    roundtrip(c_f32)
    roundtrip(c_c64)
    # Transposes are not C-contiguous; conversion must fix that up
    nc_f32, nc_c64 = c_f32.T, c_c64.T
    assert not nc_f32.flags['C_CONTIGUOUS']
    assert not nc_c64.flags['C_CONTIGUOUS']
    roundtrip(nc_f32)
    roundtrip(nc_c64)
@needs_numpy
def test_47_ret_infer():
    """Returned arrays with inferred C vs Fortran layout."""
    expected_c = [[1, 2, 3, 4], [5, 6, 7, 8]]
    expected_f = [[1, 3, 5, 7], [2, 4, 6, 8]]
    assert np.all(t.ret_infer_c() == expected_c)
    assert np.all(t.ret_infer_f() == expected_f)
@needs_numpy
def test48_test_matrix4f():
    """Views from data() share storage and keep the Matrix4f owner alive."""
    mat = t.Matrix4f()
    view_a = mat.data()
    view_b = mat.data()
    for i in range(16):
        view_a[i % 4, i // 4] = i
    # Dropping the owner and the writing view must not invalidate view_b
    del mat, view_a
    for i in range(16):
        assert view_b[i % 4, i // 4] == i
@needs_numpy
def test49_test_matrix4f_ref():
    """data_ref() behaves like data(): same docstring modulo the name,
    shared storage, and it keeps the owner alive."""
    assert t.Matrix4f.data_ref.__doc__.replace('data_ref', 'data') == t.Matrix4f.data.__doc__
    mat = t.Matrix4f()
    view_a = mat.data_ref()
    view_b = mat.data_ref()
    for i in range(16):
        view_a[i % 4, i // 4] = i
    del mat, view_a
    for i in range(16):
        assert view_b[i % 4, i // 4] == i
@needs_numpy
def test50_test_matrix4f_copy():
    """data_copy() snapshots the matrix; later writes through data_ref()
    must not affect the copy."""
    assert t.Matrix4f.data_ref.__doc__.replace('data_ref', 'data') == t.Matrix4f.data.__doc__
    mat = t.Matrix4f()
    ref = mat.data_ref()
    for i in range(16):
        ref[i % 4, i // 4] = i
    snapshot = mat.data_copy()
    for i in range(16):
        ref[i % 4, i // 4] = 0
    del mat, ref
    for i in range(16):
        assert snapshot[i % 4, i // 4] == i
@needs_numpy
def test51_return_from_stack():
    """Arrays whose data originated on the C++ stack are returned safely."""
    for fn in (t.ret_from_stack_1, t.ret_from_stack_2):
        assert np.all(fn() == [1, 2, 3])
@needs_numpy
def test52_accept_np_both_true_contig():
    """A (2, 1) array is simultaneously C- and F-contiguous and must be
    accepted under 'any', C-, and F-contiguity constraints."""
    arr = np.zeros((2, 1), dtype=np.float32)
    assert arr.flags['C_CONTIGUOUS'] and arr.flags['F_CONTIGUOUS']
    t.accept_np_both_true_contig_a(arr)
    t.accept_np_both_true_contig_c(arr)
    t.accept_np_both_true_contig_f(arr)
@needs_numpy
def test53_issue_930():
    """Regression test for issue #930 (ndarray stored inside a wrapper)."""
    w = t.Wrapper(np.ones(3, dtype=np.float32))
    assert w.value[0] == 1
@needs_numpy
def test54_docs_example():
    # DLPack protocol example from the docs: unversioned capsules by
    # default, versioned ones when max_version is passed; views imported
    # via from_dlpack share storage with the producer.
    ma = t.MyArray()
    aa = ma.array_api()
    assert 'versioned' not in repr(aa.__dlpack__())
    assert 'versioned' not in repr(ma.__dlpack__())
    assert 'versioned' in repr(aa.__dlpack__(max_version=(1, 2)))
    assert 'versioned' in repr(ma.__dlpack__(max_version=(1, 2)))
    # Device tuple (1, 0) is DLPack's CPU device
    assert aa.__dlpack_device__() == (1, 0)
    assert ma.__dlpack_device__() == (1, 0)
    if hasattr(np, 'from_dlpack'):
        x = np.from_dlpack(aa)
        y = np.from_dlpack(ma)
        assert np.all(x == [0.0, 1.0, 2.0, 3.0, 4.0])
        assert np.all(y == [0.0, 1.0, 2.0, 3.0, 4.0])
        # mutate() on the producer must be visible through both imports
        ma.mutate()
        assert np.all(x == [0.5, 1.5, 2.5, 3.5, 4.5])
        assert np.all(y == [0.5, 1.5, 2.5, 3.5, 4.5])
    else:
        pytest.skip('your version of numpy is too old')
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_specialization.py | Python | import sys
import sysconfig
import dis
import pytest
# Note: these tests verify that CPython's adaptive specializing interpreter can
# optimize various expressions involving nanobind types. They are expected to
# be somewhat fragile across Python versions as the bytecode and specialization
# opcodes may change.
# Skip tests on PyPy and free-threaded Python
skip_tests = sys.implementation.name == "pypy" or \
sysconfig.get_config_var("Py_GIL_DISABLED")
import test_classes_ext as t
def disasm(func):
"""Extract specialized opcode names from a function"""
instructions = list(dis.get_instructions(func, adaptive=True))
return [(instr.opname, instr.argval) for instr in instructions]
def warmup(fn):
    """Invoke *fn* eight times so the adaptive interpreter specializes it."""
    for _ in range(8):
        fn()
def count_op(ops, expected):
    """Count how many (opname, argval) entries in *ops* match *expected*."""
    return sum(1 for opname, _ in ops if opname == expected)
@pytest.mark.skipif(
    sys.version_info < (3, 14) or skip_tests,
    reason="Static attribute specialization requires CPython 3.14+")
def test_static_attribute_specialization():
    # LOAD_ATTR on a nanobind static attribute must be replaced by a
    # specialized opcode once the function has been warmed up.
    s = t.Struct
    def fn():
        return s.static_test
    # Before warmup: exactly one generic LOAD_ATTR, no specialized form
    ops = disasm(fn)
    print(ops)
    op_base = count_op(ops, "LOAD_ATTR")
    op_opt = (
        count_op(ops, "LOAD_ATTR_ADAPTIVE") +
        count_op(ops, "LOAD_ATTR_CLASS"))
    assert op_base == 1 and op_opt == 0
    warmup(fn)
    # After warmup: the generic opcode must have been specialized
    ops = disasm(fn)
    print(ops)
    op_base = count_op(ops, "LOAD_ATTR")
    op_opt = (
        count_op(ops, "LOAD_ATTR_ADAPTIVE") +
        count_op(ops, "LOAD_ATTR_CLASS"))
    assert op_base == 0 and op_opt == 1
@pytest.mark.skipif(
    sys.version_info < (3, 11) or skip_tests,
    reason="Method call specialization requires CPython 3.11+")
def test_method_call_specialization():
    """A bound nanobind method call must specialize after warmup.

    Fixed: the skip reason previously claimed "CPython 3.14+" although the
    version check (matching the 3.11-era opcode names below) only requires
    3.11+.
    """
    s = t.Struct()
    def fn():
        return s.value()
    # Before warmup: one generic method load, no specialized opcodes
    ops = disasm(fn)
    op_base = (
        count_op(ops, "LOAD_METHOD") +
        count_op(ops, "LOAD_ATTR"))
    op_opt = (
        count_op(ops, "LOAD_ATTR_METHOD_NO_DICT") +
        count_op(ops, "CALL_ADAPTIVE"))
    print(ops)
    assert op_base == 1 and op_opt == 0
    warmup(fn)
    # After warmup: the load must have been specialized
    ops = disasm(fn)
    print(ops)
    op_base = (
        count_op(ops, "LOAD_METHOD") +
        count_op(ops, "LOAD_ATTR"))
    op_opt = (
        count_op(ops, "LOAD_ATTR_METHOD_NO_DICT") +
        count_op(ops, "CALL_ADAPTIVE"))
    assert op_base == 0 and op_opt == 1
@pytest.mark.skipif(sys.version_info < (3, 11) or skip_tests,
                    reason="Immutability requires Python 3.11+")
def test_immutability():
    """nb_method and the nanobind metaclass must reject attribute writes."""
    # nb_method objects are immutable
    bound = t.Struct.value
    bound_type = type(bound)
    assert bound_type.__name__ == "nb_method"
    with pytest.raises(TypeError, match="immutable"):
        bound_type.test_attr = 123
    # ... and so is the nb_type metaclass
    meta = type(t.Struct)
    assert meta.__name__.startswith("nb_type")
    with pytest.raises(TypeError, match="immutable"):
        meta.test_attr = 123
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl.cpp | C++ | #include <nanobind/stl/tuple.h>
#include <nanobind/stl/pair.h>
#include <nanobind/stl/vector.h>
#include <nanobind/stl/function.h>
#include <nanobind/stl/list.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/string_view.h>
#include <nanobind/stl/optional.h>
#include <nanobind/stl/variant.h>
#include <nanobind/stl/map.h>
#include <nanobind/stl/array.h>
#include <nanobind/stl/unordered_set.h>
#include <nanobind/stl/set.h>
#include <nanobind/stl/filesystem.h>
#include <nanobind/stl/complex.h>
#include <nanobind/stl/wstring.h>
NB_MAKE_OPAQUE(std::vector<float, std::allocator<float>>)
namespace nb = nanobind;
static_assert(nb::detail::has_arg_defaults_v<std::optional<bool>>);
// Global instrumentation counters incremented by the special member
// functions of Movable/Copyable below; exposed to Python via stats()
// and cleared via reset().
static int default_constructed = 0, value_constructed = 0, copy_constructed = 0,
           move_constructed = 0, copy_assigned = 0, move_assigned = 0,
           destructed = 0;
// Move-enabled test type: every special member function bumps one of the
// global counters so tests can assert exact construction/copy/move counts.
struct Movable {
    int value = 5;
    Movable() { default_constructed++; }
    Movable(int value) : value(value) { value_constructed++; }
    Movable(const Movable &s) : value(s.value) { copy_constructed++; }
    // Moved-from instances are reset to value == 0 so tests can detect moves
    Movable(Movable &&s) noexcept : value(s.value) { s.value = 0; move_constructed++; }
    Movable &operator=(const Movable &s) { value = s.value; copy_assigned++; return *this; }
    Movable &operator=(Movable &&s) noexcept { std::swap(value, s.value); move_assigned++; return *this; }
    ~Movable() { destructed++; }
};
// Copy-only test type (no move operations); instrumented like Movable
struct Copyable {
    int value = 5;
    Copyable() { default_constructed++; }
    Copyable(int value) : value(value) { value_constructed++; }
    Copyable(const Copyable &s) : value(s.value) { copy_constructed++; }
    Copyable &operator=(const Copyable &s) { value = s.value; copy_assigned++; return *this; }
    ~Copyable() { destructed++; }
};
// Copy-constructible but not copy-assignable; exercises casters that must
// construct rather than assign (see optional_non_assignable binding)
struct NonAssignable {
    int value = 5;
    NonAssignable() = default;
    NonAssignable(const NonAssignable &x) : value(x.value) { }
    NonAssignable &operator=(const NonAssignable &) = delete;
};
// Movable variant without a default constructor (used by variant_nondefault)
struct NonDefaultConstructible : Movable {
    NonDefaultConstructible(int v) : Movable(v) {}
};
// Holds a map exposed through def_ro (read-only field access from Python)
struct StructWithReadonlyMap {
    std::map<std::string, uint64_t> map;
};
// Wraps a std::function and counts live instances; used to test that the
// cyclic GC can traverse callables captured on the C++ side
struct FuncWrapper {
    std::function<void(void)> f;
    static int alive;  // number of FuncWrapper instances currently alive
    FuncWrapper() { alive++; }
    ~FuncWrapper() { alive--; }
};
// tp_traverse hook: lets Python's cyclic GC see a Python callable that may
// be stored inside FuncWrapper::f, enabling collection of reference cycles
int funcwrapper_tp_traverse(PyObject *self, visitproc visit, void *arg) {
    Py_VISIT(Py_TYPE(self));
    // Skip instances whose C++ payload has not been constructed yet
    if (!nb::inst_ready(self)) {
        return 0;
    }
    FuncWrapper *w = nb::inst_ptr<FuncWrapper>(self);
    // Rewrap the std::function without taking ownership and visit it
    nb::handle f = nb::cast(w->f, nb::rv_policy::none);
    Py_VISIT(f.ptr());
    return 0;
}
// Out-of-line definition of the live-instance counter declared in FuncWrapper
int FuncWrapper::alive = 0;
// Signal a test failure from C++ by throwing (surfaces as a Python exception)
void fail() { throw std::exception(); }
NB_MODULE(test_stl_ext, m) {
    // Module entry point: registers the instrumentation accessors
    // (stats/reset), the bound helper types, and one group of m.def()
    // bindings per numbered test group in tests/test_stl.py.
    m.def("stats", []{
        nb::dict d;
        d["default_constructed"] = default_constructed;
        d["value_constructed"] = value_constructed;
        d["copy_constructed"] = copy_constructed;
        d["move_constructed"] = move_constructed;
        d["copy_assigned"] = copy_assigned;
        d["move_assigned"] = move_assigned;
        d["destructed"] = destructed;
        return d;
    });
    m.def("reset", []() {
        default_constructed = 0;
        value_constructed = 0;
        copy_constructed = 0;
        move_constructed = 0;
        copy_assigned = 0;
        move_assigned = 0;
        destructed = 0;
    });
    nb::class_<Movable>(m, "Movable")
        .def(nb::init<>())
        .def(nb::init<int>())
        .def_rw("value", &Movable::value);
    nb::class_<Copyable>(m, "Copyable")
        .def(nb::init<>())
        .def(nb::init<int>())
        .def_rw("value", &Copyable::value);
    nb::class_<NonAssignable>(m, "NonAssignable")
        .def(nb::init<>())
        .def_rw("value", &NonAssignable::value);
    nb::class_<NonDefaultConstructible>(m, "NonDefaultConstructible")
        .def(nb::init<int>())
        .def_rw("value", &NonDefaultConstructible::value);
    nb::class_<StructWithReadonlyMap>(m, "StructWithReadonlyMap")
        .def(nb::init<>())
        .def_ro("map", &StructWithReadonlyMap::map);
    // ----- test01-test12 ------
    m.def("return_movable", []() { return Movable(); });
    m.def("return_movable_ptr", []() { return new Movable(); });
    m.def("movable_in_value", [](Movable m) { if (m.value != 5) fail(); });
    m.def("movable_in_lvalue_ref", [](Movable &m) { if (m.value != 5) fail(); });
    m.def("movable_in_rvalue_ref", [](Movable &&m) { Movable x(std::move(m)); if (x.value != 5) fail(); });
    m.def("movable_in_ptr", [](Movable *m) { if (m->value != 5) fail(); });
    m.def("return_copyable", []() { return Copyable(); });
    m.def("return_copyable_ptr", []() { return new Copyable(); });
    m.def("copyable_in_value", [](Copyable m) { if (m.value != 5) fail(); });
    m.def("copyable_in_lvalue_ref", [](Copyable &m) { if (m.value != 5) fail(); });
    m.def("copyable_in_rvalue_ref", [](Copyable &&m) { Copyable x(m); if (x.value != 5) fail(); });
    m.def("copyable_in_ptr", [](Copyable *m) { if (m->value != 5) fail(); });
    // ----- test13-test20 ------
    m.def("tuple_return_movable", []() { return std::make_tuple(Movable()); });
    m.def("tuple_return_movable_ptr", []() { return std::make_tuple(new Movable()); });
    m.def("tuple_movable_in_value", [](std::tuple<Movable> m) { if (std::get<0>(m).value != 5) fail(); });
    m.def("tuple_movable_in_lvalue_ref", [](std::tuple<Movable&> m) { if (std::get<0>(m).value != 5) fail(); });
    m.def("tuple_movable_in_lvalue_ref_2", [](const std::tuple<Movable> &m) { if (std::get<0>(m).value != 5) fail(); });
    m.def("tuple_movable_in_rvalue_ref", [](std::tuple<Movable&&> m) { Movable x(std::move(std::get<0>(m))); if (x.value != 5) fail(); });
    m.def("tuple_movable_in_rvalue_ref_2", [](std::tuple<Movable> &&m) { Movable x(std::move(std::get<0>(m))); if (x.value != 5) fail(); });
    m.def("tuple_movable_in_ptr", [](std::tuple<Movable*> m) { if (std::get<0>(m)->value != 5) fail(); });
    // ----- test21 ------
    m.def("empty_tuple", [](std::tuple<>) { return std::tuple<>(); });
    m.def("swap_tuple", [](const std::tuple<int, float> &v) {
        return std::tuple<float, int>(std::get<1>(v), std::get<0>(v));
    });
    m.def("swap_pair", [](const std::pair<int, float> &v) {
        return std::pair<float, int>(std::get<1>(v), std::get<0>(v));
    });
    // ----- test22 ------
    m.def("vec_return_movable", [](){
        std::vector<Movable> x;
        x.reserve(10);
        for (int i = 0; i< 10; ++i)
            x.emplace_back(i);
        return x;
    });
    m.def("vec_return_copyable", [](){
        std::vector<Copyable> x;
        x.reserve(10);
        for (int i = 0; i < 10; ++i) {
            Copyable c(i);
            x.push_back(c);
        }
        return x;
    });
    m.def("vec_movable_in_value", [](std::vector<Movable> x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i< 10; ++i)
            if (x[i].value != i)
                fail();
    });
    m.def("vec_copyable_in_value", [](std::vector<Copyable> x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i< 10; ++i)
            if (x[i].value != i)
                fail();
    });
    m.def("vec_movable_in_lvalue_ref", [](std::vector<Movable> &x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i< 10; ++i)
            if (x[i].value != i)
                fail();
    });
    m.def("vec_movable_in_rvalue_ref", [](std::vector<Movable> &&x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i< 10; ++i)
            if (x[i].value != i)
                fail();
    });
    m.def("vec_movable_in_ptr_2", [](std::vector<Movable *> x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i< 10; ++i)
            if (x[i]->value != i)
                fail();
    });
    // ----- test29 ------
    using fvec = std::vector<float, std::allocator<float>>;
    nb::class_<fvec>(m, "float_vec")
        .def(nb::init<>())
        .def("push_back", [](fvec *fv, float f) { fv->push_back(f); })
        .def("size", [](const fvec &fv) { return fv.size(); });
    // ----- test30 ------
    m.def("return_empty_function", []() -> std::function<int(int)> {
        return {};
    });
    m.def("return_function", []() -> std::function<int(int)> {
        int k = 5;
        return [k](int l) { return k + l; };
    });
    m.def("call_function", [](std::function<int(int)> &f, int x) {
        return f(x);
    });
    m.def("return_void_function", [](std::function<void(void)> &f) {
        return f;
    });
    m.def("identity_list", [](std::list<int> &x) { return x; });
    PyType_Slot slots[] = {
        { Py_tp_traverse, (void *) funcwrapper_tp_traverse },
        { 0, 0 }
    };
    nb::class_<FuncWrapper>(m, "FuncWrapper", nb::type_slots(slots))
        .def(nb::init<>())
        .def_rw("f", &FuncWrapper::f)
        .def_ro_static("alive", &FuncWrapper::alive, "static read-only property");
    // ----- test35 ------
    m.def("identity_string", [](std::string& x) { return x; });
    m.def("identity_string_view", [](std::string_view& x) { return x; });
    // ----- test36-test42 ------
    m.def("optional_copyable", [](std::optional<Copyable> &) {}, nb::arg("x").none());
    m.def("optional_copyable_ptr", [](std::optional<Copyable *> &) {}, nb::arg("x").none());
    m.def("optional_none", [](std::optional<Copyable> &x) { if(x) fail(); }, nb::arg("x").none());
    m.def("optional_ret_opt_movable", []() { return std::optional<Movable>(Movable()); });
    m.def("optional_ret_opt_movable_ptr", []() { return new std::optional<Movable *>(new Movable()); });
    m.def("optional_ret_opt_none", []() { return std::optional<Movable>(); });
    m.def("optional_unbound_type", [](std::optional<int> &x) { return x; }, nb::arg("x") = nb::none());
    m.def("optional_unbound_type_with_nullopt_as_default", [](std::optional<int> &x) { return x; }, nb::arg("x") = std::nullopt);
    m.def("optional_non_assignable", [](std::optional<NonAssignable> &x) { return x; });
    // ----- test43-test50 ------
    m.def("variant_copyable", [](std::variant<Copyable, int> &) {});
    m.def("variant_copyable_none", [](std::variant<std::monostate, int, Copyable> &) {}, nb::arg("x").none());
    m.def("variant_copyable_ptr", [](std::variant<Copyable *, int> &) {});
    m.def("variant_copyable_ptr_none", [](std::variant<Copyable *, int> &) {}, nb::arg("x").none());
    m.def("variant_ret_var_copyable", []() { return std::variant<Copyable, int>(); });
    m.def("variant_ret_var_none", []() { return std::variant<std::monostate, Copyable, int>(); });
    m.def("variant_unbound_type", [](std::variant<std::monostate, nb::list, nb::tuple, int> &x) { return x; },
          nb::arg("x") = nb::none());
    m.def("variant_nondefault",
          [](std::variant<NonDefaultConstructible, int> v) {
              return v.index() == 0 ? std::get<0>(v).value : -std::get<1>(v);
          });
    // ----- test50-test57 ------
    m.def("map_return_movable_value", [](){
        std::map<std::string, Movable> x;
        for (int i = 0; i < 10; ++i)
            x.emplace(std::string(1, (char) ('a' + i)), i);
        return x;
    });
    m.def("map_return_copyable_value", [](){
        std::map<std::string, Copyable> x;
        for (int i = 0; i < 10; ++i) {
            Copyable c(i);
            x.insert({std::string(1, (char) ('a' + i)), c});
        }
        return x;
    });
    m.def("map_movable_in_value", [](std::map<std::string, Movable> x) {
        if (x.size() != 10) fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end()) fail();
            if (x[key].value != i) fail();
        }
    }, nb::arg("x"));
    m.def("map_copyable_in_value", [](std::map<std::string, Copyable> x) {
        if (x.size() != 10) fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end()) fail();
            if (x[key].value != i) fail();
        }
    }, nb::arg("x"));
    m.def("map_movable_in_lvalue_ref", [](std::map<std::string, Movable> &x) {
        if (x.size() != 10) fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end()) fail();
            if (x[key].value != i) fail();
        }
    }, nb::arg("x"));
    m.def("map_movable_in_rvalue_ref", [](std::map<std::string, Movable> &&x) {
        if (x.size() != 10) fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end()) fail();
            if (x[key].value != i) fail();
        }
    }, nb::arg("x"));
    m.def("map_movable_in_ptr", [](std::map<std::string, Movable *> x) {
        if (x.size() != 10) fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end()) fail();
            if (x[key]->value != i) fail();
        }
    }, nb::arg("x"));
    m.def("map_return_readonly_value", [](){
        StructWithReadonlyMap x;
        for (int i = 0; i < 10; ++i) {
            x.map.insert({std::string(1, (char) ('a' + i)), i});
        }
        return x;
    });
    // test58
    m.def("array_out", [](){ return std::array<int, 3>{1, 2, 3}; });
    m.def("array_in", [](std::array<int, 3> x) { return x[0] + x[1] + x[2]; });
    // ----- test60-test64 ------
    m.def("set_return_value", []() {
        std::set<std::string> x;
        for (int i = 0; i < 10; ++i)
            x.emplace(std::string(1, (char) ('a' + i)));
        return x;
    });
    m.def("unordered_set_return_value", []() {
        std::unordered_set<std::string> x;
        for (int i = 0; i < 10; ++i)
            x.emplace(std::string(1, (char) ('a' + i)));
        return x;
    });
    m.def("set_in_value", [](std::set<std::string> x) {
        if (x.size() != 10)
            fail();
        for (int i = 0; i < 10; ++i) {
            std::string key(1, (char) ('a' + i));
            if (x.find(key) == x.end())
                fail();
        }
    },
    nb::arg("x"));
    m.def(
        "unordered_set_in_value", [](std::unordered_set<std::string> x) {
            if (x.size() != 10)
                fail();
            for (int i = 0; i < 10; ++i) {
                std::string key(1, (char) ('a' + i));
                if (x.find(key) == x.end())
                    fail();
            }
        },
        nb::arg("x"));
    m.def(
        "set_in_lvalue_ref", [](std::set<std::string>& x) {
            if (x.size() != 10)
                fail();
            for (int i = 0; i < 10; ++i) {
                std::string key(1, (char) ('a' + i));
                if (x.find(key) == x.end())
                    fail();
            }
        },
        nb::arg("x"));
    m.def(
        "set_in_rvalue_ref", [](std::set<std::string>&& x) {
            if (x.size() != 10)
                fail();
            for (int i = 0; i < 10; ++i) {
                std::string key(1, (char) ('a' + i));
                if (x.find(key) == x.end())
                    fail();
            }
        },
        nb::arg("x"));
    // std::filesystem incomplete on GCC 8
#if !(defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 8)
    // test66
    m.def("replace_extension", [](std::filesystem::path p, std::string ext) {
        return p.replace_extension(ext);
    });
    m.def("parent_path", [](const std::filesystem::path &p) { return p.parent_path(); });
#endif
    struct ClassWithMovableField {
        std::vector<Movable> movable;
    };
    nb::class_<ClassWithMovableField>(m, "ClassWithMovableField")
        .def(nb::init<>())
        .def_rw("movable", &ClassWithMovableField::movable);
    // test67 std::vector<bool>
    m.def("flip_vector_bool", [](std::vector<bool> vec) {
        vec.flip();
        return vec;
    });
    m.def("complex_value_float", [](const std::complex<float>& x) {
        return x;
    });
    m.def("complex_value_float_nc", [](const std::complex<float>& x) {
        return x;
    }, nb::arg().noconvert());
    m.def("complex_value_double", [](const std::complex<double>& x) {
        return x;
    });
    m.def("complex_value_double_nc", [](const std::complex<double>& x) {
        return x;
    }, nb::arg().noconvert());
    m.def("complex_array_float", [](const std::vector<std::complex<float>>& x) {
        return x;
    });
    m.def("complex_array_double", [](const std::vector<std::complex<double>>& x) {
        return x;
    });
    m.def("vector_str", [](const std::vector<std::string>& x){
        return x;
    });
    m.def("vector_str", [](std::string& x){
        return x;
    });
    m.def("vector_optional_str", [](const std::vector<std::optional<std::string>>& x) {
        return x;
    });
    m.def("pass_wstr", [](std::wstring ws) { return ws; });
    // uncomment to see compiler error:
    // m.def("optional_intptr", [](std::optional<int*>) {});
    m.def("optional_cstr", [](std::optional<const char*> arg) {
        return arg.value_or("none");
    }, nb::arg().none());
    // test74
    struct BasicID1 {
        uint64_t id;
        BasicID1(uint64_t id) : id(id) {}
    };
    struct BasicID2 {
        uint64_t id;
        BasicID2(uint64_t id) : id(id) {}
    };
    nb::class_<BasicID1>(m, "BasicID1")
        .def(nb::init<uint64_t>())
        .def("__int__", [](const BasicID1& x) { return x.id; })
        ;
    nb::class_<BasicID2>(m, "BasicID2")
        .def(nb::init_implicit<uint64_t>());
    using IDVariants = std::variant<std::monostate, BasicID2, BasicID1>;
    struct IDHavingEvent {
        IDVariants id;
        IDHavingEvent() = default;
    };
    nb::class_<IDHavingEvent>(m, "IDHavingEvent")
        .def(nb::init<>())
        .def_rw("id", &IDHavingEvent::id);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl.py | Python | import test_stl_ext as t
import typing
import pytest
import sys
from common import collect, skip_on_pypy
def optional(arg: str, /) -> str:
    """Spell an Optional annotation the way the running Python renders it."""
    if sys.version_info >= (3, 10):
        return arg + " | None"
    return "typing.Optional[" + arg + "]"
def union(*args: str) -> str:
    """Spell a Union annotation the way the running Python renders it."""
    if sys.version_info >= (3, 10):
        return " | ".join(args)
    return "typing.Union[" + ", ".join(args) + "]"
@pytest.fixture
def clean():
    # Collect garbage and zero the C++ constructor/destructor counters so
    # each stats-based test starts from a known state.
    collect()
    t.reset()
def assert_stats(**kwargs):
    """Check the C++ instrumentation counters.

    Each counter named in *kwargs* must equal its expected value; every
    other counter must be zero. Garbage is collected first so pending
    destructions are accounted for.
    """
    collect()
    for key, actual in t.stats().items():
        expected = kwargs.get(key, 0)
        if actual != expected:
            raise Exception(f"Mismatch for key {key}: {t.stats()}")
# ------------------------------------------------------------------
# The following aren't strictly STL tests, but they are helpful in
# ensuring that move constructors/copy constructors of bound C++ types
# are properly triggered, which the STL type casters depend on.
# ------------------------------------------------------------------
def test01_movable_return(clean):
    # Return by value: one default construction plus one move into Python
    assert t.return_movable().value == 5
    assert_stats(default_constructed=1, move_constructed=1, destructed=2)
def test02_movable_return_ptr(clean):
    # Returning a raw pointer transfers ownership without copies or moves
    assert t.return_movable_ptr().value == 5
    assert_stats(default_constructed=1, destructed=1)
def test03_movable_in_value(clean):
    # Pass by value: the callee receives a copy; the original is untouched
    s = t.Movable()
    t.movable_in_value(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test04_movable_in_lvalue_ref(clean):
    # Pass by lvalue reference: no copy or move occurs
    s = t.Movable()
    t.movable_in_lvalue_ref(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, destructed=1)
def test05_movable_in_ptr(clean):
    # Pass by pointer: no copy or move occurs
    s = t.Movable()
    t.movable_in_ptr(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, destructed=1)
def test06_movable_in_rvalue_ref(clean):
    # Pass by rvalue reference: the callee moves from the Python-owned
    # instance, leaving it in the moved-from state (value == 0)
    s = t.Movable()
    t.movable_in_rvalue_ref(s)
    assert s.value == 0
    del s
    assert_stats(default_constructed=1, move_constructed=1, destructed=2)
def test07_copyable_return(clean):
    # Copy-only type returned by value: a copy (not a move) into Python
    assert t.return_copyable().value == 5
    assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test08_copyable_return_ptr(clean):
    # Raw pointer return of a copy-only type: ownership transfer, no copy
    assert t.return_copyable_ptr().value == 5
    assert_stats(default_constructed=1, destructed=1)
def test09_copyable_in_value(clean):
    # Pass copy-only type by value: exactly one copy
    s = t.Copyable()
    t.copyable_in_value(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test10_copyable_in_lvalue_ref(clean):
    # Pass copy-only type by lvalue reference: no copy
    s = t.Copyable()
    t.copyable_in_lvalue_ref(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, destructed=1)
def test11_copyable_in_ptr(clean):
    # Pass copy-only type by pointer: no copy
    s = t.Copyable()
    t.copyable_in_ptr(s)
    assert s.value == 5
    del s
    assert_stats(default_constructed=1, destructed=1)
def test12_copyable_in_rvalue_ref(clean):
s = t.Copyable()
t.copyable_in_rvalue_ref(s)
assert s.value == 5
del s
assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
# ------------------------------------------------------------------
def test13_tuple_movable_return(clean):
assert t.tuple_return_movable()[0].value == 5
assert_stats(default_constructed=1, move_constructed=2, destructed=3)
def test14_tuple_movable_return_ptr(clean):
    # NOTE(review): despite the 'tuple_' prefix, this calls the plain
    # return_movable_ptr() binding — identical to test02. Looks like a
    # copy/paste; confirm whether t.tuple_return_movable_ptr() was intended.
    assert t.return_movable_ptr().value == 5
    assert_stats(default_constructed=1, destructed=1)
def test15_tuple_movable_in_value(clean):
s = t.Movable()
t.tuple_movable_in_value((s,))
assert s.value == 5
del s
assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test16_tuple_movable_in_lvalue_ref(clean):
s = t.Movable()
t.tuple_movable_in_lvalue_ref((s,))
assert s.value == 5
del s
assert_stats(default_constructed=1, destructed=1)
def test17_tuple_movable_in_lvalue_ref_2(clean):
s = t.Movable()
t.tuple_movable_in_lvalue_ref_2((s,))
assert s.value == 5
del s
assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test18_tuple_movable_in_ptr(clean):
s = t.Movable()
t.tuple_movable_in_ptr((s,))
assert s.value == 5
del s
assert_stats(default_constructed=1, destructed=1)
def test19_tuple_movable_in_rvalue_ref(clean):
s = t.Movable()
t.tuple_movable_in_rvalue_ref((s,))
assert s.value == 0
del s
assert_stats(default_constructed=1, move_constructed=1, destructed=2)
def test20_tuple_movable_in_rvalue_ref_2(clean):
s = t.Movable()
t.tuple_movable_in_rvalue_ref_2((s,))
assert s.value == 5
del s
assert_stats(
default_constructed=1, copy_constructed=1, move_constructed=1, destructed=3
)
# ------------------------------------------------------------------
def test21_tuple_pair_basic():
assert t.empty_tuple(()) == ()
assert t.swap_tuple((1, 2.5)) == (2.5, 1)
assert t.swap_pair((1, 2.5)) == (2.5, 1)
# ------------------------------------------------------------------
def test22_vec_return_movable(clean):
for i, x in enumerate(t.vec_return_movable()):
assert x.value == i
del x
assert_stats(value_constructed=10, move_constructed=10, destructed=20)
def test23_vec_return_copyable(clean):
for i, x in enumerate(t.vec_return_copyable()):
assert x.value == i
del x
assert_stats(value_constructed=10, copy_constructed=20, destructed=30)
def test24_vec_movable_in_value(clean):
t.vec_movable_in_value([t.Movable(i) for i in range(10)])
assert_stats(value_constructed=10, copy_constructed=10, destructed=20)
# Test that None values don't cause a crash
with pytest.raises(TypeError):
t.vec_movable_in_value([None])
def test25_vec_movable_in_value(clean):
    # NOTE(review): name says 'movable' but this exercises Copyable via
    # vec_copyable_in_value — presumably should be test25_vec_copyable_in_value.
    # Not renamed here since pytest collects tests by name; confirm first.
    t.vec_copyable_in_value([t.Copyable(i) for i in range(10)])
    assert_stats(value_constructed=10, copy_constructed=10, destructed=20)
def test26_vec_movable_in_lvalue_ref(clean):
t.vec_movable_in_lvalue_ref([t.Movable(i) for i in range(10)])
assert_stats(value_constructed=10, copy_constructed=10, destructed=20)
def test27_vec_movable_in_ptr_2(clean):
t.vec_movable_in_ptr_2([t.Movable(i) for i in range(10)])
assert_stats(value_constructed=10, destructed=10)
# Test that None values are permitted when casting to pointer;
# instead we reach 'if (x.size() != 10) fail();' in the bound function
with pytest.raises(RuntimeError):
t.vec_movable_in_ptr_2([None])
def test28_vec_movable_in_rvalue_ref(clean):
t.vec_movable_in_rvalue_ref([t.Movable(i) for i in range(10)])
assert_stats(value_constructed=10, copy_constructed=10, destructed=20)
def test29_opaque_vector():
f = t.float_vec()
assert f.size() == 0
assert isinstance(f, t.float_vec)
f.push_back(1)
assert f.size() == 1
def test30_std_function():
assert t.return_empty_function() is None
assert t.return_function()(3) == 8
assert t.call_function(lambda x: 5 + x, 3) == 8
with pytest.raises(TypeError) as excinfo:
assert t.call_function(5, 3) == 8
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
assert t.call_function(lambda x, y: x + y, 3) == 8
assert "missing 1 required positional argument" in str(excinfo.value)
l = []
def f():
l.append(1)
f2 = t.return_void_function(f)
f2()
assert l == [1]
def test31_std_function_roundtrip():
def f():
l.append(1)
f2 = t.return_void_function(f)
assert f2 is f
# cpyext reference cycles are not supported, see https://foss.heptapod.net/pypy/pypy/-/issues/3849
@skip_on_pypy
def test32_std_function_gc():
# Test class -> function -> class cyclic reference
class A(t.FuncWrapper):
pass
assert t.FuncWrapper.alive == 0
a = A()
assert t.FuncWrapper.alive == 1
del a
collect()
assert t.FuncWrapper.alive == 0
# t.FuncWrapper is a C extension type with a custom property 'f', which
# can store Python function objects. It implements the tp_traverse
# callback so that reference cycles arising from this function object
# can be detected.
class Test(t.FuncWrapper):
def __init__(self):
super().__init__()
# The constructor creates a closure, which references 'self'
# and assigns it to the 'self.f' member.
# This creates a cycle self -> self.f -> self
def f():
print(self.f)
self.f = f
# The Test class declared above inherits from 'FuncWrapper'.
# This class keeps track of how many references are alive at
# any point to help track down leak issues.
assert t.FuncWrapper.alive == 0
b = Test()
assert t.FuncWrapper.alive == 1
del b
collect()
assert t.FuncWrapper.alive == 0
def test33_vec_type_check():
with pytest.raises(TypeError) as excinfo:
t.vec_movable_in_value(0)
def test34_list():
assert t.identity_list([]) == []
assert t.identity_list([1, 2, 3]) == [1, 2, 3]
assert t.identity_list(()) == []
assert t.identity_list((1, 2, 3)) == [1, 2, 3]
def test35_string_and_string_view():
assert t.identity_string("") == ""
assert t.identity_string("orange") == "orange"
assert t.identity_string("橘子") == "橘子"
assert t.identity_string("ส้ม") == "ส้ม"
assert t.identity_string("البرتقالي") == "البرتقالي"
assert t.identity_string("🍊") == "🍊"
assert t.identity_string_view("") == ""
assert t.identity_string_view("orange") == "orange"
assert t.identity_string_view("橘子") == "橘子"
assert t.identity_string_view("ส้ม") == "ส้ม"
assert t.identity_string_view("البرتقالي") == "البرتقالي"
assert t.identity_string_view("🍊") == "🍊"
def test36_std_optional_copyable(clean):
t.optional_copyable(t.Copyable())
opt_copyable = optional("test_stl_ext.Copyable")
assert t.optional_copyable.__doc__ == (
f"optional_copyable(x: {opt_copyable}) -> None"
)
assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test37_std_optional_copyable_ptr(clean):
t.optional_copyable_ptr(t.Copyable())
opt_copyable = optional("test_stl_ext.Copyable")
assert t.optional_copyable_ptr.__doc__ == (
f"optional_copyable_ptr(x: {opt_copyable}) -> None"
)
assert_stats(default_constructed=1, destructed=1)
def test38_std_optional_none():
    """None must be accepted (and round-tripped) by std::optional bindings."""
    t.optional_none(None)
    assert t.optional_cstr(None) == "none"
    assert t.optional_cstr("hi") == "hi"
    # 'is None' (identity) is the idiomatic comparison against None (PEP 8);
    # the previous '== None' also risks invoking an overloaded __eq__.
    assert t.optional_non_assignable(None) is None
def test39_std_optional_ret_opt_movable(clean):
assert t.optional_ret_opt_movable().value == 5
opt_movable = optional("test_stl_ext.Movable")
assert t.optional_ret_opt_movable.__doc__ == (
f"optional_ret_opt_movable() -> {opt_movable}"
)
assert_stats(default_constructed=1, move_constructed=2, destructed=3)
def test40_std_optional_ret_opt_movable_ptr(clean):
assert t.optional_ret_opt_movable_ptr().value == 5
assert_stats(default_constructed=1, destructed=1)
def test41_std_optional_ret_opt_none():
assert t.optional_ret_opt_none() is None
def test42_std_optional_unbound_type():
for method_name in (
"optional_unbound_type",
"optional_unbound_type_with_nullopt_as_default",
):
method = getattr(t, method_name)
assert method(3) == 3
assert method(None) is None
assert method() is None
opt_int = optional("int")
assert method.__doc__ == (f"{method_name}(x: {opt_int} = None) -> {opt_int}")
def test42a_std_optional_non_assignable():
assert t.optional_non_assignable(t.NonAssignable()).value == 5
def test43_std_variant_copyable(clean):
t.variant_copyable(t.Copyable())
t.variant_copyable(5)
arg_t = union('test_stl_ext.Copyable', 'int')
assert t.variant_copyable.__doc__ == (
f"variant_copyable(arg: {arg_t}, /) -> None"
)
assert_stats(default_constructed=3, copy_assigned=1, destructed=3)
def test44_std_variant_copyable_none(clean):
t.variant_copyable_none(t.Copyable())
t.variant_copyable_none(5)
t.variant_copyable_none(None)
arg_t = optional(union("int", "test_stl_ext.Copyable"))
assert t.variant_copyable_none.__doc__ == (
f"variant_copyable_none(x: {arg_t}) -> None"
)
assert_stats(default_constructed=1, copy_constructed=1, destructed=2)
def test45_std_variant_copyable_ptr(clean):
t.variant_copyable_ptr(t.Copyable())
t.variant_copyable_ptr(5)
arg_t = union("test_stl_ext.Copyable", "int")
assert t.variant_copyable_ptr.__doc__ == (
f"variant_copyable_ptr(arg: {arg_t}, /) -> None"
)
assert_stats(default_constructed=1, destructed=1)
def test46_std_variant_copyable_ptr_none(clean):
t.variant_copyable_ptr_none(t.Copyable())
t.variant_copyable_ptr_none(5)
t.variant_copyable_ptr_none(None)
arg_t = optional(union("test_stl_ext.Copyable", "int"))
assert t.variant_copyable_ptr_none.__doc__ == (
f"variant_copyable_ptr_none(x: {arg_t}) -> None"
)
assert_stats(default_constructed=1, destructed=1)
def test47_std_variant_ret_var_copyable():
assert t.variant_ret_var_copyable().value == 5
rv_t = union("test_stl_ext.Copyable", "int")
assert t.variant_ret_var_copyable.__doc__ == (
f"variant_ret_var_copyable() -> {rv_t}"
)
def test48_std_variant_ret_var_none():
assert t.variant_ret_var_none() is None
rv_t = union("None", "test_stl_ext.Copyable", "int")
assert t.variant_ret_var_none.__doc__ == (f"variant_ret_var_none() -> {rv_t}")
def test49_std_variant_unbound_type():
assert t.variant_unbound_type() is None
assert t.variant_unbound_type(None) is None
assert t.variant_unbound_type([5]) == [5]
assert t.variant_unbound_type((1, 2, 3)) == (1, 2, 3)
assert t.variant_unbound_type(5) == 5
arg_t = optional(union("list", "tuple", "int"))
rv_t = union("None", "list", "tuple", "int")
assert t.variant_unbound_type.__doc__ == (
f"variant_unbound_type(x: {arg_t} = None) -> {rv_t}"
)
def test49b_std_variant_nondefault(clean):
assert t.variant_nondefault(t.NonDefaultConstructible(10)) == 10
assert t.variant_nondefault(20) == -20
assert_stats(
value_constructed=1, # constructed in NonDefaultConstructible pyobject
copy_constructed=1, # copied into type_caster variant member
move_constructed=1, # moved into function argument value
destructed=3,
)
def test50_map_return_movable_value():
for i, (k, v) in enumerate(sorted(t.map_return_movable_value().items())):
assert k == chr(ord("a") + i)
assert v.value == i
assert t.map_return_movable_value.__doc__ == (
"map_return_movable_value() -> dict[str, test_stl_ext.Movable]"
)
def test51_map_return_copyable_value():
for i, (k, v) in enumerate(sorted(t.map_return_copyable_value().items())):
assert k == chr(ord("a") + i)
assert v.value == i
assert t.map_return_copyable_value.__doc__ == (
"map_return_copyable_value() -> dict[str, test_stl_ext.Copyable]"
)
def test52_map_movable_in_value():
    # Dict comprehensions replace the previous dict([(k, v) for ...]) idiom
    # (flake8-comprehensions C404); the constructed mappings are identical.
    t.map_movable_in_value({chr(ord("a") + i): t.Movable(i) for i in range(10)})
    assert t.map_movable_in_value.__doc__ == (
        "map_movable_in_value(x: collections.abc.Mapping[str, test_stl_ext.Movable]) -> None"
    )


def test53_map_copyable_in_value():
    t.map_copyable_in_value({chr(ord("a") + i): t.Copyable(i) for i in range(10)})
    assert t.map_copyable_in_value.__doc__ == (
        "map_copyable_in_value(x: collections.abc.Mapping[str, test_stl_ext.Copyable]) -> None"
    )


def test54_map_movable_in_lvalue_ref():
    t.map_movable_in_lvalue_ref({chr(ord("a") + i): t.Movable(i) for i in range(10)})
    assert t.map_movable_in_lvalue_ref.__doc__ == (
        "map_movable_in_lvalue_ref(x: collections.abc.Mapping[str, test_stl_ext.Movable]) -> None"
    )


def test55_map_movable_in_rvalue_ref():
    t.map_movable_in_rvalue_ref({chr(ord("a") + i): t.Movable(i) for i in range(10)})
    assert t.map_movable_in_rvalue_ref.__doc__ == (
        "map_movable_in_rvalue_ref(x: collections.abc.Mapping[str, test_stl_ext.Movable]) -> None"
    )


def test56_map_movable_in_ptr():
    t.map_movable_in_ptr({chr(ord("a") + i): t.Movable(i) for i in range(10)})
    assert t.map_movable_in_ptr.__doc__ == (
        "map_movable_in_ptr(x: collections.abc.Mapping[str, test_stl_ext.Movable]) -> None"
    )
def test57_map_return_readonly_value():
for i, (k, v) in enumerate(sorted(t.map_return_readonly_value().map.items())):
assert k == chr(ord("a") + i)
assert v == i
assert t.map_return_readonly_value.__doc__ == (
"map_return_readonly_value() -> test_stl_ext.StructWithReadonlyMap"
)
def test58_array():
o = t.array_out()
assert isinstance(o, list) and o == [1, 2, 3]
assert t.array_in([1, 2, 3]) == 6
assert t.array_in((1, 2, 3)) == 6
with pytest.raises(TypeError) as excinfo:
assert t.array_in((1, 2, 3, 4)) == 6
assert "incompatible function arguments" in str(excinfo.value)
def test59_map_movable_in_failure():
with pytest.raises(TypeError) as excinfo:
t.map_copyable_in_value({1: 2})
assert "incompatible function arguments" in str(excinfo.value)
def test60_set_return_value():
for i, k in enumerate(sorted(t.set_return_value())):
assert k == chr(ord("a") + i)
for i, k in enumerate(sorted(t.unordered_set_return_value())):
assert k == chr(ord("a") + i)
assert t.set_return_value.__doc__ == "set_return_value() -> set[str]"
assert t.unordered_set_return_value.__doc__ == (
"unordered_set_return_value() -> set[str]"
)
def test61_set_in_value():
    # Set comprehensions replace set([...]) over a temporary list
    # (flake8-comprehensions C403); the resulting sets are identical.
    letters = {chr(ord("a") + i) for i in range(10)}
    t.set_in_value(letters)
    t.unordered_set_in_value(letters)
    assert t.set_in_value.__doc__ == (
        "set_in_value(x: collections.abc.Set[str]) -> None"
    )
    assert t.unordered_set_in_value.__doc__ == (
        "unordered_set_in_value(x: collections.abc.Set[str]) -> None"
    )


def test62_set_in_lvalue_ref():
    t.set_in_lvalue_ref({chr(ord("a") + i) for i in range(10)})
    assert t.set_in_lvalue_ref.__doc__ == (
        "set_in_lvalue_ref(x: collections.abc.Set[str]) -> None"
    )


def test63_set_in_rvalue_ref():
    t.set_in_rvalue_ref({chr(ord("a") + i) for i in range(10)})
    assert t.set_in_rvalue_ref.__doc__ == (
        "set_in_rvalue_ref(x: collections.abc.Set[str]) -> None"
    )


def test64_set_in_failure():
    # A set of ints where set[str] is expected must be rejected with TypeError
    with pytest.raises(TypeError) as excinfo:
        t.set_in_value(set(range(10)))
    assert "incompatible function arguments" in str(excinfo.value)
def test65_class_with_movable_field(clean):
cwmf = t.ClassWithMovableField()
m1 = t.Movable(1)
m2 = t.Movable(2)
assert_stats(value_constructed=2)
cwmf.movable = [m1, m2]
assert_stats(value_constructed=2, copy_constructed=2)
del m1, m2
collect()
assert_stats(value_constructed=2, copy_constructed=2, destructed=2)
del cwmf
collect()
assert_stats(value_constructed=2, copy_constructed=2, destructed=4)
def test66_replace_extension():
from pathlib import Path
if not hasattr(t, "replace_extension"):
pytest.skip("filesystem tests not compiled")
filename = Path("test.txt")
assert t.replace_extension(filename, ".obj") == filename.with_suffix(".obj")
filename = Path("🍊.html")
assert t.replace_extension(filename, ".svg") == filename.with_suffix(".svg")
class PseudoStrPath:
def __fspath__(self):
return "foo/bar"
class PseudoBytesPath:
def __fspath__(self):
return b"foo/bar"
assert t.parent_path(Path("foo/bar")) == Path("foo")
assert t.parent_path("foo/bar") == Path("foo")
assert t.parent_path(b"foo/bar") == Path("foo")
assert t.parent_path(PseudoStrPath()) == Path("foo")
assert t.parent_path(PseudoBytesPath()) == Path("foo")
def test67_vector_bool():
    """std::vector<bool> round-trip: every element comes back negated."""
    flags = [True, False, True, False]
    flipped = t.flip_vector_bool(flags)
    assert flipped == [False, True, False, True]
def test68_complex_value():
# double: 64bits
assert t.complex_value_double(1.0) == complex(1.0)
assert t.complex_value_double(1.0j) == 1.0j
assert t.complex_value_double(0.0) == complex(0.0)
assert t.complex_value_double(0.0j) == 0.0j
assert t.complex_value_double(0) == complex(0.0)
assert t.complex_value_double_nc(1.0 + 2.0j) == 1.0 + 2.0j
assert t.complex_value_double_nc(1.0j) == 1.0j
assert t.complex_value_double_nc(0.0 + 0.0j) == complex(0.0)
assert t.complex_value_double_nc(0.0j) == 0.0j
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(0.0)
# float: 32bits
assert t.complex_value_float(1.0) == complex(1.0)
assert t.complex_value_float(1.0j) == 1.0j
assert t.complex_value_float(0.0) == complex(0.0)
assert t.complex_value_float(0.0j) == 0.0j
assert t.complex_value_float(0) == complex(0.0)
assert t.complex_value_float_nc(1.0 + 2.0j) == 1.0 + 2.0j
assert t.complex_value_float_nc(1.0j) == 1.0j
assert t.complex_value_float_nc(0.0 + 0.0j) == complex(0.0)
assert t.complex_value_float_nc(0.0j) == 0.0j
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(0.0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(1.1 + 2.0j) # Inexact narrowing conversion
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(1.0 + 2.1j) # Inexact narrowing conversion
val_64 = 2.7 - 3.2j
val_32 = 2.700000047683716 - 3.200000047683716j
assert val_64 != val_32
assert t.complex_value_double(val_32) == val_32
assert t.complex_value_double(val_64) == val_64
assert t.complex_value_double_nc(val_32) == val_32
assert t.complex_value_double_nc(val_64) == val_64
assert t.complex_value_float(val_32) == val_32
assert t.complex_value_float(val_64) == val_32
assert t.complex_value_float_nc(val_32) == val_32
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(val_64) # Inexact narrowing conversion
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float([])
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
assert t.complex_value_double(MyInt(7)) == complex(7.0)
assert t.complex_value_float(MyInt(7)) == complex(7.0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(MyInt(7))
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(MyInt(7))
class MyComplex:
def __init__(self, real, imag):
self.re = real
self.im = imag
def __complex__(self):
return complex(self.re, self.im)
assert t.complex_value_double(MyComplex(1, 2)) == complex(1.0, 2.0)
assert t.complex_value_float(MyComplex(1, 2)) == complex(1.0, 2.0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(MyComplex(1, 2))
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(MyComplex(1, 2))
class MyComplexSubclass(complex): ...
assert t.complex_value_double(MyComplexSubclass(1, 2)) == complex(1.0, 2.0)
assert t.complex_value_float(MyComplexSubclass(1, 2)) == complex(1.0, 2.0)
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(MyComplexSubclass(1, 2))
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(MyComplexSubclass(1, 2))
try:
import numpy as np
assert t.complex_value_float(np.complex64(val_32)) == val_32
assert t.complex_value_float(np.complex64(val_64)) == val_32
assert t.complex_value_double(np.complex64(val_32)) == val_32
assert t.complex_value_double(np.complex64(val_64)) == val_32
assert t.complex_value_float(np.complex128(val_32)) == val_32
assert t.complex_value_float(np.complex128(val_64)) == val_32
assert t.complex_value_double(np.complex128(val_32)) == val_32
assert t.complex_value_double(np.complex128(val_64)) == val_64
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_double_nc(np.complex128(val_64))
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_value_float_nc(np.complex64(val_32))
except ImportError:
pass
def test69_complex_array():
val1_64 = 2.7 - 3.2j
val1_32 = 2.700000047683716 - 3.200000047683716j
val2_64 = 3.1415
val2_32 = 3.1414999961853027 + 0j
# test 64 bit casts
assert t.complex_array_double([val1_64, -1j, val2_64]) == [
val1_64,
-0 - 1j,
val2_64,
]
# test 32 bit casts
assert t.complex_array_float([val1_64, -1j, val2_64]) == [
val1_32,
(-0 - 1j),
val2_32,
]
with pytest.raises(TypeError, match="incompatible function arguments"):
t.complex_array_float([[]])
try:
import numpy as np
# test 64 bit casts
assert t.complex_array_double(np.array([val1_64, -1j, val2_64])) == [
val1_64,
-0 - 1j,
val2_64,
]
assert t.complex_array_double(
np.array([val1_64, -1j, val2_64], dtype=np.complex128)
) == [val1_64, -0 - 1j, val2_64]
assert t.complex_array_double(
np.array([val1_64, -1j, val2_64], dtype=np.complex64)
) == [val1_32, -0 - 1j, val2_32]
# test 32 bit casts
assert t.complex_array_float(np.array([val1_64, -1j, val2_64])) == [
val1_32,
(-0 - 1j),
val2_32,
]
assert t.complex_array_float(
np.array([val1_64, -1j, val2_64], dtype=np.complex128)
) == [val1_32, (-0 - 1j), val2_32]
assert t.complex_array_float(
np.array([val1_64, -1j, val2_64], dtype=np.complex64)
) == [val1_32, (-0 - 1j), val2_32]
except ImportError:
pass
def test70_vec_char():
assert isinstance(t.vector_str("123"), str)
assert isinstance(t.vector_str(["123", "345"]), list)
assert t.vector_optional_str(["abc", None]) == ["abc", None]
def test71_null_input():
with pytest.raises(TypeError):
t.vec_movable_in_value([None])
with pytest.raises(TypeError):
t.map_copyable_in_value({"a": None})
@skip_on_pypy # PyPy fails this test on Windows :-(
def test72_wstr():
assert t.pass_wstr('🎈') == '🎈'
def test73_bad_input_to_set():
with pytest.raises(TypeError):
t.set_in_value(None)
def test74_variant_implicit_conversions():
event = t.IDHavingEvent()
assert event.id is None
event.id = t.BasicID1(78)
assert type(event.id) is t.BasicID1
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl_bind_map.cpp | C++ | #include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include <nanobind/stl/bind_map.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/vector.h>
namespace nb = nanobind;
// testing for insertion of non-copyable class
// Move-only value type: copy construction/assignment are deleted, so any
// binding path that tries to copy a value of this type fails to compile.
// Used below to verify that bind_map can expose maps of non-copyable
// values when reference return-value semantics are requested.
class E_nc {
public:
explicit E_nc(int i) : value{i} {}
E_nc(const E_nc &) = delete;
E_nc &operator=(const E_nc &) = delete;
E_nc(E_nc &&) = default;
E_nc &operator=(E_nc &&) = default;
int value;
};
// Build a heap-allocated map {1: E_nc(10), 2: E_nc(20), ..., n: E_nc(10*n)}.
// Values are emplaced because move-only E_nc cannot be copied in.
// Returned by raw pointer; ownership is taken over by the caller
// (the Python binding layer, given how it is registered below).
template <class Map>
Map *times_ten(int n) {
auto *m = new Map();
for (int i = 1; i <= n; i++) {
m->emplace(int(i), E_nc(10 * i));
}
return m;
}
// Build a heap-allocated nested map where (*m)[i][j*10] == E_nc(100*j)
// for i, j in [1, n]. Exercises map-of-map bindings with a non-copyable
// value type; same raw-pointer ownership convention as times_ten().
template <class NestMap>
NestMap *times_hundred(int n) {
auto *m = new NestMap();
for (int i = 1; i <= n; i++) {
for (int j = 1; j <= n; j++) {
(*m)[i].emplace(int(j * 10), E_nc(100 * j));
}
}
return m;
}
NB_MODULE(test_stl_bind_map_ext, m) {
    // test_map_string_double
    nb::bind_map<std::map<std::string, double>>(m, "MapStringDouble");
    nb::bind_map<std::unordered_map<std::string, double>>(m, "UnorderedMapStringDouble");

    // test_map_string_double_const
    nb::bind_map<std::map<std::string, double const>>(m, "MapStringDoubleConst");
    nb::bind_map<std::unordered_map<std::string, double const>>(m,
                                                                "UnorderedMapStringDoubleConst");

    nb::class_<E_nc>(m, "ENC").def(nb::init<int>()).def_rw("value", &E_nc::value);

    // On Windows, NVCC has difficulties with the following code. My guess is that
    // decltype() in the iterator_value_access macro used in bind_map.h loses a
    // reference.
    //
    // Fix: the guard must exclude only the Windows+NVCC combination. It previously
    // read `defined(_WIN32) && !defined(__CUDACC__)`, which enabled these bindings
    // *only* on non-CUDA Windows builds and silently skipped them — and their
    // hasattr()-guarded Python tests — on every other platform.
#if !defined(_WIN32) || !defined(__CUDACC__)
    // By default, the bindings produce a __getitem__ that makes a copy, which
    // won't take this non-copyable type: (uncomment to verify build error)
    //nb::bind_map<std::map<int, E_nc>>(m, "MapENC");
    //nb::bind_map<std::unordered_map<int, E_nc>>(m, "UmapENC");

    // But we can request reference semantics instead (some care required, read
    // the documentation):
    nb::bind_map<std::map<int, E_nc>,
                 nb::rv_policy::reference_internal>(m, "MapENC");
    nb::bind_map<std::unordered_map<int, E_nc>,
                 nb::rv_policy::reference_internal>(m, "UmapENC");
    m.def("get_mnc", &times_ten<std::map<int, E_nc>>);
    m.def("get_umnc", &times_ten<std::unordered_map<int, E_nc>>);

    // pybind11 issue #1885: binding nested std::map<X, Container<E>>
    // with E non-copyable
    nb::bind_map<std::map<int, std::vector<E_nc>>,
                 nb::rv_policy::reference_internal>(m, "MapVecENC");
    m.def("get_nvnc", [](int n) {
        auto *m = new std::map<int, std::vector<E_nc>>();
        for (int i = 1; i <= n; i++) {
            for (int j = 1; j <= n; j++) {
                (*m)[i].emplace_back(j);
            }
        }
        return m;
    });

    nb::bind_map<std::map<int, std::map<int, E_nc>>,
                 nb::rv_policy::reference_internal>(m, "MapMapENC");
    m.def("get_nmnc", &times_hundred<std::map<int, std::map<int, E_nc>>>);
    nb::bind_map<std::unordered_map<int, std::unordered_map<int, E_nc>>,
                 nb::rv_policy::reference_internal>(m, "UmapUmapENC");
    m.def("get_numnc", &times_hundred<std::unordered_map<int, std::unordered_map<int, E_nc>>>);
#endif
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl_bind_map.py | Python | import pytest
import sys
import platform
import test_stl_bind_map_ext as t
def test_map_string_double(capfd):
mm = t.MapStringDouble()
mm["a"] = 1
mm["b"] = 2.5
assert list(mm) == ["a", "b"]
assert "b" in mm
assert 123 not in mm
assert mm["b"] == 2.5
assert "c" not in mm
with pytest.raises(KeyError):
mm["c"]
assert "c" not in mm
# Copy constructor
mm2 = t.MapStringDouble(mm)
assert list(mm2) == ["a", "b"]
assert "b" in mm2
assert "c" not in mm2
assert 123 not in mm2
assert mm2["b"] == 2.5
assert mm == mm2
assert not (mm != mm2)
del mm2["b"]
assert not (mm == mm2)
assert mm != mm2
assert len(mm2) == 1
mm2.clear()
assert len(mm2) == 0
assert repr(mm) == "test_stl_bind_map_ext.MapStringDouble({'a': 1.0, 'b': 2.5})"
with pytest.raises(TypeError):
mm2.update({"a" : "b"})
captured = capfd.readouterr().err.strip()
ref = "nanobind: implicit conversion from type 'dict' to type 'test_stl_bind_map_ext.MapStringDouble' failed!"
# Work around Pytest-related flakiness (https://github.com/pytest-dev/pytest/issues/10843)
if platform.system() == 'Windows':
assert captured == ref or captured == ''
else:
assert captured == ref
mm2.update({"a" : 2.5})
assert len(mm2) == 1
# Construction from an iterable
mm3 = t.MapStringDouble(
{ "a" : 1, "b" : 2.5 })
assert list(mm3) == ["a", "b"]
assert mm3["b"] == 2.5
# Check that keys, values, items are views, not merely iterable
keys = mm.keys()
values = mm.values()
items = mm.items()
assert list(keys) == ["a", "b"]
assert len(keys) == 2
assert "a" in keys
assert "c" not in keys
assert 123 not in keys
assert list(items) == [("a", 1), ("b", 2.5)]
assert len(items) == 2
assert ("b", 2.5) in items
assert "hello" not in items
assert ("b", 2.5, None) not in items
assert list(values) == [1, 2.5]
assert len(values) == 2
assert 1 in values
assert 2 not in values
# Check that views update when the map is updated
mm["c"] = -1
assert list(keys) == ["a", "b", "c"]
assert list(values) == [1, 2.5, -1]
assert list(items) == [("a", 1), ("b", 2.5), ("c", -1)]
um = t.UnorderedMapStringDouble()
um["ua"] = 1.1
um["ub"] = 2.6
assert sorted(list(um)) == ["ua", "ub"]
assert list(um.keys()) == list(um)
assert sorted(list(um.items())) == [("ua", 1.1), ("ub", 2.6)]
assert list(zip(um.keys(), um.values())) == list(um.items())
assert type(keys).__qualname__ == 'MapStringDouble.KeyView'
assert type(values).__qualname__ == 'MapStringDouble.ValueView'
assert type(items).__qualname__ == 'MapStringDouble.ItemView'
d = "dict"
assert t.MapStringDouble.__init__.__doc__ == \
"""__init__(self) -> None
__init__(self, arg: test_stl_bind_map_ext.MapStringDouble) -> None
__init__(self, arg: %s[str, float], /) -> None
Overloaded function.
1. ``__init__(self) -> None``
Default constructor
2. ``__init__(self, arg: test_stl_bind_map_ext.MapStringDouble) -> None``
Copy constructor
3. ``__init__(self, arg: %s[str, float], /) -> None``
Construct from a dictionary""" % (d, d)
def test_map_string_double_const():
mc = t.MapStringDoubleConst()
mc["a"] = 10
mc["b"] = 20.5
umc = t.UnorderedMapStringDoubleConst()
umc["a"] = 11
umc["b"] = 21.5
str(umc)
def test_maps_with_noncopyable_values():
if not hasattr(t, 'get_mnc'):
return
# std::map
mnc = t.get_mnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# std::unordered_map
mnc = t.get_umnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# nested std::map<std::vector>
nvnc = t.get_nvnc(5)
for i in range(1, 6):
for j in range(0, 5):
assert nvnc[i][j].value == j + 1
# Note: maps do not have .values()
for _, v in nvnc.items():
for i, j in enumerate(v, start=1):
assert j.value == i
# nested std::map<std::map>
nmnc = t.get_nmnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert nmnc[i][j].value == 10 * j
vsum = 0
for _, v_o in nmnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
# nested std::unordered_map<std::unordered_map>
numnc = t.get_numnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert numnc[i][j].value == 10 * j
vsum = 0
for _, v_o in numnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
def test_map_delitem():
mm = t.MapStringDouble()
mm["a"] = 1
mm["b"] = 2.5
assert list(mm) == ["a", "b"]
assert list(mm.items()) == [("a", 1), ("b", 2.5)]
del mm["a"]
assert list(mm) == ["b"]
assert list(mm.items()) == [("b", 2.5)]
um = t.UnorderedMapStringDouble()
um["ua"] = 1.1
um["ub"] = 2.6
assert sorted(list(um)) == ["ua", "ub"]
assert sorted(list(um.items())) == [("ua", 1.1), ("ub", 2.6)]
del um["ua"]
assert sorted(list(um)) == ["ub"]
assert sorted(list(um.items())) == [("ub", 2.6)]
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl_bind_vector.cpp | C++ | #include <nanobind/stl/bind_vector.h>
#include <nanobind/stl/shared_ptr.h>
namespace nb = nanobind;
NB_MODULE(test_stl_bind_vector_ext, m) {
nb::bind_vector<std::vector<unsigned int>>(m, "VectorInt");
nb::bind_vector<std::vector<bool>>(m, "VectorBool");
// Ensure that a repeated binding call is ignored
nb::bind_vector<std::vector<bool>>(m, "VectorBool");
struct El {
explicit El(int v) : a(v) {}
int a;
};
// test_vector_custom
nb::class_<El>(m, "El").def(nb::init<int>())
.def_rw("a", &El::a);
nb::bind_vector<std::vector<El>>(m, "VectorEl");
nb::bind_vector<std::vector<std::vector<El>>>(m, "VectorVectorEl");
// test_vector_shared_ptr
nb::bind_vector<std::vector<std::shared_ptr<El>>>(m, "VectorElShared");
struct E_nc {
explicit E_nc(int i) : value{i} {}
E_nc(const E_nc &) = delete;
E_nc &operator=(const E_nc &) = delete;
E_nc(E_nc &&) = default;
E_nc &operator=(E_nc &&) = default;
int value;
};
// test_noncopyable_containers
nb::class_<E_nc>(m, "ENC")
.def(nb::init<int>())
.def_rw("value", &E_nc::value);
// By default, the bindings produce a __getitem__ that makes a copy, which
// won't take this non-copyable type: (uncomment to verify build error)
//nb::bind_vector<std::vector<E_nc>>(m, "VectorENC");
// But we can request reference semantics instead (extreme care required,
// read the documentation):
nb::bind_vector<std::vector<E_nc>,
nb::rv_policy::reference_internal>(m, "VectorENC");
m.def("get_vnc", [](int n) {
std::vector<E_nc> result;
for (int i = 1; i <= n; i++)
result.emplace_back(i);
return result;
});
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stl_bind_vector.py | Python | import pytest
import platform
import test_stl_bind_vector_ext as t
def test01_vector_int(capfd):
    """End-to-end exercise of the std::vector<unsigned int> binding:
    construction, mutation, slicing, extend, search, pop, and the error
    message emitted on a failed implicit conversion (captured via capfd)."""
    v_int = t.VectorInt([0, 0])
    assert len(v_int) == 2
    assert bool(v_int) is True

    # test construction from a generator
    v_int1 = t.VectorInt(x for x in range(5))
    assert t.VectorInt(v_int1) == t.VectorInt([0, 1, 2, 3, 4])
    assert repr(v_int1) == "test_stl_bind_vector_ext.VectorInt([0, 1, 2, 3, 4])"

    v_int2 = t.VectorInt([0, 0])
    assert v_int == v_int2
    v_int2[1] = 2
    v_int2[1] -= 1
    assert v_int2[1] == 1
    assert v_int != v_int2

    # append/insert, including an insert at the one-past-the-end position
    v_int2.append(2)
    v_int2.insert(0, 1)
    v_int2.insert(0, 2)
    v_int2.insert(0, 3)
    v_int2.insert(6, 3)
    # inserting beyond one-past-the-end must raise
    with pytest.raises(IndexError):
        v_int2.insert(8, 4)

    v_int.append(99)
    # slice assignment from another bound vector
    v_int2[2:-2] = v_int
    assert v_int2 == t.VectorInt([3, 2, 0, 0, 99, 2, 3])
    del v_int2[1:3]
    assert v_int2 == t.VectorInt([3, 0, 99, 2, 3])
    del v_int2[0]
    assert v_int2 == t.VectorInt([0, 99, 2, 3])

    # extend from a bound vector and from a plain list
    v_int2.extend(t.VectorInt([4, 5]))
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5])
    v_int2.extend([6, 7])
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])

    # test error handling, and that the vector is unchanged
    with pytest.raises(TypeError):
        v_int2.extend([8, "a"])

    captured = capfd.readouterr().err.strip()
    ref = "nanobind: implicit conversion from type 'list' to type 'test_stl_bind_vector_ext.VectorInt' failed!"
    # Work around Pytest-related flakiness (https://github.com/pytest-dev/pytest/issues/10843)
    if platform.system() == 'Windows':
        assert captured == ref or captured == ''
    else:
        assert captured == ref
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])

    # test extending from a generator
    v_int2.extend(x for x in range(5))
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4])

    # Test count feature
    assert v_int2.count(2) == 2
    assert v_int2.count(5) == 1
    assert v_int2.count(8) == 0
    assert 2 in v_int2
    assert 5 in v_int2
    assert 8 not in v_int2

    # test negative indexing
    assert v_int2[-1] == 4

    # insert with negative index
    v_int2.insert(-1, 88)
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88, 4])

    # delete negative index
    del v_int2[-1]
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88])

    # pop from the end and from an explicit index
    assert v_int2.pop() == 88
    assert v_int2 == t.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3])
    assert v_int2.pop(1) == 99
    assert v_int2 == t.VectorInt([0, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3])

    v_int2.clear()
    assert len(v_int2) == 0
def test02_vector_bool():
    """Exercise the std::vector<bool> binding (bit-packed proxy storage)."""
    flags = t.VectorBool()
    expected = [i % 2 == 0 for i in range(9)]
    for value in expected:
        flags.append(value)
    for idx, want in enumerate(expected):
        assert flags[idx] == want
    # 5 even indices (0,2,4,6,8) and 4 odd ones
    assert flags.count(True) == 5
    assert flags.count(False) == 4
def test03_vector_custom():
    """Vectors of a user-defined element type, including nested vectors."""
    inner = t.VectorEl()
    for value in (1, 2):
        inner.append(t.El(value))
    assert len(inner) == 2
    assert inner[0].a == 1 and inner[1].a == 2

    outer = t.VectorVectorEl()
    outer.append(inner)
    retrieved = outer[0]
    assert len(retrieved) == 2
    assert retrieved[0].a == 1 and retrieved[1].a == 2
def test04_vector_slicing():
    """Slice reads and slice deletion must match Python list semantics."""
    reference = list(range(100))
    vec = t.VectorInt(reference)

    cases = [
        slice(1, 13, 4),
        slice(1, 14, 4),
        slice(10, 2000, 1),   # stop past the end
        slice(200, 10, 1),    # empty forward slice
        slice(200, 10, -1),   # reverse slice
        slice(200, 10, -3),
    ]

    # Reading a slice yields the same elements as the list
    for s in cases:
        assert reference[s] == vec[s]

    # Deleting a slice removes the same elements; operate on fresh copies
    for s in cases:
        ref_copy = list(reference)
        vec_copy = t.VectorInt(vec)
        del ref_copy[s]
        del vec_copy[s]
        assert ref_copy == list(vec_copy)
def test05_vector_non_shared():
    """Element access returns copies; mutating them leaves the vector alone."""
    vec = t.VectorEl()
    vec.append(t.El(1))
    vec.append(t.El(2))

    first, second = vec[0], vec[1]
    first.a = 100
    second.a = 200
    assert vec[0].a == 1
    assert vec[1].a == 2

    # Check that elements accessed through the iterator *cannot* be modified
    item = next(iter(vec))
    item.a = 5
    assert vec[0].a == 1
def test06_vector_shared():
    """shared_ptr elements are shared: mutations are visible in the vector."""
    vec = t.VectorElShared()
    vec.append(t.El(1))
    vec.append(t.El(2))

    first, second = vec[0], vec[1]
    first.a = 100
    second.a = 200
    assert vec[0].a == 100
    assert vec[1].a == 200

    # Check that elements accessed through the iterator *can* be modified
    item = next(iter(vec))
    item.a = 5
    assert vec[0].a == 5
def test07_vector_noncopyable():
    """Move-only elements are exposed with reference semantics."""
    vnc = t.get_vnc(5)

    # Values are 1..5, reachable both by index and by iteration
    for idx in range(5):
        assert vnc[idx].value == idx + 1
    for expected, element in enumerate(vnc, start=1):
        assert element.value == expected

    # Check that elements accessed through the iterator *can* be modified
    front = next(iter(vnc))
    front.value = 5
    assert vnc[0].value == 5
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_stubs.py | Python | import os
import pathlib
import difflib
import sys
import platform
import pytest
# Stub generation is only exercised on CPython >= 3.10; skip elsewhere
is_unsupported = platform.python_implementation() == 'PyPy' or sys.version_info < (3, 10)
skip_on_unsupported = pytest.mark.skipif(
    is_unsupported, reason="Stub generation is only tested on CPython >= 3.10.0")
def remove_platform_dependent(s):
'''Remove platform-dependent functions from the stubs'''
s2 = []
i = 0
while i < len(s):
v = s[i]
if v.strip().startswith('float16'):
i += 1
elif v.startswith('def ret_numpy_half()') or \
v.startswith('def test_slots()') or \
v.startswith('TypeAlias'):
i += 2
else:
s2.append(v)
i += 1
return s2
# Collect every stub reference file (*.pyi.ref) below this test directory;
# the parametrize ids drop the '.pyi.ref' suffix for readable test names.
ref_paths = list(pathlib.Path(__file__).parent.rglob('*.pyi.ref'))
ref_path_ids = [p.name[:-len('.pyi.ref')] for p in ref_paths]
assert len(ref_paths) > 0, "Stub reference files not found!"
@skip_on_unsupported
@pytest.mark.parametrize('p_ref', ref_paths, ids=ref_path_ids)
def test01_check_stub_refs(p_ref, request):
    """
    Check that generated stub files match reference input
    """
    # jax/tensorflow stubs are slow to generate; only compare them when
    # the slow-test option is enabled
    if not request.config.getoption('enable-slow-tests') and any(
        (x in p_ref.name for x in ['jax', 'tensorflow'])):
        pytest.skip("skipping because slow tests are not enabled")

    # The generated stub sits next to the reference, minus the '.ref' suffix
    p_in = p_ref.with_suffix('')

    with open(p_ref, 'r') as f:
        s_ref = f.read().split('\n')

    with open(p_in, 'r') as f:
        s_in = f.read().split('\n')

    # On Python < 3.13 the generator emits the typing_extensions fallback
    # for types.CapsuleType; patch the reference to match
    if "test_functions_ext" in p_in.name and sys.version_info < (3, 13):
        s_ref = [line.replace("types.CapsuleType", "typing_extensions.CapsuleType") for line in s_ref]
        s_ref.insert(5, "")
        s_ref.insert(6, "import typing_extensions")

    # Strip declarations whose presence depends on the platform
    s_in = remove_platform_dependent(s_in)
    s_ref = remove_platform_dependent(s_ref)

    diff = list(difflib.unified_diff(
        s_ref,
        s_in,
        fromfile=str(p_ref),
        tofile=str(p_in)
    ))

    if len(diff):
        # Print the full diff plus remediation guidance before failing
        for p in diff:
            print(p.rstrip(), file=sys.stderr)
        print(
            '\nWarning: generated stubs do not match their references. If you\n'
            'intentionally changed a test suite extension, it may be necessary\n'
            'to replace the .pyi.ref file with the generated .pyi file. But\n'
            'please double-check that the change makes sense.',
            file=sys.stderr
        )
        assert False
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_tensorflow.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/ndarray.h>
namespace nb = nanobind;
// Number of times the returned tensor's capsule deleter has run (leak check)
int destruct_count = 0;

NB_MODULE(test_tensorflow_ext, m) {
    m.def("destruct_count", []() { return destruct_count; });

    // Returns a 2x4 float32 TensorFlow tensor backed by heap memory; the
    // capsule deleter bumps destruct_count so tests can verify cleanup
    m.def("ret_tensorflow", []() {
        // Over-aligned backing store to exercise alignment handling
        struct alignas(256) Buf {
            float f[8];
        };
        Buf *buf = new Buf({ 1, 2, 3, 4, 5, 6, 7, 8 });
        size_t shape[2] = { 2, 4 };

        nb::capsule deleter(buf, [](void *p) noexcept {
            destruct_count++;
            delete (Buf *) p;
        });

        return nb::ndarray<nb::tensorflow, float, nb::shape<2, 4>>(buf->f,
                                                                   2,
                                                                   shape,
                                                                   deleter);
    });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_tensorflow.py | Python | import test_ndarray_ext as t
import test_tensorflow_ext as ttf
import pytest
import warnings
import importlib
from common import collect
# Import TensorFlow up front; if that fails, turn 'needs_tensorflow' into a
# skip marker so every test in this file is skipped instead of erroring.
try:
    import tensorflow as tf
    import tensorflow.config

    def needs_tensorflow(x):
        # TensorFlow is available: decorator passes the test through unchanged
        return x
except:
    # Deliberately broad: TF can fail to import or initialize in many ways
    needs_tensorflow = pytest.mark.skip(reason="TensorFlow is required")
@needs_tensorflow
def test01_constrain_order():
    """TensorFlow tensors must be reported as C (row-major) ordered."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            c = tf.zeros((3, 5))
        except:
            # TF imported but cannot create tensors (e.g. no usable device)
            pytest.skip('tensorflow is missing')
        assert t.check_order(c) == 'C'
@needs_tensorflow
def test02_implicit_conversion():
    """Implicit dtype/layout conversion is applied or rejected as declared."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            c = tf.zeros((3, 5))
        except:
            # TF imported but cannot create tensors (e.g. no usable device)
            pytest.skip('tensorflow is missing')

        # Accepted: dtype conversions, including non-contiguous slices
        t.implicit(tf.zeros((2, 2), dtype=tf.int32))
        t.implicit(tf.zeros((2, 2, 10), dtype=tf.float32)[:, :, 4])
        t.implicit(tf.zeros((2, 2, 10), dtype=tf.int32)[:, :, 4])
        t.implicit(tf.zeros((2, 2, 10), dtype=tf.bool)[:, :, 4])

        # Rejected when implicit conversion is disabled
        with pytest.raises(TypeError) as excinfo:
            t.noimplicit(tf.zeros((2, 2), dtype=tf.int32))

        with pytest.raises(TypeError) as excinfo:
            t.noimplicit(tf.zeros((2, 2), dtype=tf.bool))
@needs_tensorflow
def test03_return_tensorflow():
    """A C++-created tensor round-trips and its deleter runs exactly once."""
    collect()
    baseline = ttf.destruct_count()

    tensor = ttf.ret_tensorflow()
    assert tensor.get_shape().as_list() == [2, 4]
    reference = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=tf.float32)
    assert tf.math.reduce_all(tensor == reference)

    del tensor
    collect()
    assert ttf.destruct_count() - baseline == 1
@needs_tensorflow
def test04_check():
    """t.check() recognizes a TensorFlow tensor as a valid ndarray."""
    tensor = tf.zeros((1))
    assert t.check(tensor)
@needs_tensorflow
def test05_passthrough():
    """ndarray passthrough returns the identical Python object."""
    arr = ttf.ret_tensorflow()
    assert t.passthrough(arr) is arr

    arr = tf.constant([1, 2, 3])
    assert t.passthrough(arr) is arr

    # None is rejected unless the parameter is declared optional
    arr = None
    with pytest.raises(TypeError) as excinfo:
        result = t.passthrough(arr)
    assert 'incompatible function arguments' in str(excinfo.value)

    result = t.passthrough_arg_none(arr)
    assert arr is result
@needs_tensorflow
def test06_ro_array():
    """Immutable TF constants are accepted by read-only parameters."""
    if tf.__version__ < '2.19':
        pytest.skip('tensorflow version is too old')

    immutable = tf.constant([1, 2], dtype=tf.float32)  # immutable
    assert t.accept_ro(immutable) == 1

    # If the next line fails, delete it, update the version above,
    # and uncomment the three lines below.
    assert t.accept_rw(immutable) == 1
    # with pytest.raises(TypeError) as excinfo:
    #     t.accept_rw(immutable)
    # assert 'incompatible function arguments' in str(excinfo.value)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_thread.cpp | C++ | #include <nanobind/nanobind.h>
#include <nanobind/stl/shared_ptr.h>
#include <memory>
#include <vector>
namespace nb = nanobind;
using namespace nb::literals;
// Unsynchronized counter; any locking is supplied (or not) by the bindings
struct Counter {
    size_t value = 0;
    void inc() { value++; }
    // Drain 'o' into this counter, leaving 'o' at zero
    void merge(Counter &o) {
        value += o.value;
        o.value = 0;
    }
};
// Singleton returned by reference to exercise wrapper lookup under races
struct GlobalData {} global_data;

// Global mutex guarding inc_global() in the module bindings below
nb::ft_mutex mutex;
// Leaf object exposing a read-only int property ('prop2' in the bindings)
struct ClassWithProperty {
public:
    ClassWithProperty(int value): value_(value) {}
    int get_prop() const { return value_; }
private:
    int value_;
};
// Owns a ClassWithProperty and exposes it by const reference ('prop1'),
// enabling the chained c2.prop1.prop2 access pattern tested from Python
class ClassWithClassProperty {
public:
    ClassWithClassProperty(ClassWithProperty value) : value_(std::move(value)) {};
    const ClassWithProperty& get_prop() const { return value_; }
private:
    ClassWithProperty value_;
};
// Minimal int wrapper held via shared_ptr in the bindings below
struct AnInt {
    int value;
    AnInt(int v) : value(v) {}
};
// Extension module for free-threading tests: the same operations are bound
// with and without nanobind's argument-locking annotations.
NB_MODULE(test_thread_ext, m) {
    nb::class_<Counter>(m, "Counter")
        .def(nb::init<>())
        .def_ro("value", &Counter::value)
        // Same C++ method bound without and with nb::lock_self()
        .def("inc_unsafe", &Counter::inc)
        .def("inc_safe", &Counter::inc, nb::lock_self())
        .def("merge_unsafe", &Counter::merge)
        .def("merge_safe", &Counter::merge, nb::lock_self(), "o"_a.lock());

    m.def("return_self", [](Counter *c) -> Counter * { return c; });

    // Free function variant of locked increment
    m.def("inc_safe",
          [](Counter &c) { c.inc(); },
          nb::arg().lock());

    // Increment guarded by a global nb::ft_mutex instead of per-object locks
    m.def("inc_global",
          [](Counter &c) {
              nb::ft_lock_guard guard(mutex);
              c.inc();
          }, "counter");

    nb::class_<GlobalData>(m, "GlobalData")
        .def_static("get", [] { return &global_data; }, nb::rv_policy::reference);

    nb::class_<ClassWithProperty>(m, "ClassWithProperty")
        .def(nb::init<int>(), nb::arg("value"))
        .def_prop_ro("prop2", &ClassWithProperty::get_prop);

    nb::class_<ClassWithClassProperty>(m, "ClassWithClassProperty")
        .def(
            "__init__",
            [](ClassWithClassProperty* self, ClassWithProperty value) {
                new (self) ClassWithClassProperty(std::move(value));
            }, nb::arg("value"))
        .def_prop_ro("prop1", &ClassWithClassProperty::get_prop);

    nb::class_<AnInt>(m, "AnInt")
        .def(nb::init<int>())
        .def_rw("value", &AnInt::value);

    // Pool of shared ints, captured by value in the accessor lambda below
    std::vector<std::shared_ptr<AnInt>> shared_ints;
    for (int i = 0; i < 5; ++i) {
        shared_ints.push_back(std::make_shared<AnInt>(i));
    }
    m.def("fetch_shared_int", [shared_ints](int i) {
        return shared_ints.at(i);
    });
    m.def("consume_an_int", [](AnInt* p) { return p->value; });
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_thread.py | Python | import random
import threading
import test_thread_ext as t
from test_thread_ext import Counter, GlobalData, ClassWithProperty, ClassWithClassProperty
from common import parallelize
def test01_object_creation(n_threads=8):
    # This test hammers 'inst_c2p' from multiple threads, and
    # checks that the locking of internal data structures works
    n = 100000

    def worker():
        created = [None] * n
        for idx in range(n):
            counter = Counter()
            counter.inc_unsafe()
            created[idx] = counter
        for idx in range(n):
            assert t.return_self(created[idx]) is created[idx]
        return created

    results = parallelize(worker, n_threads=n_threads)
    assert len(results) == n_threads
    for per_thread in results:
        assert len(per_thread) == n
        for counter in per_thread:
            assert counter.value == 1
def test02_global_lock(n_threads=8):
    # Test that a global PyMutex protects the counter
    n = 100000
    counter = Counter()

    def worker():
        for _ in range(n):
            t.inc_global(counter)

    parallelize(worker, n_threads=n_threads)
    assert counter.value == n * n_threads
def test03_locked_method(n_threads=8):
    # Checks that nb::lock_self() protects an internal counter
    n = 100000
    counter = Counter()

    def worker():
        for _ in range(n):
            counter.inc_safe()

    parallelize(worker, n_threads=n_threads)
    assert counter.value == n * n_threads
def test04_locked_function(n_threads=8):
    # Checks that nb::arg().lock() protects an internal counter
    n = 100000
    counter = Counter()

    def worker():
        for _ in range(n):
            t.inc_safe(counter)

    parallelize(worker, n_threads=n_threads)
    assert counter.value == n * n_threads
def test05_locked_twoargs(n_threads=8):
    # Check two-argument locking
    n = 100000
    shared = Counter()

    def worker():
        local = Counter()
        for i in range(n):
            local.inc_unsafe()
            # Alternate merge direction so both argument positions lock
            if i % 2 == 0:
                local.merge_safe(shared)
            else:
                shared.merge_safe(local)

    parallelize(worker, n_threads=n_threads)
    assert shared.value == n * n_threads
def test06_global_wrapper(n_threads=8):
    # Check wrapper lookup racing with wrapper deallocation
    n = 10000

    def worker():
        for _ in range(n):
            # Four back-to-back lookups per iteration, as in the original
            for _ in range(4):
                GlobalData.get()

    parallelize(worker, n_threads=n_threads)
def test07_access_attributes(n_threads=8):
    # Chained property access (c2.prop1.prop2) from many threads
    n = 1000
    inner = ClassWithProperty(123)
    outer = ClassWithClassProperty(inner)

    def worker():
        for _ in range(n):
            _ = outer.prop1.prop2

    parallelize(worker, n_threads=n_threads)
def test08_shared_ptr_threaded_access(n_threads=8):
    # Test for keep_alive racing with other fields.
    def worker(barrier):
        choice = random.randint(0, 4)
        # Line up all threads so the accesses happen concurrently
        barrier.wait()
        handle = t.fetch_shared_int(choice)
        assert t.consume_an_int(handle) == choice

    for _ in range(100):
        barrier = threading.Barrier(n_threads)
        parallelize(lambda: worker(barrier), n_threads=n_threads)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_typing.cpp | C++ | #include <nanobind/typing.h>
#include <nanobind/operators.h>
namespace nb = nanobind;
using namespace nb::literals;
// C++-side stand-in for a class that only exists in a Python stub module
class NestedClass {};

namespace nanobind {
namespace detail {

// Custom caster mapping NestedClass to py_stub_test.AClass.NestedClass
template <>
struct type_caster<NestedClass> {
    NB_TYPE_CASTER(NestedClass, const_name("py_stub_test.AClass.NestedClass"))

    // Accepts any Python object (conversion unconditionally succeeds)
    bool from_python(handle /*src*/, uint8_t /*flags*/, cleanup_list*) noexcept {
        return true;
    }

    // Instantiates the Python-side class and hands ownership to the caller
    static handle from_cpp(const NestedClass&, rv_policy, cleanup_list*) noexcept {
        nanobind::object py_class =
            nanobind::module_::import_("py_stub_test").attr("AClass").attr("NestedClass");
        return py_class().release();
    }
};

}
}
// Declarations of various advanced constructions to test the stub generator
NB_MODULE(test_typing_ext, m) {
    // A submodule which won't be included, but we must be able to import it
    // and resolve declarations from there
    nb::module_ sm = m.def_submodule("submodule");

    // Some elements of the submodule
    struct F { };
    sm.def("f", [] { });
    nb::class_<F>(sm, "F");

    // Submodule aliases
    m.attr("f2") = sm.attr("f");
    m.attr("F") = sm.attr("F");

    // A top-level type and a function
    struct Foo {
        bool operator<(Foo) const { return false; }
        bool operator>(Foo) const { return false; }
        bool operator<=(Foo) const { return false; }
        bool operator>=(Foo) const { return false; }
    };

    nb::class_<Foo>(m, "Foo")
        .def(nb::self < nb::self)
        .def(nb::self > nb::self)
        .def(nb::self <= nb::self)
        .def(nb::self >= nb::self);

    m.def("f", []{});

    // Return type resolves through the custom NestedClass type caster above
    m.def("makeNestedClass", [] { return NestedClass(); });

    // typing.Tuple[typing.Any, ...] exposed as a module attribute
    m.attr("AnyTuple") = nb::typing().attr("Tuple")[nb::make_tuple(nb::any_type(), nb::ellipsis())];

    // Aliases to local functions and types
    m.attr("FooAlias") = m.attr("Foo");
    m.attr("f_alias") = m.attr("f");
    nb::type<Foo>().attr("lt_alias") = nb::type<Foo>().attr("__lt__");

    // Custom signature generation for classes and methods
    struct CustomSignature { int value; };
    nb::class_<CustomSignature>(
        m, "CustomSignature", nb::sig("@my_decorator\nclass CustomSignature(collections.abc.Iterable[int])"))
        .def("method", []{}, nb::sig("@my_decorator\ndef method(self: typing.Self)"))
        .def("method_with_default", [](CustomSignature&,bool){}, "value"_a.sig("bool(True)") = true)
        .def_rw("value", &CustomSignature::value,
                nb::for_getter(nb::sig("def value(self, /) -> typing.Optional[int]")),
                nb::for_setter(nb::sig("def value(self, value: typing.Optional[int], /) -> None")),
                nb::for_getter("docstring for getter"),
                nb::for_setter("docstring for setter"));

    // Stubification of simple constants
    nb::dict d;
    nb::list l;
    l.append(123);
    d["a"] = nb::make_tuple("b", l);
    m.attr("pytree") = d;

    // A generic type
    struct Wrapper {
        nb::object value;
        bool operator==(const Wrapper &w) const { return value.is(w.value); }
    };

    // 1. Instantiate a placeholder ("type variable") used below
    m.attr("T") = nb::type_var("T", "contravariant"_a = true);

    // 2. Create a generic type, and indicate in generated stubs
    //    that it derives from Generic[T]
    auto wrapper = nb::class_<Wrapper>(m, "Wrapper", nb::is_generic(),
                                       nb::sig("class Wrapper(typing.Generic[T])"))
        .def(nb::init<nb::object>(),
             nb::sig("def __init__(self, arg: T, /) -> None"))
        .def("get", [](Wrapper &w) { return w.value; },
             nb::sig("def get(self, /) -> T"))
        .def(nb::self == nb::self, nb::sig("def __eq__(self, arg: object, /) -> bool"));

#if !defined(PYPY_VERSION) // https://github.com/pypy/pypy/issues/4914
    // Subclass specialized to Wrapper[Foo]
    struct WrapperFoo : Wrapper { };
    nb::class_<WrapperFoo>(m, "WrapperFoo", wrapper[nb::type<Foo>()]);
#endif

    // Type parameter syntax for Python 3.12+
    struct WrapperTypeParam { };
    nb::class_<WrapperTypeParam>(m, "WrapperTypeParam",
                                 nb::sig("class WrapperTypeParam[T]"));
    m.def("list_front", [](nb::list l) { return l[0]; },
          nb::sig("def list_front[T](arg: list[T], /) -> T"));

    // Type variables with constraints and a bound.
    m.attr("T2") = nb::type_var("T2", "bound"_a = nb::type<Foo>());
    m.attr("T3") = nb::type_var("T3", *nb::make_tuple(nb::type<Foo>(), nb::type<Wrapper>()));

    // Some statements that will be modified by the pattern file
    m.def("remove_me", []{});
    m.def("tweak_me", [](nb::object o) { return o; }, "prior docstring\nremains preserved");
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
tests/test_typing.py | Python | import test_typing_ext as t
import sys
import pytest
import platform
def test01_parameterize_generic():
    """Parameterizing the bound generic Wrapper type works as expected."""
    # NOTE(review): the parentheses are misplaced on the next line -- str()
    # wraps the *comparison*, so this asserts a non-empty string ("True" or
    # "False") and can never fail. The intent was presumably something like
    # str(type(t.Wrapper[int])) == '...'; confirm the expected repr against
    # the extension before fixing.
    assert str(type(t.Wrapper[int]) == 't.Wrapper[int]')
    if platform.python_implementation() != 'PyPy':
        # CPython-only: check the generic subclass relationship and that the
        # parameterized base is recorded in __orig_bases__
        assert issubclass(t.WrapperFoo, t.Wrapper)
        assert t.WrapperFoo.__bases__ == (t.Wrapper,)
        assert t.WrapperFoo.__orig_bases__ == (t.Wrapper[t.Foo],)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nanobind_example/__init__.py | Python | from .nanobind_example_ext import add, __doc__
| wjakob/nanobind_example | 119 | A nanobind example project | CMake | wjakob | Wenzel Jakob | EPFL |
src/nanobind_example_ext.cpp | C++ | #include <nanobind/nanobind.h>
namespace nb = nanobind;
using namespace nb::literals;
// Minimal "hello world" extension: one documented function with named args
NB_MODULE(nanobind_example_ext, m) {
    // Module docstring shown by help(nanobind_example_ext)
    m.doc() = "This is a \"hello world\" example with nanobind";
    // add(a, b): integer addition with keyword-capable arguments
    m.def("add", [](int a, int b) { return a + b; }, "a"_a, "b"_a);
}
| wjakob/nanobind_example | 119 | A nanobind example project | CMake | wjakob | Wenzel Jakob | EPFL |
tests/test_basic.py | Python | import nanobind_example as m
def test_add():
    """The extension's add() returns the integer sum of its arguments."""
    result = m.add(1, 2)
    assert result == 3
| wjakob/nanobind_example | 119 | A nanobind example project | CMake | wjakob | Wenzel Jakob | EPFL |
parallel_stable_sort.h | C/C++ Header | /*
Copyright (C) 2014 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <iterator>
#include <algorithm>
#include <tbb/task.h>
#include "pss_common.h"
namespace pss {
namespace internal {
// Task that merges runs [xs,xe) and [ys,ye) into zs, optionally destroying
// the (moved-from) input elements afterwards.
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename RandomAccessIterator3, typename Compare>
class merge_task: public tbb::task {
    tbb::task* execute();
    RandomAccessIterator1 xs, xe;   // first input run
    RandomAccessIterator2 ys, ye;   // second input run
    RandomAccessIterator3 zs;       // start of the output range
    Compare comp;
    bool destroy;                   // destroy input elements after merging?
public:
    merge_task( RandomAccessIterator1 xs_, RandomAccessIterator1 xe_, RandomAccessIterator2 ys_, RandomAccessIterator2 ye_, RandomAccessIterator3 zs_, bool destroy_, Compare comp_ ) :
        xs(xs_), xe(xe_), ys(ys_), ye(ye_), zs(zs_), comp(comp_), destroy(destroy_)
    {}
};
// Merge serially below the cutoff; otherwise split and run both halves in
// parallel via TBB continuation-passing.
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename RandomAccessIterator3, typename Compare>
tbb::task* merge_task<RandomAccessIterator1,RandomAccessIterator2,RandomAccessIterator3,Compare>::execute() {
    const size_t MERGE_CUT_OFF = 2000;
    auto n = (xe-xs) + (ye-ys);
    if( (size_t) n <= MERGE_CUT_OFF ) {
        // Small enough: merge serially, then destroy inputs if requested
        serial_move_merge( xs, xe, ys, ye, zs, comp );
        if( destroy ) {
            serial_destroy(xs,xe);
            serial_destroy(ys,ye);
        }
        return NULL;
    } else {
        // Split at the median of the longer run and binary-search the
        // matching split point in the other run, so both halves of the
        // merge are independent.
        RandomAccessIterator1 xm;
        RandomAccessIterator2 ym;
        if( xe-xs < ye-ys ) {
            ym = ys+(ye-ys)/2;
            xm = std::upper_bound(xs,xe,*ym,comp);
        } else {
            xm = xs+(xe-xs)/2;
            ym = std::lower_bound(ys,ye,*xm,comp);
        }
        RandomAccessIterator3 zm = zs + ((xm-xs) + (ym-ys));
        // Spawn the right half as a sibling; recycle this task as the left
        // half's continuation to avoid a fresh allocation.
        tbb::task* right = new( allocate_additional_child_of(*parent()) ) merge_task( xm, xe, ym, ye, zm, destroy, comp );
        spawn(*right);
        recycle_as_continuation();
        xe = xm;
        ye = ym;
        return this;
    }
}
// Task that stably sorts [xs,xe), using [zs,zs+(xe-xs)) as scratch space.
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Compare>
class stable_sort_task: public tbb::task {
    tbb::task* execute();
    RandomAccessIterator1 xs, xe;  // range to sort
    RandomAccessIterator2 zs;      // matching scratch buffer
    Compare comp;
    // Where the result must land (see stable_sort_base_case):
    //   2 - in [xs,xe), scratch left raw; 1 - in [xs,xe); 0 - in scratch
    signed char inplace;
public:
    stable_sort_task(RandomAccessIterator1 xs_, RandomAccessIterator1 xe_, RandomAccessIterator2 zs_, int inplace_, Compare comp_ ) :
        xs(xs_), xe(xe_), zs(zs_), comp(comp_), inplace(inplace_)
    {}
};
// Parallel merge sort: sort both halves concurrently, then merge them in a
// continuation task; recursion levels alternate between the two buffers.
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Compare>
tbb::task* stable_sort_task<RandomAccessIterator1, RandomAccessIterator2, Compare>::execute() {
    const size_t SORT_CUT_OFF = 500;
    if ((size_t) (xe - xs) <= SORT_CUT_OFF) {
        stable_sort_base_case(xs, xe, zs, inplace, comp);
        return NULL;
    } else {
        RandomAccessIterator1 xm = xs + (xe - xs) / 2;
        RandomAccessIterator2 zm = zs + (xm - xs);
        RandomAccessIterator2 ze = zs + (xe - xs);
        // The merge runs as the continuation once both halves finish. If the
        // result belongs in [xs,xe), merge scratch->input (destroying the
        // scratch elements when inplace==2); otherwise merge input->scratch.
        task* m;
        if (inplace)
            m = new (allocate_continuation()) merge_task<RandomAccessIterator2,RandomAccessIterator2,RandomAccessIterator1,Compare>(zs, zm, zm, ze, xs, inplace==2, comp);
        else
            m = new (allocate_continuation()) merge_task<RandomAccessIterator1,RandomAccessIterator1,RandomAccessIterator2,Compare>(xs, xm, xm, xe, zs, false, comp);
        m->set_ref_count(2);
        // Spawn the right half as a child; recycle this task as the left half.
        task* right = new(m->allocate_child()) stable_sort_task(xm,xe,zm,!inplace, comp);
        spawn(*right);
        recycle_as_child_of(*m);
        xe=xm;
        inplace=!inplace;   // each half's result lands in the other buffer
        return this;
    }
}
} // namespace internal
template<typename RandomAccessIterator, typename Compare>
void parallel_stable_sort( RandomAccessIterator xs, RandomAccessIterator xe, Compare comp ) {
typedef typename std::iterator_traits<RandomAccessIterator>::value_type T;
if( internal::raw_buffer z = internal::raw_buffer( sizeof(T)*(xe-xs) ) ) {
using tbb::task;
typedef typename std::iterator_traits<RandomAccessIterator>::value_type T;
internal::raw_buffer buf( sizeof(T)*(xe-xs) );
task::spawn_root_and_wait(*new( task::allocate_root() ) internal::stable_sort_task<RandomAccessIterator,T*,Compare>( xs, xe, (T*)buf.get(), 2, comp ));
} else
// Not enough memory available - fall back on serial sort
std::stable_sort( xs, xe, comp );
}
} // namespace pss
| wjakob/pss | 15 | Parallel Stable Sort | C++ | wjakob | Wenzel Jakob | EPFL |
pss_common.h | C/C++ Header | /*
Copyright (C) 2014 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <utility>
#include <iterator>
namespace pss {
namespace internal {
//! Destroy sequence [zs,ze) by invoking each element's destructor.
/** Elements are destroyed in reverse order (last to first). */
template<class RandomAccessIterator>
void serial_destroy( RandomAccessIterator zs, RandomAccessIterator ze ) {
    typedef typename std::iterator_traits<RandomAccessIterator>::value_type T;
    while( zs!=ze ) {
        --ze;
        (*ze).~T();
    }
}
//! Merge sequences [xs,xe) and [ys,ye) to output sequence [zs,(xe-xs)+(ye-ys)), using std::move
/** Ties favor the x-run (comp(*ys,*xs) strictly), preserving stability. */
template<class RandomAccessIterator1, class RandomAccessIterator2, class RandomAccessIterator3, class Compare>
void serial_move_merge( RandomAccessIterator1 xs, RandomAccessIterator1 xe, RandomAccessIterator2 ys, RandomAccessIterator2 ye, RandomAccessIterator3 zs, Compare comp ) {
    if( xs!=xe ) {
        if( ys!=ye ) {
            for(;;) {
                if( comp(*ys,*xs) ) {
                    *zs = std::move(*ys);
                    ++zs;
                    // y-run exhausted: fall through and drain the x-run below
                    if( ++ys==ye ) { break; }
                } else {
                    *zs = std::move(*xs);
                    ++zs;
                    // x-run exhausted: drain the remaining y-run directly
                    if( ++xs==xe ) { goto movey; }
                }
            }
        }
        // Repoint [ys,ye) at the leftover x-run so a single std::move call
        // below handles both exhaustion cases.
        ys = xs;
        ye = xe;
    }
movey:
    std::move( ys, ye, zs );
}
//! Serial base case: sort [xs,xe) and prepare the scratch buffer at zs.
/** 'inplace' encodes where the sorted result must end up:
      2 - result stays in [xs,xe); the scratch buffer is left untouched
      1 - result stays in [xs,xe); default-construct the scratch elements
      0 - result is moved into the scratch buffer [zs,zs+(xe-xs)) */
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Compare>
void stable_sort_base_case( RandomAccessIterator1 xs, RandomAccessIterator1 xe, RandomAccessIterator2 zs, int inplace, Compare comp) {
    std::stable_sort( xs, xe, comp );
    if( inplace!=2 ) {
        RandomAccessIterator2 ze = zs + (xe-xs);
        typedef typename std::iterator_traits<RandomAccessIterator2>::value_type T;
        if( inplace )
            // Initialize the temporary buffer
            for( ; zs<ze; ++zs )
                new(&*zs) T;
        else
            // Initialize the temporary buffer and move keys to it.
            for( ; zs<ze; ++xs, ++zs )
                new(&*zs) T(std::move(*xs));
    }
}
//! Raw memory buffer with automatic cleanup.
/** NOTE(review): the implicitly-generated copy constructor performs a
    shallow pointer copy, so copying a raw_buffer would double-delete.
    Confirm it is only ever constructed in place (as in the 'if' declaration
    inside parallel_stable_sort) before reusing this class elsewhere. */
class raw_buffer {
    void* ptr;  // NULL when the allocation failed
public:
    //! Try to obtain buffer of given size.
    raw_buffer( size_t bytes ) : ptr( operator new(bytes,std::nothrow) ) {}
    //! True if buffer was successfully obtained, zero otherwise.
    operator bool() const {return ptr;}
    //! Return pointer to buffer, or NULL if buffer could not be obtained.
    void* get() const {return ptr;}
    //! Destroy buffer
    ~raw_buffer() {operator delete(ptr);}
};
} // namespace internal
} // namespace pss
| wjakob/pss | 15 | Parallel Stable Sort | C++ | wjakob | Wenzel Jakob | EPFL |
rigol/__init__.py | Python | """
Rigol instrument control and measurement tools.
This package provides tools for controlling Rigol oscilloscopes and
performing measurements.
Submodules:
rigol.bode - Bode plot measurement tool
rigol.scope - Generic oscilloscope control interface
rigol.util - Utility functions for analysis and visualization
"""
__version__ = "1.0.0"
| wjakob/rigol | 2 | Bode plot utility for Rigol DHO900 series oscilloscopes | Python | wjakob | Wenzel Jakob | EPFL |
rigol/bode.py | Python | """
BodePlot: Unified class for Bode plot measurements with Rigol DHO924S.
Combines scope configuration, measurement sweeps, and data management.
Supports flexible callbacks for progress reporting and live plotting.
Usage:
bode = BodePlot(
input_ch=1,
output_ch=2,
afg_amplitude=10.0,
max_voltage=12.0,
probe_factor=10,
headroom=1.2,
)
# Run sweep with custom callback
freqs = np.logspace(np.log10(1e3), np.log10(10e6), 30)
bode.sweep(freqs, on_measurement=lambda **kw: print(f"{kw['freq_hz']:.0f} Hz"))
# Save results
bode.save_csv('results.csv')
# Cleanup
bode.close()
"""
from typing import Tuple, Optional, Callable
import argparse
import numpy as np
from .scope import Scope
from .util import (
format_frequency,
parse_si,
generate_frequencies_per_decade,
rc_lowpass,
rc_highpass,
rlc_lowpass,
rlc_highpass,
lc_bandpass,
lc_bandstop,
create_print_callback,
LivePlotUpdater,
)
class BodePlot:
"""
Unified Bode plot measurement class.
Manages scope configuration, frequency sweeps, amplitude/phase extraction,
and data export. Uses callbacks for flexible progress reporting and plotting.
"""
def __init__(
self,
ip: Optional[str] = None,
input_ch: int = 1,
output_ch: int = 2,
desired_cycles: int = 10,
mem_depth: str = '10K',
max_voltage: float = 10.0,
probe_factor: int = 10,
afg_amplitude: float = 10.0,
headroom: float = 1.2,
terminated: bool = False,
debug_level: int = 0,
quiet: bool = False,
):
"""
Initialize BodePlot measurement system.
Parameters
----------
ip : str, optional
Oscilloscope IP address. If None, auto-discovers on network.
input_ch : int
Input channel number (1-4)
output_ch : int
Output channel number (1-4)
desired_cycles : int
Number of signal cycles to display on screen
mem_depth : str
Memory depth ('1K', '10K', '100K', '1M', '10M', '25M', '50M')
max_voltage : float
Channel voltage range (includes headroom)
probe_factor : int
Probe attenuation factor (e.g., 10 for 10x probe)
afg_amplitude : float
AFG output signal amplitude (peak voltage, not peak-to-peak)
headroom : float
Headroom factor for dynamic range (must be >= 1.0)
terminated : bool
If True, compensate for 50Ω termination on channels
debug_level : int
Debug verbosity level (0=off, 1=print commands, 2=print and check errors)
quiet : bool
If True, suppress informational messages
"""
if headroom < 1.0:
raise ValueError(f"headroom must be >= 1.0, got {headroom}")
self.desired_cycles = desired_cycles
self.headroom = headroom
self.quiet = quiet
# Create scope instance
self.scope = Scope(ip=ip, debug_level=debug_level)
self.scope.stop()
# Store channel references
self.input_ch = self.scope.channels[input_ch - 1]
self.output_ch = self.scope.channels[output_ch - 1]
# Configure AFG (set termination first so amplitude is compensated correctly)
self.scope.afg.termination = 50.0 if terminated else float('inf')
self.scope.afg.function = 'SINusoid'
self.scope.afg.frequency = 1000.0 # Default 1kHz (will be set to actual value during sweep)
self.scope.afg.amplitude = afg_amplitude
self.scope.afg.offset = 0.0
self.scope.afg.enabled = True
# Configure input channel
self.input_ch.enabled = True
self.input_ch.coupling = 'DC'
self.input_ch.probe = probe_factor
self.input_ch.bwlimit = '20M'
self.input_ch.offset = 0.0
self.input_ch.vmax = max_voltage
# Configure output channel
self.output_ch.enabled = True
self.output_ch.coupling = 'DC'
self.output_ch.probe = probe_factor
self.output_ch.bwlimit = '20M'
self.output_ch.offset = 0.0
self.output_ch.vmax = max_voltage
# Disable other channels
for i in range(4):
if i != input_ch - 1 and i != output_ch - 1:
self.scope.channels[i].enabled = False
# Configure acquisition
self.scope.mem_depth = mem_depth
self.scope.acq_type = 'NORMal'
self.scope.acq_averages = 1
self.scope.tmode = 'MAIN'
# Configure trigger
self.scope.trigger.mode = 'EDGE'
self.scope.trigger.source = self.input_ch
self.scope.trigger.level = 0.0
self.scope.trigger.slope = 'POSitive'
self.scope.trigger.sweep = 'SINGle'
# Store last sweep results
self.freqs: Optional[np.ndarray] = None
self.gain_db: Optional[np.ndarray] = None
self.phase_deg: Optional[np.ndarray] = None
if not quiet:
print(f"BodePlot initialized: CH{input_ch} (input), CH{output_ch} (output)")
def sweep(
self,
freqs: np.ndarray,
on_measurement: Optional[Callable] = None,
max_scale_adjustments: int = 6,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Perform frequency sweep and measure gain/phase at each point.
Parameters
----------
freqs : np.ndarray
Array of frequencies to measure (in Hz)
on_measurement : callable, optional
Callback function called after each measurement with kwargs:
- freq_hz: float - measured frequency
- gain_db: float - gain in dB
- phase_deg: float - phase in degrees
- gain_linear: float - linear gain
- index: int - current measurement index (0-based)
- total: int - total number of frequencies
max_scale_adjustments : int
Maximum iterations for adaptive voltage scaling
Returns
-------
freqs : np.ndarray
Frequency points measured
gain_db : np.ndarray
Gain in dB at each frequency
phase_deg : np.ndarray
Phase in degrees at each frequency (unwrapped)
"""
gains = np.zeros_like(freqs)
phases = np.zeros_like(freqs)
for i, freq_hz in enumerate(freqs):
# Configure frequency and timebase
target_span = self.desired_cycles / freq_hz
safety_factor = 1.2
visible_span = target_span * safety_factor
tscale = visible_span / 10.0
self.scope.afg.frequency = freq_hz
self.scope.tdiv = tscale
self.scope.single()
v_out, dt = self.output_ch.waveform(
dt=True,
adaptive=True,
headroom=self.headroom,
max_iterations=max_scale_adjustments
)
# Read input from the same final acquisition (non-adaptive)
v_in = self.input_ch.waveform()
# Extract amplitude and phase using sine fitting
A_in, phi_in = self._fit_sine_at_freq(v_in, freq_hz, dt)
A_out, phi_out = self._fit_sine_at_freq(v_out, freq_hz, dt)
# Store gain and phase
gain = A_out / A_in
phase = phi_out - phi_in # radians
gains[i] = gain
phases[i] = phase
# Invoke callback if provided
if on_measurement:
gain_db_current = 20 * np.log10(gain)
phase_deg_current = self._wrap_phase(np.degrees(phase))
gain_linear = gain
on_measurement(
freq_hz=freq_hz,
gain_db=gain_db_current,
phase_deg=phase_deg_current,
gain_linear=gain_linear,
index=i,
total=len(freqs),
)
# Convert to dB and unwrap phase
self.freqs = freqs
self.gain_db = 20 * np.log10(gains)
self.phase_deg = np.degrees(np.unwrap(phases))
return self.freqs, self.gain_db, self.phase_deg
def save_csv(self, filename: str) -> None:
"""
Save last sweep results to CSV file.
Parameters
----------
filename : str
Output CSV filename
Raises
------
RuntimeError
If no sweep has been performed yet
"""
if self.freqs is None or self.gain_db is None or self.phase_deg is None:
raise RuntimeError("No sweep results to save. Run sweep() first.")
import csv
with open(filename, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['Frequency (Hz)', 'Gain (dB)', 'Phase (deg)'])
for freq, gain, phase in zip(self.freqs, self.gain_db, self.phase_deg):
writer.writerow([freq, gain, phase])
    def close(self) -> None:
        """Disable AFG and return scope to RUN mode.

        Call this once measurements are finished so the instrument stops
        driving the circuit and resumes normal interactive acquisition.
        """
        # Order matters: stop the stimulus before releasing the scope back
        # into free-running mode.
        self.scope.afg.enabled = False
        self.scope.run()
def _fit_sine_at_freq(
self,
sig: np.ndarray,
freq: float,
dt: float
) -> Tuple[float, float]:
"""
Fit a sine wave at known frequency to the signal using least squares.
Parameters
----------
sig : np.ndarray
1D array of voltage samples
freq : float
Excitation frequency in Hz
dt : float
Sample interval in seconds
Returns
-------
amplitude : float
Fitted sine wave amplitude
phase : float
Fitted sine wave phase in radians (relative to sine)
"""
N = len(sig)
t = np.arange(N) * dt
w = 2 * np.pi * freq
# Fit: sig ≈ a*cos(wt) + b*sin(wt)
A = np.column_stack([np.cos(w * t), np.sin(w * t)])
coeffs, *_ = np.linalg.lstsq(A, sig, rcond=None)
a, b = coeffs
amplitude = np.hypot(a, b) # sqrt(a^2 + b^2)
phase = np.arctan2(a, b) # radians
return amplitude, phase
@staticmethod
def _wrap_phase(deg: float) -> float:
"""
Wrap phase to (-180, 180] for display friendliness (avoids 0..360 jumps).
"""
return ((deg + 180.0) % 360.0) - 180.0
def parse_resistance(value: str) -> float:
    """
    Parse a resistance value with optional SI prefix and 'Ohm'/'Ω' suffix.

    Accepted forms include '3.3k', '0.5', '10M', '100m', '50Ohm'.

    Raises
    ------
    ValueError
        If the string cannot be interpreted as a resistance.
    """
    import re
    text = value.strip()
    # Drop a trailing unit marker ('Ohm' or 'Ω', any case) before parsing.
    text = re.sub(r'(Ohm|Ω)$', '', text, flags=re.IGNORECASE).strip()
    # A bare number needs no prefix handling.
    try:
        return float(text)
    except ValueError:
        pass
    # Otherwise expect <number><optional single SI prefix>.
    m = re.match(r'^([\d.]+)\s*([pnuµmkKMGT]?)$', text)
    if not m:
        raise ValueError(f"Invalid resistance format: {text}")
    scale_by_prefix = {
        '': 1,
        'p': 1e-12,
        'n': 1e-9,
        'u': 1e-6,
        'µ': 1e-6,
        'm': 1e-3,
        'k': 1e3,
        'K': 1e3,
        'M': 1e6,
        'G': 1e9,
        'T': 1e12,
    }
    return float(m.group(1)) * scale_by_prefix.get(m.group(2), 1)
def main():
    """Command-line entry point: parse arguments, configure the scope,
    run the frequency sweep, and display/save the results."""
    parser = argparse.ArgumentParser(
        description='''Bode plot measurement tool for Rigol DHO924S oscilloscope.

Measures frequency response (gain and phase) of circuits by sweeping the
internal AFG across a frequency range and comparing input/output channels.
Features automatic dynamic range adjustment.''',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Basic Examples:
  %(prog)s
      Run with all defaults: 5V amplitude, 1KHz-10MHz, 30 steps, live plot
  %(prog)s -a 2.5V -s 100Hz -e 1MHz --steps 50
      Custom amplitude and frequency range with 50 measurement points
  %(prog)s -a 10mV --start 1KHz --end 100KHz
      Low amplitude measurement (e.g., for sensitive circuits)
  %(prog)s --dump data.csv --headless
      Save to CSV without displaying plots (shows progress)
  %(prog)s -a 2V --terminated
      Use when 50Ω terminators are physically connected to channels (compensates for voltage divider)

Reference Curves:
  %(prog)s --rc-lowpass 10KHz
      Overlay theoretical 1st-order RC lowpass response at 10KHz cutoff
  %(prog)s --rlc-lowpass 100KHz:3.6
      Overlay theoretical 2nd-order RLC lowpass at 100KHz with 3.6Ω resistance
  %(prog)s --rc-highpass 100Hz --rc-lowpass 10KHz
      Compare measurement against RC highpass and lowpass models
  %(prog)s -a 2.5V --rlc-lowpass 10KHz --rlc-lowpass 10KHz:5
      Compare ideal LC (R=0) vs RLC with 5Ω resistance at same frequency

Advanced:
  %(prog)s -i 2 -o 4 -A 192.168.1.100
      Use CH2 input, CH4 output, specify oscilloscope IP explicitly
  %(prog)s --headroom 1.5 --mem-depth 100K
      Increase headroom to 50%% and use 100K sample memory
  %(prog)s --dump result.csv -H
      Automated measurement: save CSV, no GUI

Notes:
  - Amplitude is specified as peak voltage (not peak-to-peak)
  - Channel voltage ranges include headroom (default 20%%) to prevent clipping
  - Dynamic range adjustment automatically optimizes output channel scale
  - Use --quiet to suppress all output except errors
'''
    )
    # Connection and channel options
    parser.add_argument('-A', '--addr', default=None,
                        help='Oscilloscope IP address (auto-discovers if not specified)')
    parser.add_argument('-i', '--input', type=int, default=1, choices=[1, 2, 3, 4],
                        help='Input channel (default: 1)')
    parser.add_argument('-o', '--output', type=int, default=2, choices=[1, 2, 3, 4],
                        help='Output channel (default: 2)')
    # Measurement parameters
    parser.add_argument('-a', '--amplitude', type=str, default='5V',
                        help='AFG signal amplitude (peak voltage, not peak-to-peak), e.g., 10mV, 5V. Channel voltage ranges are automatically set to amplitude × headroom factor (default: 5V)')
    parser.add_argument('-s', '--start', type=str, default='1KHz',
                        help='Start frequency for sweep (e.g., 100Hz, 1KHz, 10KHz) (default: 1KHz)')
    parser.add_argument('-e', '--end', type=str, default='10MHz',
                        help='End frequency for sweep (e.g., 100KHz, 1MHz, 10MHz) (default: 10MHz)')
    # Frequency steps (mutually exclusive)
    steps_group = parser.add_mutually_exclusive_group()
    steps_group.add_argument('--steps', type=int, default=30,
                             help='Number of logarithmically-spaced frequency points to measure (default: 30)')
    steps_group.add_argument('--steps-per-decade', type=int, metavar='N',
                             help='Number of frequency points per decade (logarithmically spaced), matching Rigol behavior. Example: 10 steps per decade from 1kHz to 10MHz gives 10×4=40 points plus endpoints')
    # Output options
    parser.add_argument('-d', '--dump', type=str, metavar='FILE',
                        help='Save measurement data to CSV file (frequency, gain, phase)')
    parser.add_argument('-H', '--headless', action='store_true',
                        help='Run without displaying plots (shows progress on stderr, useful for automation)')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Suppress all output except errors (implies headless)')
    # Reference circuit options
    parser.add_argument('--rc-lowpass', type=str, action='append', metavar='FREQ',
                        help='Overlay theoretical 1st-order RC lowpass response at cutoff frequency (e.g., 10KHz). Can be specified multiple times')
    parser.add_argument('--rc-highpass', type=str, action='append', metavar='FREQ',
                        help='Overlay theoretical 1st-order RC highpass response at cutoff frequency (e.g., 100Hz). Can be specified multiple times')
    parser.add_argument('--rlc-lowpass', type=str, action='append', metavar='FREQ[:R]',
                        help='Overlay theoretical 2nd-order RLC lowpass response at resonant frequency. Format: FREQ or FREQ:RESISTANCE (e.g., 10KHz, 100KHz:3.6). RESISTANCE in Ohms accounts for inductor ESR. Can be specified multiple times')
    parser.add_argument('--rlc-highpass', type=str, action='append', metavar='FREQ[:R]',
                        help='Overlay theoretical 2nd-order RLC highpass response at resonant frequency. Format: FREQ or FREQ:RESISTANCE (e.g., 100Hz, 10KHz:3.6). RESISTANCE in Ohms accounts for inductor ESR. Can be specified multiple times')
    parser.add_argument('--lc-bandpass', type=str, action='append', metavar='L:C:R_ESR:R_SRC',
                        help='Overlay theoretical LC bandpass (parallel LC with voltage divider). Format: L:C:R_ESR:R_SOURCE (e.g., 1mH:10nF:0.5:3.3k). L is inductance, C is capacitance, R_ESR is inductor ESR in Ohms, R_SOURCE is source/protection resistor in Ohms. All parameters required. Can be specified multiple times')
    parser.add_argument('--lc-bandstop', type=str, action='append', metavar='L:C:R_ESR:R_SRC',
                        help='Overlay theoretical LC bandstop/notch (series LC shunt to ground). Format: L:C:R_ESR:R_SOURCE (e.g., 1mH:10nF:0.5:3.3k). L is inductance, C is capacitance, R_ESR is inductor ESR in Ohms, R_SOURCE is source/protection resistor in Ohms. All parameters required. Can be specified multiple times')
    # Advanced options
    parser.add_argument('--mem-depth', type=str, default='10K',
                        help='Oscilloscope memory depth (e.g., 10K, 100K, 1M, 10M). Higher values capture more cycles (default: 10K)')
    parser.add_argument('--probe-factor', type=int, default=10,
                        help='Probe attenuation factor (1 for 1:1, 10 for 10:1, etc.) (default: 10)')
    parser.add_argument('--cycles', type=int, default=10,
                        help='Target number of waveform cycles to display on screen timebase (default: 10)')
    parser.add_argument('--headroom', type=float, default=1.2,
                        help='Safety margin above signal voltage to prevent clipping. 1.2 = 20%% headroom. Must be >= 1.0 (default: 1.2)')
    parser.add_argument('--terminated', action='store_true',
                        help='Specify if 50Ω terminators are physically connected to oscilloscope channels. Termination creates a voltage divider that halves signal amplitude; this flag adjusts channel sensitivity to compensate')
    parser.add_argument('--debug', type=int, default=0, choices=[0, 1, 2],
                        help='Debug level: 0=off, 1=print SCPI commands, 2=print commands and check errors after each')
    args = parser.parse_args()
    # Validate that input and output are different
    if args.input == args.output:
        parser.error("Input and output channels must be different")
    # Validate headroom
    if args.headroom < 1.0:
        parser.error("Headroom must be >= 1.0")
    # Parse voltage and frequencies
    try:
        voltage_v = parse_si(args.amplitude, unit='V')
        start_hz = parse_si(args.start, unit='Hz')
        end_hz = parse_si(args.end, unit='Hz')
    except ValueError as e:
        parser.error(str(e))
    if start_hz >= end_hz:
        parser.error("Start frequency must be less than end frequency")
    # Parse reference frequencies
    rc_lowpass_freqs = []
    if args.rc_lowpass:
        for lp in args.rc_lowpass:
            try:
                rc_lowpass_freqs.append(parse_si(lp, unit='Hz'))
            except ValueError as e:
                parser.error(f"Invalid RC lowpass frequency: {e}")
    rc_highpass_freqs = []
    if args.rc_highpass:
        for hp in args.rc_highpass:
            try:
                rc_highpass_freqs.append(parse_si(hp, unit='Hz'))
            except ValueError as e:
                parser.error(f"Invalid RC highpass frequency: {e}")
    # Parse RLC filters with optional resistance (format: FREQ or FREQ:R)
    rlc_lowpass_params = []
    if args.rlc_lowpass:
        for lp in args.rlc_lowpass:
            try:
                if ':' in lp:
                    freq_str, r_str = lp.split(':', 1)
                    freq = parse_si(freq_str, unit='Hz')
                    r = float(r_str)
                else:
                    freq = parse_si(lp, unit='Hz')
                    r = 0.0
                rlc_lowpass_params.append((freq, r))
            except ValueError as e:
                parser.error(f"Invalid RLC lowpass parameter '{lp}': {e}")
    rlc_highpass_params = []
    if args.rlc_highpass:
        for hp in args.rlc_highpass:
            try:
                if ':' in hp:
                    freq_str, r_str = hp.split(':', 1)
                    freq = parse_si(freq_str, unit='Hz')
                    r = float(r_str)
                else:
                    freq = parse_si(hp, unit='Hz')
                    r = 0.0
                rlc_highpass_params.append((freq, r))
            except ValueError as e:
                parser.error(f"Invalid RLC highpass parameter '{hp}': {e}")
    # Parse LC bandpass filters (format: L:C:R_ESR:R_SOURCE)
    lc_bandpass_params = []
    if args.lc_bandpass:
        for bp in args.lc_bandpass:
            try:
                parts = bp.split(':')
                if len(parts) != 4:
                    raise ValueError("Format must be L:C:R_ESR:R_SOURCE (e.g., 1mH:10nF:0.5:3.3k)")
                # Parse with SI units
                L = parse_si(parts[0], unit='H')
                C = parse_si(parts[1], unit='F')
                r_esr = parse_resistance(parts[2])
                r_source = parse_resistance(parts[3])
                lc_bandpass_params.append((L, C, r_esr, r_source))
            except ValueError as e:
                parser.error(f"Invalid LC bandpass parameter '{bp}': {e}")
    # Parse LC bandstop filters (format: L:C:R_ESR:R_SOURCE)
    lc_bandstop_params = []
    if args.lc_bandstop:
        for bs in args.lc_bandstop:
            try:
                parts = bs.split(':')
                if len(parts) != 4:
                    raise ValueError("Format must be L:C:R_ESR:R_SOURCE (e.g., 1mH:10nF:0.5:3.3k)")
                # Parse with SI units
                L = parse_si(parts[0], unit='H')
                C = parse_si(parts[1], unit='F')
                r_esr = parse_resistance(parts[2])
                r_source = parse_resistance(parts[3])
                lc_bandstop_params.append((L, C, r_esr, r_source))
            except ValueError as e:
                parser.error(f"Invalid LC bandstop parameter '{bs}': {e}")
    # Build extra plot functions if requested
    extra = {}
    if rc_lowpass_freqs:
        for fc in rc_lowpass_freqs:
            label = f"RC lowpass ({format_frequency(fc)})"
            extra[label] = rc_lowpass(fc)
    if rc_highpass_freqs:
        for fc in rc_highpass_freqs:
            label = f"RC highpass ({format_frequency(fc)})"
            extra[label] = rc_highpass(fc)
    if rlc_lowpass_params:
        for fc, r in rlc_lowpass_params:
            if r > 0:
                label = f"RLC lowpass ({format_frequency(fc)}, R={r:.1f}Ω)"
            else:
                label = f"RLC lowpass ({format_frequency(fc)})"
            extra[label] = rlc_lowpass(fc, r)
    if rlc_highpass_params:
        for fc, r in rlc_highpass_params:
            if r > 0:
                label = f"RLC highpass ({format_frequency(fc)}, R={r:.1f}Ω)"
            else:
                label = f"RLC highpass ({format_frequency(fc)})"
            extra[label] = rlc_highpass(fc, r)
    if lc_bandpass_params:
        for L, C, r_esr, r_source in lc_bandpass_params:
            # Calculate resonant frequency for the label
            fc = 1.0 / (2 * np.pi * np.sqrt(L * C))
            # Build label showing component values
            r_esr_str = f"{r_esr:.1f}Ω" if r_esr < 1000 else f"{r_esr/1000:.1f}kΩ"
            r_src_str = f"{r_source:.1f}Ω" if r_source < 1000 else f"{r_source/1000:.1f}kΩ"
            label = f"LC bandpass (L={L*1e3:.2f}mH, C={C*1e9:.1f}nF, R_esr={r_esr_str}, R_src={r_src_str}, f₀={format_frequency(fc)})"
            extra[label] = lc_bandpass(L, C, r_esr, r_source)
    if lc_bandstop_params:
        for L, C, r_esr, r_source in lc_bandstop_params:
            # Calculate resonant frequency for the label
            fc = 1.0 / (2 * np.pi * np.sqrt(L * C))
            r_esr_str = f"{r_esr:.1f}Ω" if r_esr < 1000 else f"{r_esr/1000:.1f}kΩ"
            r_src_str = f"{r_source:.1f}Ω" if r_source < 1000 else f"{r_source/1000:.1f}kΩ"
            label = f"LC bandstop (L={L*1e3:.2f}mH, C={C*1e9:.1f}nF, R_esr={r_esr_str}, R_src={r_src_str}, f₀={format_frequency(fc)})"
            extra[label] = lc_bandstop(L, C, r_esr, r_source)
    # Pass None if no extra plots requested
    extra = extra if extra else None
    afg_amplitude = voltage_v
    input_max_voltage = voltage_v * args.headroom
    mem_depth_str = args.mem_depth
    if args.steps_per_decade:
        freqs = generate_frequencies_per_decade(start_hz, end_hz, args.steps_per_decade)
        if not args.quiet:
            print(f"Using {args.steps_per_decade} steps per decade: {len(freqs)} total frequency points")
    else:
        freqs = np.logspace(np.log10(start_hz), np.log10(end_hz), args.steps)
    if not args.quiet:
        # args.addr is None when auto-discovery is requested; avoid printing
        # the misleading "Connecting to oscilloscope at None...".
        if args.addr:
            print(f"Connecting to oscilloscope at {args.addr}...")
        else:
            print("Connecting to oscilloscope (auto-discovering address)...")
        print(f"Measurement range: {start_hz/1e3:.1f} kHz to {end_hz/1e6:.1f} MHz")
        termination_str = " (50Ω terminated)" if args.terminated else ""
        print(f"Signal amplitude: {voltage_v:.3f} V (peak), Headroom: {args.headroom}x{termination_str}")
        print(f"AFG amplitude: {afg_amplitude:.3f} V, Channel range: ±{input_max_voltage:.3f} V")
        print(f"Channels: Input=CH{args.input}, Output=CH{args.output}")
    bode = BodePlot(
        ip=args.addr,
        input_ch=args.input,
        output_ch=args.output,
        desired_cycles=args.cycles,
        mem_depth=mem_depth_str,
        max_voltage=input_max_voltage,
        probe_factor=args.probe_factor,
        afg_amplitude=afg_amplitude,
        headroom=args.headroom,
        terminated=args.terminated,
        debug_level=args.debug,
        quiet=args.quiet,
    )
    plotter = None
    try:
        if args.headless:
            if not args.quiet:
                print("Running sweep in headless mode...")
                print(f"{'Frequency':>13} {'Gain':>21} {'Phase':>8}")
                print(f"{'-'*13} {'-'*21} {'-'*8}")
            callback = create_print_callback(quiet=args.quiet)
        else:
            if not args.quiet:
                print("Running sweep with live plotting...")
                print(f"{'Frequency':>13} {'Gain':>21} {'Phase':>8}")
                print(f"{'-'*13} {'-'*21} {'-'*8}")
            plotter = LivePlotUpdater(freqs, extra=extra)
            print_cb = create_print_callback(quiet=args.quiet)
            # Chain the console progress callback with the live plot update.
            def callback(**kwargs):
                if print_cb:
                    print_cb(**kwargs)
                plotter.update(**kwargs)
        freqs, gain_db, phase_deg = bode.sweep(freqs, on_measurement=callback)
        if args.dump:
            bode.save_csv(args.dump)
            if not args.quiet:
                print(f"Data saved to {args.dump}")
        if not args.quiet:
            print("Measurement complete!")
    finally:
        # Always release the instrument, even if the sweep failed.
        bode.close()
        if not args.quiet:
            print("Disconnected from oscilloscope.")
    if plotter is not None:
        plotter.show()
if __name__ == "__main__":
    # Allow `python -m rigol.bode` as a convenience alias for the CLI
    # (and the entry point when the file is executed directly).
    main()
"""
Generic scope abstraction for SCPI-controlled oscilloscopes.
This module provides a property-based API for controlling oscilloscope parameters
with batched command execution for efficiency.
Properties are automatically generated from parameter tables for efficiency and maintainability.
"""
from enum import Enum
from typing import Optional, Any, Dict, Tuple, overload, Literal, TYPE_CHECKING
import numpy as np
import pyvisa
import sys
import time
import re
from .util import parse_si
class Type(Enum):
    """Parameter type enum for SCPI value formatting.

    Each entry in the parameter tables below is tagged with one of these
    types; the type controls how setter values are normalized (SI-unit
    string parsing) and how values are rendered into SCPI commands.
    """
    UNITLESS = 1   # bare number (counts, percentages, ratios)
    VOLTAGE = 2    # volts; setters accept SI strings like '100mV'
    STRING = 3     # enumerated keyword from a fixed list
    FREQUENCY = 4  # hertz; setters accept SI strings like '1kHz'
    BOOLEAN = 5    # rendered as ON/OFF on the wire
    TIME = 6       # seconds; setters accept SI strings like '1ms'
# Parameter table structure: (name, type, scpi_template, valid_values, priority)
# Priority is used to sort commands before sending. Lower = earlier. None = no reordering.

# Scope-wide (timebase and acquisition) parameters.
SCOPE_PARAMS = [
    ('tdiv', Type.TIME, 'TIMebase:MAIN:SCALe', None, None),
    ('toffset', Type.TIME, 'TIMebase:MAIN:OFFSet', None, None),
    ('tmode', Type.STRING, 'TIMebase:MODE', ['MAIN', 'XY', 'ROLL'], None),
    ('mem_depth', Type.STRING, 'ACQuire:MDEPth', ['1K', '10K', '100K', '1M', '10M', '25M', '50M'], None),
    ('acq_type', Type.STRING, 'ACQuire:TYPE', ['NORMal', 'PEAK', 'AVERages', 'ULTRa'], None),
    ('acq_averages', Type.UNITLESS, 'ACQuire:AVERages', None, None),
]

# Per-channel parameters; '{ch}' is substituted with the channel number (1-4).
CHANNEL_PARAMS = [
    # Priority enforces order: enabled -> probe -> vdiv -> offset (each affects the next's valid range)
    ('enabled', Type.BOOLEAN, 'CHANnel{ch}:DISPlay', None, 0),
    ('probe', Type.UNITLESS, 'CHANnel{ch}:PROBe', None, 1),
    ('vdiv', Type.VOLTAGE, 'CHANnel{ch}:SCALe', None, 2),
    ('offset', Type.VOLTAGE, 'CHANnel{ch}:OFFSet', None, 3),
    ('position', Type.VOLTAGE, 'CHANnel{ch}:POSition', None, None),
    ('coupling', Type.STRING, 'CHANnel{ch}:COUPling', ['DC', 'AC', 'GND'], None),
    ('bwlimit', Type.STRING, 'CHANnel{ch}:BWLimit', ['20M', 'OFF'], None),
    ('invert', Type.BOOLEAN, 'CHANnel{ch}:INVert', None, None),
]

# Built-in arbitrary function generator parameters.
# Names with a leading underscore are raw values wrapped by higher-level properties.
AFG_PARAMS = [
    ('enabled', Type.BOOLEAN, 'SOURce:OUTPut:STATe', None, None),
    ('function', Type.STRING, 'SOURce:FUNCtion', ['SINusoid', 'SQUare', 'RAMP', 'PULSe', 'DC', 'NOISe', 'ARB'], None),
    ('_amplitude_raw', Type.VOLTAGE, 'SOURce:VOLTage:AMPLitude', None, None),
    ('frequency', Type.FREQUENCY, 'SOURce:FREQuency', None, None),
    ('_offset_raw', Type.VOLTAGE, 'SOURce:VOLTage:OFFSet', None, None),
    ('phase', Type.UNITLESS, 'SOURce:PHASe', None, None),
    ('duty', Type.UNITLESS, 'SOURce:FUNCtion:SQUare:DUTY', None, None),
    ('symmetry', Type.UNITLESS, 'SOURce:FUNCtion:RAMP:SYMMetry', None, None),
]

# Trigger subsystem parameters (edge trigger settings plus global mode/sweep).
TRIGGER_PARAMS = [
    ('mode', Type.STRING, 'TRIGger:MODE', ['EDGE', 'PULSe', 'RUNT', 'WIND', 'NEDG', 'SLOPe', 'VIDeo', 'PATTern', 'DELay', 'TIMeout', 'DURation', 'SHOLd', 'RS232', 'IIC', 'SPI'], None),
    ('_source_raw', Type.STRING, 'TRIGger:EDGE:SOURce', ['CHAN1', 'CHAN2', 'CHAN3', 'CHAN4', 'AC'], None),
    ('level', Type.VOLTAGE, 'TRIGger:EDGE:LEVel', None, None),
    ('slope', Type.STRING, 'TRIGger:EDGE:SLOPe', ['POSitive', 'NEGative', 'EITHer'], None),
    ('sweep', Type.STRING, 'TRIGger:SWEep', ['AUTO', 'NORMal', 'SINGle'], None),
    ('nreject', Type.BOOLEAN, 'TRIGger:NREJect', None, None),
]
# Build a SCPI command -> priority lookup from the parameter tables above.
# Only entries with an explicit priority take part in commit() reordering;
# per-channel templates are expanded once for each of the four channels.
_SCPI_PRIORITY = {}
for _name, _type, _tmpl, _valid, _prio in (
    SCOPE_PARAMS + CHANNEL_PARAMS + AFG_PARAMS + TRIGGER_PARAMS
):
    if _prio is None:
        continue
    if '{ch}' in _tmpl:
        _expanded = [_tmpl.format(ch=_ch) for _ch in range(1, 5)]
    else:
        _expanded = [_tmpl]
    for _cmd in _expanded:
        _SCPI_PRIORITY[_cmd] = _prio
# Helper function for SI unit parsing - used by property setters
def _normalize_value(value: Any, ptype: Type) -> Any:
    """Convert SI-unit strings to numbers according to the parameter type.

    Non-string values pass through untouched. Strings are parsed with the
    unit implied by *ptype* (V, Hz, or s); other types return the string
    unchanged.
    """
    if not isinstance(value, str):
        return value
    if ptype == Type.VOLTAGE:
        return parse_si(value, unit='V')
    if ptype == Type.FREQUENCY:
        return parse_si(value, unit='Hz')
    if ptype == Type.TIME:
        return parse_si(value, unit='s')
    return value
class Scope:
"""
Generic SCPI-controlled oscilloscope interface.
All parameter changes are batched and automatically committed when needed
(e.g., when reading values, arming triggers, or running acquisitions).
Call ``commit()`` explicitly to flush pending changes immediately.
Example:
scope = Scope(ip='192.168.5.2', debug_level=0)
# Configure scope parameters
scope.mem_depth = 100000
scope.tdiv = 1e-3 # 1ms/div
# Configure channel
scope.channels[0].enabled = True
scope.channels[0].probe = 10
scope.channels[0].vmax = 10
scope.channels[0].coupling = 'DC'
# Configure trigger
scope.trigger.mode = 'EDGE'
scope.trigger.source = scope.channels[0]
scope.trigger.level = 0.0
# Configure AFG
scope.afg.enabled = True
scope.afg.amplitude = '5V'
scope.afg.frequency = '1kHz'
# Arm single-shot trigger (automatically commits all pending changes)
scope.single()
# Wait for trigger
scope.wait_trigger()
# Read waveform (automatically commits before reading)
waveform = scope.channels[0].waveform()
# Or use adaptive capture
waveform = scope.channels[0].waveform(adaptive=True, headroom=1.2)
"""
if TYPE_CHECKING:
@property
def tdiv(self) -> float:
"""Time per division in seconds (horizontal scale). Accepts SI strings like '1ms'."""
...
@tdiv.setter
def tdiv(self, value: float | str) -> None: ...
@property
def toffset(self) -> float:
"""Horizontal time offset in seconds. Accepts SI strings like '100us'."""
...
@toffset.setter
def toffset(self, value: float | str) -> None: ...
@property
def tmode(self) -> Literal['MAIN', 'XY', 'ROLL']:
"""Timebase mode: MAIN (normal), XY, or ROLL."""
...
@tmode.setter
def tmode(self, value: Literal['MAIN', 'XY', 'ROLL']) -> None: ...
@property
def mem_depth(self) -> Literal['1K', '10K', '100K', '1M', '10M', '25M', '50M']:
"""Acquisition memory depth (number of samples)."""
...
@mem_depth.setter
def mem_depth(self, value: Literal['1K', '10K', '100K', '1M', '10M', '25M', '50M']) -> None: ...
@property
def acq_type(self) -> Literal['NORMal', 'PEAK', 'AVERages', 'ULTRa']:
"""Acquisition type: NORMal, PEAK detect, AVERages, or ULTRa."""
...
@acq_type.setter
def acq_type(self, value: Literal['NORMal', 'PEAK', 'AVERages', 'ULTRa']) -> None: ...
@property
def acq_averages(self) -> int:
"""Number of acquisitions to average (when acq_type is AVERages)."""
...
@acq_averages.setter
def acq_averages(self, value: int) -> None: ...
def __init__(self, ip: Optional[str] = None, debug_level: int = 0):
"""
Initialize scope connection.
Args:
ip: IP address of the oscilloscope. If None, auto-discovers the first
available scope on the network (requires 'zeroconf' package).
debug_level: Debug verbosity level:
0 = no debug output
1 = print SCPI commands to stderr
2 = print SCPI commands to stderr and check device status after each command
"""
self.debug_level = debug_level
# Queue stores pending changes: {scpi_command: (value, type)}
self._queue: Dict[str, Tuple[Any, Type]] = {}
# Cache stores committed values: {scpi_command: (value, type)}
self._cache: Dict[str, Tuple[Any, Type]] = {}
# Connect to scope
rm = pyvisa.ResourceManager()
if ip is None:
# Auto-discover scope on network
try:
resources = rm.list_resources()
except Exception:
resources = ()
tcpip_resources = [r for r in resources if r.startswith('TCPIP')]
if not tcpip_resources:
raise RuntimeError(
"No oscilloscope found on network. "
"For auto-discovery, install the 'zeroconf' package: pip install zeroconf. "
"Alternatively, specify the IP address explicitly: Scope(ip='192.168.x.x')"
)
discovered = tcpip_resources[0]
if self.debug_level >= 1:
print(f"< Auto-discovered: {discovered}", file=sys.stderr)
# Extract host from something like TCPIP0::192.168.0.188::INSTR
import re
m = re.match(r"TCPIP\d*::([^:]+)::", discovered)
if not m:
raise RuntimeError(f"Unexpected VISA resource format: {discovered}")
host = m.group(1)
resource = f"TCPIP0::{host}::5555::SOCKET"
if self.debug_level >= 1:
print(f"< Using VISA resource: {resource}", file=sys.stderr)
else:
resource = f"TCPIP0::{ip}::INSTR"
resource = f"TCPIP0::{ip}::5555::SOCKET"
# Set up line endings for SOCKET communication
self.inst = rm.open_resource(resource)
self.inst.read_termination = "\n"
self.inst.write_termination = "\n"
self.inst.timeout = 120_000
# Clear any existing errors in the queue before we start
if self.debug_level >= 1:
# Read and discard all errors until we get "0,No error"
while True:
try:
error = self.inst.query(":SYSTem:ERRor?").strip()
print(f"< Clearing old error: {error}", file=sys.stderr)
if error.startswith("0,"):
break
except:
break
# Create child objects (4 channels for DHO924S)
self.channels = [Channel(self, i) for i in range(4)]
self.afg = AFG(self)
self.trigger = Trigger(self)
# Waveform transfer settings are constant; set once
self._write(":WAVeform:MODE RAW")
self._write(":WAVeform:FORMat WORD")
def _write(self, cmd: str, check_errors: bool = True) -> None:
"""
Execute SCPI write command.
Args:
cmd: SCPI command to write
check_errors: If True (default), check for errors when debug_level >= 2
"""
if not self.debug_level:
self.inst.write(cmd)
else:
print(f"> {cmd}", file=sys.stderr)
self.inst.write(cmd)
if self.debug_level >= 2 and check_errors:
self._check_error(cmd)
def _query(self, cmd: str) -> str:
"""Execute SCPI query command."""
if not self.debug_level:
return self.inst.query(cmd)
else:
print(f"> {cmd}", file=sys.stderr)
result = self.inst.query(cmd)
print(f"< {result.strip()}", file=sys.stderr)
if self.debug_level >= 2:
self._check_error(cmd)
return result
def _check_error(self, last_cmd: str = "") -> None:
"""
Check for SCPI errors and raise exception if found.
Only called when debug_level >= 2 to help diagnose issues.
Args:
last_cmd: The command that was just executed (for error reporting)
"""
error_response = self.inst.query(":SYSTem:ERRor?").strip()
# Error format: "code,message" e.g. "0,No error" or "-113,Undefined header"
try:
code_str, message = error_response.split(',', 1)
code = int(code_str)
if code != 0:
cmd_info = f" after command: {last_cmd}" if last_cmd else ""
raise RuntimeError(f"SCPI Error {code}: {message}{cmd_info}")
except ValueError:
# If parsing fails, just print the raw error response
print(f" Error response: {error_response}", file=sys.stderr)
def commit(self, extra_cmd: Optional[str] = None) -> None:
"""
Flush all queued parameter changes to the oscilloscope.
Parameter changes made via property setters (e.g., ``scope.tdiv = 1e-3``)
are batched in an internal queue for efficiency. This method sends all
pending changes to the device. It is called automatically before operations
that require current settings (e.g., ``single()``, ``run()``, property reads),
but can also be called explicitly when immediate execution is needed.
When debug_level >= 2: sends each command sequentially and checks for errors after each.
Otherwise: batches all commands into a single SCPI command for efficiency.
Args:
extra_cmd: Optional SCPI command to append (e.g., ':SINGle').
Primarily used internally by methods like ``single()`` and ``run()``.
Example:
scope.tdiv = 1e-3
scope.channels[0].vdiv = 0.5
scope.commit() # Send both changes now
"""
if not self._queue and not extra_cmd:
return
# Sort queue by priority (for channels: enabled -> probe -> scale -> offset)
# Stable sort preserves insertion order for commands with equal priority (99)
sorted_items = sorted(self._queue.items(), key=lambda x: _SCPI_PRIORITY.get(x[0], 99))
if self.debug_level >= 2:
# Debug mode: send commands one-by-one to catch errors
for scpi_cmd, (value, ptype) in sorted_items:
value_str = ('ON' if value else 'OFF') if ptype == Type.BOOLEAN else str(value)
cmd = f":{scpi_cmd} {value_str}"
self._write(cmd)
if extra_cmd:
self._write(extra_cmd)
else:
# Normal mode: batch commands for efficiency
batched_cmd = ''
for scpi_cmd, (value, ptype) in sorted_items:
value_str = ('ON' if value else 'OFF') if ptype == Type.BOOLEAN else str(value)
batched_cmd += f';:{scpi_cmd} {value_str}'
self._cache.pop(scpi_cmd, None)
if extra_cmd:
batched_cmd += f';{extra_cmd}'
# Execute batched command (strip leading ';')
self._write(batched_cmd[1:])
# Move queued items to cache (they're now committed)
# Clear queue
self._queue.clear()
def _parse_value(self, value_str: str, ptype: Type) -> Any:
"""Parse a value from SCPI response according to its type."""
value_str = value_str.strip()
match ptype:
case Type.BOOLEAN:
val = value_str.upper()
if val in ('ON', '1'):
return True
elif val in ('OFF', '0'):
return False
else:
raise ValueError(f"Invalid boolean value: {value_str!r}")
case Type.VOLTAGE | Type.FREQUENCY | Type.TIME:
return float(value_str)
case Type.UNITLESS:
try:
return int(value_str) if '.' not in value_str else float(value_str)
except ValueError:
return value_str
case Type.STRING:
return value_str
def single(self) -> None:
"""Arm single-shot acquisition."""
self.commit(':SINGle')
def run(self) -> None:
"""Start continuous acquisition."""
self.commit(':RUN')
def stop(self) -> None:
"""Stop acquisition and freeze waveform buffer."""
self.commit(':STOP')
def force(self) -> None:
"""Force trigger event immediately (useful for testing)."""
self.commit(':TFORce')
def reset(self) -> None:
"""
Reset the oscilloscope to factory default settings.
This restores all operational settings (timebase, channels, trigger, etc.)
to their factory defaults. Calibration data is stored separately and
should not be affected by this command.
Note: Clears the parameter queue since reset invalidates pending changes.
"""
# Don't check errors after *RST because scope is resetting
self._write('*RST', check_errors=False)
self._query('*OPC?')
# Additional delay - some subsystems may not be fully ready even after *OPC?
time.sleep(0.5)
self._queue.clear()
self._cache.clear()
def wait_trigger(self, timeout: float = 5.0) -> None:
    """
    Wait for single-shot acquisition to complete.

    Polls ':TRIGger:STATus?' until the scope reports 'TD' (trigger detected)
    or 'STOP' (already stopped). Does not itself change the acquisition
    state; it only waits.

    Args:
        timeout: Maximum time to wait in seconds (default: 5.0)

    Raises:
        TimeoutError: If acquisition does not complete within timeout
    """
    # Use a monotonic clock so wall-clock adjustments (NTP, DST) cannot
    # stretch or shrink the timeout window.
    deadline = time.monotonic() + timeout
    while True:
        status = self._query(':TRIGger:STATus?').strip().upper()
        if status in ('TD', 'STOP'):  # Trigger detected or already stopped
            return
        if time.monotonic() > deadline:
            raise TimeoutError(
                f"Acquisition did not trigger within {timeout} s; last status={status!r}"
            )
        time.sleep(0.02)  # brief pause to avoid hammering the SCPI link
def clear_cache(self) -> None:
    """
    Clear the parameter cache.

    This forces subsequent property reads to query the device rather than
    returning cached values. Useful after external changes to scope settings
    (e.g. via the front panel) or for testing purposes. Pending queued
    writes are not affected.
    """
    self._cache.clear()
@property
def tmax(self) -> float:
    """Total time on screen (10 horizontal divisions). Accepts SI strings like '100ms'."""
    # The display spans 10 horizontal divisions, so tmax = tdiv * 10
    return self.tdiv * 10

@tmax.setter
def tmax(self, value: float | str) -> None:
    # Normalize SI strings (e.g. '100ms') to seconds before scaling
    value = _normalize_value(value, Type.TIME)
    self.tdiv = value / 10
class Channel:
    """
    Oscilloscope channel interface.

    Provides property-based access to channel parameters. All changes
    are queued in the parent Scope's queue.

    Properties are automatically generated from CHANNEL_PARAMS table.
    """
    # Typing-only stubs: the real properties are attached at runtime by
    # _generate_properties(); these exist so type checkers and IDEs see them.
    if TYPE_CHECKING:
        @property
        def vdiv(self) -> float:
            """Volts per division (vertical scale). Accepts SI strings like '100mV'."""
            ...
        @vdiv.setter
        def vdiv(self, value: float | str) -> None: ...

        @property
        def probe(self) -> int:
            """Probe attenuation ratio (1, 10, 100, etc.)."""
            ...
        @probe.setter
        def probe(self, value: int) -> None: ...

        @property
        def enabled(self) -> bool:
            """Whether the channel is displayed."""
            ...
        @enabled.setter
        def enabled(self, value: bool) -> None: ...

        @property
        def coupling(self) -> Literal['DC', 'AC', 'GND']:
            """Input coupling mode: DC, AC, or GND."""
            ...
        @coupling.setter
        def coupling(self, value: Literal['DC', 'AC', 'GND']) -> None: ...

        @property
        def bwlimit(self) -> Literal['20M', 'OFF']:
            """Bandwidth limit: 20M (20 MHz filter) or OFF."""
            ...
        @bwlimit.setter
        def bwlimit(self, value: Literal['20M', 'OFF']) -> None: ...

        @property
        def offset(self) -> float:
            """Vertical offset in volts. Accepts SI strings like '500mV'."""
            ...
        @offset.setter
        def offset(self, value: float | str) -> None: ...

        @property
        def position(self) -> float:
            """Vertical position (bias voltage) in volts. Accepts SI strings like '500mV'."""
            ...
        @position.setter
        def position(self, value: float | str) -> None: ...

        @property
        def invert(self) -> bool:
            """Whether the channel display is inverted."""
            ...
        @invert.setter
        def invert(self, value: bool) -> None: ...

    def __init__(self, scope: Scope, ch_num: int):
        """
        Initialize channel.

        Args:
            scope: Parent Scope instance
            ch_num: 0-based channel index (0-3 for 4-channel scope)
        """
        self._scope = scope
        # 0-based internally; SCPI commands use 1-based channel numbers
        self._ch_num = ch_num

    @property
    def vmax(self) -> float:
        """Maximum voltage (full scale = 4 divisions). Accepts SI strings like '500mV'."""
        return self.vdiv * 4

    @vmax.setter
    def vmax(self, value: float | str) -> None:
        value = _normalize_value(value, Type.VOLTAGE)
        self.vdiv = value / 4

    @overload
    def waveform(self, dt: Literal[False] = False, adaptive: bool = False,
                 headroom: float = 1.2, max_iterations: int = 6) -> np.ndarray: ...
    @overload
    def waveform(self, dt: Literal[True], adaptive: bool = False,
                 headroom: float = 1.2, max_iterations: int = 6) -> Tuple[np.ndarray, float]: ...

    def waveform(self, dt: bool = False, adaptive: bool = False,
                 headroom: float = 1.2, max_iterations: int = 6) -> np.ndarray | Tuple[np.ndarray, float]:
        """
        Read waveform data from the most recent acquisition.

        For adaptive mode, reads the current acquisition and may re-trigger
        with adjusted voltage scale if needed.

        Args:
            dt: If True, return (waveform, sample_interval) tuple
            adaptive: If True, automatically adjust voltage scale for optimal signal quality.
                May re-trigger acquisition if scale needs adjustment.
            headroom: Headroom factor for adaptive mode (must be >= 1.0)
            max_iterations: Maximum scale adjustment iterations for adaptive mode

        Returns:
            If dt=False: numpy array of voltage values
            If dt=True: tuple of (voltage_array, sample_interval_seconds)

        Examples:
            # Basic usage (after single() + wait_trigger())
            scope.single()
            waveform = scope.channels[0].waveform()

            # Get waveform with sample interval
            waveform, dt_val = scope.channels[0].waveform(dt=True)

            # Adaptive capture (reads current acquisition, may re-trigger)
            scope.single()
            waveform = scope.channels[0].waveform(adaptive=True, headroom=1.2)
        """
        if adaptive:
            if headroom < 1.0:
                raise ValueError(f"headroom must be >= 1.0, got {headroom}")
            current_vmax = self.vmax
            # Try to adapt the scale if signal doesn't fit well
            for _ in range(max_iterations):
                # Read current acquisition (first iteration uses existing data from single/wait_trigger)
                voltage_array, xincr = self._read_waveform()
                # Check if scale needs adjustment
                max_allowed = current_vmax / headroom
                peak_voltage = np.max(np.abs(voltage_array))
                pct_exceeding = 100.0 * np.sum(np.abs(voltage_array) > max_allowed) / len(voltage_array)
                if pct_exceeding > 1.0:
                    # More than 1% of samples clip the headroom: double the range
                    current_vmax = current_vmax * 2.0
                    print(f'  CH{self._ch_num + 1}: {pct_exceeding:.1f}% samples exceed headroom, zooming out to {current_vmax:.3f}V', file=sys.stderr)
                    self.vmax = current_vmax
                    # NOTE(review): single() re-arms, but nothing here waits for
                    # the new acquisition before the next _read_waveform() —
                    # confirm the read blocks, otherwise data may be stale.
                    self._scope.single()
                    continue
                # Zoom in if peak < 25% of range
                if peak_voltage < 0.25 * current_vmax and current_vmax > 0.001:
                    current_vmax = current_vmax / 2.0
                    print(f'  CH{self._ch_num + 1}: Peak {peak_voltage:.3f}V < 25% of scale, zooming in to {current_vmax:.3f}V', file=sys.stderr)
                    self.vmax = current_vmax
                    self._scope.single()
                    continue
                # Scale is good
                break
        else:
            # Non-adaptive: just read current waveform
            voltage_array, xincr = self._read_waveform()
        # Return based on dt flag
        if dt:
            return voltage_array, xincr
        else:
            return voltage_array

    def _read_waveform(self) -> Tuple[np.ndarray, float]:
        """Internal method to read waveform data from scope's acquisition buffer."""
        ch_num = self._ch_num + 1  # 1-based for SCPI
        # Get waveform preamble for scaling info
        preamble_str = self._scope._query(f":WAVeform:SOURce CHANnel{ch_num};:WAVeform:PREamble?")
        preamble = [float(x) for x in preamble_str.split(',')]
        # Extract scaling parameters (format: format, type, points, count, xincr, xorig, xref, yincr, yorig, yref)
        xincr = preamble[4]  # Sample interval (dt)
        yincr, yorig, yref = preamble[7:10]
        # Read definite-length block: #<n><length><data><newline>
        # Parse header to determine exact byte count, avoiding buffer desync
        self._scope._write(f":WAVeform:DATA?")
        header = self._scope.inst.read_bytes(2)  # "#" + digit count
        n_digits = int(chr(header[1]))
        length = int(self._scope.inst.read_bytes(n_digits))
        data_bytes = self._scope.inst.read_bytes(length)
        self._scope.inst.read_bytes(1)  # consume trailing newline
        # Raw samples are little-endian unsigned 16-bit words
        data_array = np.frombuffer(data_bytes, dtype="<u2").astype(np.float64)
        # NOTE(review): Rigol manuals typically give V = (raw - yorig - yref) * yincr;
        # this uses (yref - yorig) instead — confirm against hardware output.
        voltage_array = (data_array - (yref - yorig)) * yincr
        return voltage_array, xincr
class AFG:
    """
    Arbitrary Function Generator (AFG) interface.

    Provides property-based access to AFG parameters. All changes
    are queued in the parent Scope's queue.

    Properties are automatically generated from AFG_PARAMS table.
    """
    # Typing-only stubs: the real properties are attached at runtime by
    # _generate_properties(); a few (termination, amplitude, offset, vrange)
    # are overridden below with explicit Python implementations.
    if TYPE_CHECKING:
        @property
        def enabled(self) -> bool:
            """Whether the AFG output is enabled."""
            ...
        @enabled.setter
        def enabled(self, value: bool) -> None: ...

        @property
        def function(self) -> Literal['SINusoid', 'SQUare', 'RAMP', 'PULSe', 'DC', 'NOISe', 'ARB']:
            """Waveform function type."""
            ...
        @function.setter
        def function(self, value: Literal['SINusoid', 'SQUare', 'RAMP', 'PULSe', 'DC', 'NOISe', 'ARB']) -> None: ...

        @property
        def termination(self) -> float:
            """Load termination resistance (default: inf). Compensates voltage/offset for voltage divider."""
            ...
        @termination.setter
        def termination(self, value: float) -> None: ...

        @property
        def amplitude(self) -> float:
            """Output amplitude (peak, not peak-to-peak). Accepts SI strings like '1V'. Set termination first."""
            ...
        @amplitude.setter
        def amplitude(self, value: float | str) -> None: ...

        @property
        def frequency(self) -> float:
            """Output frequency in Hz. Accepts SI strings like '1kHz'."""
            ...
        @frequency.setter
        def frequency(self, value: float | str) -> None: ...

        @property
        def offset(self) -> float:
            """DC offset at the load. Accepts SI strings like '500mV'. Set termination first."""
            ...
        @offset.setter
        def offset(self, value: float | str) -> None: ...

        @property
        def vrange(self) -> Tuple[float, float]:
            """Output voltage range (min, max) at the load. Settable with (V_min, V_max) tuple. Accepts SI strings."""
            ...
        @vrange.setter
        def vrange(self, value: Tuple[float | str, float | str]) -> None: ...

        @property
        def phase(self) -> float:
            """Phase offset in degrees (0-360)."""
            ...
        @phase.setter
        def phase(self, value: float) -> None: ...

        @property
        def duty(self) -> float:
            """Duty cycle for square wave (0-100%)."""
            ...
        @duty.setter
        def duty(self, value: float) -> None: ...

        @property
        def symmetry(self) -> float:
            """Symmetry for ramp wave (0-100%)."""
            ...
        @symmetry.setter
        def symmetry(self, value: float) -> None: ...

    def __init__(self, scope: Scope):
        """
        Initialize AFG.

        Args:
            scope: Parent Scope instance
        """
        self._scope = scope
        # inf = high-impedance load: no divider compensation applied
        self._termination: float = float('inf')

    @property
    def termination(self) -> float:
        """
        Load termination resistance in Ohms (default: inf for high-impedance).

        When set, amplitude and offset values are automatically scaled to compensate
        for the voltage divider formed by the AFG's 50Ω output impedance and the
        load termination. Set this property BEFORE setting amplitude or offset.

        Common values: 50 (for 50Ω termination), inf (high-impedance, no compensation).
        """
        return self._termination

    @termination.setter
    def termination(self, value: float | str) -> None:
        if isinstance(value, str):
            # Parse SI strings like '50 Ohm', '50Ω', or just '50'
            value = parse_si(value, unit='Ohm')
        self._termination = value

    def _compensation_factor(self) -> float:
        """Return the voltage compensation factor based on termination."""
        # Voltage divider: V_load = V_source * R_load / (R_source + R_load)
        # For high-impedance (inf), no compensation needed (factor = 1.0)
        if self._termination == float('inf'):
            return 1.0
        return (50.0 + self._termination) / self._termination

    @property
    def amplitude(self) -> float:
        """
        Output amplitude (peak voltage, not peak-to-peak). Accepts SI strings like '1V'.

        The device internally uses Vpp, so this property converts automatically.
        If termination is set, returns the actual amplitude at the load (compensated).
        """
        # Device stores Vpp, convert to peak amplitude
        # NOTE(review): _amplitude_raw is presumably a generated property from
        # AFG_PARAMS holding the device-facing Vpp value — confirm.
        return self._amplitude_raw / self._compensation_factor() / 2

    @amplitude.setter
    def amplitude(self, value: float | str) -> None:
        # Convert peak amplitude to Vpp for device
        self._amplitude_raw = _normalize_value(value, Type.VOLTAGE) * 2 * self._compensation_factor()

    @property
    def offset(self) -> float:
        """
        DC offset in volts. Accepts SI strings like '500mV'.

        If termination is set, returns the actual offset at the load (compensated).
        The AFG is commanded with a higher offset to account for the voltage divider.
        """
        return self._offset_raw / self._compensation_factor()

    @offset.setter
    def offset(self, value: float | str) -> None:
        self._offset_raw = _normalize_value(value, Type.VOLTAGE) * self._compensation_factor()

    @property
    def vrange(self) -> Tuple[float, float]:
        """
        Output voltage range (min, max) at the load.

        Can be set with a tuple (V_min, V_max) which computes amplitude and offset automatically.
        """
        return (self.offset - self.amplitude, self.offset + self.amplitude)

    @vrange.setter
    def vrange(self, value: Tuple[float | str, float | str]) -> None:
        v_min = _normalize_value(value[0], Type.VOLTAGE)
        v_max = _normalize_value(value[1], Type.VOLTAGE)
        # amplitude = half the span, offset = midpoint
        self.amplitude = (v_max - v_min) / 2
        self.offset = (v_max + v_min) / 2
class Trigger:
    """
    Trigger interface.

    Provides property-based access to trigger parameters. All changes
    are queued in the parent Scope's queue.

    Properties are automatically generated from TRIGGER_PARAMS table.
    """
    # Typing-only stubs: the real properties are attached at runtime by
    # _generate_properties(); 'source' is overridden below.
    if TYPE_CHECKING:
        @property
        def mode(self) -> Literal['EDGE', 'PULSe', 'RUNT', 'WIND', 'NEDG', 'SLOPe', 'VIDeo', 'PATTern', 'DELay', 'TIMeout', 'DURation', 'SHOLd', 'RS232', 'IIC', 'SPI']:
            """Trigger mode."""
            ...
        @mode.setter
        def mode(self, value: Literal['EDGE', 'PULSe', 'RUNT', 'WIND', 'NEDG', 'SLOPe', 'VIDeo', 'PATTern', 'DELay', 'TIMeout', 'DURation', 'SHOLd', 'RS232', 'IIC', 'SPI']) -> None: ...

        @property
        def source(self) -> str:
            """Trigger source. Accepts Channel object, channel number (1-4), or string like 'CHAN1'."""
            ...
        @source.setter
        def source(self, value: Channel | int | str) -> None: ...

        @property
        def level(self) -> float:
            """Trigger level in volts. Accepts SI strings like '500mV'."""
            ...
        @level.setter
        def level(self, value: float | str) -> None: ...

        @property
        def slope(self) -> Literal['POSitive', 'NEGative', 'EITHer']:
            """Trigger slope: POSitive, NEGative, or EITHer."""
            ...
        @slope.setter
        def slope(self, value: Literal['POSitive', 'NEGative', 'EITHer']) -> None: ...

        @property
        def sweep(self) -> Literal['AUTO', 'NORMal', 'SINGle']:
            """Trigger sweep mode: AUTO, NORMal, or SINGle."""
            ...
        @sweep.setter
        def sweep(self, value: Literal['AUTO', 'NORMal', 'SINGle']) -> None: ...

        @property
        def nreject(self) -> bool:
            """Noise rejection filter enabled."""
            ...
        @nreject.setter
        def nreject(self, value: bool) -> None: ...

    def __init__(self, scope: Scope):
        """
        Initialize Trigger.

        Args:
            scope: Parent Scope instance
        """
        self._scope = scope

    @property
    def source(self) -> str:
        """
        Trigger source (e.g., 'CHAN1', 'EXT').

        Can be set with a Channel object, channel number (1-4), or string.
        """
        # NOTE(review): _source_raw is presumably a generated property from
        # TRIGGER_PARAMS holding the SCPI source string — confirm.
        return self._source_raw

    @source.setter
    def source(self, value: Channel | int | str) -> None:
        if isinstance(value, Channel):
            # Channel objects are 0-based; SCPI names are 1-based
            value = f'CHAN{value._ch_num + 1}'
        elif isinstance(value, int):
            value = f'CHAN{value}'
        self._source_raw = value
# Unified property generator - eliminates duplication
def _generate_properties(cls, params, scpi_cmd_fn):
    """
    Generate and attach queue/cache-aware properties to a class from a
    parameter table.

    Args:
        cls: Class to attach properties to
        params: Parameter table: iterable of
            (name, ptype, scpi_template, valid_values, priority) tuples
        scpi_cmd_fn: Maps (instance, template) -> concrete SCPI command string
            (e.g. substituting the channel number into the template)
    """
    for name, ptype, scpi_template, valid_values, _priority in params:
        # Factory functions create one closure per parameter; binding the loop
        # variables as arguments avoids Python's late-binding closure pitfall.
        def make_getter(name, ptype, scpi_template):
            def getter(self):
                # Channel/AFG/Trigger instances delegate to their parent Scope
                scope = self if isinstance(self, Scope) else self._scope
                scpi_cmd = scpi_cmd_fn(self, scpi_template)
                # Serve committed values straight from the cache
                if scpi_cmd in scope._cache:
                    return scope._cache[scpi_cmd][0]  # cache holds (value, ptype)
                # Cache miss: flush pending writes, query the device, cache result
                scope.commit()
                result = scope._query(f":{scpi_cmd}?")
                parsed = scope._parse_value(result, ptype)
                scope._cache[scpi_cmd] = (parsed, ptype)
                return parsed
            return getter

        def make_setter(name, ptype, scpi_template, valid_values):
            def setter(self, value):
                scope = self if isinstance(self, Scope) else self._scope
                # Normalize value (parse SI unit strings to numbers)
                value = _normalize_value(value, ptype)
                # Validate against the allowed value set, if one is declared
                if valid_values is not None:
                    # Case-insensitive string comparison
                    value_upper = str(value).upper()
                    if not any(value_upper == str(v).upper() for v in valid_values):
                        raise ValueError(f"Invalid {name}: {value}. Must be one of {valid_values}")
                # Skip the write entirely if the committed value is unchanged
                scpi_cmd = scpi_cmd_fn(self, scpi_template)
                cached = scope._cache.get(scpi_cmd)
                if cached is not None:
                    cached_value = cached[0]
                    # Floats use approximate comparison to absorb round-trip noise
                    if ptype in (Type.VOLTAGE, Type.FREQUENCY, Type.TIME):
                        if isinstance(value, (int, float)) and isinstance(cached_value, (int, float)):
                            if abs(value - cached_value) < 1e-9:
                                return  # Value unchanged, skip
                    elif value == cached_value:
                        return  # Value unchanged, skip
                # Invalidate the stale cache entry (re-populated on next read);
                # a single pop() replaces the redundant membership check + del.
                scope._cache.pop(scpi_cmd, None)
                # Queue the pending write, keyed by the SCPI command
                scope._queue[scpi_cmd] = (value, ptype)
            return setter

        setattr(cls, name, property(
            make_getter(name, ptype, scpi_template),
            make_setter(name, ptype, scpi_template, valid_values)
        ))
# Generate all properties at module import time
_generate_properties(
    Scope,
    SCOPE_PARAMS,
    # Scope-level templates are used verbatim
    lambda _, template: template
)
_generate_properties(
    Channel,
    CHANNEL_PARAMS,
    # Channel templates embed the 1-based channel number
    lambda self, template: template.format(ch=self._ch_num + 1)
)
_generate_properties(
    AFG,
    AFG_PARAMS,
    lambda _, template: template
)
_generate_properties(
    Trigger,
    TRIGGER_PARAMS,
    lambda _, template: template
)
# The generator is module-internal scaffolding; remove it from the namespace
del _generate_properties
| wjakob/rigol | 2 | Bode plot utility for Rigol DHO900 series oscilloscopes | Python | wjakob | Wenzel Jakob | EPFL |
rigol/util.py | Python | """
Utility functions for Bode plot analysis and visualization.
"""
import re
import sys
from typing import Tuple, Optional, Dict, Callable
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# Precompiled regex for SI unit parsing
# Matches: optional sign, number, optional whitespace, optional SI prefix, optional whitespace, optional unit
# NOTE: does not accept exponent notation (e.g. '1e3') or an explicit leading '+'
_SI_PATTERN = re.compile(r'^(-?[\d.]+)\s*([pnuµmkKMGT]?)\s*([a-zA-Z]+)?$')

# SI prefix multipliers (case-sensitive to distinguish m (milli) from M (mega))
_SI_MULTIPLIERS = {
    '': 1,
    'p': 1e-12,  # pico
    'n': 1e-9,   # nano
    'u': 1e-6,   # micro (ASCII)
    'µ': 1e-6,   # micro (Unicode)
    'm': 1e-3,   # milli
    'k': 1e3,    # kilo (lowercase)
    'K': 1e3,    # kilo (uppercase)
    'M': 1e6,    # mega
    'G': 1e9,    # giga
    'T': 1e12,   # tera
}
def _format_frequency_tick(value, pos):
"""Format frequency tick labels in Hz/KHz/MHz."""
if value >= 1e6:
return f'{value/1e6:.0f} MHz' if value >= 10e6 else f'{value/1e6:.1f} MHz'
elif value >= 1e3:
return f'{value/1e3:.0f} KHz' if value >= 10e3 else f'{value/1e3:.1f} KHz'
else:
return f'{value:.0f} Hz'
def format_frequency(value: float) -> str:
    """Format a frequency value with an appropriate unit (Hz/KHz/MHz)."""
    # Three decimal digits below 100 units of a scale, two at or above
    if value >= 1e6:
        digits = 3 if value < 100e6 else 2
        return f'{value / 1e6:.{digits}f} MHz'
    if value >= 1e3:
        digits = 3 if value < 100e3 else 2
        return f'{value / 1e3:.{digits}f} KHz'
    return f'{value:.2f} Hz'
def parse_si(value: str, unit: str = 'Hz') -> float:
    """
    Parse a string with SI prefixes into a numeric value.

    Supports standard SI prefixes with case sensitivity:
    p = pico (10^-12), n = nano (10^-9), u/µ = micro (10^-6),
    m = milli (10^-3), k/K = kilo (10^3), M = mega (10^6),
    G = giga (10^9), T = tera (10^12).

    Parameters
    ----------
    value : str
        String to parse, e.g., '10KHz', '5V', '10mV', '1.5MHz', '10nF'
    unit : str, optional
        Expected unit ('Hz', 'V', 'H', 'F', etc.). Default is 'Hz'.
        Used to validate the input; the unit suffix itself is optional.

    Returns
    -------
    float
        Numeric value in base units (Hz for frequency, V for voltage, etc.)

    Raises
    ------
    ValueError
        If the string does not match the expected format, or the unit
        suffix does not match *unit* (case-insensitive).

    Examples
    --------
    >>> parse_si('10KHz', unit='Hz')
    10000.0
    >>> parse_si('1.5MHz', unit='Hz')
    1500000.0
    >>> parse_si('10mV', unit='V')
    0.01
    >>> parse_si('10nF', unit='F')
    1e-08
    >>> parse_si('1mH', unit='H')
    0.001
    >>> parse_si('5V', unit='V')
    5.0
    >>> parse_si('10K', unit='Hz')  # Unit suffix optional
    10000.0
    >>> parse_si('10k', unit='Hz')  # Lowercase k also works
    10000.0
    """
    original_value = value
    # Groups: signed number, optional SI prefix, optional unit suffix
    m = re.match(r'^(-?[\d.]+)\s*([pnuµmkKMGT]?)\s*([a-zA-Z]+)?$', value.strip())
    if m is None:
        raise ValueError(f"Invalid format: {original_value}")
    number = float(m.group(1))
    prefix = m.group(2)
    found_unit = m.group(3)
    # Units compare case-insensitively; prefixes are case-sensitive
    if found_unit and found_unit.upper() != unit.upper():
        raise ValueError(f"Expected unit '{unit}' but found '{found_unit}' in: {original_value}")
    multipliers = {
        '': 1, 'p': 1e-12, 'n': 1e-9, 'u': 1e-6, 'µ': 1e-6,
        'm': 1e-3, 'k': 1e3, 'K': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
    }
    return number * multipliers.get(prefix, 1)
def generate_frequencies_per_decade(f_min: float, f_max: float, steps_per_decade: int) -> np.ndarray:
    """
    Generate logarithmically-spaced frequencies with a specified number of steps per decade.

    This matches the Rigol oscilloscope behavior where frequencies are distributed
    evenly on a logarithmic scale within each decade.

    Parameters
    ----------
    f_min : float
        Start frequency in Hz (must be > 0)
    f_max : float
        End frequency in Hz (must be >= f_min)
    steps_per_decade : int
        Number of frequency points per decade (e.g., 10; must be > 0)

    Returns
    -------
    np.ndarray
        Array of frequencies logarithmically spaced with steps_per_decade
        points per decade, including both endpoints

    Raises
    ------
    ValueError
        If a frequency is non-positive, f_max < f_min, or steps_per_decade <= 0

    Examples
    --------
    >>> freqs = generate_frequencies_per_decade(1000, 10000, 10)
    >>> len(freqs)  # 1 decade with 10 steps per decade
    11
    >>> freqs = generate_frequencies_per_decade(1000, 100000, 10)
    >>> len(freqs)  # 2 decades with 10 steps per decade
    21
    """
    # Validate up front: log10 of a non-positive frequency would silently
    # produce nan/-inf and propagate through logspace.
    if f_min <= 0 or f_max <= 0:
        raise ValueError(f"Frequencies must be positive, got f_min={f_min}, f_max={f_max}")
    if f_max < f_min:
        raise ValueError(f"f_max ({f_max}) must be >= f_min ({f_min})")
    if steps_per_decade <= 0:
        raise ValueError(f"steps_per_decade must be positive, got {steps_per_decade}")
    log_min = np.log10(f_min)
    log_max = np.log10(f_max)
    num_decades = log_max - log_min
    # Total points: steps_per_decade per decade plus one to include the
    # endpoint; linspace-in-log (logspace) hits both endpoints exactly.
    total_steps = int(np.ceil(steps_per_decade * num_decades)) + 1
    return np.logspace(log_min, log_max, total_steps)
def rc_lowpass(fc: float):
    """
    Create a gain/phase model of a 1st-order RC lowpass filter.

    Parameters
    ----------
    fc : float
        Cutoff frequency (-3dB point) in Hz

    Returns
    -------
    callable
        Function mapping a frequency array to (gain_db, phase_deg)
    """
    def model(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        ratio = f / fc
        # |H(jω)| = 1 / sqrt(1 + (f/fc)^2)
        magnitude = 1.0 / np.sqrt(1.0 + ratio**2)
        # Phase rolls off from 0° toward -90°: φ = -atan(f/fc)
        return 20 * np.log10(magnitude), -np.degrees(np.arctan(ratio))
    return model
def rc_highpass(fc: float):
    """
    Create a gain/phase model of a 1st-order RC highpass filter.

    Parameters
    ----------
    fc : float
        Cutoff frequency (-3dB point) in Hz

    Returns
    -------
    callable
        Function mapping a frequency array to (gain_db, phase_deg)
    """
    def model(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        ratio = f / fc
        # H(jω) = jωRC / (1 + jωRC)  →  |H| = (f/fc) / sqrt(1 + (f/fc)^2)
        magnitude = ratio / np.sqrt(1.0 + ratio**2)
        # Phase leads by 90° at DC and falls to 0° well above fc
        return 20 * np.log10(magnitude), 90.0 - np.degrees(np.arctan(ratio))
    return model
def rlc_lowpass(fc: float, r: float = 0.0):
    """
    Create a callable for a 2nd-order RLC lowpass filter.

    Parameters
    ----------
    fc : float
        Resonant frequency in Hz (f₀ = 1/(2π√LC))
    r : float
        Series resistance in Ohms (default: 0.0 for ideal LC)

    Returns
    -------
    callable
        Function that takes frequency array and returns (gain_db, phase_deg)

    Notes
    -----
    Models an RLC lowpass filter (L and R in series, C to ground):
    H(jω) = 1 / (1 - (ω/ω₀)² + j(ω/ω₀)/Q). Phase transitions smoothly from
    0° (low freq) to -180° (high freq), passing -90° at resonance.

    Only the LC product is known (via fc), not L and C individually, so the
    quality factor Q = √(L/C)/R cannot be derived exactly. A characteristic
    impedance Z₀ = √(L/C) of 50 Ω is assumed, giving Q = 50/r; for r = 0 a
    very large Q (1000) stands in for the ideal undamped case.
    """
    def func(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # Normalized frequency x = f/f₀ = ω/ω₀
        x = f / fc
        # Q from the assumed Z₀ = 50 Ω (see Notes)
        if r == 0:
            Q = 1000.0  # Very high Q for ideal LC
        else:
            Q = 50.0 / r
        # Denominator of H: (1 - x²) + j·x/Q
        real_part = 1.0 - x**2
        imag_part = x / Q
        gain = 1.0 / np.sqrt(real_part**2 + imag_part**2)
        gain_db = 20 * np.log10(gain)
        # arctan2 keeps the phase continuous through resonance (real part = 0)
        phase_deg = -np.degrees(np.arctan2(imag_part, real_part))
        return gain_db, phase_deg
    return func
def rlc_highpass(fc: float, r: float = 0.0):
    """
    Create a callable for a 2nd-order RLC highpass filter.

    Parameters
    ----------
    fc : float
        Resonant frequency in Hz (f₀ = 1/(2π√LC))
    r : float
        Series resistance in Ohms (default: 0.0 for ideal LC)

    Returns
    -------
    callable
        Function that takes frequency array and returns (gain_db, phase_deg)

    Notes
    -----
    Models an RLC highpass filter (C and R in series, L to ground):
    H(jω) = -x² / (1 - x² + jx/Q) with x = ω/ω₀. Phase transitions smoothly
    between -180° (low freq) and 0° (high freq), passing ±90° (equivalent
    angles after wrapping) at resonance.

    As in rlc_lowpass, only the LC product is known, so a characteristic
    impedance Z₀ = 50 Ω is assumed, giving Q = 50/r (Q = 1000 for r = 0).
    """
    def func(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # Normalized frequency x = f/f₀ = ω/ω₀
        x = f / fc
        if r == 0:
            Q = 1000.0  # Very high Q for ideal LC
        else:
            Q = 50.0 / r
        # Denominator of H: (1 - x²) + j·x/Q; numerator is -x²
        real_part = 1.0 - x**2
        imag_part = x / Q
        gain = x**2 / np.sqrt(real_part**2 + imag_part**2)
        gain_db = 20 * np.log10(gain)
        # arg(-x²) = -180° (negative real); subtract the denominator's angle
        phase_deg = -180.0 - np.degrees(np.arctan2(imag_part, real_part))
        # Wrap phase to the [-180, 180] range
        phase_deg = ((phase_deg + 180.0) % 360.0) - 180.0
        return gain_db, phase_deg
    return func
def lc_bandpass(L: float, C: float, r_esr: float = 0.0, r_source: float = 50.0):
    """
    Create a gain/phase model of a 2nd-order LC bandpass filter.

    Parameters
    ----------
    L : float
        Inductance in Henries (e.g., 1e-3 for 1mH)
    C : float
        Capacitance in Farads (e.g., 10e-9 for 10nF)
    r_esr : float
        Inductor ESR (equivalent series resistance) in Ohms (default: 0.0)
    r_source : float
        Source resistance in Ohms (default: 50.0)

    Returns
    -------
    callable
        Function mapping a frequency array to (gain_db, phase_deg)

    Notes
    -----
    Voltage-divider topology: AFG -> R_source -> [L(R_esr) || C] -> GND,
    with the parallel tank as the lower leg: H(jω) = Z_LC / (R_source + Z_LC).
    At resonance (f₀ = 1/(2π√LC)) the tank impedance peaks, so the gain
    approaches 0 dB; away from resonance the signal is attenuated. Bandwidth
    follows Q = √(L/C)/R_esr: high Q gives a sharp, narrow peak.
    """
    def model(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        omega = 2 * np.pi * f
        # Branch impedances: lossy inductor and ideal capacitor
        z_ind = r_esr + 1j * omega * L
        z_cap = -1j / (omega * C)
        # Parallel tank forms the lower leg of the divider
        z_tank = (z_ind * z_cap) / (z_ind + z_cap)
        transfer = z_tank / (r_source + z_tank)
        return 20 * np.log10(np.abs(transfer)), np.degrees(np.angle(transfer))
    return model
def lc_bandstop(L: float, C: float, r_esr: float = 0.0, r_source: float = 50.0):
    """
    Create a gain/phase model of a 2nd-order LC bandstop (notch) filter.

    Parameters
    ----------
    L : float
        Inductance in Henries (e.g., 1e-3 for 1mH)
    C : float
        Capacitance in Farads (e.g., 10e-9 for 10nF)
    r_esr : float
        Inductor ESR (equivalent series resistance) in Ohms (default: 0.0)
    r_source : float
        Source resistance in Ohms (default: 50.0)

    Returns
    -------
    callable
        Function mapping a frequency array to (gain_db, phase_deg)

    Notes
    -----
    Series LC shunt to ground after R_source:
    H(jω) = Z_LC / (R_source + Z_LC) with Z_LC = R_esr + j(ωL - 1/(ωC)).
    At resonance (f₀ = 1/(2π√LC)) the series branch impedance collapses to
    ≈R_esr, shorting the output and producing a deep notch; away from
    resonance the branch impedance is high and the signal passes (≈0 dB).
    Notch depth/width follow Q = ω₀L/R_esr.
    """
    def model(f: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        omega = 2 * np.pi * f
        # Series branch: reactances cancel at resonance, leaving only R_esr
        z_series = r_esr + 1j * (omega * L - 1.0 / (omega * C))
        transfer = z_series / (r_source + z_series)
        # Clamp to avoid log(0) at the exact notch for an ideal (r_esr=0) LC
        magnitude = np.maximum(np.abs(transfer), 1e-10)
        return 20 * np.log10(magnitude), np.degrees(np.angle(transfer))
    return model
def create_print_callback(quiet: bool = False):
    """
    Create a measurement callback that prints each frequency point.

    Parameters
    ----------
    quiet : bool
        If True, returns None (no printing)

    Returns
    -------
    callable or None
        Callback compatible with BodePlot.sweep(), or None when quiet
    """
    if quiet:
        return None

    def _print_point(freq_hz, gain_db, phase_deg, gain_linear, **kwargs):
        # One aligned line per measured point
        freq_str = format_frequency(freq_hz)
        print(f'{freq_str:>13}   {gain_db:>7.3f} dB ({gain_linear:>6.4f}×)   {phase_deg:>8.2f}°')

    return _print_point
class LivePlotUpdater:
"""
Helper class for live Bode plot updates during sweep.
Usage:
plotter = LivePlotUpdater(freqs, extra_curves)
bode.sweep(freqs, on_measurement=plotter.update)
plotter.show() # Display final plot
"""
def __init__(
self,
freqs: np.ndarray,
extra: Optional[Dict[str, Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]]]] = None
):
"""
Initialize live plotter.
Parameters
----------
freqs : np.ndarray
Full frequency array for the sweep
extra : dict, optional
Dictionary mapping label -> function(freqs) -> (gain_db, phase_deg)
for plotting reference curves
"""
self.freqs = freqs
self.gains = []
self.phases = []
# Set up interactive plot
plt.ion()
self.fig, (self.ax_mag, self.ax_phase) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
# Measured curves
self.line_gain_meas, = self.ax_mag.semilogx([], [], marker='o', label="Measured")
self.line_phase_meas, = self.ax_phase.semilogx([], [], marker='o', label="Measured")
# Set X-axis limits to full frequency range
if len(freqs) > 0:
f_min, f_max = freqs.min(), freqs.max()
margin = (np.log10(f_max) - np.log10(f_min)) * 0.05
self.ax_mag.set_xlim(10**(np.log10(f_min) - margin), 10**(np.log10(f_max) + margin))
# Extra reference curves
if extra:
f_ref = np.logspace(np.log10(freqs.min()), np.log10(freqs.max()), 400)
for label, func in extra.items():
gain_db_ref, phase_deg_ref = func(f_ref)
self.ax_mag.semilogx(f_ref, gain_db_ref, linestyle='--', label=label, alpha=0.7)
self.ax_phase.semilogx(f_ref, phase_deg_ref, linestyle='--', alpha=0.7)
# Configure axes
self.ax_mag.set_ylabel("Gain [dB]")
self.ax_mag.grid(True, which="both", ls=":")
self.ax_mag.legend(loc="best")
self.ax_phase.set_ylabel("Phase shift [deg]")
self.ax_phase.set_xlabel("Frequency")
self.ax_phase.grid(True, which="both", ls=":")
self.ax_phase.legend(loc="best")
# Format X-axis tick labels
from matplotlib.ticker import FuncFormatter
freq_formatter = FuncFormatter(_format_frequency_tick)
self.ax_phase.xaxis.set_major_formatter(freq_formatter)
self.fig.tight_layout()
def update(self, freq_hz, gain_db, phase_deg, index, total, **kwargs):
"""
Callback to update plot with new measurement.
Parameters are passed by BodePlot.sweep() as kwargs.
"""
# Check if window was closed
if not plt.fignum_exists(self.fig.number):
print("\nPlot window closed by user - aborting measurement")
sys.exit(1)
# Store data
self.gains.append(gain_db)
self.phases.append(phase_deg)
# Update plot lines
freqs_partial = self.freqs[:index+1]
self.line_gain_meas.set_data(freqs_partial, self.gains)
self.line_phase_meas.set_data(freqs_partial, self.phases)
# Rescale y-limits only
self.ax_mag.relim()
self.ax_mag.autoscale_view(scalex=False, scaley=True)
self.ax_phase.relim()
self.ax_phase.autoscale_view(scalex=False, scaley=True)
# Redraw
self.fig.canvas.draw()
self.fig.canvas.flush_events()
plt.pause(0.01)
def show(self):
    """Show the completed plot and block until the window is closed."""
    # Final layout pass, then leave interactive mode so show() blocks.
    self.fig.tight_layout()
    plt.ioff()
    plt.show()
def save_to_csv(filename: str, freqs: np.ndarray, gain_db: np.ndarray, phase_deg: np.ndarray):
    """Write measured Bode data (frequency, gain, phase rows) to a CSV file."""
    import csv
    header = ['Frequency (Hz)', 'Gain (dB)', 'Phase (deg)']
    with open(filename, 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(header)
        # One row per measurement point, in sweep order.
        csv_out.writerows(zip(freqs, gain_db, phase_deg))
| wjakob/rigol | 2 | Bode plot utility for Rigol DHO900 series oscilloscopes | Python | wjakob | Wenzel Jakob | EPFL |
test_scope.py | Python | """
Comprehensive test suite for Scope abstraction.
Tests every exposed parameter with set + query round-trip verification.
Requires a connected oscilloscope at 192.168.5.2.
"""
import pytest
import numpy as np
from rigol.scope import Scope
@pytest.fixture(scope="module")
def scope():
    """Create scope instance for testing. Connects to default IP.

    Skips the whole module when no oscilloscope is reachable.
    """
    try:
        s = Scope(debug_level=0)
    except Exception as e:
        pytest.skip(f"Scope not available: {e}")
    # Yield OUTSIDE the try block: previously the yield was inside it, so
    # any exception thrown into the fixture while it was live would have
    # been converted into a skip instead of surfacing as an error.
    yield s
@pytest.fixture(autouse=True)
def clear_cache(scope):
    """Clear scope cache between tests to ensure isolation.

    Runs automatically before every test in this module (autouse). All
    work happens before the yield; there is no teardown step.
    """
    # Commit any pending changes before clearing to avoid leaving scope in bad state
    scope.commit()
    scope.clear_cache()
    yield
class TestScopeParameters:
    """Test scope-level parameters.

    Each test follows a set + query round-trip pattern against real
    hardware; assertions allow for the scope's own value rounding.
    """

    def test_reset_method(self, scope):
        """Test reset to factory defaults - run first to establish clean state."""
        # Set some parameters
        scope.tdiv = 1e-3
        scope.channels[0].vdiv = 1.0
        # Reset to factory defaults (blocks until complete)
        scope.reset()
        # Queue should be cleared
        assert len(scope._queue) == 0
        # Re-establish known good state for subsequent tests
        # After reset, set up basic configuration that tests expect
        scope.tdiv = 1e-3
        scope.channels[0].enabled = True
        scope.channels[0].vdiv = 1.0
        scope.channels[0].probe = 1

    def test_mem_depth_valid_values(self, scope):
        """Test setting and querying memory depth with valid string values."""
        # Set up known-good scope state: only one channel enabled, reasonable timebase
        # This maximizes available memory depth options
        for i in range(4):
            scope.channels[i].enabled = (i == 0)
        scope.tdiv = 1e-3  # 1ms/div - moderate timebase
        # Test commonly supported values as strings
        test_values = ['10K', '100K', '1M']
        for value in test_values:
            scope.mem_depth = value
            result = scope.mem_depth
            # Just verify it doesn't error - scope may return scientific notation
            assert isinstance(result, str), f"Expected string result, got {type(result)}"

    def test_mem_depth_invalid_value(self, scope):
        """Test that invalid memory depth values raise ValueError."""
        # Set up known-good scope state
        for i in range(4):
            scope.channels[i].enabled = (i == 0)
        scope.tdiv = 1e-3
        # Test a value not in the valid list - should raise ValueError
        with pytest.raises(ValueError, match="Invalid mem_depth"):
            scope.mem_depth = '50K'  # Not in valid values list

    def test_mem_depth_string_value(self, scope):
        """Test that mem_depth accepts string values."""
        # Set up known-good scope state
        for i in range(4):
            scope.channels[i].enabled = (i == 0)
        scope.tdiv = 1e-3
        # Test string format - scope may return scientific notation
        scope.mem_depth = '100K'
        result = scope.mem_depth
        # Scope returns scientific notation like '1.0000E+05'
        assert result.upper() in ['100K', '1.0000E+05', '1E+05'], f"Unexpected format: {result}"

    def test_tdiv(self, scope):
        """Test setting and querying timebase scale."""
        test_values = [1e-6, 1e-3, 1.0]  # 1us, 1ms, 1s
        for value in test_values:
            scope.tdiv = value
            result = scope.tdiv
            # 1% relative tolerance for scope rounding
            assert abs(result - value) / value < 0.01, f"Expected {value}, got {result}"

    def test_tdiv_string(self, scope):
        """Test setting timebase scale with SI string."""
        scope.tdiv = '10us'
        result = scope.tdiv
        # Allow 2% tolerance for scope rounding
        assert abs(result - 1e-5) / 1e-5 <= 0.02

    def test_toffset(self, scope):
        """Test setting and querying timebase offset."""
        # Set timebase scale to allow reasonable offset range
        # According to SCPI: MainLeftTime = -5 x MainScale
        # With tdiv=1ms, we can offset up to -5ms
        scope.tdiv = 1e-3
        test_values = [-1e-3, 0.0, 1e-3]  # -1ms, 0, 1ms
        for value in test_values:
            scope.toffset = value
            result = scope.toffset
            assert abs(result - value) < 1e-6, f"Expected {value}, got {result}"

    def test_toffset_string(self, scope):
        """Test setting timebase offset with SI string."""
        scope.toffset = '1ms'
        result = scope.toffset
        assert abs(result - 1e-3) < 1e-6

    def test_tmode(self, scope):
        """Test timebase mode."""
        # Test MAIN mode
        scope.tmode = 'MAIN'
        result = scope.tmode
        assert result.upper() == 'MAIN'
        # Test ROLL mode
        scope.tmode = 'ROLL'
        result = scope.tmode
        assert result.upper() == 'ROLL'
        # Return to MAIN
        scope.tmode = 'MAIN'

    def test_acq_type(self, scope):
        """Test acquisition type."""
        # Test NORMal
        scope.acq_type = 'NORMal'
        result = scope.acq_type
        assert 'NORM' in result.upper()
        # Test AVERages
        scope.acq_type = 'AVERages'
        result = scope.acq_type
        assert 'AVER' in result.upper()
        # Return to NORMal
        scope.acq_type = 'NORMal'

    def test_acq_averages(self, scope):
        """Test acquisition averages count."""
        # First set to average mode
        scope.acq_type = 'AVERages'
        # Test powers of 2
        test_values = [2, 4, 8, 16, 32, 64, 128]
        for value in test_values:
            scope.acq_averages = value
            result = scope.acq_averages
            assert result == value, f"Expected {value}, got {result}"
        # Return to NORMal mode
        scope.acq_type = 'NORMal'
class TestChannelParameters:
    """Test channel parameters.

    Set + query round-trips on channel 1 (scope.channels[0]) unless a
    test needs all four channels.
    """

    def test_vdiv(self, scope):
        """Test voltage per division setting (standard 1-2-5 sequence)."""
        ch = scope.channels[0]
        # Channel must be enabled for vdiv to work
        ch.enabled = True
        ch.probe = 1  # Use 1x probe for predictable values
        # Test standard 1-2-5 sequence values
        test_values = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
        for value in test_values:
            ch.vdiv = value
            result = ch.vdiv
            assert abs(result - value) < 1e-6, f"Expected {value}, got {result}"

    def test_vdiv_string(self, scope):
        """Test setting vdiv with SI string."""
        ch = scope.channels[0]
        ch.enabled = True
        ch.probe = 1
        ch.vdiv = '500mV'
        result = ch.vdiv
        assert abs(result - 0.5) < 1e-6, f"Expected 0.5, got {result}"

    def test_vmax(self, scope):
        """Test vmax property (derived from vdiv)."""
        ch = scope.channels[0]
        ch.enabled = True
        ch.probe = 1
        ch.vmax = 16.0  # Should set vdiv to 4.0 (16/4)
        vdiv_result = ch.vdiv
        # Fixed: the failure message previously said "Expected vdiv=2.0",
        # contradicting the asserted value of 4.0.
        assert abs(vdiv_result - 4.0) < 1e-6, f"Expected vdiv=4.0, got {vdiv_result}"
        vmax_result = ch.vmax
        assert abs(vmax_result - 16.0) < 1e-6, f"Expected vmax=16.0, got {vmax_result}"

    def test_probe(self, scope):
        """Test probe attenuation factor."""
        ch = scope.channels[0]
        test_values = [1, 10, 100]
        for value in test_values:
            ch.probe = value
            result = ch.probe
            assert result == value or abs(result - value) < 0.1, f"Expected {value}, got {result}"

    def test_enabled(self, scope):
        """Test channel enable/disable."""
        ch = scope.channels[0]
        ch.enabled = True
        assert ch.enabled is True
        ch.enabled = False
        assert ch.enabled is False
        # Restore to True
        ch.enabled = True

    def test_coupling(self, scope):
        """Test coupling mode."""
        ch = scope.channels[0]
        for value in ['DC', 'AC', 'GND']:
            ch.coupling = value
            result = ch.coupling
            assert result.upper() == value.upper()

    def test_coupling_case_insensitive(self, scope):
        """Test coupling with lowercase."""
        ch = scope.channels[0]
        ch.coupling = 'dc'
        result = ch.coupling
        assert result.upper() == 'DC'

    def test_coupling_invalid(self, scope):
        """Test that invalid coupling raises ValueError."""
        ch = scope.channels[0]
        with pytest.raises(ValueError):
            ch.coupling = 'INVALID'

    def test_bwlimit(self, scope):
        """Test bandwidth limit setting."""
        ch = scope.channels[0]
        # Test setting to 20M
        ch.bwlimit = '20M'
        result = ch.bwlimit
        assert result == '20M'
        # Test setting to OFF
        ch.bwlimit = 'OFF'
        result = ch.bwlimit
        assert result == 'OFF'

    def test_offset(self, scope):
        """Test vertical offset."""
        ch = scope.channels[0]
        test_values = [-1.0, 0.0, 1.0]
        for value in test_values:
            ch.offset = value
            result = ch.offset
            assert abs(result - value) < 1e-6, f"Expected {value}, got {result}"

    def test_all_channels_accessible(self, scope):
        """Test that all 4 channels are accessible."""
        assert len(scope.channels) == 4
        for i in range(4):
            ch = scope.channels[i]
            # Just verify we can set a parameter
            ch.probe = 10
            assert ch.probe == 10

    def test_position(self, scope):
        """Test channel position (bias voltage)."""
        ch = scope.channels[0]
        test_values = [-1.0, 0.0, 1.0]
        for value in test_values:
            ch.position = value
            result = ch.position
            assert abs(result - value) < 0.01, f"Expected {value}, got {result}"

    def test_invert(self, scope):
        """Test channel signal invert."""
        ch = scope.channels[0]
        ch.invert = True
        assert ch.invert is True
        ch.invert = False
        assert ch.invert is False
class TestAFGParameters:
    """Test AFG parameters.

    Exercises the built-in arbitrary function generator via scope.afg
    with set + query round-trips.
    """

    def test_enabled(self, scope):
        """Test AFG enable/disable."""
        afg = scope.afg
        afg.enabled = True
        assert afg.enabled is True
        afg.enabled = False
        assert afg.enabled is False

    def test_function(self, scope):
        """Test waveform function type."""
        afg = scope.afg
        afg.function = 'SINusoid'
        result = afg.function
        # Scope may return abbreviated form 'SIN' or full 'SINUSOID'
        assert result.upper() in ['SIN', 'SINUSOID']

    def test_amplitude(self, scope):
        """Test AFG amplitude (peak voltage, not Vpp). Max is 5V peak (10 Vpp)."""
        afg = scope.afg
        test_values = [0.5, 1.0, 5.0]  # Max 5V peak (device caps at 10 Vpp)
        for value in test_values:
            afg.amplitude = value
            result = afg.amplitude
            assert abs(result - value) < 1e-3, f"Expected {value}, got {result}"

    def test_amplitude_string(self, scope):
        """Test setting AFG amplitude with SI string."""
        afg = scope.afg
        afg.amplitude = '5V'
        result = afg.amplitude
        assert abs(result - 5.0) < 1e-3

    def test_frequency(self, scope):
        """Test AFG frequency."""
        afg = scope.afg
        test_values = [100.0, 1000.0, 10000.0]
        for value in test_values:
            afg.frequency = value
            result = afg.frequency
            assert abs(result - value) < 1.0, f"Expected {value}, got {result}"

    def test_frequency_string(self, scope):
        """Test setting AFG frequency with SI string."""
        afg = scope.afg
        afg.frequency = '1kHz'
        result = afg.frequency
        assert abs(result - 1000.0) < 1.0

    def test_offset(self, scope):
        """Test AFG DC offset."""
        afg = scope.afg
        # Set amplitude low enough to allow offset range (amplitude + |offset| <= 5V)
        afg.amplitude = 1.0
        test_values = [-1.0, 0.0, 1.0]
        for value in test_values:
            afg.offset = value
            result = afg.offset
            assert abs(result - value) < 1e-3, f"Expected {value}, got {result}"

    def test_phase(self, scope):
        """Test AFG phase offset."""
        afg = scope.afg
        test_values = [0, 90, 180, 270]
        for value in test_values:
            afg.phase = value
            result = afg.phase
            assert abs(result - value) < 1.0, f"Expected {value}, got {result}"

    def test_duty(self, scope):
        """Test AFG square wave duty cycle."""
        afg = scope.afg
        # Set to square wave first (duty only applies to square waves)
        afg.function = 'SQUare'
        afg.duty = 50
        result = afg.duty
        assert abs(result - 50) < 1.0, f"Expected 50, got {result}"

    def test_symmetry(self, scope):
        """Test AFG ramp symmetry."""
        afg = scope.afg
        # Set to ramp wave first (symmetry only applies to ramps)
        afg.function = 'RAMP'
        afg.symmetry = 50
        result = afg.symmetry
        assert abs(result - 50) < 1.0, f"Expected 50, got {result}"
class TestCommitBehavior:
    """Test commit and queue behavior.

    The Scope batches writes in scope._queue and flushes them (commit)
    automatically when any value is read; reads then populate
    scope._cache so repeat reads skip the device.
    """

    def test_queue_and_cache_behavior(self, scope):
        """Test that writing queues values, and reading auto-commits then reads from device."""
        ch = scope.channels[0]
        # Channel must be enabled for vdiv to work
        ch.enabled = True
        ch.probe = 1
        # Set a parameter
        ch.vdiv = 2.0
        # Check queue contains the value
        assert len(scope._queue) > 0
        # Read it back - this should auto-commit and read from device
        result = ch.vdiv
        assert abs(result - 2.0) < 1e-6
        # After reading, queue should be cleared (auto-committed)
        assert len(scope._queue) == 0
        # Value should now be in cache (keyed by the raw SCPI command;
        # channel numbers are 1-based on the wire, 0-based in Python)
        scpi_cmd = f'CHANnel{ch._ch_num + 1}:SCALe'
        assert scpi_cmd in scope._cache
        # Reading again should return cached value without device query
        result2 = ch.vdiv
        assert abs(result2 - 2.0) < 1e-6

    def test_batch_multiple_parameters(self, scope):
        """Test that multiple parameters are batched and auto-committed on first read."""
        ch0 = scope.channels[0]
        ch1 = scope.channels[1]
        # Ensure channel is enabled
        ch0.enabled = True
        # Set probe first, then vdiv (probe may need to be set before vdiv)
        ch0.probe = 10
        # Set multiple parameters - these are queued
        ch0.vdiv = 1.0
        ch1.enabled = False
        scope.afg.frequency = 1000.0
        scope.mem_depth = '100K'
        # Queue should have multiple items
        assert len(scope._queue) > 1
        # Reading any value auto-commits all pending changes
        result = ch0.vdiv
        assert abs(result - 1.0) < 1e-6
        # Queue should be cleared after auto-commit
        assert len(scope._queue) == 0
        # Verify all were applied
        assert ch0.probe == 10
        assert ch1.enabled is False
        assert abs(scope.afg.frequency - 1000.0) < 1.0
        assert isinstance(scope.mem_depth, str)

    def test_multiple_writes_and_reads(self, scope):
        """Test that queue is cleared after auto-commit on read."""
        ch = scope.channels[0]
        # First write and read
        ch.vdiv = 1.0
        result1 = ch.vdiv
        assert abs(result1 - 1.0) < 1e-6
        # Queue should be empty after auto-commit
        assert len(scope._queue) == 0
        # Second write with different value
        ch.vdiv = 2.0
        # Queue should have the new value
        assert len(scope._queue) > 0
        # Read should auto-commit and return new value
        result2 = ch.vdiv
        assert abs(result2 - 2.0) < 1e-6
        # Queue should be empty again
        assert len(scope._queue) == 0

    def test_auto_commit_on_single(self, scope):
        """Test that single() auto-commits pending changes."""
        ch = scope.channels[0]
        # Set a parameter without commit
        ch.vdiv = 2.0
        # Call single() - should auto-commit
        scope.single()
        # Force trigger and wait for completion
        scope.force()
        scope.wait_trigger()
        # Verify parameter was applied
        result = ch.vdiv
        assert abs(result - 2.0) < 1e-6
class TestWaveformAcquisition:
    """Test waveform acquisition.

    Uses the built-in AFG as the signal source (assumes the AFG output is
    wired to channel 1 on the test bench).
    """

    def test_waveform_capture(self, scope):
        """Test capturing waveform from channel."""
        # Configure AFG to generate signal
        scope.afg.enabled = True
        scope.afg.function = 'SINusoid'
        scope.afg.frequency = 1000.0
        scope.afg.amplitude = 2.0
        # Configure channel
        ch = scope.channels[0]
        ch.enabled = True
        ch.vmax = 5.0
        ch.coupling = 'DC'
        ch.probe = 10
        # Configure timebase
        scope.tdiv = 1e-3  # 1ms/div
        # Commit and trigger
        scope.single()
        # Force trigger and wait for completion
        scope.force()
        scope.wait_trigger()
        # Capture waveform
        waveform = ch.waveform()
        # Verify waveform properties
        assert isinstance(waveform, np.ndarray)
        assert waveform.dtype == np.float64
        assert len(waveform) > 100  # Should have data points
        # Disable AFG
        scope.afg.enabled = False

    def test_waveform_multiple_channels(self, scope):
        """Test capturing waveforms from multiple channels."""
        # Configure AFG
        scope.afg.enabled = True
        scope.afg.function = 'SINusoid'
        scope.afg.frequency = 1000.0
        # Configure channels - both should be enabled even if only ch0 has signal
        scope.channels[0].enabled = True
        scope.channels[1].enabled = True
        scope.tdiv = 1e-3
        # Trigger
        scope.single()
        # Force trigger and wait for completion
        scope.force()
        scope.wait_trigger()
        # Capture from both channels
        # Note: Only channel 0 has AFG signal, channel 1 may be empty/noise
        wf0 = scope.channels[0].waveform()
        wf1 = scope.channels[1].waveform()
        # Verify both return numpy arrays
        assert isinstance(wf0, np.ndarray)
        assert isinstance(wf1, np.ndarray)
        # Channel 0 should have data (connected to AFG)
        assert len(wf0) > 0
        # Channel 1 should also return an array (even if just noise/zeros)
        # The waveform property should always return data of the configured memory depth
        assert isinstance(wf1, np.ndarray)
        # Cleanup
        scope.afg.enabled = False
class TestTriggerParameters:
    """Test trigger parameter configuration.

    Set + query round-trips on scope.trigger; string comparisons accept
    both abbreviated and full SCPI forms where the scope may vary.
    """

    def test_trigger_mode(self, scope):
        """Test setting and querying trigger mode."""
        scope.trigger.mode = 'EDGE'
        result = scope.trigger.mode
        assert result.upper() == 'EDGE'

    def test_trigger_source_with_channel_object(self, scope):
        """Test setting trigger source using Channel object."""
        scope.trigger.source = scope.channels[0]
        result = scope.trigger.source
        assert 'CHAN1' in result.upper()

    def test_trigger_source_with_string(self, scope):
        """Test setting trigger source using string."""
        scope.trigger.source = 'CHAN2'
        result = scope.trigger.source
        assert 'CHAN2' in result.upper()

    def test_trigger_level(self, scope):
        """Test setting and querying trigger level."""
        test_level = 0.5
        scope.trigger.level = test_level
        result = scope.trigger.level
        assert abs(result - test_level) < 0.01

    def test_trigger_slope(self, scope):
        """Test setting and querying trigger slope."""
        scope.trigger.slope = 'POSitive'
        result = scope.trigger.slope
        assert 'POS' in result.upper()

    def test_trigger_sweep(self, scope):
        """Test setting and querying trigger sweep mode."""
        scope.trigger.sweep = 'NORMal'
        result = scope.trigger.sweep
        assert 'NORM' in result.upper()

    def test_run_stop_methods(self, scope):
        """Test run and stop methods."""
        # These should not raise errors
        scope.stop()
        scope.run()

    def test_force_method(self, scope):
        """Test force trigger method."""
        # Should not raise errors
        scope.force()

    def test_trigger_nreject(self, scope):
        """Test trigger noise rejection."""
        scope.trigger.nreject = True
        assert scope.trigger.nreject is True
        scope.trigger.nreject = False
        assert scope.trigger.nreject is False
class TestAdaptiveCapture:
    """Test adaptive waveform capture functionality."""

    @pytest.mark.skip(reason="Requires real signal for adaptive capture")
    def test_capture_adaptive_basic(self, scope):
        """Test basic adaptive capture with AFG signal.
        This test requires a real signal connected to the scope.
        Skipped in automated testing.
        """
        # Configure AFG
        scope.afg.enabled = True
        scope.afg.function = 'SINusoid'
        scope.afg.frequency = 1000.0
        scope.afg.amplitude = 2.0
        # Configure channel with large initial scale
        ch = scope.channels[0]
        ch.enabled = True
        ch.vmax = 20.0  # Start large
        ch.coupling = 'DC'
        ch.probe = 10
        # Configure timebase and trigger
        scope.tdiv = 1e-3
        scope.trigger.mode = 'EDGE'
        scope.trigger.source = ch
        scope.trigger.level = 0.0
        initial_vmax = ch.vmax
        # Trigger first acquisition
        scope.single()
        scope.wait_trigger()
        # Capture with adaptation (may re-trigger if needed)
        waveform = ch.waveform(adaptive=True, headroom=1.2, max_iterations=3)
        final_vmax = ch.vmax
        # Verify waveform was captured
        assert isinstance(waveform, np.ndarray)
        assert len(waveform) > 100
        # Verify scale was adjusted (should zoom in from 20V)
        assert final_vmax <= initial_vmax
        # Cleanup
        scope.afg.enabled = False

    def test_capture_adaptive_headroom_validation(self, scope):
        """Test that invalid headroom values raise errors."""
        ch = scope.channels[0]
        # headroom < 1.0 would clip the signal, so it must be rejected
        with pytest.raises(ValueError, match="headroom must be >= 1.0"):
            ch.waveform(adaptive=True, headroom=0.5)
class TestErrorHandling:
    """Test error handling and validation."""

    def test_invalid_channel_index(self, scope):
        """Test that accessing invalid channel index raises IndexError."""
        with pytest.raises(IndexError):
            _ = scope.channels[10]

    def test_type_conversion(self, scope):
        """Test that string values are properly converted."""
        ch = scope.channels[0]
        # Voltage with units - should be parsed to float
        ch.vdiv = '500mV'
        # Reading triggers auto-commit and reads actual value from device
        assert abs(ch.vdiv - 0.5) < 1e-6
        # Frequency with units - should be parsed to float
        scope.afg.frequency = '10kHz'
        # Reading triggers auto-commit and reads actual value from device
        assert abs(scope.afg.frequency - 10000.0) < 1.0

    def test_boolean_conversion(self, scope):
        """Test boolean parameter handling."""
        ch = scope.channels[0]
        # Python booleans
        ch.enabled = True
        assert ch.enabled is True
        ch.enabled = False
        assert ch.enabled is False
if __name__ == '__main__':
    # Allow running this file directly: delegates to pytest in verbose mode.
    pytest.main([__file__, '-v'])
| wjakob/rigol | 2 | Bode plot utility for Rigol DHO900 series oscilloscopes | Python | wjakob | Wenzel Jakob | EPFL |
setup.py | Python | import sys, re, os
# Import the build backend. Both packages are build-time dependencies
# declared in pyproject.toml; failing here means the script was invoked
# outside of a pip/PEP 517 build environment.
try:
    from skbuild import setup
    import nanobind  # imported to verify the build dependency is installed
except ImportError:
    print("The preferred way to invoke 'setup.py' is via pip, as in 'pip "
          "install .'. If you wish to run the setup script directly, you must "
          "first install the build dependencies listed in pyproject.toml!",
          file=sys.stderr)
    raise
# Package metadata. scikit-build's setup() drives CMake to compile the
# native extension, which is installed into src/typing_repro
# (cmake_install_dir) alongside the pure-Python package.
setup(
    name="typing_repro",
    version="0.0.1",
    author="Wenzel Jakob",
    author_email="wenzel.jakob@epfl.ch",
    description="An example minimal project that compiles bindings using nanobind and scikit-build",
    url="https://github.com/wjakob/typing_repro",
    license="BSD",
    packages=['typing_repro'],
    package_dir={'': 'src'},
    cmake_install_dir="src/typing_repro",
    include_package_data=True,
    python_requires=">=3.8"
)
| wjakob/typing_repro | 5 | CMake | wjakob | Wenzel Jakob | EPFL | |
src/typing_repro/__init__.py | Python | from .typing_repro_ext import A
| wjakob/typing_repro | 5 | CMake | wjakob | Wenzel Jakob | EPFL | |
src/typing_repro_ext.cpp | C++ | #include <nanobind/nanobind.h>
namespace nb = nanobind;
using namespace nb::literals;
// Minimal reproduction module: exposes an empty type `A` with a single
// bound method `add` taking two named integer arguments.
NB_MODULE(typing_repro_ext, m) {
    struct A { };

    nb::class_<A>(m, "A")
        .def(nb::init<>())
        .def(
            "add",
            [](A & /* self */, int lhs, int rhs) { return lhs + rhs; },
            "a"_a, "b"_a);
}
| wjakob/typing_repro | 5 | CMake | wjakob | Wenzel Jakob | EPFL | |
tests/test_basic.py | Python | import typing_repro as m
def test_add():
    """A.add should return the integer sum of its two arguments."""
    instance = m.A()
    assert instance.add(1, 2) == 3
| wjakob/typing_repro | 5 | CMake | wjakob | Wenzel Jakob | EPFL | |
flight_server.py | Python | # Copied from https://github.com/apache/arrow/blob/master/python/examples/flight/server.py
# + modified to use grpc+unix
"""An example Flight Python server."""
import argparse
import ast
import threading
import time
import pyarrow
import pyarrow.flight
class FlightServer(pyarrow.flight.FlightServerBase):
    """In-memory Flight server: stores tables uploaded via do_put, keyed
    by their flight descriptor, and streams them back via do_get."""

    def __init__(self, host="localhost", location=None,
                 tls_certificates=None, verify_client=False,
                 root_certificates=None, auth_handler=None):
        super(FlightServer, self).__init__(
            location, auth_handler, tls_certificates, verify_client,
            root_certificates)
        self.flights = {}  # descriptor key -> pyarrow.Table
        self.host = host
        self.tls_certificates = tls_certificates
        self.location = location

    @classmethod
    def descriptor_to_key(cls, descriptor):
        """Build a hashable key (type, command, path tuple) from a descriptor.

        Fixed: the first parameter of a classmethod is the class itself and
        is conventionally named `cls` (it was previously named `self`).
        """
        return (descriptor.descriptor_type.value, descriptor.command,
                tuple(descriptor.path or tuple()))

    def _make_flight_info(self, key, descriptor, table):
        """Describe a stored table: endpoint, row count, and stream size."""
        # self.location is already a complete URI (grpc+unix:// or
        # grpc+tcp://), so parse it directly. The previous
        # Location.for_grpc_unix(self.location) call prepended a second
        # grpc+unix:// scheme and was wrong for TCP locations.
        location = pyarrow.flight.Location(self.location)
        endpoints = [pyarrow.flight.FlightEndpoint(repr(key), [location]), ]
        # Serialize into a mock sink purely to measure the IPC stream size.
        mock_sink = pyarrow.MockOutputStream()
        stream_writer = pyarrow.RecordBatchStreamWriter(
            mock_sink, table.schema)
        stream_writer.write_table(table)
        stream_writer.close()
        data_size = mock_sink.size()
        return pyarrow.flight.FlightInfo(table.schema,
                                         descriptor, endpoints,
                                         table.num_rows, data_size)

    def list_flights(self, context, criteria):
        """Yield a FlightInfo for every stored table."""
        for key, table in self.flights.items():
            if key[1] is not None:
                descriptor = \
                    pyarrow.flight.FlightDescriptor.for_command(key[1])
            else:
                descriptor = pyarrow.flight.FlightDescriptor.for_path(*key[2])
            yield self._make_flight_info(key, descriptor, table)

    def get_flight_info(self, context, descriptor):
        """Return FlightInfo for one descriptor, or raise if unknown."""
        key = FlightServer.descriptor_to_key(descriptor)
        if key in self.flights:
            table = self.flights[key]
            return self._make_flight_info(key, descriptor, table)
        raise KeyError('Flight not found.')

    def do_put(self, context, descriptor, reader, writer):
        """Read the whole uploaded stream and store it under its key."""
        key = FlightServer.descriptor_to_key(descriptor)
        print(key)
        self.flights[key] = reader.read_all()

    def do_get(self, context, ticket):
        """Stream a stored table back in 64K-row record batches."""
        # The ticket body is the repr() of the key (see _make_flight_info).
        key = ast.literal_eval(ticket.ticket.decode())
        if key not in self.flights:
            return None
        return pyarrow.flight.RecordBatchStream(self.flights[key].to_reader(64_000))

    def list_actions(self, context):
        """Advertise the custom actions this server supports."""
        return [
            ("clear", "Clear the stored flights."),
            ("shutdown", "Shut down this server."),
        ]

    def do_action(self, context, action):
        """Dispatch custom actions: clear, healthcheck, shutdown."""
        if action.type == "clear":
            self.flights = {}
        elif action.type == "healthcheck":
            pass
        elif action.type == "shutdown":
            yield pyarrow.flight.Result(pyarrow.py_buffer(b'Shutdown!'))
            # Shut down on background thread to avoid blocking current
            # request
            threading.Thread(target=self._shutdown).start()
        else:
            raise KeyError("Unknown action {!r}".format(action.type))

    def _shutdown(self):
        """Shut down after a delay."""
        print("Server is shutting down...")
        time.sleep(2)
        self.shutdown()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--use-tcp", type=bool, default=False)
args = parser.parse_args()
if args.use_tcp:
location = "grpc+tcp://localhost:3000"
else:
location = "grpc+unix:///tmp/test.sock"
server = FlightServer("localhost", location)
print("Serving on", location)
server.serve()
if __name__ == '__main__':
main()
| wjones127/arrow-ipc-bench | 16 | Testing various methods of moving Arrow data between processes | Python | wjones127 | Will Jones | lancedb |
retrieve_arrow.py | Python | # In new Python
from io import TextIOWrapper
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.flight
import pyarrow.plasma as plasma
from multiprocessing import shared_memory
from contextlib import contextmanager
import time
def retrieve_sharedmemory(name: str) -> None:
    """Read an Arrow IPC stream out of POSIX shared memory and sum columns.

    Benchmarks the consumer side only: attaches to an existing segment,
    deserializes the table, and reduces every column.

    Fixed: the return annotation previously claimed `-> pa.Table`, but the
    function returns nothing.
    """
    table_shm = shared_memory.SharedMemory(name=name)
    table = pa.ipc.open_stream(table_shm.buf).read_all()
    for column in table:
        pc.sum(column)
    # NOTE(review): the segment handle is deliberately left open -- closing
    # it while Arrow buffers still reference shm.buf would invalidate them.
    # Confirm the benchmark process exits shortly afterwards.
    # del table
    # table_shm.close()
def retrieve_plasma(client, object_id: bytes) -> None:
    """Fetch an Arrow IPC stream from the plasma store and sum every column.

    Fixed: the return annotation previously claimed `-> pa.Table`, but the
    function returns nothing; it only exercises consumer-side work.
    """
    [buffer] = client.get_buffers([object_id])
    reader = pa.BufferReader(buffer)
    table = pa.ipc.open_stream(reader).read_all()
    for column in table:
        pc.sum(column)
def retrieve_flight(client) -> None:
    """Download the 'table' flight chunk-by-chunk and sum each column.

    Fixed: the return annotation previously claimed `-> pa.Table`, but the
    function returns nothing. Also renamed the `flight_into` typo.
    """
    descriptor = pa.flight.FlightDescriptor.for_path("table")
    flight_info = client.get_flight_info(descriptor)
    reader = client.do_get(flight_info.endpoints[0].ticket)
    for chunk in reader:
        for col in chunk.data.columns:
            pc.sum(col)
@contextmanager
def timer(f: TextIOWrapper, name: str):
    """Time the enclosed block and append '"name",seconds' as a CSV row to f.

    Fixed: uses time.perf_counter() (monotonic, high resolution) rather
    than time.time(), which can jump if the wall clock is adjusted during
    a benchmark run.
    """
    start_time = time.perf_counter()
    yield None
    end_time = time.perf_counter()
    f.write(f'"{name}",{(end_time-start_time)}\n')
if __name__ == "__main__":
    # Consumer-side benchmark: assumes share_arrow.py (and the plasma store
    # and Flight server) are already running and have published the data.
    n_iters = 10
    f = open("retrieve_results.csv", mode="w")

    # Plasma: attach to the store and read the fixed 20-byte object id.
    plasma_client = plasma.connect("/tmp/plasma")
    object_id = plasma.ObjectID(20 * b"a")
    for i in range(n_iters):
        with timer(f, "plasma_import"):
            retrieve_plasma(plasma_client, object_id)

    # POSIX shared memory segment published under the name "table".
    shared_memory_name = "table"
    for i in range(n_iters):
        with timer(f, "sharedmemory_import"):
            retrieve_sharedmemory(shared_memory_name)

    # Flight over a unix domain socket.
    location = "grpc+unix:///tmp/test.sock"
    flight_client = pa.flight.connect(location)
    for i in range(n_iters):
        with timer(f, "flight_unix_import"):
            retrieve_flight(flight_client)

    # Flight over TCP loopback.
    location = "grpc+tcp://localhost:3000"
    flight_client = pa.flight.connect(location)
    for i in range(n_iters):
        with timer(f, "flight_tcp_import"):
            retrieve_flight(flight_client)
| wjones127/arrow-ipc-bench | 16 | Testing various methods of moving Arrow data between processes | Python | wjones127 | Will Jones | lancedb |
share_arrow.py | Python | from io import TextIOWrapper
import time
import pyarrow as pa
import pyarrow.flight
from multiprocessing import shared_memory
import pyarrow.plasma as plasma
from contextlib import contextmanager
import numpy as np
# TODO: show IPC file for comparison
# TODO: show Ray actor for comparison
def calculate_ipc_size(table: pa.Table) -> int:
    """Return the size in bytes of `table` serialized as an Arrow IPC stream."""
    # A mock stream counts bytes without allocating or copying anything.
    counter = pa.MockOutputStream()
    with pa.ipc.new_stream(counter, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    return counter.size()
def write_ipc_buffer(table: pa.Table) -> pa.Buffer:
    """Serialize `table` to an in-memory Arrow IPC stream and return the buffer."""
    out = pa.BufferOutputStream()
    with pa.ipc.new_stream(out, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    return out.getvalue()
def get_table(nrows: int):
    """Build a two-column ('x', 'y') table of `nrows` random doubles."""
    columns = {name: np.random.random(nrows) for name in ("x", "y")}
    return pa.table(columns)
def export_to_shared_memory(name: str, table: pa.Table):
    """Serialize `table` as an Arrow IPC stream into a new shared-memory segment.

    The segment is created exactly the size of the IPC stream. The handle
    is returned so the caller can keep the segment alive and unlink it
    when done; a segment with `name` must not already exist.
    """
    size = calculate_ipc_size(table)
    shm = shared_memory.SharedMemory(create=True, name=name, size=size)
    # Write directly into the mapped memory; the writer is bounded by `size`.
    stream = pa.FixedSizeBufferWriter(pa.py_buffer(shm.buf))
    with pa.RecordBatchStreamWriter(stream, table.schema) as writer:
        writer.write_table(table)
    return shm
def clear_shared_memory(name: str):
    """Remove the named shared-memory segment if it exists.

    A missing segment is not an error (the first benchmark iteration has
    nothing to clean up). Fixed: the previous bare `except: pass` silently
    swallowed every exception, including real failures such as permission
    errors; only FileNotFoundError is expected here. The attach handle is
    also closed now instead of being leaked.
    """
    try:
        shm = shared_memory.SharedMemory(name=name)
    except FileNotFoundError:
        return  # nothing to clear
    shm.unlink()
    shm.close()
def export_to_plasma(client, object_id: bytes, table: pa.Table):
    """Serialize `table` into a plasma object of exactly the IPC size and seal it."""
    nbytes = calculate_ipc_size(table)
    target = client.create(object_id, nbytes)
    sink = pa.FixedSizeBufferWriter(target)
    with pa.RecordBatchStreamWriter(sink, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    # Sealing makes the object immutable and visible to other clients.
    client.seal(object_id)
def clear_plasma(client, object_id: bytes):
    # Delete any previous copy so each benchmark iteration re-creates the object.
    client.delete([object_id])
def export_to_flight(client, table: pa.Table):
    """Upload `table` to the Flight server under the path 'table'."""
    descriptor = pa.flight.FlightDescriptor.for_path("table")
    stream_writer, _ = client.do_put(descriptor, table.schema)
    # Stream in 64K-row batches, then close to signal end-of-upload.
    stream_writer.write_table(table, max_chunksize=64_000)
    stream_writer.close()
@contextmanager
def timer(f: TextIOWrapper, name: str, size: int):
    """Time the enclosed block and append '"name",seconds,size' to f as CSV.

    Fixed: uses time.perf_counter() (monotonic, high resolution) rather
    than time.time(), which can jump if the wall clock is adjusted during
    a benchmark run.
    """
    start_time = time.perf_counter()
    yield None
    end_time = time.perf_counter()
    f.write(f'"{name}",{(end_time-start_time)},{size}\n')
if __name__ == "__main__":
    # Producer-side benchmark: publishes the same 100M-row table through
    # plasma, POSIX shared memory, and Flight (unix + TCP), timing each.
    n_iters = 10
    n_rows = 100_000_000
    table = get_table(n_rows)
    f = open("share_results.csv", mode="w")

    # Warm up the memory pool, so all runs are comparable
    buf = write_ipc_buffer(table)
    del buf
    buffer_size = calculate_ipc_size(table)

    # First, do plasma
    plasma_client = plasma.connect("/tmp/plasma")
    object_id = plasma.ObjectID(20 * b"a")
    for i in range(n_iters):
        clear_plasma(plasma_client, object_id)
        with timer(f, "plasma_export", buffer_size):
            export_to_plasma(plasma_client, object_id, table)

    # Then, do shared memory
    shared_memory_name = "table"
    for i in range(n_iters):
        clear_shared_memory(shared_memory_name)
        with timer(f, "sharedmemory_export", buffer_size):
            export_to_shared_memory(shared_memory_name, table)

    # Flight over unix domain socket
    location = "grpc+unix:///tmp/test.sock"
    flight_client = pa.flight.connect(location)
    for i in range(n_iters):
        # More graceful way to collect all the results?
        list(flight_client.do_action("clear"))
        with timer(f, "flight_unix_export", buffer_size):
            export_to_flight(flight_client, table)

    # Flight over TCP
    location = "grpc+tcp://localhost:3000"
    flight_client = pa.flight.connect(location)
    for i in range(n_iters):
        # More graceful way to collect all the results?
        list(flight_client.do_action("clear"))
        with timer(f, "flight_tcp_export", buffer_size):
            export_to_flight(flight_client, table)

    # We need to disconnect, since only one client can connection to a Flight
    # server at a time when using the
    # flight_client.close()
    # del flight_client
    f.close()
    print("ready!")
    # Keep the process alive so consumers (retrieve_arrow.py) can attach to
    # the shared memory / plasma objects published above; stop with Ctrl-C.
    while True:
        time.sleep(10)
local_bench.sh | Shell | # Create a 1GB file
# Create a 1GB file to benchmark against.
cargo run --release file://$(pwd)/test.bin \
    upload-data --size $((1024 * 1024 * 1024))

OUTPUT_FILE=results.ndjson
# -f: don't error when the results file doesn't exist yet (first run).
rm -f "$OUTPUT_FILE"

# Test parallel download to see what parallelism works best
for i in 1 5 10 20; do
    echo "Running test $i"
    cargo run --release file://$(pwd)/test.bin \
        download --parallel-downloads "$i" >> "$OUTPUT_FILE"
done

# Test columnar page reads
for page_size in 4096 65536 $((10 * 1024 * 1024)); do
    echo "Running test $page_size"
    cargo run --release file://$(pwd)/test.bin \
        columnar --page-sizes="$page_size,$page_size,$page_size" >> "$OUTPUT_FILE"
done

# Test columnar page reads with large blobs
cargo run --release file://$(pwd)/test.bin \
    columnar --page-sizes=4096,65536,$((100 * 1024 * 1024)) >> "$OUTPUT_FILE"
src/columnar.rs | Rust | //! A simulated columnar format. Various pages are stored in a single file.
//!
//! We simulate this by considering an existing blob and a set of fixed-size pages
//! and splitting up the file into those pages so we can read.
//!
//! For example, we might get a parameter `--page-sizes=1024,4096,16384` and
//! so then we split up the file into pages of those sizes, repeating as necessary.
use std::sync::Arc;
use futures::{StreamExt, TryStreamExt};
use object_store::{path::Path, ObjectStore};
use crate::inspect_location;
/// Reads every simulated column page of every "row group" out of each object
/// under `location`, with up to `parallel_downloads` groups in flight at once,
/// then prints one NDJSON line of throughput stats.
///
/// `page_sizes` gives one page size per simulated column; each object is
/// treated as repeated groups of those pages laid out back to back.
pub async fn columnar_read_test(
    object_store: Arc<dyn ObjectStore>,
    location: Path,
    parallel_downloads: usize,
    page_sizes: Vec<usize>,
) -> Result<(), Box<dyn std::error::Error>> {
    let objects = inspect_location(object_store.as_ref(), &location).await?;
    // All objects must be the same size so a single page layout fits them all.
    let object_size = objects[0].size;
    assert!(
        objects.iter().all(|o| o.size == object_size),
        "expected all objects to have the same size"
    );

    let num_columns = page_sizes.len();
    // One "group" = one page from every column, stored consecutively.
    let group_size = page_sizes.iter().sum::<usize>();
    let num_groups = object_size / group_size;
    // page_offsets[column][group] = byte offset of that page within an object.
    let mut page_offsets = vec![Vec::with_capacity(num_groups); num_columns];
    let total_size = group_size * num_groups;
    assert!(total_size <= object_size, "object is too small");
    let mut offset = 0;
    for _group_i in 0..num_groups {
        for column_i in 0..num_columns {
            let page_size = page_sizes[column_i];
            page_offsets[column_i].push(offset);
            offset += page_size;
        }
    }

    let objects_ref = objects.as_slice();
    // Interleave work across objects: group 0 of every object, then group 1, ...
    let ranges_iter = (0..num_groups).flat_map(move |group_i| {
        objects_ref
            .iter()
            .map(move |meta| (meta.location.clone(), group_i))
            .collect::<Vec<_>>()
    });

    let start = std::time::Instant::now();
    let page_sizes_ref = page_sizes.as_slice();
    let page_offsets_ref = page_offsets.as_slice();
    let _counts = futures::stream::iter(ranges_iter)
        .map(|(location, group_i)| {
            let object_store = object_store.clone();
            async move {
                // Fetch every column's page for this group concurrently,
                // each range request on its own tokio task.
                let reads = page_offsets_ref
                    .iter()
                    .enumerate()
                    .map(|(column_i, offsets)| {
                        let page_size = page_sizes_ref[column_i];
                        let offset = offsets[group_i];
                        // We already checked the object size, so this should be safe
                        let range = offset..(offset + page_size);
                        let location = location.clone();
                        let object_store = object_store.clone();
                        tokio::task::spawn(async move {
                            object_store
                                .get_range(&location, range)
                                .await
                                .map(|res| res.len())
                        })
                    })
                    .collect::<Vec<_>>();
                let counts = futures::future::join_all(reads).await;
                // Propagate the first request or join error; otherwise sum
                // the bytes actually read for this group.
                let mut total = 0;
                for count in counts {
                    total += match count {
                        Ok(Ok(count)) => count,
                        Ok(Err(e)) => return Err(e),
                        Err(e) => return Err(object_store::Error::JoinError { source: e }),
                    };
                }
                Ok(total)
            }
        })
        // `buffered` runs up to `parallel_downloads` group futures at once,
        // yielding results in submission order.
        .buffered(parallel_downloads)
        .try_collect::<Vec<_>>()
        .await?;
    let end = std::time::Instant::now();
    let elapsed_us = (end - start).as_micros();

    // Total bytes requested across all objects and groups.
    let total_size = objects.len() * group_size * num_groups;
    let mbps = total_size as f64 / 1024.0 / 1024.0 / (elapsed_us as f64 / 1_000_000.0);
    println!("{{\"num_objects\": {}, \"num_groups\": {}, \"page_sizes\": {:?}, \"parallel_downloads\": {}, \"elapsed_us\": {}, \"mbps\": {}}}",
        objects.len(), num_groups, page_sizes, parallel_downloads, elapsed_us, mbps);

    Ok(())
}
| wjones127/object-store-bench | 0 | Rust | wjones127 | Will Jones | lancedb | |
src/download.rs | Rust | //! Parallel download implementation
use std::sync::Arc;
use futures::{StreamExt, TryStreamExt};
use object_store::{path::Path, ObjectStore};
use tracing::instrument;
use crate::inspect_location;
/// Benchmarks the approach of downloading an object in parallel.
///
/// Splits each object under `location` into `block_size`-byte ranges and
/// fetches up to `parallel_downloads` ranges concurrently, then prints one
/// NDJSON line of throughput stats.
///
/// * `location`: object (or common prefix of objects) to download
/// * `parallel_downloads`: maximum number of requests to make in parallel
/// * `block_size`: size of each block to download; defaults to an even split
///   of the object across `parallel_downloads`
pub async fn parallel_download_bench(
    object_store: Arc<dyn ObjectStore>,
    location: Path,
    parallel_downloads: usize,
    block_size: Option<usize>,
) -> Result<(), Box<dyn std::error::Error>> {
    let objects = inspect_location(object_store.as_ref(), &location).await?;
    // All objects must be the same size so one block layout fits them all.
    let object_size = objects[0].size;
    assert!(
        objects.iter().all(|o| o.size == object_size),
        "expected all objects to have the same size"
    );
    // Clamp to at least 1: an object smaller than `parallel_downloads` (or an
    // explicit 0) would otherwise make the ceiling division below divide by 0.
    let block_size = block_size
        .unwrap_or(object_size / parallel_downloads)
        .max(1);
    // Ceiling division; the final block may be short.
    let num_blocks = (object_size + block_size - 1) / block_size;

    // Make requests interleaving across objects.
    let objects_ref = &objects;
    let ranges_iter = (0..num_blocks).flat_map(move |block_i| {
        let start = block_i * block_size;
        let end = std::cmp::min((block_i + 1) * block_size, object_size);
        objects_ref
            .iter()
            .map(move |meta| (meta.location.clone(), start..end))
            .collect::<Vec<_>>()
    });

    // TODO: add tracing
    let start = std::time::Instant::now();
    let _counts = futures::stream::iter(ranges_iter)
        .map(|(location, range)| fetch_range_len(object_store.clone(), location, range))
        .buffer_unordered(parallel_downloads)
        .try_collect::<Vec<_>>()
        .await?;
    let end = std::time::Instant::now();
    let elapsed_us = (end - start).as_micros();

    let total_size = object_size * objects.len();
    let mbps = total_size as f64 / 1024.0 / 1024.0 / (elapsed_us as f64 / 1_000_000.0);
    println!("{{\"num_objects\": {}, \"num_blocks\": {}, \"block_size\": {}, \"parallel_downloads\": {}, \"elapsed_us\": {}, \"mbps\": {}}}",
        objects.len(), num_blocks, block_size, parallel_downloads, elapsed_us, mbps);
    Ok(())
}
/// Fetches a single byte range on a dedicated tokio task and reports how many
/// bytes came back. Join failures and request failures are both surfaced as
/// boxed errors.
#[instrument(skip(object_store))]
async fn fetch_range_len(
    object_store: Arc<dyn ObjectStore>,
    location: Path,
    range: std::ops::Range<usize>,
) -> Result<usize, Box<dyn std::error::Error>> {
    let handle = tokio::task::spawn(async move {
        object_store.get_range(&location, range).await
    });
    let bytes = handle.await??;
    Ok(bytes.len())
}
| wjones127/object-store-bench | 0 | Rust | wjones127 | Will Jones | lancedb | |
src/main.rs | Rust | use std::sync::Arc;
use clap::{Parser, Subcommand};
use futures::TryStreamExt;
use object_store::{parse_url, ObjectMeta};
use object_store::{path::Path, ObjectStore};
use rand::{thread_rng, Rng, RngCore};
use tokio::io::AsyncWriteExt;
use tracing_chrome::{ChromeLayerBuilder, TraceStyle};
use tracing_subscriber::prelude::*;
mod columnar;
mod download;
/// Upload a test object of the given size
///
/// This will upload in batches of 10MB, allowing for objects larger than memory.
///
/// The data generated will be random bytes.
async fn upload_test_data(
    object_store: Arc<dyn ObjectStore>,
    location: &Path,
    size: usize,
) -> Result<(), Box<dyn std::error::Error>> {
    // Multipart upload so the object never has to fit in memory at once.
    let (_id, mut writer) = object_store.put_multipart(location).await?;
    // Write 10 MB at a time
    let mut written = 0;
    let mut rng = rand::thread_rng();
    let mut buffer = vec![0; 10 * 1024 * 1024];
    while written < size {
        // The final chunk may be smaller than the 10 MB buffer.
        let to_write = std::cmp::min(size - written, 10 * 1024 * 1024);
        rng.fill_bytes(&mut buffer);
        writer.write_all(&buffer[0..to_write]).await?;
        written += to_write;
    }
    // Flush buffered bytes, then shut down to complete the multipart upload.
    writer.flush().await?;
    writer.shutdown().await?;
    Ok(())
}
/// Uploads `num_objects` test objects under `location`, splitting `size`
/// bytes evenly between them.
///
/// Object names are `object_{i}.bin`; when `random_prefixes` is set, each
/// object is additionally nested under a random 8-character alphanumeric
/// prefix.
///
/// Panics if `num_objects` is zero or does not evenly divide `size`.
async fn upload_multiple(
    object_store: Arc<dyn ObjectStore>,
    location: &Path,
    num_objects: usize,
    size: usize,
    random_prefixes: bool,
) -> Result<(), Box<dyn std::error::Error>> {
    // Validate *before* dividing so the caller gets a clear message instead
    // of a raw divide-by-zero panic or a silently truncated per-object size.
    if num_objects == 0 {
        panic!("num_objects must be greater than zero");
    }
    if size % num_objects != 0 {
        panic!("size must be divisible by num_objects");
    }
    let size_per_object = size / num_objects;
    for i in 0..num_objects {
        let mut location = location.parts().collect::<Vec<_>>();
        if random_prefixes {
            let prefix = thread_rng()
                .sample_iter(rand::distributions::Alphanumeric)
                .take(8)
                .collect::<Vec<u8>>();
            // Alphanumeric samples are always valid UTF-8.
            let prefix = String::from_utf8(prefix).unwrap();
            location.push(prefix.into());
        }
        location.push(format!("object_{}.bin", i).into());
        let location = Path::from_iter(location);
        upload_test_data(object_store.clone(), &location, size_per_object).await?;
    }
    Ok(())
}
/// Inspects the given location and returns a list of all objects and their sizes.
///
/// If the location is an object itself, it will just return that object.
///
/// If the location is a common prefix, it will return all objects with that prefix.
async fn inspect_location(
    object_store: &dyn ObjectStore,
    location: &Path,
) -> Result<Vec<ObjectMeta>, Box<dyn std::error::Error>> {
    // First try to treat `location` as a single object.
    match object_store.head(location).await {
        Ok(metadata) => Ok(vec![metadata]),
        // Not an object: fall back to listing it as a common prefix.
        Err(object_store::Error::NotFound { .. }) => Ok(object_store
            .list(Some(location))
            .await?
            .try_collect()
            .await?),
        // Any other error (permissions, network, ...) is propagated.
        Err(err) => Err(err.into()),
    }
}
/// Command-line arguments for the object store benchmark.
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// URI of the object (or common prefix) to operate on,
    /// e.g. `file:///tmp/test.bin` or `s3://bucket/key`
    object_uri: String,
    /// Enable tracing for debugging
    #[arg(short, long, default_value = "false")]
    traced: bool,
    #[command(subcommand)]
    command: Option<Commands>,
}
/// Benchmark subcommands.
#[derive(Subcommand)]
enum Commands {
    /// Uploads test data to the given object store uri
    ///
    /// This will overwrite any existing data at the given location.
    UploadData {
        /// Number of bytes to upload to the object. Defaults to 100MB.
        #[arg(short, long, default_value = "104857600")]
        size: usize,
    },
    /// Uploads multiple test objects
    UploadMultiple {
        /// Number of objects to upload
        #[arg(short, long, default_value = "10")]
        num_objects: usize,
        /// Total number of bytes to upload across all objects
        /// Default: 10GB
        #[arg(short, long, default_value = "10737418240")]
        size: usize,
        /// Whether to use random prefixes
        #[arg(short, long, default_value = "false")]
        random_prefixes: bool,
    },
    /// Times how long it takes to download an object.
    ///
    /// This downloads the object in parallel, using the given number of parallel downloads.
    /// The file is split evenly into parallel_downloads blocks and the blocks are
    /// downloaded concurrently.
    Download {
        /// Maximum number of blocks to download in parallel
        #[arg(short, long, default_value = "10")]
        parallel_downloads: usize,
        /// Size of each downloaded block; defaults to an even split across
        /// parallel_downloads
        #[arg(short, long, default_value = None)]
        block_size: Option<usize>,
    },
    /// Times columnar-style page reads against the object(s).
    Columnar {
        /// Number of batches to read in parallel
        #[arg(short, long, default_value = "10")]
        parallel_downloads: usize,
        /// Comma-separated list of page sizes to use
        #[arg(short, long, default_value = "65536,65536,65536")]
        page_sizes: Option<String>,
    },
}
#[tokio::main]
async fn main() {
    let args: Args = Args::parse();
    // Resolve the URI scheme (file://, s3://, ...) into a store + path.
    let (object_store, location) = parse_url(&url::Url::parse(&args.object_uri).unwrap()).unwrap();
    let object_store: Arc<_> = object_store.into();

    // Optionally emit a Chrome trace. The guard must be kept alive for the
    // whole program so the trace is flushed when it is dropped at exit.
    let _maybe_guard = if args.traced {
        let builder = ChromeLayerBuilder::new().trace_style(TraceStyle::Async);
        let (chrome_layer, guard) = builder.build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };

    // Dispatch to the selected benchmark; all failures abort with a panic.
    match args.command {
        Some(Commands::UploadData { size }) => {
            upload_test_data(object_store, &location, size)
                .await
                .unwrap();
        }
        Some(Commands::UploadMultiple {
            num_objects,
            size,
            random_prefixes,
        }) => {
            upload_multiple(object_store, &location, num_objects, size, random_prefixes)
                .await
                .unwrap();
        }
        Some(Commands::Download {
            parallel_downloads,
            block_size,
        }) => {
            download::parallel_download_bench(
                object_store,
                location,
                parallel_downloads,
                block_size,
            )
            .await
            .unwrap();
        }
        Some(Commands::Columnar {
            parallel_downloads,
            page_sizes,
        }) => {
            // page_sizes has a clap default, so unwrap() cannot fail here.
            let page_sizes = page_sizes
                .unwrap()
                .split(',')
                .map(|s| s.parse().unwrap())
                .collect();
            columnar::columnar_read_test(object_store, location, parallel_downloads, page_sizes)
                .await
                .unwrap();
        }
        None => {
            println!("No command specified");
        }
    }
}
| wjones127/object-store-bench | 0 | Rust | wjones127 | Will Jones | lancedb | |
/// Searches `numbers` for two entries at *distinct* indices that sum to
/// `target_sum`.
///
/// Returns the first matching pair in scan order, or an error if none exists.
fn find_pairs_that_add_to_sum(
    target_sum: u64,
    numbers: &Vec<u64>
) -> Result<(u64, u64), &'static str>
{
    for (i, op1) in numbers.iter().enumerate() {
        // Start after `i` so an entry is never paired with itself (a lone
        // entry equal to target_sum / 2 must not count as a pair).
        for op2 in &numbers[i + 1..] {
            if op1 + op2 == target_sum {
                return Ok((*op1, *op2));
            }
        }
    }
    Err("failed to find a matching pair")
}
/// Searches `numbers` for three entries at *distinct* indices that sum to
/// `target_sum`.
///
/// Returns the first matching triplet in scan order, or an error if none
/// exists.
fn find_triplets_that_add_to_sum(
    target_sum: u64,
    numbers: &Vec<u64>
) -> Result<(u64, u64, u64), &'static str>
{
    for (i, op1) in numbers.iter().enumerate() {
        for (j, op2) in numbers.iter().enumerate().skip(i + 1) {
            // Start after `j` so no entry is reused within a triplet.
            for op3 in &numbers[j + 1..] {
                if op1 + op2 + op3 == target_sum {
                    return Ok((*op1, *op2, *op3));
                }
            }
        }
    }
    Err("failed to find a matching triplet")
}
fn main() {
    // Sample input from the puzzle description, kept for quick manual checks:
    // let input = vec![
    //     1721,
    //     979,
    //     366,
    //     299,
    //     675,
    //     1456,
    // ];
    // Puzzle input.
    let input = vec![
        1348, 1621, 1500, 1818, 1266, 1449, 1880, 1416, 1862, 1665, 1588, 1704,
        1922, 1482, 1679, 1263, 1137, 1045, 1405, 1048, 1619, 1520, 455, 1142, 1415,
        1554, 1690, 1886, 1891, 1701, 1915, 1521, 1253, 1580, 1376, 1564, 1747,
        1814, 1749, 1485, 1969, 974, 1566, 1413, 1451, 1200, 1558, 1756, 1910, 1044,
        470, 1620, 1772, 1066, 1261, 1776, 988, 1976, 1834, 1896, 1646, 1626, 1300,
        1692, 1204, 2006, 1265, 1911, 1361, 1766, 1750, 2000, 1824, 1726, 1672, 651,
        1226, 1954, 1055, 1999, 1793, 1640, 1567, 1040, 1426, 1717, 1658, 1864,
        1917, 695, 1071, 1573, 1897, 1546, 1727, 1801, 1259, 1290, 1481, 1148, 1332,
        1262, 1536, 1184, 1821, 1681, 1671, 1612, 1678, 1703, 1604, 1697, 2003,
        1453, 1493, 1797, 1180, 1234, 1775, 1859, 1388, 1393, 667, 1767, 1429, 1990,
        1322, 1684, 1696, 1565, 1380, 1745, 1685, 1189, 1396, 1593, 1850, 1722,
        1495, 1844, 1285, 1483, 1635, 1072, 1947, 1109, 1586, 1730, 1723, 1246,
        1389, 1135, 1827, 1531, 1583, 1743, 1958, 183, 1323, 1949, 1799, 1269, 1379,
        1950, 1592, 1467, 1052, 1418, 2009, 1227, 1254, 1865, 1609, 1848, 1653,
        1691, 1633, 1349, 1104, 1790, 1755, 1847, 1598, 1872, 1478, 1778, 1952,
        1694, 1238, 1825, 1508, 1141, 1464, 1838, 1292, 1403, 1365, 1494, 934,
        1235,
    ];
    // Part a: product of the pair of entries summing to 2020.
    let pair = find_pairs_that_add_to_sum(2020, &input).unwrap();
    println!("a: {:?}", pair.0 * pair.1);
    // Part b: product of the triplet of entries summing to 2020.
    let triplet = find_triplets_that_add_to_sum(2020, &input).unwrap();
    println!("b: {:?}", triplet.0 * triplet.1 * triplet.2);
}
| wjwwood/advent_of_code_2020 | 2 | Advent of Code for 2020 | Rust | wjwwood | William Woodall | |
02/aoc_02/src/main.rs | Rust | use std::fs;
/// Parses the password database, one entry per line of the form
/// `"<min>-<max> <char>: <password>"`, into `(min, max, char, password)`
/// tuples borrowing from the input string.
///
/// Panics (via assert/unwrap) on any malformed line.
fn parse_password_db_string<'a>(
    passwords_db_string: &'a String
) -> Vec<(usize, usize, &'a str, &'a str)>
{
    passwords_db_string
        .lines()
        .map(|line| {
            let fields: Vec<&str> = line.split_whitespace().collect();
            assert_eq!(fields.len(), 3);
            // fields[0] is "<min>-<max>".
            let bounds: Vec<&str> = fields[0].splitn(2, "-").collect();
            assert_eq!(bounds.len(), 2);
            (
                bounds[0].parse::<usize>().unwrap(),
                bounds[1].parse::<usize>().unwrap(),
                // fields[1] is "<char>:" -- drop the trailing colon.
                fields[1].trim_end_matches(":"),
                fields[2],
            )
        })
        .collect()
}
/// Part-one policy: a password is valid when its target character occurs
/// between `min` and `max` times, inclusive. Returns the number of valid
/// entries.
fn count_valid_passwords1(passwords: &Vec<(usize, usize, &str, &str)>) -> usize {
    let mut valid = 0;
    for &(min, max, target, password) in passwords {
        let occurrences = password.matches(target).count();
        if (min..=max).contains(&occurrences) {
            valid += 1;
        }
    }
    valid
}
/// Part-two policy: a password is valid when *exactly one* of the two
/// 1-based positions holds the target character. Returns the number of
/// valid entries.
fn count_valid_passwords2(passwords: &Vec<(usize, usize, &str, &str)>) -> usize {
    let mut valid = 0;
    for &(pos1, pos2, target_str, password) in passwords {
        let len = password.len();
        let target: char = target_str.chars().next().unwrap();
        // Silently skip entries whose positions fall outside the password.
        if pos1 > len || pos2 > len {
            continue;
        }
        let at_first = password.chars().nth(pos1 - 1).unwrap() == target;
        let at_second = password.chars().nth(pos2 - 1).unwrap() == target;
        // XOR: exactly one position must match.
        if at_first != at_second {
            valid += 1;
        }
    }
    valid
}
fn main() {
    // Panics if the puzzle input file is missing or unreadable.
    let password_db = fs::read_to_string("password_db1.txt").unwrap();
    let passwords = parse_password_db_string(&password_db);
    // Part one: occurrence-count policy.
    println!("{:?}", count_valid_passwords1(&passwords));
    // Part two: exactly-one-position policy.
    println!("{:?}", count_valid_passwords2(&passwords));
}
| wjwwood/advent_of_code_2020 | 2 | Advent of Code for 2020 | Rust | wjwwood | William Woodall | |
cmake/serialConfig.cmake | CMake | get_filename_component(SERIAL_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
# Header directory of the installed package, relative to this config file's
# install location (share/serial/cmake -> install prefix).
set(SERIAL_INCLUDE_DIRS "${SERIAL_CMAKE_DIR}/../../../include")
# Locate the built serial library within the same install tree.
find_library(SERIAL_LIBRARIES serial PATHS ${SERIAL_CMAKE_DIR}/../../../lib/serial)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
examples/serial_example.cc | C++ | /***
* This example expects the serial port has a loopback on it.
*
* Alternatively, you could use an Arduino:
*
* <pre>
* void setup() {
* Serial.begin(<insert your baudrate here>);
* }
*
* void loop() {
* if (Serial.available()) {
* Serial.write(Serial.read());
* }
* }
* </pre>
*/
#include <string>
#include <iostream>
#include <cstdio>
// OS Specific sleep
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#include "serial/serial.h"
using std::string;
using std::exception;
using std::cout;
using std::cerr;
using std::endl;
using std::vector;
// Portable sleep for the requested number of milliseconds.
void my_sleep(unsigned long milliseconds) {
#ifdef _WIN32
      Sleep(milliseconds); // Win32 Sleep takes milliseconds directly
#else
      usleep(milliseconds*1000); // POSIX usleep takes microseconds
#endif
}
void enumerate_ports()
{
vector<serial::PortInfo> devices_found = serial::list_ports();
vector<serial::PortInfo>::iterator iter = devices_found.begin();
while( iter != devices_found.end() )
{
serial::PortInfo device = *iter++;
printf( "(%s, %s, %s)\n", device.port.c_str(), device.description.c_str(),
device.hardware_id.c_str() );
}
}
// Prints command-line usage to stderr.
void print_usage()
{
  cerr << "Usage: test_serial {-e|<serial port address>} "
       << "<baudrate> [test string]" << endl;
}
int run(int argc, char **argv)
{
if(argc < 2) {
print_usage();
return 0;
}
// Argument 1 is the serial port or enumerate flag
string port(argv[1]);
if( port == "-e" ) {
enumerate_ports();
return 0;
}
else if( argc < 3 ) {
print_usage();
return 1;
}
// Argument 2 is the baudrate
unsigned long baud = 0;
#if defined(WIN32) && !defined(__MINGW32__)
sscanf_s(argv[2], "%lu", &baud);
#else
sscanf(argv[2], "%lu", &baud);
#endif
// port, baudrate, timeout in milliseconds
serial::Serial my_serial(port, baud, serial::Timeout::simpleTimeout(1000));
cout << "Is the serial port open?";
if(my_serial.isOpen())
cout << " Yes." << endl;
else
cout << " No." << endl;
// Get the Test string
int count = 0;
string test_string;
if (argc == 4) {
test_string = argv[3];
} else {
test_string = "Testing.";
}
// Test the timeout, there should be 1 second between prints
cout << "Timeout == 1000ms, asking for 1 more byte than written." << endl;
while (count < 10) {
size_t bytes_wrote = my_serial.write(test_string);
string result = my_serial.read(test_string.length()+1);
cout << "Iteration: " << count << ", Bytes written: ";
cout << bytes_wrote << ", Bytes read: ";
cout << result.length() << ", String read: " << result << endl;
count += 1;
}
// Test the timeout at 250ms
my_serial.setTimeout(serial::Timeout::max(), 250, 0, 250, 0);
count = 0;
cout << "Timeout == 250ms, asking for 1 more byte than written." << endl;
while (count < 10) {
size_t bytes_wrote = my_serial.write(test_string);
string result = my_serial.read(test_string.length()+1);
cout << "Iteration: " << count << ", Bytes written: ";
cout << bytes_wrote << ", Bytes read: ";
cout << result.length() << ", String read: " << result << endl;
count += 1;
}
// Test the timeout at 250ms, but asking exactly for what was written
count = 0;
cout << "Timeout == 250ms, asking for exactly what was written." << endl;
while (count < 10) {
size_t bytes_wrote = my_serial.write(test_string);
string result = my_serial.read(test_string.length());
cout << "Iteration: " << count << ", Bytes written: ";
cout << bytes_wrote << ", Bytes read: ";
cout << result.length() << ", String read: " << result << endl;
count += 1;
}
// Test the timeout at 250ms, but asking for 1 less than what was written
count = 0;
cout << "Timeout == 250ms, asking for 1 less than was written." << endl;
while (count < 10) {
size_t bytes_wrote = my_serial.write(test_string);
string result = my_serial.read(test_string.length()-1);
cout << "Iteration: " << count << ", Bytes written: ";
cout << bytes_wrote << ", Bytes read: ";
cout << result.length() << ", String read: " << result << endl;
count += 1;
}
return 0;
}
// Entry point: delegate to run(), reporting any escaped exception.
// Previously this fell off the end after the catch, implicitly exiting 0
// even on an unhandled exception; now it returns a non-zero status so
// scripts can detect the failure.
int main(int argc, char **argv) {
  try {
    return run(argc, argv);
  } catch (exception &e) {
    cerr << "Unhandled Exception: " << e.what() << endl;
    return 1;
  }
}
include/serial/impl/unix.h | C/C++ Header | /*!
* \file serial/impl/unix.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash@greaterthaninfinity.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall, John Harrison
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a unix based pimpl for the Serial class. This implementation is
* based off termios.h and uses select for multiplexing the IO ports.
*
*/
#if !defined(_WIN32)
#ifndef SERIAL_IMPL_UNIX_H
#define SERIAL_IMPL_UNIX_H
#include "serial/serial.h"
#include <pthread.h>
namespace serial {
using std::size_t;
using std::string;
using std::invalid_argument;
using serial::SerialException;
using serial::IOException;
// Deadline helper used by the read/write timeout logic: constructed with a
// duration in milliseconds, it reports how long remains until that deadline.
class MillisecondTimer {
public:
  MillisecondTimer(const uint32_t millis);
  // Milliseconds remaining until the deadline (presumably <= 0 once
  // expired -- confirm against the definition in the unix source file).
  int64_t remaining();

private:
  // Snapshot of the current time as a timespec.
  static timespec timespec_now();
  timespec expiry; // absolute point in time at which the timer expires
};
// POSIX (termios-based) implementation behind the serial::Serial pimpl.
// The methods mirror the public serial::Serial interface one-to-one; see
// serial/serial.h for each method's documented contract.
class serial::Serial::SerialImpl {
public:
  SerialImpl (const string &port,
              unsigned long baudrate,
              bytesize_t bytesize,
              parity_t parity,
              stopbits_t stopbits,
              flowcontrol_t flowcontrol);

  virtual ~SerialImpl ();

  // --- Port lifecycle ---
  void
  open ();

  void
  close ();

  bool
  isOpen () const;

  // --- Data transfer ---
  size_t
  available ();

  bool
  waitReadable (uint32_t timeout);

  void
  waitByteTimes (size_t count);

  size_t
  read (uint8_t *buf, size_t size = 1);

  size_t
  write (const uint8_t *data, size_t length);

  void
  flush ();

  void
  flushInput ();

  void
  flushOutput ();

  // --- Line control and modem status ---
  void
  sendBreak (int duration);

  void
  setBreak (bool level);

  void
  setRTS (bool level);

  void
  setDTR (bool level);

  bool
  waitForChange ();

  bool
  getCTS ();

  bool
  getDSR ();

  bool
  getRI ();

  bool
  getCD ();

  // --- Port settings (cached in the members below) ---
  void
  setPort (const string &port);

  string
  getPort () const;

  void
  setTimeout (Timeout &timeout);

  Timeout
  getTimeout () const;

  void
  setBaudrate (unsigned long baudrate);

  unsigned long
  getBaudrate () const;

  void
  setBytesize (bytesize_t bytesize);

  bytesize_t
  getBytesize () const;

  void
  setParity (parity_t parity);

  parity_t
  getParity () const;

  void
  setStopbits (stopbits_t stopbits);

  stopbits_t
  getStopbits () const;

  void
  setFlowcontrol (flowcontrol_t flowcontrol);

  flowcontrol_t
  getFlowcontrol () const;

  // --- Read/write serialization (pthread mutexes below) ---
  void
  readLock ();

  void
  readUnlock ();

  void
  writeLock ();

  void
  writeUnlock ();

protected:
  // Presumably re-applies the cached settings to the open file descriptor
  // via termios -- confirm against the unix source file.
  void reconfigurePort ();

private:
  string port_;               // Path to the file descriptor
  int fd_;                    // The current file descriptor
  bool is_open_;
  bool xonxoff_;              // XON/XOFF (software) flow control enabled
  bool rtscts_;               // RTS/CTS (hardware) flow control enabled
  Timeout timeout_;           // Timeout for read operations
  unsigned long baudrate_;    // Baudrate
  uint32_t byte_time_ns_;     // Nanoseconds to transmit/receive a single byte
  parity_t parity_;           // Parity
  bytesize_t bytesize_;       // Size of the bytes
  stopbits_t stopbits_;       // Stop Bits
  flowcontrol_t flowcontrol_; // Flow Control

  // Mutex used to lock the read functions
  pthread_mutex_t read_mutex;
  // Mutex used to lock the write functions
  pthread_mutex_t write_mutex;
};
}
#endif // SERIAL_IMPL_UNIX_H
#endif // !defined(_WIN32)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
include/serial/impl/win.h | C/C++ Header | /*!
 * \file serial/impl/win.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash@greaterthaninfinity.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall, John Harrison
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a windows implementation of the Serial class interface.
*
*/
#if defined(_WIN32)
#ifndef SERIAL_IMPL_WINDOWS_H
#define SERIAL_IMPL_WINDOWS_H
#include "serial/serial.h"
#include "windows.h"
namespace serial {
using std::string;
using std::wstring;
using std::invalid_argument;
using serial::SerialException;
using serial::IOException;
// Win32 implementation behind the serial::Serial pimpl. The methods mirror
// the public serial::Serial interface one-to-one; see serial/serial.h for
// each method's documented contract.
class serial::Serial::SerialImpl {
public:
  SerialImpl (const string &port,
              unsigned long baudrate,
              bytesize_t bytesize,
              parity_t parity,
              stopbits_t stopbits,
              flowcontrol_t flowcontrol);

  virtual ~SerialImpl ();

  // --- Port lifecycle ---
  void
  open ();

  void
  close ();

  bool
  isOpen () const;

  // --- Data transfer ---
  size_t
  available ();

  bool
  waitReadable (uint32_t timeout);

  void
  waitByteTimes (size_t count);

  size_t
  read (uint8_t *buf, size_t size = 1);

  size_t
  write (const uint8_t *data, size_t length);

  void
  flush ();

  void
  flushInput ();

  void
  flushOutput ();

  // --- Line control and modem status ---
  void
  sendBreak (int duration);

  void
  setBreak (bool level);

  void
  setRTS (bool level);

  void
  setDTR (bool level);

  bool
  waitForChange ();

  bool
  getCTS ();

  bool
  getDSR ();

  bool
  getRI ();

  bool
  getCD ();

  // --- Port settings (cached in the members below) ---
  void
  setPort (const string &port);

  string
  getPort () const;

  void
  setTimeout (Timeout &timeout);

  Timeout
  getTimeout () const;

  void
  setBaudrate (unsigned long baudrate);

  unsigned long
  getBaudrate () const;

  void
  setBytesize (bytesize_t bytesize);

  bytesize_t
  getBytesize () const;

  void
  setParity (parity_t parity);

  parity_t
  getParity () const;

  void
  setStopbits (stopbits_t stopbits);

  stopbits_t
  getStopbits () const;

  void
  setFlowcontrol (flowcontrol_t flowcontrol);

  flowcontrol_t
  getFlowcontrol () const;

  // --- Read/write serialization (Win32 mutex handles below) ---
  void
  readLock ();

  void
  readUnlock ();

  void
  writeLock ();

  void
  writeUnlock ();

protected:
  // Presumably re-applies the cached settings to the open handle -- confirm
  // against the win source file.
  void reconfigurePort ();

private:
  wstring port_;              // Path to the file descriptor (wide string for Win32 APIs)
  HANDLE fd_;                 // Win32 handle to the open port
  bool is_open_;
  Timeout timeout_;           // Timeout for read operations
  unsigned long baudrate_;    // Baudrate
  parity_t parity_;           // Parity
  bytesize_t bytesize_;       // Size of the bytes
  stopbits_t stopbits_;       // Stop Bits
  flowcontrol_t flowcontrol_; // Flow Control

  // Mutex used to lock the read functions
  HANDLE read_mutex;
  // Mutex used to lock the write functions
  HANDLE write_mutex;
};
}
#endif // SERIAL_IMPL_WINDOWS_H
#endif // if defined(_WIN32)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
include/serial/serial.h | C/C++ Header | /*!
* \file serial/serial.h
* \author William Woodall <wjwwood@gmail.com>
* \author John Harrison <ash.gti@gmail.com>
* \version 0.1
*
* \section LICENSE
*
* The MIT License
*
* Copyright (c) 2012 William Woodall
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* \section DESCRIPTION
*
* This provides a cross platform interface for interacting with Serial Ports.
*/
#ifndef SERIAL_H
#define SERIAL_H
#include <limits>
#include <vector>
#include <string>
#include <cstring>
#include <sstream>
#include <exception>
#include <stdexcept>
#include <serial/v8stdint.h>
#define THROW(exceptionClass, message) throw exceptionClass(__FILE__, \
__LINE__, (message) )
namespace serial {
/*!
 * Enumeration defines the possible bytesizes for the serial port.
 */
typedef enum {
  fivebits = 5,
  sixbits = 6,
  sevenbits = 7,
  eightbits = 8
} bytesize_t;

/*!
 * Enumeration defines the possible parity types for the serial port.
 */
typedef enum {
  parity_none = 0,
  parity_odd = 1,
  parity_even = 2,
  parity_mark = 3,
  parity_space = 4
} parity_t;

/*!
 * Enumeration defines the possible stopbit types for the serial port.
 */
typedef enum {
  stopbits_one = 1,
  stopbits_two = 2,
  stopbits_one_point_five // implicitly 3 (follows stopbits_two)
} stopbits_t;

/*!
 * Enumeration defines the possible flowcontrol types for the serial port.
 */
typedef enum {
  flowcontrol_none = 0,
  flowcontrol_software, // implicitly 1 (XON/XOFF)
  flowcontrol_hardware  // implicitly 2 (RTS/CTS)
} flowcontrol_t;
/*!
 * Structure for setting the timeout of the serial port, times are
 * in milliseconds.
 *
 * In order to disable the interbyte timeout, set it to Timeout::max().
 */
struct Timeout {
#ifdef max
// A function-style `max` macro (commonly leaked by windows.h) would
// otherwise mangle the declaration of Timeout::max() below.
# undef max
#endif
  static uint32_t max() {return std::numeric_limits<uint32_t>::max();}
  /*!
   * Convenience function to generate Timeout structs using a
   * single absolute timeout.
   *
   * \param timeout A long that defines the time in milliseconds until a
   * timeout occurs after a call to read or write is made.
   *
   * \return Timeout struct that represents this simple timeout provided.
   */
  static Timeout simpleTimeout(uint32_t timeout) {
    return Timeout(max(), timeout, 0, timeout, 0);
  }

  /*! Number of milliseconds between bytes received to timeout on. */
  uint32_t inter_byte_timeout;
  /*! A constant number of milliseconds to wait after calling read. */
  uint32_t read_timeout_constant;
  /*! A multiplier against the number of requested bytes to wait after
   *  calling read.
   */
  uint32_t read_timeout_multiplier;
  /*! A constant number of milliseconds to wait after calling write. */
  uint32_t write_timeout_constant;
  /*! A multiplier against the number of requested bytes to wait after
   *  calling write.
   */
  uint32_t write_timeout_multiplier;

  // All timeouts default to 0 (disabled) unless provided.
  explicit Timeout (uint32_t inter_byte_timeout_=0,
                    uint32_t read_timeout_constant_=0,
                    uint32_t read_timeout_multiplier_=0,
                    uint32_t write_timeout_constant_=0,
                    uint32_t write_timeout_multiplier_=0)
  : inter_byte_timeout(inter_byte_timeout_),
    read_timeout_constant(read_timeout_constant_),
    read_timeout_multiplier(read_timeout_multiplier_),
    write_timeout_constant(write_timeout_constant_),
    write_timeout_multiplier(write_timeout_multiplier_)
  {}
};
/*!
 * Class that provides a portable serial port interface.
 */
class Serial {
public:
  /*!
   * Creates a Serial object and opens the port if a port is specified,
   * otherwise it remains closed until serial::Serial::open is called.
   *
   * \param port A std::string containing the address of the serial port,
   * which would be something like 'COM1' on Windows and '/dev/ttyS0'
   * on Linux.
   *
   * \param baudrate An unsigned 32-bit integer that represents the baudrate.
   *
   * \param timeout A serial::Timeout struct that defines the timeout
   * conditions for the serial port. \see serial::Timeout
   *
   * \param bytesize Size of each byte in the serial transmission of data,
   * default is eightbits, possible values are: fivebits, sixbits, sevenbits,
   * eightbits
   *
   * \param parity Method of parity, default is parity_none, possible values
   * are: parity_none, parity_odd, parity_even
   *
   * \param stopbits Number of stop bits used, default is stopbits_one,
   * possible values are: stopbits_one, stopbits_one_point_five, stopbits_two
   *
   * \param flowcontrol Type of flowcontrol used, default is
   * flowcontrol_none, possible values are: flowcontrol_none,
   * flowcontrol_software, flowcontrol_hardware
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::IOException
   * \throw std::invalid_argument
   */
  Serial (const std::string &port = "",
          uint32_t baudrate = 9600,
          Timeout timeout = Timeout(),
          bytesize_t bytesize = eightbits,
          parity_t parity = parity_none,
          stopbits_t stopbits = stopbits_one,
          flowcontrol_t flowcontrol = flowcontrol_none);
  /*! Destructor */
  virtual ~Serial ();
  /*!
   * Opens the serial port as long as the port is set and the port isn't
   * already open.
   *
   * If the port is provided to the constructor then an explicit call to open
   * is not needed.
   *
   * \see Serial::Serial
   *
   * \throw std::invalid_argument
   * \throw serial::SerialException
   * \throw serial::IOException
   */
  void
  open ();
  /*! Gets the open status of the serial port.
   *
   * \return Returns true if the port is open, false otherwise.
   */
  bool
  isOpen () const;
  /*! Closes the serial port. */
  void
  close ();
  /*! Return the number of characters in the buffer. */
  size_t
  available ();
  /*! Block until there is serial data to read or read_timeout_constant
   * number of milliseconds have elapsed. The return value is true when
   * the function exits with the port in a readable state, false otherwise
   * (due to timeout or select interruption). */
  bool
  waitReadable ();
  /*! Block for a period of time corresponding to the transmission time of
   * count characters at present serial settings. This may be used in con-
   * junction with waitReadable to read larger blocks of data from the
   * port. */
  void
  waitByteTimes (size_t count);
  /*! Read a given amount of bytes from the serial port into a given buffer.
   *
   * The read function will return in one of three cases:
   *  * The number of requested bytes was read.
   *    * In this case the number of bytes requested will match the size_t
   *      returned by read.
   *  * A timeout occurred, in this case the number of bytes read will not
   *    match the amount requested, but no exception will be thrown.  One of
   *    two possible timeouts occurred:
   *    * The inter byte timeout expired, this means that number of
   *      milliseconds elapsed between receiving bytes from the serial port
   *      exceeded the inter byte timeout.
   *    * The total timeout expired, which is calculated by multiplying the
   *      read timeout multiplier by the number of requested bytes and then
   *      added to the read timeout constant.  If that total number of
   *      milliseconds elapses after the initial call to read a timeout will
   *      occur.
   *  * An exception occurred, in this case an actual exception will be thrown.
   *
   * \param buffer An uint8_t array of at least the requested size.
   * \param size A size_t defining how many bytes to be read.
   *
   * \return A size_t representing the number of bytes read as a result of the
   *         call to read.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  size_t
  read (uint8_t *buffer, size_t size);
  /*! Read a given amount of bytes from the serial port into a given buffer.
   *
   * \param buffer A reference to a std::vector of uint8_t; read bytes are
   *        appended to it.
   * \param size A size_t defining how many bytes to be read.
   *
   * \return A size_t representing the number of bytes read as a result of the
   *         call to read.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  size_t
  read (std::vector<uint8_t> &buffer, size_t size = 1);
  /*! Read a given amount of bytes from the serial port into a given buffer.
   *
   * \param buffer A reference to a std::string; read bytes are appended to it.
   * \param size A size_t defining how many bytes to be read.
   *
   * \return A size_t representing the number of bytes read as a result of the
   *         call to read.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  size_t
  read (std::string &buffer, size_t size = 1);
  /*! Read a given amount of bytes from the serial port and return a string
   *  containing the data.
   *
   * \param size A size_t defining how many bytes to be read.
   *
   * \return A std::string containing the data read from the port.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  std::string
  read (size_t size = 1);
  /*! Reads in a line or until a given delimiter has been processed.
   *
   * Reads from the serial port until a single line has been read.
   *
   * \param buffer A std::string reference used to store the data.
   * \param size A maximum length of a line, defaults to 65536 (2^16)
   * \param eol A string to match against for the EOL.
   *
   * \return A size_t representing the number of bytes read.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  size_t
  readline (std::string &buffer, size_t size = 65536, std::string eol = "\n");
  /*! Reads in a line or until a given delimiter has been processed.
   *
   * Reads from the serial port until a single line has been read.
   *
   * \param size A maximum length of a line, defaults to 65536 (2^16)
   * \param eol A string to match against for the EOL.
   *
   * \return A std::string containing the line.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  std::string
  readline (size_t size = 65536, std::string eol = "\n");
  /*! Reads in multiple lines until the serial port times out.
   *
   * This requires a timeout > 0 before it can be run. It will read until a
   * timeout occurs and return a list of strings.
   *
   * \param size A maximum length of combined lines, defaults to 65536 (2^16)
   *
   * \param eol A string to match against for the EOL.
   *
   * \return A vector<string> containing the lines.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   */
  std::vector<std::string>
  readlines (size_t size = 65536, std::string eol = "\n");
  /*! Write raw bytes to the serial port.
   *
   * \param data A const uint8_t pointer containing the data to be written
   * to the serial port.
   *
   * \param size A size_t that indicates how many bytes should be written from
   * the given data buffer.
   *
   * \return A size_t representing the number of bytes actually written to
   * the serial port.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   * \throw serial::IOException
   */
  size_t
  write (const uint8_t *data, size_t size);
  /*! Write a vector of bytes to the serial port.
   *
   * \param data A const reference containing the data to be written
   * to the serial port.
   *
   * \return A size_t representing the number of bytes actually written to
   * the serial port.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   * \throw serial::IOException
   */
  size_t
  write (const std::vector<uint8_t> &data);
  /*! Write a string to the serial port.
   *
   * \param data A const reference containing the data to be written
   * to the serial port.
   *
   * \return A size_t representing the number of bytes actually written to
   * the serial port.
   *
   * \throw serial::PortNotOpenedException
   * \throw serial::SerialException
   * \throw serial::IOException
   */
  size_t
  write (const std::string &data);
  /*! Sets the serial port identifier.
   *
   * \param port A const std::string reference containing the address of the
   * serial port, which would be something like 'COM1' on Windows and
   * '/dev/ttyS0' on Linux.
   *
   * \throw std::invalid_argument
   */
  void
  setPort (const std::string &port);
  /*! Gets the serial port identifier.
   *
   * \see Serial::setPort
   *
   * \throw std::invalid_argument
   */
  std::string
  getPort () const;
  /*! Sets the timeout for reads and writes using the Timeout struct.
   *
   * There are two timeout conditions described here:
   *  * The inter byte timeout:
   *    * The inter_byte_timeout component of serial::Timeout defines the
   *      maximum amount of time, in milliseconds, between receiving bytes on
   *      the serial port that can pass before a timeout occurs.  Setting this
   *      to zero will prevent inter byte timeouts from occurring.
   *  * Total time timeout:
   *    * The constant and multiplier component of this timeout condition,
   *      for both read and write, are defined in serial::Timeout.  This
   *      timeout occurs if the total time since the read or write call was
   *      made exceeds the specified time in milliseconds.
   *    * The limit is defined by multiplying the multiplier component by the
   *      number of requested bytes and adding that product to the constant
   *      component.  In this way if you want a read call, for example, to
   *      timeout after exactly one second regardless of the number of bytes
   *      you asked for then set the read_timeout_constant component of
   *      serial::Timeout to 1000 and the read_timeout_multiplier to zero.
   *      This timeout condition can be used in conjunction with the inter
   *      byte timeout condition with out any problems, timeout will simply
   *      occur when one of the two timeout conditions is met.  This allows
   *      users to have maximum control over the trade-off between
   *      responsiveness and efficiency.
   *
   * Read and write functions will return in one of three cases.  When the
   * reading or writing is complete, when a timeout occurs, or when an
   * exception occurs.
   *
   * A timeout of 0 enables non-blocking mode.
   *
   * \param timeout A serial::Timeout struct containing the inter byte
   * timeout, and the read and write timeout constants and multipliers.
   *
   * NOTE(review): takes a non-const reference although the argument looks
   * like pure input — confirm the implementation does not mutate it before
   * changing the signature.
   *
   * \see serial::Timeout
   */
  void
  setTimeout (Timeout &timeout);
  /*! Sets the timeout for reads and writes. */
  void
  setTimeout (uint32_t inter_byte_timeout, uint32_t read_timeout_constant,
              uint32_t read_timeout_multiplier, uint32_t write_timeout_constant,
              uint32_t write_timeout_multiplier)
  {
    // Convenience overload: packs the five values into a Timeout struct and
    // forwards to setTimeout(Timeout&).
    Timeout timeout(inter_byte_timeout, read_timeout_constant,
                    read_timeout_multiplier, write_timeout_constant,
                    write_timeout_multiplier);
    return setTimeout(timeout);
  }
  /*! Gets the timeout for reads and writes (all values in milliseconds).
   *
   * \return A Timeout struct containing the inter_byte_timeout, and read
   * and write timeout constants and multipliers.
   *
   * \see Serial::setTimeout
   */
  Timeout
  getTimeout () const;
  /*! Sets the baudrate for the serial port.
   *
   * Possible baudrates depends on the system but some safe baudrates include:
   * 110, 300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 56000,
   * 57600, 115200
   * Some other baudrates that are supported by some comports:
   * 128000, 153600, 230400, 256000, 460800, 500000, 921600
   *
   * \param baudrate An integer that sets the baud rate for the serial port.
   *
   * \throw std::invalid_argument
   */
  void
  setBaudrate (uint32_t baudrate);
  /*! Gets the baudrate for the serial port.
   *
   * \return An integer that represents the baud rate of the serial port.
   *
   * \see Serial::setBaudrate
   *
   * \throw std::invalid_argument
   */
  uint32_t
  getBaudrate () const;
  /*! Sets the bytesize for the serial port.
   *
   * \param bytesize Size of each byte in the serial transmission of data,
   * default is eightbits, possible values are: fivebits, sixbits, sevenbits,
   * eightbits
   *
   * \throw std::invalid_argument
   */
  void
  setBytesize (bytesize_t bytesize);
  /*! Gets the bytesize for the serial port.
   *
   * \see Serial::setBytesize
   *
   * \throw std::invalid_argument
   */
  bytesize_t
  getBytesize () const;
  /*! Sets the parity for the serial port.
   *
   * \param parity Method of parity, default is parity_none, possible values
   * are: parity_none, parity_odd, parity_even
   *
   * \throw std::invalid_argument
   */
  void
  setParity (parity_t parity);
  /*! Gets the parity for the serial port.
   *
   * \see Serial::setParity
   *
   * \throw std::invalid_argument
   */
  parity_t
  getParity () const;
  /*! Sets the stopbits for the serial port.
   *
   * \param stopbits Number of stop bits used, default is stopbits_one,
   * possible values are: stopbits_one, stopbits_one_point_five, stopbits_two
   *
   * \throw std::invalid_argument
   */
  void
  setStopbits (stopbits_t stopbits);
  /*! Gets the stopbits for the serial port.
   *
   * \see Serial::setStopbits
   *
   * \throw std::invalid_argument
   */
  stopbits_t
  getStopbits () const;
  /*! Sets the flow control for the serial port.
   *
   * \param flowcontrol Type of flowcontrol used, default is flowcontrol_none,
   * possible values are: flowcontrol_none, flowcontrol_software,
   * flowcontrol_hardware
   *
   * \throw std::invalid_argument
   */
  void
  setFlowcontrol (flowcontrol_t flowcontrol);
  /*! Gets the flow control for the serial port.
   *
   * \see Serial::setFlowcontrol
   *
   * \throw std::invalid_argument
   */
  flowcontrol_t
  getFlowcontrol () const;
  /*! Flush the input and output buffers */
  void
  flush ();
  /*! Flush only the input buffer */
  void
  flushInput ();
  /*! Flush only the output buffer */
  void
  flushOutput ();
  /*! Sends the RS-232 break signal.  See tcsendbreak(3). */
  void
  sendBreak (int duration);
  /*! Set the break condition to a given level.  Defaults to true. */
  void
  setBreak (bool level = true);
  /*! Set the RTS handshaking line to the given level.  Defaults to true. */
  void
  setRTS (bool level = true);
  /*! Set the DTR handshaking line to the given level.  Defaults to true. */
  void
  setDTR (bool level = true);
  /*!
   * Blocks until CTS, DSR, RI, CD changes or something interrupts it.
   *
   * Can throw an exception if an error occurs while waiting.
   * You can check the status of CTS, DSR, RI, and CD once this returns.
   * Uses TIOCMIWAIT via ioctl if available (mostly only on Linux) with a
   * resolution of less than +-1ms and as good as +-0.2ms.  Otherwise a
   * polling method is used which can give +-2ms.
   *
   * \return Returns true if one of the lines changed, false if something else
   * occurred.
   *
   * \throw SerialException
   */
  bool
  waitForChange ();
  /*! Returns the current status of the CTS line. */
  bool
  getCTS ();
  /*! Returns the current status of the DSR line. */
  bool
  getDSR ();
  /*! Returns the current status of the RI line. */
  bool
  getRI ();
  /*! Returns the current status of the CD line. */
  bool
  getCD ();
private:
  // Disable copy constructors
  Serial(const Serial&);
  Serial& operator=(const Serial&);
  // Pimpl idiom, d_pointer (platform-specific implementation lives here)
  class SerialImpl;
  SerialImpl *pimpl_;
  // Scoped Lock Classes
  class ScopedReadLock;
  class ScopedWriteLock;
  // Read common function
  size_t
  read_ (uint8_t *buffer, size_t size);
  // Write common function
  size_t
  write_ (const uint8_t *data, size_t length);
};
class SerialException : public std::exception
{
  // Assignment is intentionally disabled (declared, never defined).
  SerialException& operator=(const SerialException&);
  std::string e_what_;  // pre-formatted message returned by what()
public:
  /*! Builds the message "SerialException <description> failed.". */
  SerialException (const char *description) {
    e_what_  = "SerialException ";
    e_what_ += description;
    e_what_ += " failed.";
  }
  /*! Copies carry over the already-formatted message. */
  SerialException (const SerialException& other) : e_what_(other.e_what_) {}
  virtual ~SerialException() throw() {}
  /*! Returns the formatted message; valid while the exception lives. */
  virtual const char* what () const throw () {
    return e_what_.c_str();
  }
};
class IOException : public std::exception
{
  // Assignment is intentionally disabled (declared, never defined).
  IOException& operator=(const IOException&);
  std::string file_;   // source file that raised the exception
  int line_;           // source line that raised the exception
  std::string e_what_; // pre-formatted message returned by what()
  int errno_;          // errno value, or 0 when constructed from a description
public:
  /*! Constructs from an errno value; the message embeds strerror(errnum). */
  explicit IOException (std::string file, int line, int errnum)
    : file_(file), line_(line), errno_(errnum) {
    std::stringstream ss;
#if defined(_WIN32) && !defined(__MINGW32__)
    char error_str [1024];
    strerror_s(error_str, 1024, errnum);
#else
    char * error_str = strerror(errnum);
#endif
    ss << "IO Exception (" << errno_ << "): " << error_str;
    ss << ", file " << file_ << ", line " << line_ << ".";
    e_what_ = ss.str();
  }
  /*! Constructs from a free-form description; errno is recorded as 0. */
  explicit IOException (std::string file, int line, const char * description)
    : file_(file), line_(line), errno_(0) {
    std::stringstream ss;
    ss << "IO Exception: " << description;
    ss << ", file " << file_ << ", line " << line_ << ".";
    e_what_ = ss.str();
  }
  virtual ~IOException() throw() {}
  // FIX: the copy constructor previously omitted file_ from its initializer
  // list, so copies lost the originating file name.
  IOException (const IOException& other)
    : file_(other.file_), line_(other.line_),
      e_what_(other.e_what_), errno_(other.errno_) {}
  /*! Returns the errno this exception was constructed with (0 if none). */
  int getErrorNumber () const { return errno_; }
  /*! Returns the formatted message; valid while the exception lives. */
  virtual const char* what () const throw () {
    return e_what_.c_str();
  }
};
class PortNotOpenedException : public std::exception
{
  // Assignment is intentionally disabled (declared, never defined).
  const PortNotOpenedException& operator=(PortNotOpenedException);
  std::string e_what_;  // pre-formatted message returned by what()
public:
  /*! Builds the message "PortNotOpenedException <description> failed.". */
  PortNotOpenedException (const char * description) {
    e_what_  = "PortNotOpenedException ";
    e_what_ += description;
    e_what_ += " failed.";
  }
  /*! Copies carry over the already-formatted message. */
  PortNotOpenedException (const PortNotOpenedException& other) : e_what_(other.e_what_) {}
  virtual ~PortNotOpenedException() throw() {}
  /*! Returns the formatted message; valid while the exception lives. */
  virtual const char* what () const throw () {
    return e_what_.c_str();
  }
};
/*!
 * Structure that describes a serial device.
 * Plain value type produced by serial::list_ports().
 */
struct PortInfo {
  /*! Address of the serial port (this can be passed to the constructor of Serial). */
  std::string port;
  /*! Human readable description of serial device if available. */
  std::string description;
  /*! Hardware ID (e.g. VID:PID of USB serial devices) or "n/a" if not available. */
  std::string hardware_id;
};
/*! Lists the serial ports available on the system.
 *
 * Returns a vector of available serial ports, each represented
 * by a serial::PortInfo data structure:
 *
 * \return vector of serial::PortInfo.
 */
std::vector<PortInfo>
list_ports();
} // namespace serial
#endif
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
// ---- file: include/serial/v8stdint.h (C/C++ header) ----
// This header is from the v8 google project:
// http://code.google.com/p/v8/source/browse/trunk/include/v8stdint.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Load definitions of standard types.
#ifndef V8STDINT_H_
#define V8STDINT_H_
#include <stddef.h>
#include <stdio.h>
// On Windows toolchains other than MinGW the exact-width integer types are
// defined by hand; every other platform gets them from <stdint.h>.
#if defined(_WIN32) && !defined(__MINGW32__)
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;  // NOLINT
typedef unsigned short uint16_t;  // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
// intptr_t and friends are defined in crtdefs.h through stdio.h.
#else
#include <stdint.h>
#endif
#endif  // V8STDINT_H_
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
// ---- file: src/impl/list_ports/list_ports_linux.cc (C++) ----
#if defined(__linux__)
/*
* Copyright (c) 2014 Craig Lilley <cralilley@gmail.com>
* This software is made available under the terms of the MIT licence.
* A copy of the licence can be obtained from:
* http://opensource.org/licenses/MIT
*/
#include <vector>
#include <string>
#include <sstream>
#include <stdexcept>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdarg>
#include <cstdlib>
#include <glob.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "serial/serial.h"
using serial::PortInfo;
using std::istringstream;
using std::ifstream;
using std::getline;
using std::vector;
using std::string;
using std::cout;
using std::endl;
static vector<string> glob(const vector<string>& patterns);
static string basename(const string& path);
static string dirname(const string& path);
static bool path_exists(const string& path);
static string realpath(const string& path);
static string usb_sysfs_friendly_name(const string& sys_usb_path);
static vector<string> get_sysfs_info(const string& device_path);
static string read_line(const string& file);
static string usb_sysfs_hw_string(const string& sysfs_path);
static string format(const char* format, ...);
/* Expands a list of shell glob patterns into the union of matching paths.
 * Patterns that match nothing contribute no entries; an empty pattern list
 * yields an empty result. */
vector<string>
glob(const vector<string>& patterns)
{
  vector<string> paths_found;
  if(patterns.size() == 0)
    return paths_found;
  glob_t glob_results;
  // The first pattern initializes glob_results; subsequent calls append.
  // Return values are intentionally ignored: GLOB_NOMATCH simply leaves
  // gl_pathc untouched and globfree() is safe either way.
  ::glob(patterns[0].c_str(), 0, NULL, &glob_results);
  vector<string>::const_iterator iter = patterns.begin();
  while(++iter != patterns.end())
  {
    ::glob(iter->c_str(), GLOB_APPEND, NULL, &glob_results);
  }
  // FIX: gl_pathc is a size_t; the previous int index caused a
  // signed/unsigned comparison (and glob_retval was stored but never used).
  for(size_t path_index = 0; path_index < glob_results.gl_pathc; path_index++)
  {
    paths_found.push_back(glob_results.gl_pathv[path_index]);
  }
  globfree(&glob_results);
  return paths_found;
}
/* Returns the path component after the final '/'; the whole input when it
 * contains no '/'. */
string
basename(const string& path)
{
  size_t slash = path.find_last_of('/');
  if(slash == string::npos)
    return path;
  return path.substr(slash + 1);
}
/* Returns everything before the final '/'; "/" for paths whose only slash is
 * leading; the whole input when it contains no '/'. */
string
dirname(const string& path)
{
  size_t slash = path.find_last_of('/');
  if(slash == string::npos)
    return path;
  if(slash == 0)
    return "/";
  return path.substr(0, slash);
}
/* True when stat() succeeds on the path (file, directory, or device). */
bool
path_exists(const string& path)
{
  struct stat sb;
  return ::stat(path.c_str(), &sb) == 0;
}
/* Canonicalizes a path via the libc realpath(3); returns "" on failure
 * (e.g. the path does not exist). */
string
realpath(const string& path)
{
  string resolved;
  char* buf = ::realpath(path.c_str(), NULL);
  if(buf != NULL)
  {
    resolved = buf;
    free(buf);  // realpath(…, NULL) malloc's the buffer; we own it
  }
  return resolved;
}
/* Builds "manufacturer product serial" from the sysfs USB device directory,
 * or "" when none of the three attribute files has content. */
string
usb_sysfs_friendly_name(const string& sys_usb_path)
{
  // NOTE(review): device_number is parsed from /devnum but never used below —
  // dead code kept for now; confirm before removing.
  unsigned int device_number = 0;
  istringstream( read_line(sys_usb_path + "/devnum") ) >> device_number;
  string manufacturer = read_line( sys_usb_path + "/manufacturer" );
  string product = read_line( sys_usb_path + "/product" );
  string serial = read_line( sys_usb_path + "/serial" );
  if( manufacturer.empty() && product.empty() && serial.empty() )
    return "";
  return format("%s %s %s", manufacturer.c_str(), product.c_str(), serial.c_str() );
}
/* Resolves a /dev device node to {friendly_name, hardware_id} via sysfs.
 * ttyUSB* and ttyACM* devices are walked up to their USB parent directory
 * (two levels vs. one, matching their differing sysfs layouts); other
 * devices fall back to a PCI "id" file.  Missing data degrades to the
 * device name and "n/a". */
vector<string>
get_sysfs_info(const string& device_path)
{
  string device_name = basename( device_path );
  string friendly_name;
  string hardware_id;
  string sys_device_path = format( "/sys/class/tty/%s/device", device_name.c_str() );
  if( device_name.compare(0,6,"ttyUSB") == 0 )
  {
    // ttyUSB: the USB device directory is two levels above the tty's device link.
    sys_device_path = dirname( dirname( realpath( sys_device_path ) ) );
    if( path_exists( sys_device_path ) )
    {
      friendly_name = usb_sysfs_friendly_name( sys_device_path );
      hardware_id = usb_sysfs_hw_string( sys_device_path );
    }
  }
  else if( device_name.compare(0,6,"ttyACM") == 0 )
  {
    // ttyACM: only one level up.
    sys_device_path = dirname( realpath( sys_device_path ) );
    if( path_exists( sys_device_path ) )
    {
      friendly_name = usb_sysfs_friendly_name( sys_device_path );
      hardware_id = usb_sysfs_hw_string( sys_device_path );
    }
  }
  else
  {
    // Try to read ID string of PCI device
    string sys_id_path = sys_device_path + "/id";
    if( path_exists( sys_id_path ) )
      hardware_id = read_line( sys_id_path );
  }
  // Fallbacks so callers always get non-empty fields.
  if( friendly_name.empty() )
    friendly_name = device_name;
  if( hardware_id.empty() )
    hardware_id = "n/a";
  vector<string> result;
  result.push_back(friendly_name);
  result.push_back(hardware_id);
  return result;
}
/* Returns the first line of the given file, or "" when the file cannot be
 * opened (or is empty). */
string
read_line(const string& file)
{
  string line;
  ifstream stream(file.c_str(), ifstream::in);
  if(stream)
    getline(stream, line);
  return line;
}
/* printf-style formatting into a std::string.
 * Returns "" when vsnprintf reports an encoding error.
 *
 * FIX: replaces the manual malloc/realloc retry loop (which compared a
 * signed int against a size_t and could silently give up after 5 rounds)
 * with the standard two-pass vsnprintf: measure first, then render into an
 * exactly-sized buffer. */
string
format(const char* format, ...)
{
  va_list ap;
  // Pass 1: vsnprintf(NULL, 0, ...) returns the required length.
  va_start(ap, format);
  int needed = vsnprintf(NULL, 0, format, ap);
  va_end(ap);
  if( needed < 0 )
    return string();  // encoding error — match the old empty-result behavior
  // Pass 2: render into a buffer with room for the NUL terminator.
  std::vector<char> buffer(needed + 1);
  va_start(ap, format);
  vsnprintf(&buffer[0], buffer.size(), format, ap);
  va_end(ap);
  return string(&buffer[0], needed);
}
/* Builds the "USB VID:PID=xxxx:xxxx SNR=..." hardware-id string from the
 * idVendor/idProduct/serial attribute files in the sysfs USB directory.
 * The SNR= part is omitted when the device has no serial file. */
string
usb_sysfs_hw_string(const string& sysfs_path)
{
  string serial_number = read_line( sysfs_path + "/serial" );
  if( serial_number.length() > 0 )
  {
    serial_number = format( "SNR=%s", serial_number.c_str() );
  }
  string vid = read_line( sysfs_path + "/idVendor" );
  string pid = read_line( sysfs_path + "/idProduct" );
  return format("USB VID:PID=%s:%s %s", vid.c_str(), pid.c_str(), serial_number.c_str() );
}
/* Linux implementation of serial::list_ports: globs the usual /dev device
 * name patterns and decorates each hit with sysfs-derived description and
 * hardware id. */
vector<PortInfo>
serial::list_ports()
{
  vector<PortInfo> results;
  // NOTE(review): tty.* and cu.* are macOS-style names; harmless here since
  // they simply match nothing on Linux — confirm before pruning.
  vector<string> search_globs;
  search_globs.push_back("/dev/ttyACM*");
  search_globs.push_back("/dev/ttyS*");
  search_globs.push_back("/dev/ttyUSB*");
  search_globs.push_back("/dev/tty.*");
  search_globs.push_back("/dev/cu.*");
  search_globs.push_back("/dev/rfcomm*");
  vector<string> devices_found = glob( search_globs );
  vector<string>::iterator iter = devices_found.begin();
  while( iter != devices_found.end() )
  {
    string device = *iter++;
    // get_sysfs_info returns {friendly_name, hardware_id}.
    vector<string> sysfs_info = get_sysfs_info( device );
    string friendly_name = sysfs_info[0];
    string hardware_id = sysfs_info[1];
    PortInfo device_entry;
    device_entry.port = device;
    device_entry.description = friendly_name;
    device_entry.hardware_id = hardware_id;
    results.push_back( device_entry );
  }
  return results;
}
#endif // defined(__linux__)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
// ---- file: src/impl/list_ports/list_ports_osx.cc (C++) ----
#if defined(__APPLE__)
#include <sys/param.h>
#include <stdint.h>
#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>
#include <IOKit/serial/IOSerialKeys.h>
#include <IOKit/IOBSD.h>
#include <iostream>
#include <string>
#include <vector>
#include "serial/serial.h"
using serial::PortInfo;
using std::string;
using std::vector;
#define HARDWARE_ID_STRING_LENGTH 128
string cfstring_to_string( CFStringRef cfstring );
string get_device_path( io_object_t& serial_port );
string get_class_name( io_object_t& obj );
io_registry_entry_t get_parent_iousb_device( io_object_t& serial_port );
string get_string_property( io_object_t& device, const char* property );
uint16_t get_int_property( io_object_t& device, const char* property );
string rtrim(const string& str);
/* Converts a CFStringRef to a std::string (ASCII encoding, MAXPATHLEN
 * buffer).  Returns "" when cfstring is NULL or the conversion fails. */
string
cfstring_to_string( CFStringRef cfstring )
{
  char cstring[MAXPATHLEN];
  string result;
  if( cfstring )
  {
    Boolean success = CFStringGetCString( cfstring,
                                          cstring,
                                          sizeof(cstring),
                                          kCFStringEncodingASCII );
    if( success )
      result = cstring;
  }
  return result;
}
/* Reads the callout device path (e.g. /dev/cu.*) of a serial-port registry
 * entry via the kIOCalloutDeviceKey property; "" when the property is
 * missing or not a string. */
string
get_device_path( io_object_t& serial_port )
{
  CFTypeRef callout_path;
  string device_path;
  callout_path = IORegistryEntryCreateCFProperty( serial_port,
                                                  CFSTR(kIOCalloutDeviceKey),
                                                  kCFAllocatorDefault,
                                                  0 );
  if (callout_path)
  {
    // The property could in principle be a non-string CFType; check first.
    if( CFGetTypeID(callout_path) == CFStringGetTypeID() )
      device_path = cfstring_to_string( static_cast<CFStringRef>(callout_path) );
    CFRelease(callout_path);
  }
  return device_path;
}
/* Returns the IOKit class name of a registry object, or "" on failure. */
string
get_class_name( io_object_t& obj )
{
  string result;
  io_name_t class_name;
  kern_return_t kern_result;
  kern_result = IOObjectGetClass( obj, class_name );
  if( kern_result == KERN_SUCCESS )
    result = class_name;
  return result;
}
/* Walks up the IO Registry service plane from a serial-port object until an
 * entry of class "IOUSBDevice" is found; returns 0 when no such ancestor
 * exists (e.g. non-USB serial ports).
 *
 * FIX: the original initialized kern_result to KERN_FAILURE and only
 * updated it inside the loop, so an object that was *already* an
 * IOUSBDevice fell through the (never-entered) loop and was wrongly
 * reported as 0. */
io_registry_entry_t
get_parent_iousb_device( io_object_t& serial_port )
{
  io_object_t device = serial_port;
  string name = get_class_name(device);
  // Walk the IO Registry tree looking for this device's parent IOUSBDevice.
  while( name != "IOUSBDevice" )
  {
    io_registry_entry_t parent = 0;
    if( IORegistryEntryGetParentEntry( device,
                                       kIOServicePlane,
                                       &parent ) != KERN_SUCCESS )
    {
      return 0;  // reached the root (or an error) without finding a USB device
    }
    device = parent;
    name = get_class_name(device);
  }
  return device;
}
/* Reads a named registry property as a string (ASCII); returns "" when the
 * device handle is 0, the property is absent, or it is not a CFString. */
string
get_string_property( io_object_t& device, const char* property )
{
  string property_name;
  if( device )
  {
    CFStringRef property_as_cfstring = CFStringCreateWithCString (
      kCFAllocatorDefault,
      property,
      kCFStringEncodingASCII );
    CFTypeRef name_as_cfstring = IORegistryEntryCreateCFProperty(
      device,
      property_as_cfstring,
      kCFAllocatorDefault,
      0 );
    if( name_as_cfstring )
    {
      // Guard against the property being some other CFType.
      if( CFGetTypeID(name_as_cfstring) == CFStringGetTypeID() )
        property_name = cfstring_to_string( static_cast<CFStringRef>(name_as_cfstring) );
      CFRelease(name_as_cfstring);
    }
    if(property_as_cfstring)
      CFRelease(property_as_cfstring);
  }
  return property_name;
}
/* Reads a named registry property as a 16-bit unsigned integer
 * (kCFNumberSInt16Type); returns 0 when the device handle is 0, the
 * property is absent, it is not a CFNumber, or the conversion fails. */
uint16_t
get_int_property( io_object_t& device, const char* property )
{
  uint16_t result = 0;
  if( device )
  {
    CFStringRef property_as_cfstring = CFStringCreateWithCString (
      kCFAllocatorDefault,
      property,
      kCFStringEncodingASCII );
    CFTypeRef number = IORegistryEntryCreateCFProperty( device,
                                                        property_as_cfstring,
                                                        kCFAllocatorDefault,
                                                        0 );
    if(property_as_cfstring)
      CFRelease(property_as_cfstring);
    if( number )
    {
      if( CFGetTypeID(number) == CFNumberGetTypeID() )
      {
        bool success = CFNumberGetValue( static_cast<CFNumberRef>(number),
                                         kCFNumberSInt16Type,
                                         &result );
        if( !success )
          result = 0;
      }
      CFRelease(number);
    }
  }
  return result;
}
/* Returns a copy of str with trailing whitespace removed; an all-whitespace
 * (or empty) input yields "". */
string rtrim(const string& str)
{
  const string whitespace = " \t\f\v\n\r";
  std::size_t last = str.find_last_not_of(whitespace);
  if (last == std::string::npos)
    return string();
  return str.substr(0, last + 1);
}
/* macOS implementation of serial::list_ports: iterates IOKit's
 * IOSerialBSDClient services and, for USB-backed ports, fills description
 * and hardware_id from the parent IOUSBDevice's properties.  Fields default
 * to "n/a" when no data is available. */
vector<PortInfo>
serial::list_ports(void)
{
  vector<PortInfo> devices_found;
  CFMutableDictionaryRef classes_to_match;
  io_iterator_t serial_port_iterator;
  io_object_t serial_port;
  mach_port_t master_port;
  kern_return_t kern_result;
  kern_result = IOMasterPort(MACH_PORT_NULL, &master_port);
  if(kern_result != KERN_SUCCESS)
    return devices_found;
  // Match every serial BSD client (both tty.* and cu.* nodes).
  classes_to_match = IOServiceMatching(kIOSerialBSDServiceValue);
  if (classes_to_match == NULL)
    return devices_found;
  CFDictionarySetValue( classes_to_match,
                        CFSTR(kIOSerialBSDTypeKey),
                        CFSTR(kIOSerialBSDAllTypes) );
  kern_result = IOServiceGetMatchingServices(master_port, classes_to_match, &serial_port_iterator);
  if (KERN_SUCCESS != kern_result)
    return devices_found;
  while ( (serial_port = IOIteratorNext(serial_port_iterator)) )
  {
    string device_path = get_device_path( serial_port );
    io_registry_entry_t parent = get_parent_iousb_device( serial_port );
    IOObjectRelease(serial_port);
    if( device_path.empty() )
      continue;
    PortInfo port_info;
    port_info.port = device_path;
    port_info.description = "n/a";
    port_info.hardware_id = "n/a";
    // parent is 0 for non-USB ports; the property getters then return ""/0.
    string device_name = rtrim( get_string_property( parent, "USB Product Name" ) );
    string vendor_name = rtrim( get_string_property( parent, "USB Vendor Name") );
    string description = rtrim( vendor_name + " " + device_name );
    if( !description.empty() )
      port_info.description = description;
    string serial_number = rtrim(get_string_property( parent, "USB Serial Number" ) );
    uint16_t vendor_id = get_int_property( parent, "idVendor" );
    uint16_t product_id = get_int_property( parent, "idProduct" );
    if( vendor_id && product_id )
    {
      char cstring[HARDWARE_ID_STRING_LENGTH];
      if(serial_number.empty())
        serial_number = "None";
      int ret = snprintf( cstring, HARDWARE_ID_STRING_LENGTH, "USB VID:PID=%04x:%04x SNR=%s",
                          vendor_id,
                          product_id,
                          serial_number.c_str() );
      if( (ret >= 0) && (ret < HARDWARE_ID_STRING_LENGTH) )
        port_info.hardware_id = cstring;
    }
    devices_found.push_back(port_info);
  }
  IOObjectRelease(serial_port_iterator);
  return devices_found;
}
#endif // defined(__APPLE__)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
src/impl/list_ports/list_ports_win.cc | C++ | #if defined(_WIN32)
/*
* Copyright (c) 2014 Craig Lilley <cralilley@gmail.com>
* This software is made available under the terms of the MIT licence.
* A copy of the licence can be obtained from:
* http://opensource.org/licenses/MIT
*/
#include "serial/serial.h"
#include <tchar.h>
#include <windows.h>
#include <setupapi.h>
#include <initguid.h>
#include <devguid.h>
#include <cstring>
using serial::PortInfo;
using std::vector;
using std::string;
static const DWORD port_name_max_length = 256;
static const DWORD friendly_name_max_length = 256;
static const DWORD hardware_id_max_length = 256;
// Convert a wide Unicode string to an UTF8 string.
//
// Returns an empty string for empty input or if the conversion fails.
// The previous implementation called WideCharToMultiByte unconditionally;
// for an empty wstring the size argument is 0, which that API treats as
// an error, and a failed size query (<= 0) would have been used as a
// string length.  Both cases are now guarded explicitly.
std::string utf8_encode(const std::wstring &wstr)
{
  if (wstr.empty())
    return std::string();
  // First call: query the required buffer size in bytes.
  int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
  if (size_needed <= 0)
    return std::string();
  std::string strTo( size_needed, 0 );
  // Second call: perform the actual conversion into strTo's buffer.
  WideCharToMultiByte (CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
  return strTo;
}
// Enumerate COM ports via the SetupAPI "Ports" device class and return
// one PortInfo per present device: registry port name, friendly name as
// the description, and the device hardware id.
vector<PortInfo>
serial::list_ports()
{
  vector<PortInfo> devices_found;
  HDEVINFO device_info_set = SetupDiGetClassDevs(
    (const GUID *) &GUID_DEVCLASS_PORTS,
    NULL,
    NULL,
    DIGCF_PRESENT);
  unsigned int device_info_set_index = 0;
  SP_DEVINFO_DATA device_info_data;
  device_info_data.cbSize = sizeof(SP_DEVINFO_DATA);
  while(SetupDiEnumDeviceInfo(device_info_set, device_info_set_index, &device_info_data))
  {
    device_info_set_index++;
    // Get port name
    HKEY hkey = SetupDiOpenDevRegKey(
      device_info_set,
      &device_info_data,
      DICS_FLAG_GLOBAL,
      0,
      DIREG_DEV,
      KEY_READ);
    // NOTE(review): hkey is not checked against INVALID_HANDLE_VALUE
    // before being used in the registry query below -- confirm this is
    // safe for devices without an accessible device key.
    TCHAR port_name[port_name_max_length];
    DWORD port_name_length = port_name_max_length;
    LONG return_code = RegQueryValueEx(
      hkey,
      _T("PortName"),
      NULL,
      NULL,
      (LPBYTE)port_name,
      &port_name_length);
    RegCloseKey(hkey);
    // NOTE(review): EXIT_SUCCESS only works here because it happens to
    // equal ERROR_SUCCESS (0); ERROR_SUCCESS is the conventional
    // constant for registry return codes.
    if(return_code != EXIT_SUCCESS)
      continue;
    // port_name_length counts bytes including the terminator; force a
    // terminator defensively.
    if(port_name_length > 0 && port_name_length <= port_name_max_length)
      port_name[port_name_length-1] = '\0';
    else
      port_name[0] = '\0';
    // Ignore parallel ports
    if(_tcsstr(port_name, _T("LPT")) != NULL)
      continue;
    // Get port friendly name
    TCHAR friendly_name[friendly_name_max_length];
    DWORD friendly_name_actual_length = 0;
    BOOL got_friendly_name = SetupDiGetDeviceRegistryProperty(
      device_info_set,
      &device_info_data,
      SPDRP_FRIENDLYNAME,
      NULL,
      (PBYTE)friendly_name,
      friendly_name_max_length,
      &friendly_name_actual_length);
    if(got_friendly_name == TRUE && friendly_name_actual_length > 0)
      friendly_name[friendly_name_actual_length-1] = '\0';
    else
      friendly_name[0] = '\0';
    // Get hardware ID
    TCHAR hardware_id[hardware_id_max_length];
    DWORD hardware_id_actual_length = 0;
    BOOL got_hardware_id = SetupDiGetDeviceRegistryProperty(
      device_info_set,
      &device_info_data,
      SPDRP_HARDWAREID,
      NULL,
      (PBYTE)hardware_id,
      hardware_id_max_length,
      &hardware_id_actual_length);
    if(got_hardware_id == TRUE && hardware_id_actual_length > 0)
      hardware_id[hardware_id_actual_length-1] = '\0';
    else
      hardware_id[0] = '\0';
    // In a UNICODE build the TCHAR buffers are wide and must be
    // converted to UTF-8; otherwise they are already narrow strings.
#ifdef UNICODE
    std::string portName = utf8_encode(port_name);
    std::string friendlyName = utf8_encode(friendly_name);
    std::string hardwareId = utf8_encode(hardware_id);
#else
    std::string portName = port_name;
    std::string friendlyName = friendly_name;
    std::string hardwareId = hardware_id;
#endif
    PortInfo port_entry;
    port_entry.port = portName;
    port_entry.description = friendlyName;
    port_entry.hardware_id = hardwareId;
    devices_found.push_back(port_entry);
  }
  SetupDiDestroyDeviceInfoList(device_info_set);
  return devices_found;
}
#endif // #if defined(_WIN32)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
src/impl/unix.cc | C++ | /* Copyright 2012 William Woodall and John Harrison
*
* Additional Contributors: Christopher Baker @bakercp
*/
#if !defined(_WIN32)
#include <stdio.h>
#include <string.h>
#include <sstream>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/signal.h>
#include <errno.h>
#include <paths.h>
#include <sysexits.h>
#include <termios.h>
#include <sys/param.h>
#include <pthread.h>
#if defined(__linux__)
# include <linux/serial.h>
#endif
#include <sys/select.h>
#include <sys/time.h>
#include <time.h>
#ifdef __MACH__
#include <AvailabilityMacros.h>
#include <mach/clock.h>
#include <mach/mach.h>
#endif
#include "serial/impl/unix.h"
#ifndef TIOCINQ
#ifdef FIONREAD
#define TIOCINQ FIONREAD
#else
#define TIOCINQ 0x541B
#endif
#endif
#if defined(MAC_OS_X_VERSION_10_3) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_3)
#include <IOKit/serial/ioss.h>
#endif
using std::string;
using std::stringstream;
using std::invalid_argument;
using serial::MillisecondTimer;
using serial::Serial;
using serial::SerialException;
using serial::PortNotOpenedException;
using serial::IOException;
// Create a timer that expires `millis` milliseconds from now.  The
// deadline is captured once at construction; remaining() measures
// against it.
MillisecondTimer::MillisecondTimer (const uint32_t millis)
  : expiry(timespec_now())
{
  // Add the delay to the captured "now", then normalize the nanosecond
  // field back into [0, 1e9) by carrying whole seconds into tv_sec.
  const int64_t ns_total =
    expiry.tv_nsec + static_cast<int64_t> (millis) * 1000000;
  expiry.tv_sec += ns_total / 1000000000;
  expiry.tv_nsec = ns_total % 1000000000;
}
// Milliseconds left until the deadline; negative once it has passed.
int64_t
MillisecondTimer::remaining ()
{
  const timespec now = timespec_now();
  // Work in nanoseconds and divide once; integer division truncates
  // toward zero, matching the original two-step computation.
  const int64_t ns_left =
    (expiry.tv_sec - now.tv_sec) * 1000000000LL
    + (expiry.tv_nsec - now.tv_nsec);
  return ns_left / 1000000;
}
// Current monotonic time as a timespec.  On Mach/OS X (which predates
// clock_gettime) the SYSTEM_CLOCK service is queried instead of
// CLOCK_MONOTONIC.
timespec
MillisecondTimer::timespec_now ()
{
  timespec time;
# ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
  clock_serv_t cclock;
  mach_timespec_t mts;
  host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock);
  clock_get_time(cclock, &mts);
  // Release the clock service port obtained above.
  mach_port_deallocate(mach_task_self(), cclock);
  time.tv_sec = mts.tv_sec;
  time.tv_nsec = mts.tv_nsec;
# else
  clock_gettime(CLOCK_MONOTONIC, &time);
# endif
  return time;
}
// Build a timespec from a millisecond count, splitting it into whole
// seconds plus the nanosecond remainder.
timespec
timespec_from_ms (const uint32_t millis)
{
  timespec ts;
  ts.tv_sec = millis / 1000;
  ts.tv_nsec = (millis % 1000) * 1000000;
  return ts;
}
// Construct the POSIX implementation; the port is opened immediately
// when a non-empty port name is supplied.  The two mutexes serialize
// read and write access and are initialized before open() can run.
Serial::SerialImpl::SerialImpl (const string &port, unsigned long baudrate,
                                bytesize_t bytesize,
                                parity_t parity, stopbits_t stopbits,
                                flowcontrol_t flowcontrol)
  : port_ (port), fd_ (-1), is_open_ (false), xonxoff_ (false), rtscts_ (false),
    baudrate_ (baudrate), parity_ (parity),
    bytesize_ (bytesize), stopbits_ (stopbits), flowcontrol_ (flowcontrol)
{
  pthread_mutex_init(&this->read_mutex, NULL);
  pthread_mutex_init(&this->write_mutex, NULL);
  if (port_.empty () == false)
    open ();
}
// Close the port (if open) and destroy the synchronization primitives.
// NOTE(review): close() can throw IOException; an exception escaping a
// destructor terminates the program -- confirm this is acceptable or
// consider swallowing errors here.
Serial::SerialImpl::~SerialImpl ()
{
  close();
  pthread_mutex_destroy(&this->read_mutex);
  pthread_mutex_destroy(&this->write_mutex);
}
// Open the serial port named by port_ and apply the current settings.
//
// Throws invalid_argument for an empty port name, SerialException when
// the port is already open, and IOException for OS-level failures.
// EINTR from ::open is recoverable and retried via recursion.
void
Serial::SerialImpl::open ()
{
  if (port_.empty ()) {
    throw invalid_argument ("Empty port is invalid.");
  }
  if (is_open_ == true) {
    throw SerialException ("Serial port already open.");
  }
  fd_ = ::open (port_.c_str(), O_RDWR | O_NOCTTY | O_NONBLOCK);
  if (fd_ == -1) {
    switch (errno) {
    case EINTR:
      // Recurse because this is a recoverable error.
      open ();
      return;
    case ENFILE:
    case EMFILE:
      THROW (IOException, "Too many file handles open.");
    default:
      THROW (IOException, errno);
    }
  }
  try {
    reconfigurePort();
  } catch (...) {
    // Fix: previously a throw from reconfigurePort() leaked the
    // descriptor -- is_open_ was still false, so close() would never
    // release it.  Release it here and re-throw the original error.
    ::close (fd_);
    fd_ = -1;
    throw;
  }
  is_open_ = true;
}
// Apply the cached configuration (baud rate, character size, stop bits,
// parity, flow control) to the open descriptor via termios.  Baud rates
// with no Bxxx constant fall through to the OS-specific custom-baud
// mechanism (IOSSIOSPEED on OS X 10.4+, TIOCSSERIAL on Linux).  Finally
// recomputes byte_time_ns_, the estimated wire time of one byte, used
// by waitByteTimes().
void
Serial::SerialImpl::reconfigurePort ()
{
  if (fd_ == -1) {
    // Can only operate on a valid file descriptor
    THROW (IOException, "Invalid file descriptor, is the serial port open?");
  }
  struct termios options; // The options for the file descriptor
  if (tcgetattr(fd_, &options) == -1) {
    THROW (IOException, "::tcgetattr");
  }
  // set up raw mode / no echo / binary
  options.c_cflag |= (tcflag_t) (CLOCAL | CREAD);
  options.c_lflag &= (tcflag_t) ~(ICANON | ECHO | ECHOE | ECHOK | ECHONL |
                                  ISIG | IEXTEN); //|ECHOPRT
  options.c_oflag &= (tcflag_t) ~(OPOST);
  options.c_iflag &= (tcflag_t) ~(INLCR | IGNCR | ICRNL | IGNBRK);
#ifdef IUCLC
  options.c_iflag &= (tcflag_t) ~IUCLC;
#endif
#ifdef PARMRK
  options.c_iflag &= (tcflag_t) ~PARMRK;
#endif
  // setup baud rate: map the numeric rate onto the platform's Bxxx
  // constant when one exists; otherwise mark it as a custom baud.
  bool custom_baud = false;
  speed_t baud;
  switch (baudrate_) {
#ifdef B0
  case 0: baud = B0; break;
#endif
#ifdef B50
  case 50: baud = B50; break;
#endif
#ifdef B75
  case 75: baud = B75; break;
#endif
#ifdef B110
  case 110: baud = B110; break;
#endif
#ifdef B134
  case 134: baud = B134; break;
#endif
#ifdef B150
  case 150: baud = B150; break;
#endif
#ifdef B200
  case 200: baud = B200; break;
#endif
#ifdef B300
  case 300: baud = B300; break;
#endif
#ifdef B600
  case 600: baud = B600; break;
#endif
#ifdef B1200
  case 1200: baud = B1200; break;
#endif
#ifdef B1800
  case 1800: baud = B1800; break;
#endif
#ifdef B2400
  case 2400: baud = B2400; break;
#endif
#ifdef B4800
  case 4800: baud = B4800; break;
#endif
#ifdef B7200
  case 7200: baud = B7200; break;
#endif
#ifdef B9600
  case 9600: baud = B9600; break;
#endif
#ifdef B14400
  case 14400: baud = B14400; break;
#endif
#ifdef B19200
  case 19200: baud = B19200; break;
#endif
#ifdef B28800
  case 28800: baud = B28800; break;
#endif
#ifdef B57600
  case 57600: baud = B57600; break;
#endif
#ifdef B76800
  case 76800: baud = B76800; break;
#endif
#ifdef B38400
  case 38400: baud = B38400; break;
#endif
#ifdef B115200
  case 115200: baud = B115200; break;
#endif
#ifdef B128000
  case 128000: baud = B128000; break;
#endif
#ifdef B153600
  case 153600: baud = B153600; break;
#endif
#ifdef B230400
  case 230400: baud = B230400; break;
#endif
#ifdef B256000
  case 256000: baud = B256000; break;
#endif
#ifdef B460800
  case 460800: baud = B460800; break;
#endif
#ifdef B500000
  case 500000: baud = B500000; break;
#endif
#ifdef B576000
  case 576000: baud = B576000; break;
#endif
#ifdef B921600
  case 921600: baud = B921600; break;
#endif
#ifdef B1000000
  case 1000000: baud = B1000000; break;
#endif
#ifdef B1152000
  case 1152000: baud = B1152000; break;
#endif
#ifdef B1500000
  case 1500000: baud = B1500000; break;
#endif
#ifdef B2000000
  case 2000000: baud = B2000000; break;
#endif
#ifdef B2500000
  case 2500000: baud = B2500000; break;
#endif
#ifdef B3000000
  case 3000000: baud = B3000000; break;
#endif
#ifdef B3500000
  case 3500000: baud = B3500000; break;
#endif
#ifdef B4000000
  case 4000000: baud = B4000000; break;
#endif
  default:
    custom_baud = true;
  }
  if (custom_baud == false) {
#ifdef _BSD_SOURCE
    ::cfsetspeed(&options, baud);
#else
    ::cfsetispeed(&options, baud);
    ::cfsetospeed(&options, baud);
#endif
  }
  // setup char len
  options.c_cflag &= (tcflag_t) ~CSIZE;
  if (bytesize_ == eightbits)
    options.c_cflag |= CS8;
  else if (bytesize_ == sevenbits)
    options.c_cflag |= CS7;
  else if (bytesize_ == sixbits)
    options.c_cflag |= CS6;
  else if (bytesize_ == fivebits)
    options.c_cflag |= CS5;
  else
    throw invalid_argument ("invalid char len");
  // setup stopbits
  if (stopbits_ == stopbits_one)
    options.c_cflag &= (tcflag_t) ~(CSTOPB);
  else if (stopbits_ == stopbits_one_point_five)
    // ONE POINT FIVE same as TWO.. there is no POSIX support for 1.5
    options.c_cflag |= (CSTOPB);
  else if (stopbits_ == stopbits_two)
    options.c_cflag |= (CSTOPB);
  else
    throw invalid_argument ("invalid stop bit");
  // setup parity
  options.c_iflag &= (tcflag_t) ~(INPCK | ISTRIP);
  if (parity_ == parity_none) {
    options.c_cflag &= (tcflag_t) ~(PARENB | PARODD);
  } else if (parity_ == parity_even) {
    options.c_cflag &= (tcflag_t) ~(PARODD);
    options.c_cflag |= (PARENB);
  } else if (parity_ == parity_odd) {
    options.c_cflag |= (PARENB | PARODD);
  }
#ifdef CMSPAR
  // Mark/space parity is implemented with CMSPAR ("stick" parity).
  else if (parity_ == parity_mark) {
    options.c_cflag |= (PARENB | CMSPAR | PARODD);
  }
  else if (parity_ == parity_space) {
    options.c_cflag |= (PARENB | CMSPAR);
    options.c_cflag &= (tcflag_t) ~(PARODD);
  }
#else
  // CMSPAR is not defined on OSX. So do not support mark or space parity.
  else if (parity_ == parity_mark || parity_ == parity_space) {
    throw invalid_argument ("OS does not support mark or space parity");
  }
#endif  // ifdef CMSPAR
  else {
    throw invalid_argument ("invalid parity");
  }
  // setup flow control: translate the enum into the two boolean members.
  if (flowcontrol_ == flowcontrol_none) {
    xonxoff_ = false;
    rtscts_ = false;
  }
  if (flowcontrol_ == flowcontrol_software) {
    xonxoff_ = true;
    rtscts_ = false;
  }
  if (flowcontrol_ == flowcontrol_hardware) {
    xonxoff_ = false;
    rtscts_ = true;
  }
  // xonxoff
#ifdef IXANY
  if (xonxoff_)
    options.c_iflag |= (IXON | IXOFF); //|IXANY)
  else
    options.c_iflag &= (tcflag_t) ~(IXON | IXOFF | IXANY);
#else
  if (xonxoff_)
    options.c_iflag |= (IXON | IXOFF);
  else
    options.c_iflag &= (tcflag_t) ~(IXON | IXOFF);
#endif
  // rtscts
#ifdef CRTSCTS
  if (rtscts_)
    options.c_cflag |= (CRTSCTS);
  else
    options.c_cflag &= (unsigned long) ~(CRTSCTS);
#elif defined CNEW_RTSCTS
  if (rtscts_)
    options.c_cflag |= (CNEW_RTSCTS);
  else
    options.c_cflag &= (unsigned long) ~(CNEW_RTSCTS);
#else
#error "OS Support seems wrong."
#endif
  // http://www.unixwiz.net/techtips/termios-vmin-vtime.html
  // this basically sets the read call up to be a polling read,
  // but we are using select to ensure there is data available
  // to read before each call, so we should never needlessly poll
  options.c_cc[VMIN] = 0;
  options.c_cc[VTIME] = 0;
  // activate settings
  // NOTE(review): the return value of tcsetattr is not checked.
  ::tcsetattr (fd_, TCSANOW, &options);
  // apply custom baud rate, if any
  if (custom_baud == true) {
    // OS X support
#if defined(MAC_OS_X_VERSION_10_4) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_4)
    // Starting with Tiger, the IOSSIOSPEED ioctl can be used to set arbitrary baud rates
    // other than those specified by POSIX. The driver for the underlying serial hardware
    // ultimately determines which baud rates can be used. This ioctl sets both the input
    // and output speed.
    speed_t new_baud = static_cast<speed_t> (baudrate_);
    // PySerial uses IOSSIOSPEED=0x80045402
    if (-1 == ioctl (fd_, IOSSIOSPEED, &new_baud, 1)) {
      THROW (IOException, errno);
    }
    // Linux Support
#elif defined(__linux__) && defined (TIOCSSERIAL)
    struct serial_struct ser;
    if (-1 == ioctl (fd_, TIOCGSERIAL, &ser)) {
      THROW (IOException, errno);
    }
    // set custom divisor
    ser.custom_divisor = ser.baud_base / static_cast<int> (baudrate_);
    // update flags
    ser.flags &= ~ASYNC_SPD_MASK;
    ser.flags |= ASYNC_SPD_CUST;
    if (-1 == ioctl (fd_, TIOCSSERIAL, &ser)) {
      THROW (IOException, errno);
    }
#else
    throw invalid_argument ("OS does not currently support custom bauds");
#endif
  }
  // Update byte_time_ns_ based on the new settings.
  uint32_t bit_time_ns = 1e9 / baudrate_;
  // NOTE(review): baudrate_ == 0 (the B0 hang-up case above) would
  // divide by zero here -- confirm callers never configure a zero baud.
  byte_time_ns_ = bit_time_ns * (1 + bytesize_ + parity_ + stopbits_);
  // Compensate for the stopbits_one_point_five enum being equal to int 3,
  // and not 1.5.
  if (stopbits_ == stopbits_one_point_five) {
    byte_time_ns_ += ((1.5 - stopbits_one_point_five) * bit_time_ns);
  }
}
// Close the descriptor if the port is currently open.  A failing
// ::close is reported as IOException and leaves the state unchanged so
// the caller may retry.
void
Serial::SerialImpl::close ()
{
  if (!is_open_) {
    return;
  }
  if (fd_ != -1) {
    if (::close (fd_) != 0) {
      THROW (IOException, errno);
    }
    fd_ = -1;
  }
  is_open_ = false;
}
// Report whether the port is currently open.
bool
Serial::SerialImpl::isOpen () const
{
  return is_open_;
}
// Number of bytes queued in the kernel's input buffer, or 0 when the
// port is not open.  Throws IOException if the ioctl fails.
size_t
Serial::SerialImpl::available ()
{
  if (!is_open_) {
    return 0;
  }
  int queued = 0;
  if (ioctl (fd_, TIOCINQ, &queued) == -1) {
    THROW (IOException, errno);
  }
  return static_cast<size_t> (queued);
}
// Block until the descriptor becomes readable or `timeout` milliseconds
// elapse.  Returns true when data can be read, false on timeout or when
// the wait was interrupted by a signal.
bool
Serial::SerialImpl::waitReadable (uint32_t timeout)
{
  fd_set read_set;
  FD_ZERO (&read_set);
  FD_SET (fd_, &read_set);
  timespec deadline = timespec_from_ms (timeout);
  const int rc = pselect (fd_ + 1, &read_set, NULL, NULL, &deadline, NULL);
  if (rc < 0) {
    // A signal interruption is not an error; report "nothing readable".
    if (errno == EINTR) {
      return false;
    }
    THROW (IOException, errno);
  }
  if (rc == 0) {
    // Timed out with no data.
    return false;
  }
  // With rc > 0 our fd must be set; anything else is an internal error.
  if (!FD_ISSET (fd_, &read_set)) {
    THROW (IOException, "select reports ready to read, but our fd isn't"
           " in the list, this shouldn't happen!");
  }
  return true;
}
// Sleep for approximately the time needed to transfer `count` bytes at
// the configured settings (count * byte_time_ns_).
//
// Fix: the previous implementation put the entire delay into tv_nsec;
// POSIX requires tv_nsec < 1e9, so once byte_time_ns_ * count reached
// one second pselect failed with EINVAL and the wait silently did
// nothing.  Split the delay into seconds and nanoseconds instead.
void
Serial::SerialImpl::waitByteTimes (size_t count)
{
  const uint64_t total_ns =
    static_cast<uint64_t> (byte_time_ns_) * static_cast<uint64_t> (count);
  timespec wait_time;
  wait_time.tv_sec = static_cast<time_t> (total_ns / 1000000000ULL);
  wait_time.tv_nsec = static_cast<long> (total_ns % 1000000000ULL);
  pselect (0, NULL, NULL, NULL, &wait_time, NULL);
}
// Read up to `size` bytes into `buf`, honoring the configured timeouts.
//
// Behavior: whatever is already buffered is drained first with one
// non-blocking ::read; the loop then waits via waitReadable() until
// either `size` bytes are collected or the total timeout
// (read_timeout_constant + read_timeout_multiplier * size) expires.
// Returns the number of bytes actually read.  Throws
// PortNotOpenedException when closed, and SerialException when the
// device reports readiness but delivers no data (disconnect symptom).
size_t
Serial::SerialImpl::read (uint8_t *buf, size_t size)
{
  // If the port is not open, throw
  if (!is_open_) {
    throw PortNotOpenedException ("Serial::read");
  }
  size_t bytes_read = 0;
  // Calculate total timeout in milliseconds t_c + (t_m * N)
  long total_timeout_ms = timeout_.read_timeout_constant;
  total_timeout_ms += timeout_.read_timeout_multiplier * static_cast<long> (size);
  MillisecondTimer total_timeout(total_timeout_ms);
  // Pre-fill buffer with available bytes
  {
    ssize_t bytes_read_now = ::read (fd_, buf, size);
    // NOTE(review): a -1 (error) return here is silently treated the
    // same as "no data available".
    if (bytes_read_now > 0) {
      bytes_read = bytes_read_now;
    }
  }
  while (bytes_read < size) {
    int64_t timeout_remaining_ms = total_timeout.remaining();
    if (timeout_remaining_ms <= 0) {
      // Timed out
      break;
    }
    // Timeout for the next select is whichever is less of the remaining
    // total read timeout and the inter-byte timeout.
    uint32_t timeout = std::min(static_cast<uint32_t> (timeout_remaining_ms),
                                timeout_.inter_byte_timeout);
    // Wait for the device to be readable, and then attempt to read.
    if (waitReadable(timeout)) {
      // If it's a fixed-length multi-byte read, insert a wait here so that
      // we can attempt to grab the whole thing in a single IO call. Skip
      // this wait if a non-max inter_byte_timeout is specified.
      if (size > 1 && timeout_.inter_byte_timeout == Timeout::max()) {
        size_t bytes_available = available();
        if (bytes_available + bytes_read < size) {
          waitByteTimes(size - (bytes_available + bytes_read));
        }
      }
      // This should be non-blocking returning only what is available now
      // Then returning so that select can block again.
      ssize_t bytes_read_now =
        ::read (fd_, buf + bytes_read, size - bytes_read);
      // read should always return some data as select reported it was
      // ready to read when we get to this point.
      if (bytes_read_now < 1) {
        // Disconnected devices, at least on Linux, show the
        // behavior that they are always ready to read immediately
        // but reading returns nothing.
        throw SerialException ("device reports readiness to read but "
                               "returned no data (device disconnected?)");
      }
      // Update bytes_read
      bytes_read += static_cast<size_t> (bytes_read_now);
      // If bytes_read == size then we have read everything we need
      if (bytes_read == size) {
        break;
      }
      // If bytes_read < size then we have more to read
      if (bytes_read < size) {
        continue;
      }
      // If bytes_read > size then we have over read, which shouldn't happen
      if (bytes_read > size) {
        throw SerialException ("read over read, too many bytes where "
                               "read, this shouldn't happen, might be "
                               "a logical error!");
      }
    }
  }
  return bytes_read;
}
// Write `length` bytes from `data`, honoring the configured write
// timeouts (write_timeout_constant + write_timeout_multiplier * length).
// Returns the number of bytes actually written.  Throws
// PortNotOpenedException when closed, IOException on select failure,
// and SerialException when the device claims writability but accepts
// nothing (disconnect symptom).
size_t
Serial::SerialImpl::write (const uint8_t *data, size_t length)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::write");
  }
  fd_set writefds;
  size_t bytes_written = 0;
  // Calculate total timeout in milliseconds t_c + (t_m * N)
  long total_timeout_ms = timeout_.write_timeout_constant;
  total_timeout_ms += timeout_.write_timeout_multiplier * static_cast<long> (length);
  MillisecondTimer total_timeout(total_timeout_ms);
  bool first_iteration = true;
  while (bytes_written < length) {
    int64_t timeout_remaining_ms = total_timeout.remaining();
    // Only consider the timeout if it's not the first iteration of the loop
    // otherwise a timeout of 0 won't be allowed through
    if (!first_iteration && (timeout_remaining_ms <= 0)) {
      // Timed out
      break;
    }
    first_iteration = false;
    // NOTE(review): on the first iteration timeout_remaining_ms may be
    // negative; timespec_from_ms takes uint32_t, so it would wrap to a
    // very large wait -- confirm this case cannot occur in practice.
    timespec timeout(timespec_from_ms(timeout_remaining_ms));
    FD_ZERO (&writefds);
    FD_SET (fd_, &writefds);
    // Do the select
    int r = pselect (fd_ + 1, NULL, &writefds, NULL, &timeout, NULL);
    // Figure out what happened by looking at select's response 'r'
    /** Error **/
    if (r < 0) {
      // Select was interrupted, try again
      if (errno == EINTR) {
        continue;
      }
      // Otherwise there was some error
      THROW (IOException, errno);
    }
    /** Timeout **/
    if (r == 0) {
      break;
    }
    /** Port ready to write **/
    if (r > 0) {
      // Make sure our file descriptor is in the ready to write list
      if (FD_ISSET (fd_, &writefds)) {
        // This will write some
        ssize_t bytes_written_now =
          ::write (fd_, data + bytes_written, length - bytes_written);
        // even though pselect returned readiness the call might still be
        // interrupted. In that case simply retry.
        if (bytes_written_now == -1 && errno == EINTR) {
          continue;
        }
        // write should always return some data as select reported it was
        // ready to write when we get to this point.
        if (bytes_written_now < 1) {
          // Disconnected devices, at least on Linux, show the
          // behavior that they are always ready to write immediately
          // but writing returns nothing.
          std::stringstream strs;
          strs << "device reports readiness to write but "
            "returned no data (device disconnected?)";
          strs << " errno=" << errno;
          strs << " bytes_written_now= " << bytes_written_now;
          strs << " bytes_written=" << bytes_written;
          strs << " length=" << length;
          throw SerialException(strs.str().c_str());
        }
        // Update bytes_written
        bytes_written += static_cast<size_t> (bytes_written_now);
        // If bytes_written == size then we have written everything we need to
        if (bytes_written == length) {
          break;
        }
        // If bytes_written < size then we have more to write
        if (bytes_written < length) {
          continue;
        }
        // If bytes_written > size then we have over written, which shouldn't happen
        if (bytes_written > length) {
          throw SerialException ("write over wrote, too many bytes where "
                                 "written, this shouldn't happen, might be "
                                 "a logical error!");
        }
      }
      // This shouldn't happen, if r > 0 our fd has to be in the list!
      // (Every path inside the FD_ISSET block above continues, breaks,
      // or throws, so this line is only reached when the fd is missing.)
      THROW (IOException, "select reports ready to write, but our fd isn't"
             " in the list, this shouldn't happen!");
    }
  }
  return bytes_written;
}
// Set the device name used by the next open(); does not reopen the port.
void
Serial::SerialImpl::setPort (const string &port)
{
  port_ = port;
}
// Device name this object is (or will be) attached to.
string
Serial::SerialImpl::getPort () const
{
  return port_;
}
// Replace the timeout configuration (copied by value).
void
Serial::SerialImpl::setTimeout (serial::Timeout &timeout)
{
  timeout_ = timeout;
}
// Current timeout configuration.
serial::Timeout
Serial::SerialImpl::getTimeout () const
{
  return timeout_;
}
// Change the baud rate; applied immediately when the port is open.
void
Serial::SerialImpl::setBaudrate (unsigned long baudrate)
{
  baudrate_ = baudrate;
  if (is_open_)
    reconfigurePort ();
}
// Currently configured baud rate.
unsigned long
Serial::SerialImpl::getBaudrate () const
{
  return baudrate_;
}
// Change the character size; applied immediately when the port is open.
void
Serial::SerialImpl::setBytesize (serial::bytesize_t bytesize)
{
  bytesize_ = bytesize;
  if (is_open_)
    reconfigurePort ();
}
// Currently configured character size.
serial::bytesize_t
Serial::SerialImpl::getBytesize () const
{
  return bytesize_;
}
// Change the parity mode; applied immediately when the port is open.
void
Serial::SerialImpl::setParity (serial::parity_t parity)
{
  parity_ = parity;
  if (is_open_)
    reconfigurePort ();
}
// Currently configured parity mode.
serial::parity_t
Serial::SerialImpl::getParity () const
{
  return parity_;
}
// Change the stop-bit setting; applied immediately when the port is open.
void
Serial::SerialImpl::setStopbits (serial::stopbits_t stopbits)
{
  stopbits_ = stopbits;
  if (is_open_)
    reconfigurePort ();
}
// Currently configured stop-bit setting.
serial::stopbits_t
Serial::SerialImpl::getStopbits () const
{
  return stopbits_;
}
// Change the flow-control mode; applied immediately when the port is open.
void
Serial::SerialImpl::setFlowcontrol (serial::flowcontrol_t flowcontrol)
{
  flowcontrol_ = flowcontrol;
  if (is_open_)
    reconfigurePort ();
}
// Currently configured flow-control mode.
serial::flowcontrol_t
Serial::SerialImpl::getFlowcontrol () const
{
  return flowcontrol_;
}
// Wait until all queued output has been transmitted (tcdrain waits, it
// does not discard).
void
Serial::SerialImpl::flush ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::flush");
  }
  tcdrain (fd_);
}
// Discard data received but not yet read.
void
Serial::SerialImpl::flushInput ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::flushInput");
  }
  tcflush (fd_, TCIFLUSH);
}
// Discard data written but not yet transmitted.
void
Serial::SerialImpl::flushOutput ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::flushOutput");
  }
  tcflush (fd_, TCOFLUSH);
}
// Transmit a break condition on the line.
// NOTE(review): POSIX leaves the meaning of a non-zero tcsendbreak
// duration implementation-defined; the intent of duration / 4 here is
// unclear -- confirm against the platforms this must support.
void
Serial::SerialImpl::sendBreak (int duration)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::sendBreak");
  }
  tcsendbreak (fd_, static_cast<int> (duration / 4));
}
// Assert (level == true) or clear the break condition on the line.
void
Serial::SerialImpl::setBreak (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setBreak");
  }
  if (level) {
    if (-1 == ioctl (fd_, TIOCSBRK))
    {
      stringstream ss;
      ss << "setBreak failed on a call to ioctl(TIOCSBRK): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  } else {
    if (-1 == ioctl (fd_, TIOCCBRK))
    {
      stringstream ss;
      ss << "setBreak failed on a call to ioctl(TIOCCBRK): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  }
}
// Assert or clear the RTS (request-to-send) modem control line via the
// TIOCMBIS (set) / TIOCMBIC (clear) bit operations.
void
Serial::SerialImpl::setRTS (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setRTS");
  }
  int command = TIOCM_RTS;
  if (level) {
    if (-1 == ioctl (fd_, TIOCMBIS, &command))
    {
      stringstream ss;
      ss << "setRTS failed on a call to ioctl(TIOCMBIS): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  } else {
    if (-1 == ioctl (fd_, TIOCMBIC, &command))
    {
      stringstream ss;
      ss << "setRTS failed on a call to ioctl(TIOCMBIC): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  }
}
// Assert or clear the DTR (data-terminal-ready) modem control line.
void
Serial::SerialImpl::setDTR (bool level)
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::setDTR");
  }
  int command = TIOCM_DTR;
  if (level) {
    if (-1 == ioctl (fd_, TIOCMBIS, &command))
    {
      stringstream ss;
      ss << "setDTR failed on a call to ioctl(TIOCMBIS): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  } else {
    if (-1 == ioctl (fd_, TIOCMBIC, &command))
    {
      stringstream ss;
      ss << "setDTR failed on a call to ioctl(TIOCMBIC): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
  }
}
// Wait for activity on the modem status lines (CTS, DSR, RI, CD).
// With TIOCMIWAIT the kernel blocks us until an actual line change;
// without it we fall back to polling TIOCMGET every millisecond.
bool
Serial::SerialImpl::waitForChange ()
{
#ifndef TIOCMIWAIT
  while (is_open_ == true) {
    int status;
    if (-1 == ioctl (fd_, TIOCMGET, &status))
    {
      stringstream ss;
      ss << "waitForChange failed on a call to ioctl(TIOCMGET): " << errno << " " << strerror(errno);
      throw(SerialException(ss.str().c_str()));
    }
    else
    {
      // NOTE(review): this fallback returns as soon as any of the four
      // lines is *currently asserted* -- it detects "line high", not a
      // level change.  Confirm that is the intended behavior.
      if (0 != (status & TIOCM_CTS)
       || 0 != (status & TIOCM_DSR)
       || 0 != (status & TIOCM_RI)
       || 0 != (status & TIOCM_CD))
      {
        return true;
      }
    }
    usleep(1000);
  }
  return false;
#else
  int command = (TIOCM_CD|TIOCM_DSR|TIOCM_RI|TIOCM_CTS);
  if (-1 == ioctl (fd_, TIOCMIWAIT, &command)) {
    stringstream ss;
    // NOTE(review): message says "waitForDSR" but this is waitForChange.
    ss << "waitForDSR failed on a call to ioctl(TIOCMIWAIT): "
       << errno << " " << strerror(errno);
    throw(SerialException(ss.str().c_str()));
  }
  return true;
#endif
}
// True when the CTS (clear-to-send) line is currently asserted.
bool
Serial::SerialImpl::getCTS ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getCTS");
  }
  int status;
  if (-1 == ioctl (fd_, TIOCMGET, &status))
  {
    stringstream ss;
    ss << "getCTS failed on a call to ioctl(TIOCMGET): " << errno << " " << strerror(errno);
    throw(SerialException(ss.str().c_str()));
  }
  else
  {
    return 0 != (status & TIOCM_CTS);
  }
}
// True when the DSR (data-set-ready) line is currently asserted.
bool
Serial::SerialImpl::getDSR ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getDSR");
  }
  int status;
  if (-1 == ioctl (fd_, TIOCMGET, &status))
  {
    stringstream ss;
    ss << "getDSR failed on a call to ioctl(TIOCMGET): " << errno << " " << strerror(errno);
    throw(SerialException(ss.str().c_str()));
  }
  else
  {
    return 0 != (status & TIOCM_DSR);
  }
}
// True when the RI (ring-indicator) line is currently asserted.
bool
Serial::SerialImpl::getRI ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getRI");
  }
  int status;
  if (-1 == ioctl (fd_, TIOCMGET, &status))
  {
    stringstream ss;
    ss << "getRI failed on a call to ioctl(TIOCMGET): " << errno << " " << strerror(errno);
    throw(SerialException(ss.str().c_str()));
  }
  else
  {
    return 0 != (status & TIOCM_RI);
  }
}
// True when the CD (carrier-detect) line is currently asserted.
bool
Serial::SerialImpl::getCD ()
{
  if (is_open_ == false) {
    throw PortNotOpenedException ("Serial::getCD");
  }
  int status;
  if (-1 == ioctl (fd_, TIOCMGET, &status))
  {
    stringstream ss;
    ss << "getCD failed on a call to ioctl(TIOCMGET): " << errno << " " << strerror(errno);
    throw(SerialException(ss.str().c_str()));
  }
  else
  {
    return 0 != (status & TIOCM_CD);
  }
}
// Acquire the mutex serializing read operations; pthread errors are
// surfaced as IOException.
void
Serial::SerialImpl::readLock ()
{
  int result = pthread_mutex_lock(&this->read_mutex);
  if (result) {
    THROW (IOException, result);
  }
}
// Release the read mutex.
void
Serial::SerialImpl::readUnlock ()
{
  int result = pthread_mutex_unlock(&this->read_mutex);
  if (result) {
    THROW (IOException, result);
  }
}
// Acquire the mutex serializing write operations.
void
Serial::SerialImpl::writeLock ()
{
  int result = pthread_mutex_lock(&this->write_mutex);
  if (result) {
    THROW (IOException, result);
  }
}
// Release the write mutex.
void
Serial::SerialImpl::writeUnlock ()
{
  int result = pthread_mutex_unlock(&this->write_mutex);
  if (result) {
    THROW (IOException, result);
  }
}
#endif // !defined(_WIN32)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
src/impl/win.cc | C++ | #if defined(_WIN32)
/* Copyright 2012 William Woodall and John Harrison */
#include <sstream>
#include "serial/impl/win.h"
using std::string;
using std::wstring;
using std::stringstream;
using std::invalid_argument;
using serial::Serial;
using serial::Timeout;
using serial::bytesize_t;
using serial::parity_t;
using serial::stopbits_t;
using serial::flowcontrol_t;
using serial::SerialException;
using serial::PortNotOpenedException;
using serial::IOException;
// Prepend the Win32 device namespace prefix ("\\.\") to a port name
// unless it is already present; the prefixed form is required to open
// COM ports numbered above 9.
inline std::wstring
_prefix_port_if_needed(const std::wstring &input)
{
  static std::wstring device_namespace_prefix = L"\\\\.\\";
  const bool has_prefix =
    input.compare(0, device_namespace_prefix.size(), device_namespace_prefix) == 0;
  return has_prefix ? input : device_namespace_prefix + input;
}
// Construct the Win32 implementation; the port is opened immediately
// when a non-empty port name is supplied.  port_ is stored as a wide
// string built from the narrow input.
Serial::SerialImpl::SerialImpl (const string &port, unsigned long baudrate,
                                bytesize_t bytesize,
                                parity_t parity, stopbits_t stopbits,
                                flowcontrol_t flowcontrol)
  : port_ (port.begin(), port.end()), fd_ (INVALID_HANDLE_VALUE), is_open_ (false),
    baudrate_ (baudrate), parity_ (parity),
    bytesize_ (bytesize), stopbits_ (stopbits), flowcontrol_ (flowcontrol)
{
  if (port_.empty () == false)
    open ();
  // NOTE(review): the mutexes are created *after* open(), unlike the
  // unix implementation which initializes them first.  If open() throws,
  // these CreateMutex calls never run -- confirm no locking method can
  // be reached on a partially constructed object.
  read_mutex = CreateMutex(NULL, false, NULL);
  write_mutex = CreateMutex(NULL, false, NULL);
}
// Close the port (if open) and release the mutex handles.
// NOTE(review): close() can throw; an exception escaping a destructor
// terminates the program -- confirm this is acceptable.
Serial::SerialImpl::~SerialImpl ()
{
  this->close();
  CloseHandle(read_mutex);
  CloseHandle(write_mutex);
}
// Open the configured COM port with CreateFileW and apply the current
// settings.  Throws invalid_argument for an empty port name,
// SerialException when already open, and IOException for Win32 errors.
void
Serial::SerialImpl::open ()
{
  if (port_.empty ()) {
    throw invalid_argument ("Empty port is invalid.");
  }
  if (is_open_ == true) {
    throw SerialException ("Serial port already open.");
  }
  // See: https://github.com/wjwwood/serial/issues/84
  wstring port_with_prefix = _prefix_port_if_needed(port_);
  LPCWSTR lp_port = port_with_prefix.c_str();
  // Exclusive access (share mode 0), synchronous I/O.
  fd_ = CreateFileW(lp_port,
                    GENERIC_READ | GENERIC_WRITE,
                    0,
                    0,
                    OPEN_EXISTING,
                    FILE_ATTRIBUTE_NORMAL,
                    0);
  if (fd_ == INVALID_HANDLE_VALUE) {
    DWORD create_file_err = GetLastError();
    stringstream ss;
    switch (create_file_err) {
    case ERROR_FILE_NOT_FOUND:
      // Use this->getPort to convert to a std::string
      ss << "Specified port, " << this->getPort() << ", does not exist.";
      THROW (IOException, ss.str().c_str());
    default:
      ss << "Unknown error opening the serial port: " << create_file_err;
      THROW (IOException, ss.str().c_str());
    }
  }
  reconfigurePort();
  is_open_ = true;
}
void
Serial::SerialImpl::reconfigurePort ()
{
if (fd_ == INVALID_HANDLE_VALUE) {
// Can only operate on a valid file descriptor
THROW (IOException, "Invalid file descriptor, is the serial port open?");
}
DCB dcbSerialParams = {0};
dcbSerialParams.DCBlength=sizeof(dcbSerialParams);
if (!GetCommState(fd_, &dcbSerialParams)) {
//error getting state
THROW (IOException, "Error getting the serial port state.");
}
// setup baud rate
switch (baudrate_) {
#ifdef CBR_0
case 0: dcbSerialParams.BaudRate = CBR_0; break;
#endif
#ifdef CBR_50
case 50: dcbSerialParams.BaudRate = CBR_50; break;
#endif
#ifdef CBR_75
case 75: dcbSerialParams.BaudRate = CBR_75; break;
#endif
#ifdef CBR_110
case 110: dcbSerialParams.BaudRate = CBR_110; break;
#endif
#ifdef CBR_134
case 134: dcbSerialParams.BaudRate = CBR_134; break;
#endif
#ifdef CBR_150
case 150: dcbSerialParams.BaudRate = CBR_150; break;
#endif
#ifdef CBR_200
case 200: dcbSerialParams.BaudRate = CBR_200; break;
#endif
#ifdef CBR_300
case 300: dcbSerialParams.BaudRate = CBR_300; break;
#endif
#ifdef CBR_600
case 600: dcbSerialParams.BaudRate = CBR_600; break;
#endif
#ifdef CBR_1200
case 1200: dcbSerialParams.BaudRate = CBR_1200; break;
#endif
#ifdef CBR_1800
case 1800: dcbSerialParams.BaudRate = CBR_1800; break;
#endif
#ifdef CBR_2400
case 2400: dcbSerialParams.BaudRate = CBR_2400; break;
#endif
#ifdef CBR_4800
case 4800: dcbSerialParams.BaudRate = CBR_4800; break;
#endif
#ifdef CBR_7200
case 7200: dcbSerialParams.BaudRate = CBR_7200; break;
#endif
#ifdef CBR_9600
case 9600: dcbSerialParams.BaudRate = CBR_9600; break;
#endif
#ifdef CBR_14400
case 14400: dcbSerialParams.BaudRate = CBR_14400; break;
#endif
#ifdef CBR_19200
case 19200: dcbSerialParams.BaudRate = CBR_19200; break;
#endif
#ifdef CBR_28800
case 28800: dcbSerialParams.BaudRate = CBR_28800; break;
#endif
#ifdef CBR_57600
case 57600: dcbSerialParams.BaudRate = CBR_57600; break;
#endif
#ifdef CBR_76800
case 76800: dcbSerialParams.BaudRate = CBR_76800; break;
#endif
#ifdef CBR_38400
case 38400: dcbSerialParams.BaudRate = CBR_38400; break;
#endif
#ifdef CBR_115200
case 115200: dcbSerialParams.BaudRate = CBR_115200; break;
#endif
#ifdef CBR_128000
case 128000: dcbSerialParams.BaudRate = CBR_128000; break;
#endif
#ifdef CBR_153600
case 153600: dcbSerialParams.BaudRate = CBR_153600; break;
#endif
#ifdef CBR_230400
case 230400: dcbSerialParams.BaudRate = CBR_230400; break;
#endif
#ifdef CBR_256000
case 256000: dcbSerialParams.BaudRate = CBR_256000; break;
#endif
#ifdef CBR_460800
case 460800: dcbSerialParams.BaudRate = CBR_460800; break;
#endif
#ifdef CBR_921600
case 921600: dcbSerialParams.BaudRate = CBR_921600; break;
#endif
default:
// Try to blindly assign it
dcbSerialParams.BaudRate = baudrate_;
}
// setup char len
if (bytesize_ == eightbits)
dcbSerialParams.ByteSize = 8;
else if (bytesize_ == sevenbits)
dcbSerialParams.ByteSize = 7;
else if (bytesize_ == sixbits)
dcbSerialParams.ByteSize = 6;
else if (bytesize_ == fivebits)
dcbSerialParams.ByteSize = 5;
else
throw invalid_argument ("invalid char len");
// setup stopbits
if (stopbits_ == stopbits_one)
dcbSerialParams.StopBits = ONESTOPBIT;
else if (stopbits_ == stopbits_one_point_five)
dcbSerialParams.StopBits = ONE5STOPBITS;
else if (stopbits_ == stopbits_two)
dcbSerialParams.StopBits = TWOSTOPBITS;
else
throw invalid_argument ("invalid stop bit");
// setup parity
if (parity_ == parity_none) {
dcbSerialParams.Parity = NOPARITY;
} else if (parity_ == parity_even) {
dcbSerialParams.Parity = EVENPARITY;
} else if (parity_ == parity_odd) {
dcbSerialParams.Parity = ODDPARITY;
} else if (parity_ == parity_mark) {
dcbSerialParams.Parity = MARKPARITY;
} else if (parity_ == parity_space) {
dcbSerialParams.Parity = SPACEPARITY;
} else {
throw invalid_argument ("invalid parity");
}
// setup flowcontrol
if (flowcontrol_ == flowcontrol_none) {
dcbSerialParams.fOutxCtsFlow = false;
dcbSerialParams.fRtsControl = RTS_CONTROL_DISABLE;
dcbSerialParams.fOutX = false;
dcbSerialParams.fInX = false;
}
if (flowcontrol_ == flowcontrol_software) {
dcbSerialParams.fOutxCtsFlow = false;
dcbSerialParams.fRtsControl = RTS_CONTROL_DISABLE;
dcbSerialParams.fOutX = true;
dcbSerialParams.fInX = true;
}
if (flowcontrol_ == flowcontrol_hardware) {
dcbSerialParams.fOutxCtsFlow = true;
dcbSerialParams.fRtsControl = RTS_CONTROL_HANDSHAKE;
dcbSerialParams.fOutX = false;
dcbSerialParams.fInX = false;
}
// activate settings
if (!SetCommState(fd_, &dcbSerialParams)){
CloseHandle(fd_);
THROW (IOException, "Error setting serial port settings.");
}
// Setup timeouts
COMMTIMEOUTS timeouts = {0};
timeouts.ReadIntervalTimeout = timeout_.inter_byte_timeout;
timeouts.ReadTotalTimeoutConstant = timeout_.read_timeout_constant;
timeouts.ReadTotalTimeoutMultiplier = timeout_.read_timeout_multiplier;
timeouts.WriteTotalTimeoutConstant = timeout_.write_timeout_constant;
timeouts.WriteTotalTimeoutMultiplier = timeout_.write_timeout_multiplier;
if (!SetCommTimeouts(fd_, &timeouts)) {
THROW (IOException, "Error setting timeouts.");
}
}
void
Serial::SerialImpl::close ()
{
if (is_open_ == true) {
if (fd_ != INVALID_HANDLE_VALUE) {
int ret;
ret = CloseHandle(fd_);
if (ret == 0) {
stringstream ss;
ss << "Error while closing serial port: " << GetLastError();
THROW (IOException, ss.str().c_str());
} else {
fd_ = INVALID_HANDLE_VALUE;
}
}
is_open_ = false;
}
}
bool
Serial::SerialImpl::isOpen () const
{
return is_open_;
}
size_t
Serial::SerialImpl::available ()
{
if (!is_open_) {
return 0;
}
COMSTAT cs;
if (!ClearCommError(fd_, NULL, &cs)) {
stringstream ss;
ss << "Error while checking status of the serial port: " << GetLastError();
THROW (IOException, ss.str().c_str());
}
return static_cast<size_t>(cs.cbInQue);
}
bool
Serial::SerialImpl::waitReadable (uint32_t /*timeout*/)
{
THROW (IOException, "waitReadable is not implemented on Windows.");
return false;
}
void
Serial::SerialImpl::waitByteTimes (size_t /*count*/)
{
THROW (IOException, "waitByteTimes is not implemented on Windows.");
}
size_t
Serial::SerialImpl::read (uint8_t *buf, size_t size)
{
if (!is_open_) {
throw PortNotOpenedException ("Serial::read");
}
DWORD bytes_read;
if (!ReadFile(fd_, buf, static_cast<DWORD>(size), &bytes_read, NULL)) {
stringstream ss;
ss << "Error while reading from the serial port: " << GetLastError();
THROW (IOException, ss.str().c_str());
}
return (size_t) (bytes_read);
}
size_t
Serial::SerialImpl::write (const uint8_t *data, size_t length)
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::write");
}
DWORD bytes_written;
if (!WriteFile(fd_, data, static_cast<DWORD>(length), &bytes_written, NULL)) {
stringstream ss;
ss << "Error while writing to the serial port: " << GetLastError();
THROW (IOException, ss.str().c_str());
}
return (size_t) (bytes_written);
}
void
Serial::SerialImpl::setPort (const string &port)
{
port_ = wstring(port.begin(), port.end());
}
string
Serial::SerialImpl::getPort () const
{
return string(port_.begin(), port_.end());
}
void
Serial::SerialImpl::setTimeout (serial::Timeout &timeout)
{
timeout_ = timeout;
if (is_open_) {
reconfigurePort ();
}
}
serial::Timeout
Serial::SerialImpl::getTimeout () const
{
return timeout_;
}
void
Serial::SerialImpl::setBaudrate (unsigned long baudrate)
{
baudrate_ = baudrate;
if (is_open_) {
reconfigurePort ();
}
}
unsigned long
Serial::SerialImpl::getBaudrate () const
{
return baudrate_;
}
void
Serial::SerialImpl::setBytesize (serial::bytesize_t bytesize)
{
bytesize_ = bytesize;
if (is_open_) {
reconfigurePort ();
}
}
serial::bytesize_t
Serial::SerialImpl::getBytesize () const
{
return bytesize_;
}
void
Serial::SerialImpl::setParity (serial::parity_t parity)
{
parity_ = parity;
if (is_open_) {
reconfigurePort ();
}
}
serial::parity_t
Serial::SerialImpl::getParity () const
{
return parity_;
}
void
Serial::SerialImpl::setStopbits (serial::stopbits_t stopbits)
{
stopbits_ = stopbits;
if (is_open_) {
reconfigurePort ();
}
}
serial::stopbits_t
Serial::SerialImpl::getStopbits () const
{
return stopbits_;
}
void
Serial::SerialImpl::setFlowcontrol (serial::flowcontrol_t flowcontrol)
{
flowcontrol_ = flowcontrol;
if (is_open_) {
reconfigurePort ();
}
}
serial::flowcontrol_t
Serial::SerialImpl::getFlowcontrol () const
{
return flowcontrol_;
}
void
Serial::SerialImpl::flush ()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::flush");
}
FlushFileBuffers (fd_);
}
void
Serial::SerialImpl::flushInput ()
{
if (is_open_ == false) {
throw PortNotOpenedException("Serial::flushInput");
}
PurgeComm(fd_, PURGE_RXCLEAR);
}
void
Serial::SerialImpl::flushOutput ()
{
if (is_open_ == false) {
throw PortNotOpenedException("Serial::flushOutput");
}
PurgeComm(fd_, PURGE_TXCLEAR);
}
void
Serial::SerialImpl::sendBreak (int /*duration*/)
{
THROW (IOException, "sendBreak is not supported on Windows.");
}
void
Serial::SerialImpl::setBreak (bool level)
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::setBreak");
}
if (level) {
EscapeCommFunction (fd_, SETBREAK);
} else {
EscapeCommFunction (fd_, CLRBREAK);
}
}
void
Serial::SerialImpl::setRTS (bool level)
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::setRTS");
}
if (level) {
EscapeCommFunction (fd_, SETRTS);
} else {
EscapeCommFunction (fd_, CLRRTS);
}
}
void
Serial::SerialImpl::setDTR (bool level)
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::setDTR");
}
if (level) {
EscapeCommFunction (fd_, SETDTR);
} else {
EscapeCommFunction (fd_, CLRDTR);
}
}
bool
Serial::SerialImpl::waitForChange ()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::waitForChange");
}
DWORD dwCommEvent;
if (!SetCommMask(fd_, EV_CTS | EV_DSR | EV_RING | EV_RLSD)) {
// Error setting communications mask
return false;
}
if (!WaitCommEvent(fd_, &dwCommEvent, NULL)) {
// An error occurred waiting for the event.
return false;
} else {
// Event has occurred.
return true;
}
}
bool
Serial::SerialImpl::getCTS ()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::getCTS");
}
DWORD dwModemStatus;
if (!GetCommModemStatus(fd_, &dwModemStatus)) {
THROW (IOException, "Error getting the status of the CTS line.");
}
return (MS_CTS_ON & dwModemStatus) != 0;
}
bool
Serial::SerialImpl::getDSR ()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::getDSR");
}
DWORD dwModemStatus;
if (!GetCommModemStatus(fd_, &dwModemStatus)) {
THROW (IOException, "Error getting the status of the DSR line.");
}
return (MS_DSR_ON & dwModemStatus) != 0;
}
bool
Serial::SerialImpl::getRI()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::getRI");
}
DWORD dwModemStatus;
if (!GetCommModemStatus(fd_, &dwModemStatus)) {
THROW (IOException, "Error getting the status of the RI line.");
}
return (MS_RING_ON & dwModemStatus) != 0;
}
bool
Serial::SerialImpl::getCD()
{
if (is_open_ == false) {
throw PortNotOpenedException ("Serial::getCD");
}
DWORD dwModemStatus;
if (!GetCommModemStatus(fd_, &dwModemStatus)) {
// Error in GetCommModemStatus;
THROW (IOException, "Error getting the status of the CD line.");
}
return (MS_RLSD_ON & dwModemStatus) != 0;
}
void
Serial::SerialImpl::readLock()
{
if (WaitForSingleObject(read_mutex, INFINITE) != WAIT_OBJECT_0) {
THROW (IOException, "Error claiming read mutex.");
}
}
void
Serial::SerialImpl::readUnlock()
{
if (!ReleaseMutex(read_mutex)) {
THROW (IOException, "Error releasing read mutex.");
}
}
void
Serial::SerialImpl::writeLock()
{
if (WaitForSingleObject(write_mutex, INFINITE) != WAIT_OBJECT_0) {
THROW (IOException, "Error claiming write mutex.");
}
}
void
Serial::SerialImpl::writeUnlock()
{
if (!ReleaseMutex(write_mutex)) {
THROW (IOException, "Error releasing write mutex.");
}
}
#endif // #if defined(_WIN32)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
src/serial.cc | C++ | /* Copyright 2012 William Woodall and John Harrison */
#include <algorithm>
#if !defined(_WIN32) && !defined(__OpenBSD__) && !defined(__FreeBSD__)
# include <alloca.h>
#endif
#if defined (__MINGW32__)
# define alloca __builtin_alloca
#endif
#include "serial/serial.h"
#ifdef _WIN32
#include "serial/impl/win.h"
#else
#include "serial/impl/unix.h"
#endif
using std::invalid_argument;
using std::min;
using std::numeric_limits;
using std::vector;
using std::size_t;
using std::string;
using serial::Serial;
using serial::SerialException;
using serial::IOException;
using serial::bytesize_t;
using serial::parity_t;
using serial::stopbits_t;
using serial::flowcontrol_t;
class Serial::ScopedReadLock {
public:
ScopedReadLock(SerialImpl *pimpl) : pimpl_(pimpl) {
this->pimpl_->readLock();
}
~ScopedReadLock() {
this->pimpl_->readUnlock();
}
private:
// Disable copy constructors
ScopedReadLock(const ScopedReadLock&);
const ScopedReadLock& operator=(ScopedReadLock);
SerialImpl *pimpl_;
};
class Serial::ScopedWriteLock {
public:
ScopedWriteLock(SerialImpl *pimpl) : pimpl_(pimpl) {
this->pimpl_->writeLock();
}
~ScopedWriteLock() {
this->pimpl_->writeUnlock();
}
private:
// Disable copy constructors
ScopedWriteLock(const ScopedWriteLock&);
const ScopedWriteLock& operator=(ScopedWriteLock);
SerialImpl *pimpl_;
};
Serial::Serial (const string &port, uint32_t baudrate, serial::Timeout timeout,
bytesize_t bytesize, parity_t parity, stopbits_t stopbits,
flowcontrol_t flowcontrol)
: pimpl_(new SerialImpl (port, baudrate, bytesize, parity,
stopbits, flowcontrol))
{
pimpl_->setTimeout(timeout);
}
Serial::~Serial ()
{
delete pimpl_;
}
void
Serial::open ()
{
pimpl_->open ();
}
void
Serial::close ()
{
pimpl_->close ();
}
bool
Serial::isOpen () const
{
return pimpl_->isOpen ();
}
size_t
Serial::available ()
{
return pimpl_->available ();
}
bool
Serial::waitReadable ()
{
serial::Timeout timeout(pimpl_->getTimeout ());
return pimpl_->waitReadable(timeout.read_timeout_constant);
}
void
Serial::waitByteTimes (size_t count)
{
pimpl_->waitByteTimes(count);
}
size_t
Serial::read_ (uint8_t *buffer, size_t size)
{
return this->pimpl_->read (buffer, size);
}
size_t
Serial::read (uint8_t *buffer, size_t size)
{
ScopedReadLock lock(this->pimpl_);
return this->pimpl_->read (buffer, size);
}
size_t
Serial::read (std::vector<uint8_t> &buffer, size_t size)
{
ScopedReadLock lock(this->pimpl_);
uint8_t *buffer_ = new uint8_t[size];
size_t bytes_read = 0;
try {
bytes_read = this->pimpl_->read (buffer_, size);
}
catch (const std::exception &e) {
delete[] buffer_;
throw;
}
buffer.insert (buffer.end (), buffer_, buffer_+bytes_read);
delete[] buffer_;
return bytes_read;
}
size_t
Serial::read (std::string &buffer, size_t size)
{
ScopedReadLock lock(this->pimpl_);
uint8_t *buffer_ = new uint8_t[size];
size_t bytes_read = 0;
try {
bytes_read = this->pimpl_->read (buffer_, size);
}
catch (const std::exception &e) {
delete[] buffer_;
throw;
}
buffer.append (reinterpret_cast<const char*>(buffer_), bytes_read);
delete[] buffer_;
return bytes_read;
}
string
Serial::read (size_t size)
{
std::string buffer;
this->read (buffer, size);
return buffer;
}
size_t
Serial::readline (string &buffer, size_t size, string eol)
{
ScopedReadLock lock(this->pimpl_);
size_t eol_len = eol.length ();
uint8_t *buffer_ = static_cast<uint8_t*>
(alloca (size * sizeof (uint8_t)));
size_t read_so_far = 0;
while (true)
{
size_t bytes_read = this->read_ (buffer_ + read_so_far, 1);
read_so_far += bytes_read;
if (bytes_read == 0) {
break; // Timeout occured on reading 1 byte
}
if(read_so_far < eol_len) continue;
if (string (reinterpret_cast<const char*>
(buffer_ + read_so_far - eol_len), eol_len) == eol) {
break; // EOL found
}
if (read_so_far == size) {
break; // Reached the maximum read length
}
}
buffer.append(reinterpret_cast<const char*> (buffer_), read_so_far);
return read_so_far;
}
string
Serial::readline (size_t size, string eol)
{
std::string buffer;
this->readline (buffer, size, eol);
return buffer;
}
vector<string>
Serial::readlines (size_t size, string eol)
{
ScopedReadLock lock(this->pimpl_);
std::vector<std::string> lines;
size_t eol_len = eol.length ();
uint8_t *buffer_ = static_cast<uint8_t*>
(alloca (size * sizeof (uint8_t)));
size_t read_so_far = 0;
size_t start_of_line = 0;
while (read_so_far < size) {
size_t bytes_read = this->read_ (buffer_+read_so_far, 1);
read_so_far += bytes_read;
if (bytes_read == 0) {
if (start_of_line != read_so_far) {
lines.push_back (
string (reinterpret_cast<const char*> (buffer_ + start_of_line),
read_so_far - start_of_line));
}
break; // Timeout occured on reading 1 byte
}
if(read_so_far < eol_len) continue;
if (string (reinterpret_cast<const char*>
(buffer_ + read_so_far - eol_len), eol_len) == eol) {
// EOL found
lines.push_back(
string(reinterpret_cast<const char*> (buffer_ + start_of_line),
read_so_far - start_of_line));
start_of_line = read_so_far;
}
if (read_so_far == size) {
if (start_of_line != read_so_far) {
lines.push_back(
string(reinterpret_cast<const char*> (buffer_ + start_of_line),
read_so_far - start_of_line));
}
break; // Reached the maximum read length
}
}
return lines;
}
size_t
Serial::write (const string &data)
{
ScopedWriteLock lock(this->pimpl_);
return this->write_ (reinterpret_cast<const uint8_t*>(data.c_str()),
data.length());
}
size_t
Serial::write (const std::vector<uint8_t> &data)
{
ScopedWriteLock lock(this->pimpl_);
return this->write_ (&data[0], data.size());
}
size_t
Serial::write (const uint8_t *data, size_t size)
{
ScopedWriteLock lock(this->pimpl_);
return this->write_(data, size);
}
size_t
Serial::write_ (const uint8_t *data, size_t length)
{
return pimpl_->write (data, length);
}
void
Serial::setPort (const string &port)
{
ScopedReadLock rlock(this->pimpl_);
ScopedWriteLock wlock(this->pimpl_);
bool was_open = pimpl_->isOpen ();
if (was_open) close();
pimpl_->setPort (port);
if (was_open) open ();
}
string
Serial::getPort () const
{
return pimpl_->getPort ();
}
void
Serial::setTimeout (serial::Timeout &timeout)
{
pimpl_->setTimeout (timeout);
}
serial::Timeout
Serial::getTimeout () const {
return pimpl_->getTimeout ();
}
void
Serial::setBaudrate (uint32_t baudrate)
{
pimpl_->setBaudrate (baudrate);
}
uint32_t
Serial::getBaudrate () const
{
return uint32_t(pimpl_->getBaudrate ());
}
void
Serial::setBytesize (bytesize_t bytesize)
{
pimpl_->setBytesize (bytesize);
}
bytesize_t
Serial::getBytesize () const
{
return pimpl_->getBytesize ();
}
void
Serial::setParity (parity_t parity)
{
pimpl_->setParity (parity);
}
parity_t
Serial::getParity () const
{
return pimpl_->getParity ();
}
void
Serial::setStopbits (stopbits_t stopbits)
{
pimpl_->setStopbits (stopbits);
}
stopbits_t
Serial::getStopbits () const
{
return pimpl_->getStopbits ();
}
void
Serial::setFlowcontrol (flowcontrol_t flowcontrol)
{
pimpl_->setFlowcontrol (flowcontrol);
}
flowcontrol_t
Serial::getFlowcontrol () const
{
return pimpl_->getFlowcontrol ();
}
void Serial::flush ()
{
ScopedReadLock rlock(this->pimpl_);
ScopedWriteLock wlock(this->pimpl_);
pimpl_->flush ();
}
void Serial::flushInput ()
{
ScopedReadLock lock(this->pimpl_);
pimpl_->flushInput ();
}
void Serial::flushOutput ()
{
ScopedWriteLock lock(this->pimpl_);
pimpl_->flushOutput ();
}
void Serial::sendBreak (int duration)
{
pimpl_->sendBreak (duration);
}
void Serial::setBreak (bool level)
{
pimpl_->setBreak (level);
}
void Serial::setRTS (bool level)
{
pimpl_->setRTS (level);
}
void Serial::setDTR (bool level)
{
pimpl_->setDTR (level);
}
bool Serial::waitForChange()
{
return pimpl_->waitForChange();
}
bool Serial::getCTS ()
{
return pimpl_->getCTS ();
}
bool Serial::getDSR ()
{
return pimpl_->getDSR ();
}
bool Serial::getRI ()
{
return pimpl_->getRI ();
}
bool Serial::getCD ()
{
return pimpl_->getCD ();
}
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
tests/proof_of_concepts/mdc2250.cc | C++ | #include "" | wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
tests/proof_of_concepts/python_serial_test.py | Python | #!/usr/bin/env python
import serial, sys
if len(sys.argv) != 2:
print "python: Usage_serial_test <port name like: /dev/ttyUSB0>"
sys.exit(1)
sio = serial.Serial(sys.argv[1], 115200)
sio.timeout = 250
while True:
sio.write("Testing.")
print sio.read(8)
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
tests/proof_of_concepts/tokenizer.cc | C++ | #include <iostream>
#include <string>
#include <vector>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/foreach.hpp>
void
_delimeter_tokenizer (std::string &data, std::vector<std::string> &tokens,
std::string delimeter)
{
boost::split(tokens, data, boost::is_any_of(delimeter));
}
typedef boost::function<void(std::string&,std::vector<std::string>&)> TokenizerType;
int main(void) {
std::string data = "a\rb\rc\r";
std::vector<std::string> tokens;
std::string delimeter = "\r";
TokenizerType f = boost::bind(_delimeter_tokenizer, _1, _2, delimeter);
f(data, tokens);
BOOST_FOREACH(std::string token, tokens)
std::cout << token << std::endl;
return 0;
} | wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
tests/unit/unix_timer_tests.cc | C++ | #include "gtest/gtest.h"
#include "serial/impl/unix.h"
#include <unistd.h>
#include <stdlib.h>
using serial::MillisecondTimer;
namespace {
/**
* Do 100 trials of timing gaps between 0 and 19 milliseconds.
* Expect accuracy within one millisecond.
*/
TEST(timer_tests, short_intervals) {
for (int trial = 0; trial < 100; trial++)
{
uint32_t ms = rand() % 20;
MillisecondTimer mt(ms);
usleep(1000 * ms);
int32_t r = mt.remaining();
// 1ms slush, for the cost of calling usleep.
EXPECT_NEAR(r+1, 0, 1);
}
}
TEST(timer_tests, overlapping_long_intervals) {
MillisecondTimer* timers[10];
// Experimentally determined. Corresponds to the extra time taken by the loops,
// the big usleep, and the test infrastructure itself.
const int slush_factor = 14;
// Set up the timers to each time one second, 1ms apart.
for (int t = 0; t < 10; t++)
{
timers[t] = new MillisecondTimer(1000);
usleep(1000);
}
// Check in on them after 500ms.
usleep(500000);
for (int t = 0; t < 10; t++)
{
EXPECT_NEAR(timers[t]->remaining(), 500 - slush_factor + t, 5);
}
// Check in on them again after another 500ms and free them.
usleep(500000);
for (int t = 0; t < 10; t++)
{
EXPECT_NEAR(timers[t]->remaining(), -slush_factor + t, 5);
delete timers[t];
}
}
} // namespace
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
tests/unix_serial_tests.cc | C++ | /* To run these tests you need to change the define below to the serial port
* with a loop back device attached.
*
* Alternatively you could use an Arduino:
void setup()
{
Serial.begin(115200);
}
void loop()
{
while (Serial.available() > 0) {
Serial.write(Serial.read());
}
}
*/
#include <string>
#include "gtest/gtest.h"
// Use FRIEND_TEST... its not as nasty, thats what friends are for
// // OMG this is so nasty...
// #define private public
// #define protected public
#include "serial/serial.h"
#if defined(__linux__)
#include <pty.h>
#else
#include <util.h>
#endif
using namespace serial;
using std::string;
namespace {
class SerialTests : public ::testing::Test {
protected:
virtual void SetUp() {
if (openpty(&master_fd, &slave_fd, name, NULL, NULL) == -1) {
perror("openpty");
exit(127);
}
ASSERT_TRUE(master_fd > 0);
ASSERT_TRUE(slave_fd > 0);
ASSERT_TRUE(string(name).length() > 0);
port1 = new Serial(string(name), 115200, Timeout::simpleTimeout(250));
}
virtual void TearDown() {
port1->close();
delete port1;
}
Serial * port1;
int master_fd;
int slave_fd;
char name[100];
};
TEST_F(SerialTests, readWorks) {
write(master_fd, "abc\n", 4);
string r = port1->read(4);
EXPECT_EQ(r, string("abc\n"));
}
TEST_F(SerialTests, writeWorks) {
char buf[5] = "";
port1->write("abc\n");
read(master_fd, buf, 4);
EXPECT_EQ(string(buf, 4), string("abc\n"));
}
TEST_F(SerialTests, timeoutWorks) {
// Timeout a read, returns an empty string
string empty = port1->read();
EXPECT_EQ(empty, string(""));
// Ensure that writing/reading still works after a timeout.
write(master_fd, "abc\n", 4);
string r = port1->read(4);
EXPECT_EQ(r, string("abc\n"));
}
TEST_F(SerialTests, partialRead) {
// Write some data, but request more than was written.
write(master_fd, "abc\n", 4);
// Should timeout, but return what was in the buffer.
string empty = port1->read(10);
EXPECT_EQ(empty, string("abc\n"));
// Ensure that writing/reading still works after a timeout.
write(master_fd, "abc\n", 4);
string r = port1->read(4);
EXPECT_EQ(r, string("abc\n"));
}
} // namespace
int main(int argc, char **argv) {
try {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} catch (std::exception &e) {
std::cerr << "Unhandled Exception: " << e.what() << std::endl;
}
return 1;
}
| wjwwood/cxx_serial | 7 | Cross-platform, Serial Port library written in C++ | C++ | wjwwood | William Woodall | |
http_requester/src/http_requester.cpp | C++ | // Copyright 2023 William Woodall
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <curl/curl.h>
#include <rclcpp/rclcpp.hpp>
#include <http_requester_interfaces/srv/make_http_request.hpp>
namespace http_requester
{
extern "C" {
// libcurl write function for appending data to the result as it is received
size_t
write_func(void * buffer, size_t size, size_t nmemb, void * userp)
{
try {
if (nullptr == buffer) {
throw std::runtime_error("buffer unexpectedly nullptr");
}
auto & response_ref = *reinterpret_cast<std::string *>(userp);
size_t real_size = size * nmemb;
std::string new_data(static_cast<char *>(buffer), real_size);
response_ref += new_data;
return real_size;
} catch (const std::exception & exec) {
fprintf(stderr, "error reading data from http request: %s\n", exec.what());
return 0;
} catch (...) {
fprintf(stderr, "unhandled exception in reading data from http request\n");
return 0;
}
}
} // extern "C"
class HTTPRequesterNode : public rclcpp::Node
{
public:
explicit HTTPRequesterNode(const rclcpp::NodeOptions & node_options)
: Node("http_requester", node_options)
{
curl_handle_ = curl_easy_init();
using RequestT = std::shared_ptr<MakeHTTPRequest::Request>;
using ResponseT = std::shared_ptr<MakeHTTPRequest::Response>;
auto callback =
[this](RequestT request, ResponseT response) {
RCLCPP_INFO(
this->get_logger(),
"Making HTTP reqeust, method: '%s', url: '%s', payload: '%s'",
request->method.c_str(),
request->url.c_str(),
request->payload.c_str());
if (request->method == "POST") {
curl_easy_setopt(curl_handle_, CURLOPT_POSTFIELDS, request->payload.c_str());
curl_easy_setopt(curl_handle_, CURLOPT_POSTFIELDSIZE, request->payload.size());
} else if (request->method != "GET") {
response->status_code = 0;
response->response =
"Unsupported method '" + request->method + "', only POST or GET is supported.";
RCLCPP_ERROR(this->get_logger(), "%s", response->response.c_str());
return;
}
curl_easy_setopt(curl_handle_, CURLOPT_URL, request->url.c_str());
curl_easy_setopt(curl_handle_, CURLOPT_WRITEFUNCTION, write_func);
char error_buffer[CURL_ERROR_SIZE] = "";
curl_easy_setopt(curl_handle_, CURLOPT_ERRORBUFFER, error_buffer);
curl_easy_setopt(curl_handle_, CURLOPT_WRITEDATA, &response->response);
CURLcode ret = curl_easy_perform(curl_handle_);
curl_easy_getinfo(curl_handle_, CURLINFO_RESPONSE_CODE, &response->status_code);
if (response->status_code != 200 || ret != CURLE_OK) {
RCLCPP_WARN(
this->get_logger(),
"HTTP request failed (%d): %s", response->status_code, error_buffer);
response->response = std::string(error_buffer, strnlen(error_buffer, CURL_ERROR_SIZE));
}
};
service_ = this->create_service<MakeHTTPRequest>("~/make_http_request", callback);
RCLCPP_INFO(this->get_logger(), "Ready to serve http requests...");
}
virtual ~HTTPRequesterNode()
{
if (curl_handle_) {
curl_easy_cleanup(curl_handle_);
}
}
private:
using MakeHTTPRequest = http_requester_interfaces::srv::MakeHTTPRequest;
using ServiceT = rclcpp::Service<MakeHTTPRequest>;
std::shared_ptr<ServiceT> service_;
CURL * curl_handle_;
};
class CURLStaticInit
{
public:
CURLStaticInit()
: initialized_(false)
{
CURLcode ret = curl_global_init(CURL_GLOBAL_ALL);
if (ret != 0) {
fprintf(stderr, "Error initializing libcurl! retcode = %d", ret);
} else {
initialized_ = true;
}
}
~CURLStaticInit()
{
if (initialized_) {
curl_global_cleanup();
}
}
bool initialized_;
};
static CURLStaticInit g_curl_init;
} // namespace http_requester
#include "rclcpp_components/register_node_macro.hpp"
RCLCPP_COMPONENTS_REGISTER_NODE(http_requester::HTTPRequesterNode)
| wjwwood/http_requester | 4 | ROS packages for making HTTP Requests via ROS | C++ | wjwwood | William Woodall |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.