file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/main/kotlin/com/wire/apps/jira/core/exceptions/DomainExceptions.kt | Kotlin | package com.wire.apps.jira.core.exceptions
/**
 * Base exception for all domain-related errors.
 *
 * Sealed so the full set of domain failures is known at compile time and can
 * be matched exhaustively in a `when` expression.
 */
sealed class DomainException(
    message: String,
    cause: Throwable? = null,
) : Exception(message, cause)

/**
 * Thrown when a Jira issue is not found.
 *
 * @param key the Jira issue key (e.g. "PROJ-123") that could not be resolved
 */
class JiraIssueNotFoundException(
    key: String,
) : DomainException("Jira issue not found: $key")

/**
 * Thrown when there's a connection error with Jira.
 */
class JiraConnectionException(
    message: String,
    cause: Throwable? = null,
) : DomainException(message, cause)

/**
 * Thrown when a messaging operation fails.
 */
class MessagingException(
    message: String,
    cause: Throwable? = null,
) : DomainException(message, cause)

/**
 * Thrown when a command is invalid.
 *
 * @param command the raw command text that failed validation
 */
class InvalidCommandException(
    command: String,
) : DomainException("Invalid command: $command")
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/core/model/JiraIssue.kt | Kotlin | package com.wire.apps.jira.core.model
/**
 * Domain model for a Jira issue, decoupled from the wire-format DTOs.
 */
data class JiraIssue(
    val key: String,          // issue key, e.g. "PROJ-123"
    val summary: String,      // issue title
    val assignee: String,     // display name; "Unassigned" when the issue has no assignee
    val status: String,       // workflow status name
    val epicLink: String?,    // key of the parent epic, if any
    val sprintName: String?,  // name of the first associated sprint, if any
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/di/ModulesConfig.kt | Kotlin | package com.wire.apps.jira.di
import com.wire.apps.jira.api.CommandHandler
import com.wire.apps.jira.api.CommandMapper
import com.wire.apps.jira.api.HealthController
import com.wire.apps.jira.core.GetJiraIssue
import com.wire.apps.jira.core.JiraRepository
import com.wire.apps.jira.core.MessagingGateway
import com.wire.apps.jira.core.SendHelp
import com.wire.apps.jira.infra.config.JacksonConfig
import com.wire.apps.jira.infra.config.OutboundHttpClientConfig
import com.wire.apps.jira.infra.external.JiraHttpClient
import com.wire.apps.jira.infra.messaging.WireMessagingGateway
import com.wire.apps.jira.infra.repository.JiraRepositoryImpl
import com.wire.sdk.WireAppSdk
import com.wire.sdk.WireEventsHandlerDefault
import com.wire.sdk.model.WireMessage
import com.wire.sdk.service.WireApplicationManager
import org.koin.dsl.module
import java.util.UUID
// TODO: Split into separate module files per layer (api/ApiModule.kt, core/CoreModule.kt, infra/InfraModule.kt)

/**
 * Koin dependency-injection wiring, grouped by architectural layer.
 */
internal object ModulesConfig {
    /**
     * Infrastructure layer dependencies
     * Contains implementations of repositories, gateways, and external clients
     */
    private val infrastructure =
        module {
            // HTTP client configuration
            single { JacksonConfig.configure() }
            single { OutboundHttpClientConfig(get()) }

            // External clients
            single { JiraHttpClient(get()) }

            // Repository implementations (bind interface to implementation)
            single<JiraRepository> { JiraRepositoryImpl(get()) }

            // Gateway implementations (bind interface to implementation)
            single<MessagingGateway> { WireMessagingGateway(get()) }

            // Wire SDK setup
            single<WireApplicationManager> {
                // NOTE(review): a fresh random application id is generated on every
                // start -- confirm whether the SDK expects a stable id across restarts.
                val applicationId = UUID.randomUUID()
                val wireAppSdk =
                    WireAppSdk(
                        applicationId = applicationId,
                        // TODO: dummy use real with to environment variable
                        apiToken = "your-api-token",
                        // TODO: dummy use real with to environment variable
                        apiHost = "https://staging-nginz-https.zinfra.io",
                        // TODO: dummy use real with to environment variable
                        cryptographyStoragePassword = "myDummyPasswordOfRandom32BytesCH",
                        // Incoming text messages are routed to the command mapper.
                        object : WireEventsHandlerDefault() {
                            override fun onTextMessageReceived(wireMessage: WireMessage.Text) {
                                CommandMapper.mapCommands(wireMessage.conversationId, wireMessage.text)
                            }
                        },
                    )
                // Side effect at first resolution: starts the SDK's event listener.
                wireAppSdk.startListening()
                wireAppSdk.getApplicationManager()
            }
        }

    /**
     * Core/Domain layer dependencies
     * Contains use cases that depend only on interfaces
     */
    private val core =
        module {
            single { GetJiraIssue(get(), get()) }
            single { SendHelp(get()) }
        }

    /**
     * API/Presentation layer dependencies
     * Contains controllers and handlers
     */
    private val api =
        module {
            single { HealthController() }
            single { CommandHandler(get(), get()) }
        }

    // All modules in load order; handed to startKoin during application setup.
    val allModules =
        listOf(
            infrastructure,
            core,
            api,
        )
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/config/AppConfig.kt | Kotlin | package com.wire.apps.jira.infra.config
import com.wire.apps.jira.di.ModulesConfig.allModules
import com.wire.sdk.service.WireApplicationManager
import io.javalin.Javalin
import io.javalin.http.HttpStatus
import org.koin.core.component.KoinComponent
import org.koin.core.component.inject
import org.koin.core.context.startKoin
object AppConfig : KoinComponent {
    /**
     * Bootstraps the application: starts Koin with all DI modules, eagerly
     * resolves the WireApplicationManager (which starts event listening as a
     * side effect of its module definition), and builds a Javalin instance
     * with a catch-all exception handler.
     *
     * @return the configured (not yet started) Javalin app
     */
    fun setup(): Javalin {
        startKoin { modules(allModules) }

        // Eagerly initialize WireApplicationManager to start listening to events
        // How can this be improved? for now respects the architecture boundaries.
        val wireManager: WireApplicationManager by inject()
        wireManager // trigger initialization

        val app =
            Javalin.create().apply {
                // Catch-all: print the stack trace and map every uncaught exception
                // to a 500 response with the status message as a JSON body.
                // NOTE(review): prefer an SLF4J logger over printStackTrace().
                exception(Exception::class.java) { exception, _ ->
                    exception.printStackTrace()
                    error(HttpStatus.INTERNAL_SERVER_ERROR) { context ->
                        context.json(
                            HttpStatus.INTERNAL_SERVER_ERROR.message,
                        )
                    }
                }
            }
        return app
    }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/config/JacksonConfig.kt | Kotlin | package com.wire.apps.jira.infra.config
import com.fasterxml.jackson.databind.DeserializationFeature
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.SerializationFeature
import io.javalin.json.JavalinJackson
object JacksonConfig {
    /**
     * Builds the shared Jackson [ObjectMapper], starting from Javalin's default
     * mapper. Registers any Jackson modules found on the classpath, tolerates
     * unknown JSON properties (important for the partial Jira DTOs), and
     * pretty-prints serialized output.
     */
    fun configure(): ObjectMapper =
        JavalinJackson.defaultMapper().apply {
            findAndRegisterModules()
            configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
            enable(SerializationFeature.INDENT_OUTPUT)
        }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/config/OutboundHttpClientConfig.kt | Kotlin | package com.wire.apps.jira.infra.config
import com.fasterxml.jackson.databind.ObjectMapper
import java.net.URI
import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse
import java.time.Duration
import java.util.Base64
import java.util.concurrent.CompletableFuture
/**
 * Thin wrapper around Java's [HttpClient] for outbound JSON requests.
 */
class OutboundHttpClientConfig(
    private val mapper: ObjectMapper,
) {
    private val client =
        HttpClient
            .newBuilder()
            .connectTimeout(Duration.ofSeconds(20))
            .followRedirects(HttpClient.Redirect.NORMAL)
            .build()

    /**
     * Performs an asynchronous GET and deserializes the JSON response body.
     *
     * Authentication: pass either [token] (`Authorization: Bearer`) or
     * [basicAuth] (`Authorization: Basic`). If both are supplied, basic auth
     * takes precedence so only a single Authorization header is sent
     * (previously both headers were added, which servers may reject).
     *
     * @param url absolute request URL
     * @param token optional bearer token
     * @param basicAuth optional (user, password) pair for HTTP basic auth
     * @param responseType class the JSON body is deserialized into
     * @return future completing with the parsed body; completes exceptionally
     *   with java.io.IOException on a non-2xx status (the message contains the
     *   status code so callers can distinguish e.g. 404 from other failures)
     */
    fun <T> get(
        url: String,
        token: String? = null,
        basicAuth: Pair<String, String>? = null,
        responseType: Class<T>,
    ): CompletableFuture<T> {
        val request =
            HttpRequest
                .newBuilder(URI.create(url))
                .GET()
                .apply {
                    if (basicAuth != null) {
                        val (user, password) = basicAuth
                        val encoded = Base64.getEncoder().encodeToString("$user:$password".toByteArray())
                        header("Authorization", "Basic $encoded")
                    } else if (token != null) {
                        header("Authorization", "Bearer $token")
                    }
                    header("Accept", "application/json")
                }.build()
        return client
            .sendAsync(request, HttpResponse.BodyHandlers.ofString())
            .thenApply { response ->
                val status = response.statusCode()
                // Surface HTTP errors explicitly instead of handing an error
                // payload to Jackson and failing with a confusing parse error.
                if (status !in 200..299) {
                    throw java.io.IOException("HTTP $status from $url: ${response.body()}")
                }
                mapper.readValue(response.body(), responseType)
            }
    }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/JiraHttpClient.kt | Kotlin | package com.wire.apps.jira.infra.external
import com.wire.apps.jira.infra.config.OutboundHttpClientConfig
import com.wire.apps.jira.infra.external.dto.JiraIssueResponseDTO
import org.slf4j.LoggerFactory
import java.util.concurrent.CompletableFuture
/**
 * HTTP client for fetching Jira issues from the Jira API.
 * Returns DTOs which should be mapped to domain models by the repository layer.
 */
class JiraHttpClient(
    private val httpClient: OutboundHttpClientConfig,
) {
    private val logger = LoggerFactory.getLogger(JiraHttpClient::class.java)

    // Credentials/endpoint are read once at construction; a missing variable
    // fails fast via error() (IllegalStateException).
    // NOTE(review): these are public vals -- consider making them private.
    val username = System.getenv("JIRA_USERNAME") ?: error("JIRA_USERNAME env variable is not set")
    val password = System.getenv("JIRA_TOKEN") ?: error("JIRA_TOKEN env variable is not set")
    val url = System.getenv("JIRA_URL") ?: error("JIRA_URL env variable is not set")

    /**
     * Fetches a single Jira issue via GET {JIRA_URL}/issue/{key} using basic auth.
     *
     * @param key Jira issue key, e.g. "PROJ-123"
     * @return future with the raw API response DTO; completes exceptionally on failure
     */
    fun getJiraIssue(key: String): CompletableFuture<JiraIssueResponseDTO> {
        logger.debug("Fetching Jira issue from API: $key")
        return httpClient
            .get(
                "$url/issue/$key",
                basicAuth = username to password,
                responseType = JiraIssueResponseDTO::class.java,
            ).whenComplete { _, error ->
                // Logging only -- errors still propagate to the caller.
                if (error != null) {
                    logger.error("Error fetching Jira issue: $key", error)
                } else {
                    logger.debug("Successfully fetched Jira issue from API: $key")
                }
            }
    }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraEpicLinkDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
/** Minimal projection of a linked epic: only its issue key. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraEpicLinkDTO(
    val key: String,
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraIssueFieldsDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.annotation.JsonProperty
// === Only the fields you care about ===

/** Subset of the Jira issue `fields` object consumed by this app. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraIssueFieldsDTO(
    val summary: String, // Title
    val status: JiraStatusDTO,
    val creator: JiraUserDTO, // Author
    val assignee: JiraUserDTO?, // Can be null
    val parent: JiraEpicLinkDTO?, // Epic
    // NOTE(review): sprint data lives in a Jira custom field whose id is
    // instance-specific -- verify customfield_10007 matches the target instance.
    @JsonProperty("customfield_10007")
    val sprint: List<JiraSprintDTO>? = null, // Sprint field
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraIssueResponseDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
// === Root Response ===

/** Root payload of GET /issue/{key}: the issue key plus its fields. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraIssueResponseDTO(
    val key: String,
    val fields: JiraIssueFieldsDTO,
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraSprintDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
/** A sprint entry from the sprint custom field; only the name is used. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraSprintDTO(
    val name: String,
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraStatusDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
// === Tiny nested models ===

/** Workflow status of an issue; only the human-readable name is used. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraStatusDTO(
    val name: String,
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/external/dto/JiraUserDTO.kt | Kotlin | package com.wire.apps.jira.infra.external.dto
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.annotation.JsonProperty
/** A Jira user reference; only the display name is used. */
@JsonIgnoreProperties(ignoreUnknown = true)
data class JiraUserDTO(
    // NOTE(review): the @JsonProperty looks redundant (name matches), but may be
    // load-bearing if no Kotlin Jackson module is registered -- confirm before removing.
    @JsonProperty("displayName") val displayName: String,
)
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/messaging/WireMessagingGateway.kt | Kotlin | package com.wire.apps.jira.infra.messaging
import com.wire.apps.jira.core.MessagingGateway
import com.wire.apps.jira.core.exceptions.MessagingException
import com.wire.sdk.model.QualifiedId
import com.wire.sdk.model.WireMessage
import com.wire.sdk.service.WireApplicationManager
import org.slf4j.LoggerFactory
/**
 * Implementation of MessagingGateway using Wire SDK.
 * Handles message sending and error handling.
 */
class WireMessagingGateway(
    private val wireApplicationManager: WireApplicationManager,
) : MessagingGateway {
    private val logger = LoggerFactory.getLogger(WireMessagingGateway::class.java)

    /**
     * Sends a text message to the given conversation.
     *
     * @param conversationId conversation identifier in the form "uuid@domain"
     * @param message plain-text message body
     * @throws MessagingException if the id is malformed or the SDK send fails
     */
    override fun sendMessage(
        conversationId: String,
        message: String,
    ) {
        // Validate outside the try/catch: previously the MessagingException thrown
        // by parseQualifiedId was caught by the broad catch below and wrapped in a
        // second MessagingException, obscuring the validation error.
        val qualifiedId = parseQualifiedId(conversationId)
        try {
            logger.debug("Sending message to conversation: $conversationId")
            wireApplicationManager.sendMessage(
                WireMessage.Text.create(qualifiedId, message),
            )
            logger.debug("Message sent successfully to conversation: $conversationId")
        } catch (e: Exception) {
            logger.error("Error sending message to conversation: $conversationId", e)
            throw MessagingException("Failed to send message to conversation: $conversationId", e)
        }
    }

    /**
     * Parses a "uuid@domain" conversation id into a [QualifiedId].
     *
     * @throws MessagingException if the overall format or the UUID part is invalid
     */
    private fun parseQualifiedId(conversationId: String): QualifiedId {
        val parts = conversationId.split("@")
        if (parts.size != 2) {
            throw MessagingException("Invalid conversation ID format: $conversationId. Expected format: uuid@domain")
        }
        val uuid =
            try {
                java.util.UUID.fromString(parts[0])
            } catch (e: IllegalArgumentException) {
                throw MessagingException("Invalid UUID in conversation ID: ${parts[0]}", e)
            }
        return QualifiedId(uuid, parts[1])
    }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
src/main/kotlin/com/wire/apps/jira/infra/repository/JiraRepositoryImpl.kt | Kotlin | package com.wire.apps.jira.infra.repository
import com.wire.apps.jira.core.JiraRepository
import com.wire.apps.jira.core.exceptions.JiraConnectionException
import com.wire.apps.jira.core.exceptions.JiraIssueNotFoundException
import com.wire.apps.jira.core.model.JiraIssue
import com.wire.apps.jira.infra.external.JiraHttpClient
import org.slf4j.LoggerFactory
/**
 * Implementation of JiraRepository using HTTP client.
 * Handles DTO to domain model mapping and error handling.
 */
class JiraRepositoryImpl(
    private val jiraHttpClient: JiraHttpClient,
) : JiraRepository {
    private val logger = LoggerFactory.getLogger(JiraRepositoryImpl::class.java)

    /**
     * Fetches an issue and maps the API DTO into the domain [JiraIssue].
     * Blocks on the asynchronous HTTP call via join().
     *
     * @throws JiraIssueNotFoundException when the failure looks like an HTTP 404
     * @throws JiraConnectionException for any other failure
     */
    override fun getIssue(key: String): JiraIssue =
        try {
            logger.debug("Fetching Jira issue: $key")
            val response = jiraHttpClient.getJiraIssue(key).join()

            // Map DTO to domain model
            JiraIssue(
                key = response.key,
                summary = response.fields.summary,
                epicLink = response.fields.parent?.key,
                status = response.fields.status.name,
                assignee = response.fields.assignee?.displayName ?: "Unassigned",
                sprintName =
                    response.fields.sprint
                        ?.firstOrNull()
                        ?.name,
            ).also {
                logger.debug("Successfully fetched Jira issue: ${it.key}")
            }
        } catch (e: Exception) {
            logger.error("Error fetching Jira issue: $key", e)
            // NOTE(review): matching "404" in the exception message is fragile --
            // join() wraps failures in CompletionException, so this only works if
            // the underlying error message happens to contain "404". Prefer
            // propagating the HTTP status code explicitly from the client.
            when {
                e.message?.contains("404") == true -> throw JiraIssueNotFoundException(key)
                else -> throw JiraConnectionException("Failed to fetch Jira issue: $key", e)
            }
        }
}
| wireapp/jira-app | 0 | Jira app demo for wire integration using the jvm sdk. | Kotlin | wireapp | Wire Swiss GmbH | |
pysrc/__init__.py | Python | import enoki
from ._ek_python_test import *
| wjakob/ek_python_test | 9 | Enoki + pybind11 example project | CMake | wjakob | Wenzel Jakob | EPFL |
setup.py | Python | import setuptools
import sys  # needed for the stderr redirect in the except block below

# Build-time dependencies (scikit-build + pybind11) normally come from
# pyproject.toml when installing via pip.
try:
    from skbuild import setup
    import pybind11
except ImportError:
    # Fix: 'sys' was previously never imported, so this friendly message
    # raised a NameError instead of being printed.
    print("The preferred way to invoke 'setup.py' is via pip, as in 'pip "
          "install .'. If you wish to run the setup script directly, you must "
          "first install the build dependencies listed in pyproject.toml!",
          file=sys.stderr)
    raise

setup(
    name="ek_python_test",
    version="0.0.1",
    author="Author name",
    author_email="user@domain.org",
    description="A brief description",
    url="https://github.com/...",
    long_description="A longer description",
    long_description_content_type='text/markdown',
    cmake_args=[
        # List of commands that should be specified to CMake
        '-DSOME_FLAGS=1234'
    ],
    packages=['ek_python_test']
)
| wjakob/ek_python_test | 9 | Enoki + pybind11 example project | CMake | wjakob | Wenzel Jakob | EPFL |
src/ek_python_test.cpp | C++ | #include <enoki/array.h>
#include <enoki/math.h>
#include <enoki/cuda.h>
#include <pybind11/pybind11.h>
// Import pybind11 and Enoki namespaces
namespace py = pybind11;
namespace ek = enoki;
// The function we want to expose in Python
//
// Converts spherical coordinates to a Cartesian direction: with the usual
// convention (z = cos(theta)), theta is the polar angle and phi the azimuth.
// 'Float' may be a scalar or an Enoki array type (e.g. ek::CUDAArray<float>).
template <typename Float>
ek::Array<Float, 3> sph_to_cartesian(Float theta, Float phi) {
    auto [sin_theta, cos_theta] = ek::sincos(theta);
    auto [sin_phi, cos_phi] = ek::sincos(phi);

    return { sin_theta * cos_phi,
             sin_theta * sin_phi,
             cos_theta };
}
/* The function below is called when the extension module is loaded. It performs a
   sequence of m.def(...) calls which define functions in the module namespace 'm' */
PYBIND11_MODULE(_ek_python_test /* <- name of extension module */, m) {
    m.doc() = "Enoki & pybind11 test plugin"; // Set a docstring

    // 1. Bind the scalar version of the function
    m.def("sph_to_cartesian",    // Function name in Python
          sph_to_cartesian<float>, // Function to be exposed
          // Docstring (shown in the auto-generated help)
          "Convert from spherical to cartesian coordinates [scalar version]",
          // Designate parameter names for help and keyword-based calls
          py::arg("theta"), py::arg("phi"));

    // 2. Bind the GPU version of the function. Both bindings share one Python
    // name; pybind11 dispatches between overloads based on the argument types.
    m.def("sph_to_cartesian",
          sph_to_cartesian<ek::CUDAArray<float>>,
          "Convert from spherical to cartesian coordinates [GPU version]",
          py::arg("theta"), py::arg("phi"));
}
| wjakob/ek_python_test | 9 | Enoki + pybind11 example project | CMake | wjakob | Wenzel Jakob | EPFL |
cmake/collect-symbols-pypy.py | Python | from urllib.request import urlopen
import tarfile
import subprocess
# Collects PyPy's exported CPython-API-style symbols from official release
# archives so the macOS linker can be told they may stay undefined when
# linking an extension module.
funcs: "set[str]" = set()

# (download URL, path of the libpypy dylib inside the archive)
files = [
    ('https://downloads.python.org/pypy/pypy3.9-v7.3.11-macos_arm64.tar.bz2', 'pypy3.9-v7.3.11-macos_arm64/bin/libpypy3.9-c.dylib'),
    ('https://downloads.python.org/pypy/pypy3.11-v7.3.20-macos_arm64.tar.bz2', 'pypy3.11-v7.3.20-macos_arm64/bin/libpypy3.11-c.dylib'),
]

for f in files:
    # Stream the bz2 archive and extract only the dylib we need.
    fs = urlopen(f[0])
    ft = tarfile.open(fileobj=fs, mode="r|bz2")
    success = False
    for member in ft:  # move to the next file each loop
        if member.name == f[1]:
            ft.extract(member, path='tmp')
            success = True
    assert success
    # nm -gjU: external, defined symbols, names only; keep Python API symbols.
    out = subprocess.check_output(['nm', '-gjU', 'tmp/' + f[1]])
    for line in out.decode().split('\n'):
        if line.startswith('_Py') or line.startswith('__Py'):
            funcs.add(line)

# Emit one "-U <symbol>" linker directive per symbol, sorted for stable diffs.
with open("darwin-ld-pypy.sym", "w") as f:
    for func in sorted(list(funcs)):
        f.write(f'-U {func}\n')
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
cmake/collect-symbols.py | Python | #!/usr/bin/env python3
#
# This script collects a list of symbols that are considered to be part of the
# CPython API. The result is used to inform the macOS linker that it's fine for
# those symbols to be undefined when an extension module is linked, as they
# will be provided when the extension module is loaded into the interpreter.
from urllib.request import urlopen
import re
funcs: "set[str]" = set()

# Python 3.9: stable-ABI exports are listed in PC/python3.def ("name=..." lines).
for ver in ['3.9']:
    url = f'https://raw.githubusercontent.com/python/cpython/{ver}/PC/python3.def'
    output = urlopen(url).read().decode('utf-8')
    for match in re.findall(r" (.*)=.*", output):
        funcs.add(match)

# Python 3.10+: exports moved to PC/python3dll.c as EXPORT_FUNC(...) entries.
for ver in ['3.10', '3.11', '3.12', '3.13', '3.14', 'main']:
    url = f'https://raw.githubusercontent.com/python/cpython/{ver}/PC/python3dll.c'
    output = urlopen(url).read().decode('utf-8')
    for match in re.findall(r"EXPORT_FUNC\((.*)\)", output):
        funcs.add(match)

# 'name' is presumably matched from the EXPORT_FUNC macro definition itself
# (the regex above also hits it) rather than being a real symbol.
funcs.remove('name')

# Add a few more functions that nanobind uses and which aren't in the above list
funcs |= {
    'PyFrame_GetBack',
    'PyGILState_Check',
    'PyObject_LengthHint',
    'Py_CompileStringExFlags',
    '_PyInterpreterState_Get',
    '_PyObject_MakeTpCall',
    '_PyObject_NextNotImplemented',
    '_Py_CheckFunctionResult',
    '_Py_RefTotal'
}

# Emit "-U _<symbol>" per symbol (macOS C symbols carry a leading underscore).
with open("darwin-ld-cpython.sym", "w") as f:
    for func in sorted(list(funcs)):
        f.write(f'-U _{func}\n')
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
cmake/darwin-python-path.py | Python | #!/usr/bin/env python3
# On macOS, system python binaries like /usr/bin/python and $(xcrun -f python3)
# are shims. They do some light validation work and then spawn the "real" python
# binary. Find the "real" python by asking dyld -- sys.executable reports the
# wrong thing more often than not. This is also useful when we're running under
# a Homebrew python3 binary, which also appears to be some kind of shim.
# (Taken from https://reviews.llvm.org/D79607)
import ctypes
# Ask dyld directly for the path of the currently running executable.
dyld = ctypes.cdll.LoadLibrary('/usr/lib/system/libdyld.dylib')
namelen = ctypes.c_ulong(1024)
name = ctypes.create_string_buffer(b'\000', namelen.value)
# NOTE(review): return value is ignored; _NSGetExecutablePath signals a
# too-small buffer via its return code -- presumably 1024 bytes always suffices.
dyld._NSGetExecutablePath(ctypes.byref(name), ctypes.byref(namelen))
print(name.value.decode('utf8').strip())
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
cmake/nanobind-config.cmake | CMake | include_guard(GLOBAL)
if (NOT TARGET Python::Module)
  message(FATAL_ERROR "You must invoke 'find_package(Python COMPONENTS Interpreter Development REQUIRED)' prior to including nanobind.")
endif()

if (Python_VERSION VERSION_LESS "3.9")
  message(FATAL_ERROR "nanobind requires Python 3.9 or newer (found Python ${Python_VERSION}).")
endif()

# Determine the right suffix for ordinary and stable ABI extensions.

# We always need to know the extension
if(WIN32)
  set(NB_SUFFIX_EXT ".pyd")
else()
  set(NB_SUFFIX_EXT "${CMAKE_SHARED_MODULE_SUFFIX}")
endif()

# Check if FindPython/scikit-build-core defined a SOABI/SOSABI variable
if(DEFINED SKBUILD_SOABI)
  set(NB_SOABI "${SKBUILD_SOABI}")
elseif(DEFINED Python_SOABI)
  set(NB_SOABI "${Python_SOABI}")
endif()

if(DEFINED SKBUILD_SOSABI)
  set(NB_SOSABI "${SKBUILD_SOSABI}")
elseif(DEFINED Python_SOSABI)
  set(NB_SOSABI "${Python_SOSABI}")
endif()

# Error if scikit-build-core is trying to build Stable ABI < 3.12 wheels.
# Fix: the middle operand previously referenced the undefined variable
# SKBUILD_ABI_VERSION (typo for SKBUILD_SABI_VERSION), which evaluated to
# false and made this guard a no-op.
if(DEFINED SKBUILD_SABI_VERSION AND NOT SKBUILD_SABI_VERSION STREQUAL "" AND SKBUILD_SABI_VERSION VERSION_LESS "3.12")
  message(FATAL_ERROR "You must set tool.scikit-build.wheel.py-api to 'cp312' or later when "
                      "using scikit-build-core with nanobind, '${SKBUILD_SABI_VERSION}' is too old.")
endif()

# PyPy sets an invalid SOABI (platform missing), causing older FindPythons to
# report an incorrect value. Only use it if it looks correct (X-X-X form).
if(DEFINED NB_SOABI AND "${NB_SOABI}" MATCHES ".+-.+-.+")
  set(NB_SUFFIX ".${NB_SOABI}${NB_SUFFIX_EXT}")
endif()

if(DEFINED NB_SOSABI)
  if(NB_SOSABI STREQUAL "")
    set(NB_SUFFIX_S "${NB_SUFFIX_EXT}")
  else()
    set(NB_SUFFIX_S ".${NB_SOSABI}${NB_SUFFIX_EXT}")
  endif()
endif()

# Extract Python version and extensions (e.g. free-threaded build)
string(REGEX REPLACE "[^-]*-([^-]*)-.*" "\\1" NB_ABI "${NB_SOABI}")

# Determine whether the interpreter was built without the GIL using the ABI tag
# (free-threaded builds encode this using a trailing 't').
set(NB_FREE_THREADED 0)
if(NB_ABI MATCHES "[0-9]t")
  set(NB_FREE_THREADED 1)
endif()

# If either suffix is missing, call Python to compute it
if(NOT DEFINED NB_SUFFIX OR NOT DEFINED NB_SUFFIX_S)
  # Query Python directly to get the right suffix.
  execute_process(
    COMMAND "${Python_EXECUTABLE}" "-c"
      "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))"
    RESULT_VARIABLE NB_SUFFIX_RET
    OUTPUT_VARIABLE EXT_SUFFIX
    OUTPUT_STRIP_TRAILING_WHITESPACE)

  if(NB_SUFFIX_RET AND NOT NB_SUFFIX_RET EQUAL 0)
    message(FATAL_ERROR "nanobind: Python sysconfig query to "
                        "find 'EXT_SUFFIX' property failed!")
  endif()

  if(NOT DEFINED NB_SUFFIX)
    set(NB_SUFFIX "${EXT_SUFFIX}")
  endif()

  if(NOT DEFINED NB_SUFFIX_S)
    get_filename_component(NB_SUFFIX_EXT "${EXT_SUFFIX}" LAST_EXT)
    if(WIN32)
      set(NB_SUFFIX_S "${NB_SUFFIX_EXT}")
    else()
      set(NB_SUFFIX_S ".abi3${NB_SUFFIX_EXT}")
    endif()
  endif()
endif()

# Stash these for later use
set(NB_SUFFIX ${NB_SUFFIX} CACHE INTERNAL "")
set(NB_SUFFIX_S ${NB_SUFFIX_S} CACHE INTERNAL "")
set(NB_ABI ${NB_ABI} CACHE INTERNAL "")
set(NB_FREE_THREADED ${NB_FREE_THREADED} CACHE INTERNAL "")

get_filename_component(NB_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
get_filename_component(NB_DIR "${NB_DIR}" PATH)

set(NB_DIR ${NB_DIR} CACHE INTERNAL "")
# Generator expressions that are true in optimized configurations.
set(NB_OPT $<OR:$<CONFIG:Release>,$<CONFIG:MinSizeRel>> CACHE INTERNAL "")
set(NB_OPT_SIZE $<OR:$<CONFIG:Release>,$<CONFIG:MinSizeRel>,$<CONFIG:RelWithDebInfo>> CACHE INTERNAL "")
# ---------------------------------------------------------------------------
# Helper function to handle undefined CPython API symbols on macOS
# ---------------------------------------------------------------------------

function (nanobind_link_options name)
  if (APPLE)
    # Pass a linker response file listing interpreter-provided symbols that may
    # legitimately remain undefined when linking an extension module.
    if (Python_INTERPRETER_ID STREQUAL "PyPy")
      set(NB_LINKER_RESPONSE_FILE darwin-ld-pypy.sym)
    else()
      set(NB_LINKER_RESPONSE_FILE darwin-ld-cpython.sym)
    endif()
    target_link_options(${name} PRIVATE "-Wl,@${NB_DIR}/cmake/${NB_LINKER_RESPONSE_FILE}")
  endif()
endfunction()
# ---------------------------------------------------------------------------
# Create shared/static library targets for nanobind's non-templated core
# ---------------------------------------------------------------------------

# Creates the nanobind core library target. Build variants are encoded in the
# target name: "-static" (static library), "-abi3" (stable ABI on Windows
# linkage), "-ft" (free-threaded). AS_SYSINCLUDE marks nanobind headers as
# SYSTEM includes and disables clang-tidy on the target.
function (nanobind_build_library TARGET_NAME)
  cmake_parse_arguments(PARSE_ARGV 1 ARG
    "AS_SYSINCLUDE" "" "")

  # Idempotent: reuse an existing target of the same name.
  if (TARGET ${TARGET_NAME})
    return()
  endif()

  if (TARGET_NAME MATCHES "-static")
    set (TARGET_TYPE STATIC)
  else()
    set (TARGET_TYPE SHARED)
  endif()

  if (${ARG_AS_SYSINCLUDE})
    set (AS_SYSINCLUDE SYSTEM)
  endif()

  add_library(${TARGET_NAME} ${TARGET_TYPE}
    EXCLUDE_FROM_ALL
    ${NB_DIR}/include/nanobind/make_iterator.h
    ${NB_DIR}/include/nanobind/nanobind.h
    ${NB_DIR}/include/nanobind/nb_accessor.h
    ${NB_DIR}/include/nanobind/nb_attr.h
    ${NB_DIR}/include/nanobind/nb_call.h
    ${NB_DIR}/include/nanobind/nb_cast.h
    ${NB_DIR}/include/nanobind/nb_class.h
    ${NB_DIR}/include/nanobind/nb_defs.h
    ${NB_DIR}/include/nanobind/nb_descr.h
    ${NB_DIR}/include/nanobind/nb_enums.h
    ${NB_DIR}/include/nanobind/nb_error.h
    ${NB_DIR}/include/nanobind/nb_func.h
    ${NB_DIR}/include/nanobind/nb_lib.h
    ${NB_DIR}/include/nanobind/nb_misc.h
    ${NB_DIR}/include/nanobind/nb_python.h
    ${NB_DIR}/include/nanobind/nb_traits.h
    ${NB_DIR}/include/nanobind/nb_tuple.h
    ${NB_DIR}/include/nanobind/nb_types.h
    ${NB_DIR}/include/nanobind/ndarray.h
    ${NB_DIR}/include/nanobind/trampoline.h
    ${NB_DIR}/include/nanobind/typing.h
    ${NB_DIR}/include/nanobind/operators.h
    ${NB_DIR}/include/nanobind/stl/array.h
    ${NB_DIR}/include/nanobind/stl/bind_map.h
    ${NB_DIR}/include/nanobind/stl/bind_vector.h
    ${NB_DIR}/include/nanobind/stl/detail
    ${NB_DIR}/include/nanobind/stl/detail/nb_array.h
    ${NB_DIR}/include/nanobind/stl/detail/nb_dict.h
    ${NB_DIR}/include/nanobind/stl/detail/nb_list.h
    ${NB_DIR}/include/nanobind/stl/detail/nb_set.h
    ${NB_DIR}/include/nanobind/stl/detail/traits.h
    ${NB_DIR}/include/nanobind/stl/filesystem.h
    ${NB_DIR}/include/nanobind/stl/function.h
    ${NB_DIR}/include/nanobind/stl/list.h
    ${NB_DIR}/include/nanobind/stl/map.h
    ${NB_DIR}/include/nanobind/stl/optional.h
    ${NB_DIR}/include/nanobind/stl/pair.h
    ${NB_DIR}/include/nanobind/stl/set.h
    ${NB_DIR}/include/nanobind/stl/shared_ptr.h
    ${NB_DIR}/include/nanobind/stl/string.h
    ${NB_DIR}/include/nanobind/stl/string_view.h
    ${NB_DIR}/include/nanobind/stl/tuple.h
    ${NB_DIR}/include/nanobind/stl/unique_ptr.h
    ${NB_DIR}/include/nanobind/stl/unordered_map.h
    ${NB_DIR}/include/nanobind/stl/unordered_set.h
    ${NB_DIR}/include/nanobind/stl/variant.h
    ${NB_DIR}/include/nanobind/stl/vector.h
    ${NB_DIR}/include/nanobind/eigen/dense.h
    ${NB_DIR}/include/nanobind/eigen/sparse.h
    ${NB_DIR}/src/buffer.h
    ${NB_DIR}/src/hash.h
    ${NB_DIR}/src/nb_internals.h
    ${NB_DIR}/src/nb_internals.cpp
    ${NB_DIR}/src/nb_func.cpp
    ${NB_DIR}/src/nb_type.cpp
    ${NB_DIR}/src/nb_enum.cpp
    ${NB_DIR}/src/nb_ndarray.cpp
    ${NB_DIR}/src/nb_static_property.cpp
    ${NB_DIR}/src/nb_ft.h
    ${NB_DIR}/src/common.cpp
    ${NB_DIR}/src/error.cpp
    ${NB_DIR}/src/trampoline.cpp
    ${NB_DIR}/src/implicit.cpp
  )

  if (NB_FREE_THREADED)
    target_sources(${TARGET_NAME} PRIVATE ${NB_DIR}/src/nb_ft.cpp)
  endif()

  if (TARGET_TYPE STREQUAL "SHARED")
    nanobind_link_options(${TARGET_NAME})
    target_compile_definitions(${TARGET_NAME} PRIVATE -DNB_BUILD)
    target_compile_definitions(${TARGET_NAME} PUBLIC -DNB_SHARED)
    nanobind_lto(${TARGET_NAME})
    nanobind_strip(${TARGET_NAME})
  elseif(NOT WIN32 AND NOT APPLE)
    # Static build on ELF platforms: let the linker drop unused sections.
    target_compile_options(${TARGET_NAME} PUBLIC $<${NB_OPT_SIZE}:-ffunction-sections -fdata-sections>)
    target_link_options(${TARGET_NAME} PUBLIC $<${NB_OPT_SIZE}:-Wl,--gc-sections>)
  endif()

  set_target_properties(${TARGET_NAME} PROPERTIES
    POSITION_INDEPENDENT_CODE ON)

  if (${ARG_AS_SYSINCLUDE})
    # Don't run clang-tidy over nanobind's own sources.
    set_target_properties(${TARGET_NAME} PROPERTIES
      CXX_CLANG_TIDY "")
  endif()

  if (MSVC)
    # Do not complain about vsnprintf
    target_compile_definitions(${TARGET_NAME} PRIVATE -D_CRT_SECURE_NO_WARNINGS)
  else()
    # Generally needed to handle type punning in Python code
    target_compile_options(${TARGET_NAME} PRIVATE -fno-strict-aliasing)
  endif()

  if (WIN32)
    # On Windows the extension must link against the Python import library.
    if (${TARGET_NAME} MATCHES "-abi3")
      target_link_libraries(${TARGET_NAME} PUBLIC Python::SABIModule)
    else()
      target_link_libraries(${TARGET_NAME} PUBLIC Python::Module)
    endif()
  endif()

  if (TARGET_NAME MATCHES "-ft")
    target_compile_definitions(${TARGET_NAME} PUBLIC NB_FREE_THREADED)
  endif()

  # Nanobind performs many assertion checks -- detailed error messages aren't
  # included in Release/MinSizeRel/RelWithDebInfo modes
  target_compile_definitions(${TARGET_NAME} PRIVATE
    $<${NB_OPT_SIZE}:NB_COMPACT_ASSERTIONS>)

  # If nanobind was installed without submodule dependencies, then the
  # dependencies directory won't exist and we need to find them.
  # However, if the directory _does_ exist, then the user is free to choose
  # whether nanobind uses them (based on `NB_USE_SUBMODULE_DEPS`), with a
  # preference to choose them if `NB_USE_SUBMODULE_DEPS` is not defined
  if(IS_DIRECTORY ${NB_DIR}/ext/robin_map/include
     AND (NOT DEFINED NB_USE_SUBMODULE_DEPS OR NB_USE_SUBMODULE_DEPS)
     AND NOT TARGET tsl::robin_map)
    add_library(tsl::robin_map INTERFACE IMPORTED)
    set_target_properties(tsl::robin_map PROPERTIES
      INTERFACE_INCLUDE_DIRECTORIES ${NB_DIR}/ext/robin_map/include)
  endif()
  if(NOT TARGET tsl::robin_map)
    include(CMakeFindDependencyMacro)
    find_dependency(tsl-robin-map CONFIG REQUIRED)
  endif()
  target_link_libraries(${TARGET_NAME} PRIVATE tsl::robin_map)

  target_include_directories(${TARGET_NAME} ${AS_SYSINCLUDE} PUBLIC
    ${Python_INCLUDE_DIRS}
    ${NB_DIR}/include)

  target_compile_features(${TARGET_NAME} PUBLIC cxx_std_17)
  nanobind_set_visibility(${TARGET_NAME})

  if (MSVC)
    # warning #1388-D: base class dllexport/dllimport specification differs from that of the derived class
    target_compile_options(${TARGET_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:-Xcudafe --diag_suppress=1388>)
  endif()
endfunction()
# ---------------------------------------------------------------------------
# Define a convenience function for creating nanobind targets
# ---------------------------------------------------------------------------

# Compile for size (-Os / /Os) in optimized configurations.
function(nanobind_opt_size name)
  if (MSVC)
    target_compile_options(${name} PRIVATE $<${NB_OPT_SIZE}:$<$<COMPILE_LANGUAGE:CXX>:/Os>>)
  else()
    target_compile_options(${name} PRIVATE $<${NB_OPT_SIZE}:$<$<COMPILE_LANGUAGE:CXX>:-Os>>)
  endif()
endfunction()
# Disable the stack protector in optimized modes (non-MSVC toolchains only).
function(nanobind_disable_stack_protector name)
  if (NOT MSVC)
    # The stack protector affects binding size negatively (+8% on Linux in my
    # benchmarks). Protecting from stack smashing in a Python VM seems in any
    # case futile, so let's get rid of it by default in optimized modes.
    target_compile_options(${name} PRIVATE $<${NB_OPT}:-fno-stack-protector>)
  endif()
endfunction()
# Apply the version-specific extension filename suffix (e.g. ".cpython-312-....so").
function(nanobind_extension name)
  set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${NB_SUFFIX}")
endfunction()
# Apply the stable-ABI extension filename suffix (e.g. ".abi3.so").
function(nanobind_extension_abi3 name)
  set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${NB_SUFFIX_S}")
endfunction()
# Enable interprocedural optimization (LTO) for `name` in the Release and
# MinSizeRel configurations.
function (nanobind_lto name)
  set_property(TARGET ${name} PROPERTY INTERPROCEDURAL_OPTIMIZATION_RELEASE ON)
  set_property(TARGET ${name} PROPERTY INTERPROCEDURAL_OPTIMIZATION_MINSIZEREL ON)
endfunction()
# MSVC-specific compile flags: /bigobj (large object files) and /MP
# (parallel compilation). No-op on other compilers.
function (nanobind_compile_options name)
  if (NOT MSVC)
    return()
  endif()
  target_compile_options(${name} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:/bigobj /MP>)
endfunction()
# Strip symbols / dead code from `name` in optimized builds. Uses macOS- or
# GNU-style linker flags; does nothing on Windows.
function (nanobind_strip name)
  if (WIN32)
    return()
  endif()
  if (APPLE)
    target_link_options(${name} PRIVATE $<${NB_OPT}:-Wl,-dead_strip -Wl,-x -Wl,-S>)
  else()
    target_link_options(${name} PRIVATE $<${NB_OPT}:-Wl,-s>)
  endif()
endfunction()
# Hide C++ symbols of `name` by default (-fvisibility=hidden equivalent).
function (nanobind_set_visibility name)
  set_property(TARGET ${name} PROPERTY CXX_VISIBILITY_PRESET hidden)
endfunction()
# When building musllinux wheels (detected via the AUDITWHEEL_PLAT environment
# variable), statically link libstdc++/libgcc into `name`.
function (nanobind_musl_static_libcpp name)
  set(audit_plat "$ENV{AUDITWHEEL_PLAT}")
  if ("${audit_plat}" MATCHES "musllinux")
    target_link_options(${name} PRIVATE -static-libstdc++ -static-libgcc)
  endif()
endfunction()
# Declare a Python extension-module target named `name` and link it against a
# matching variant of the nanobind library. Unparsed arguments are source
# files; keyword options select the library flavor (NB_STATIC/NB_SHARED,
# STABLE_ABI, FREE_THREADED, NB_DOMAIN) and opt out of default size/strip
# optimizations (NOMINSIZE, NOSTRIP, PROTECT_STACK, ...).
function(nanobind_add_module name)
  cmake_parse_arguments(PARSE_ARGV 1 ARG
    "STABLE_ABI;FREE_THREADED;NB_STATIC;NB_SHARED;PROTECT_STACK;LTO;NOMINSIZE;NOSTRIP;MUSL_DYNAMIC_LIBCPP;NB_SUPPRESS_WARNINGS"
    "NB_DOMAIN" "")
  add_library(${name} MODULE ${ARG_UNPARSED_ARGUMENTS})
  nanobind_compile_options(${name})
  nanobind_link_options(${name})
  set_target_properties(${name} PROPERTIES LINKER_LANGUAGE CXX)
  # Static linking of libnanobind is the default unless NB_SHARED is given.
  if (ARG_NB_SHARED AND ARG_NB_STATIC)
    message(FATAL_ERROR "NB_SHARED and NB_STATIC cannot be specified at the same time!")
  elseif (NOT ARG_NB_SHARED)
    set(ARG_NB_STATIC TRUE)
  endif()
  # Stable ABI builds require CPython >= 3.12 and Python::SABIModule
  if ((Python_VERSION VERSION_LESS 3.12) OR
      (NOT Python_INTERPRETER_ID STREQUAL "Python") OR
      (NOT TARGET Python::SABIModule))
    set(ARG_STABLE_ABI FALSE)
  endif()
  # An ABI tag ending in 't' (e.g. "312t") denotes a free-threaded interpreter.
  if (NB_ABI MATCHES "[0-9]t")
    # Free-threaded Python interpreters don't support building a nanobind
    # module that uses the stable ABI.
    set(ARG_STABLE_ABI FALSE)
  else()
    # A free-threaded Python interpreter is required to build a free-threaded
    # nanobind module.
    set(ARG_FREE_THREADED FALSE)
  endif()
  # Compose the name of the libnanobind variant matching the chosen options.
  set(libname "nanobind")
  if (ARG_NB_STATIC)
    set(libname "${libname}-static")
  endif()
  if (ARG_STABLE_ABI)
    set(libname "${libname}-abi3")
  endif()
  if (ARG_FREE_THREADED)
    set(libname "${libname}-ft")
  endif()
  if (ARG_NB_DOMAIN AND ARG_NB_SHARED)
    set(libname ${libname}-${ARG_NB_DOMAIN})
  endif()
  if (ARG_NB_SUPPRESS_WARNINGS)
    # Forwarded to nanobind_build_library(), which then treats the nanobind
    # include directories as SYSTEM includes (suppressing their warnings).
    set(EXTRA_LIBRARY_PARAMS AS_SYSINCLUDE)
  endif()
  nanobind_build_library(${libname} ${EXTRA_LIBRARY_PARAMS})
  if (ARG_NB_DOMAIN)
    target_compile_definitions(${name} PRIVATE NB_DOMAIN=${ARG_NB_DOMAIN})
  endif()
  if (ARG_STABLE_ABI)
    # 0x030C0000 == CPython 3.12, the minimum version for stable-ABI builds.
    target_compile_definitions(${libname} PUBLIC -DPy_LIMITED_API=0x030C0000)
    nanobind_extension_abi3(${name})
  else()
    nanobind_extension(${name})
  endif()
  if (ARG_FREE_THREADED)
    target_compile_definitions(${name} PRIVATE NB_FREE_THREADED)
  endif()
  target_link_libraries(${name} PRIVATE ${libname})
  # The optimizations below are on by default; options opt *out* of them.
  if (NOT ARG_PROTECT_STACK)
    nanobind_disable_stack_protector(${name})
  endif()
  if (NOT ARG_NOMINSIZE)
    nanobind_opt_size(${name})
  endif()
  if (NOT ARG_NOSTRIP)
    nanobind_strip(${name})
  endif()
  if (ARG_LTO)
    nanobind_lto(${name})
  endif()
  if (ARG_NB_STATIC AND NOT ARG_MUSL_DYNAMIC_LIBCPP)
    nanobind_musl_static_libcpp(${name})
  endif()
  nanobind_set_visibility(${name})
endfunction()
# ---------------------------------------------------------------------------
# Detect if a list of targets uses sanitizers (ASAN/UBSAN/TSAN). If so, compute
# a shared library preload directive so that these sanitizers can be safely
# used together with a Python binary that will in general not import them.
# ---------------------------------------------------------------------------
# Scan the targets in `ARGN` (plus their direct link dependencies) for
# -fsanitize=... compile/link flags. When sanitizers are found, set `env_var`
# (in the caller's scope) to an LD_PRELOAD / DYLD_INSERT_LIBRARIES directive
# pointing at the matching sanitizer runtime libraries; otherwise set it to "".
function(nanobind_sanitizer_preload_env env_var)
  set(detected_san "")
  # Process each target
  foreach(target ${ARGN})
    if (NOT TARGET ${target})
      continue()
    endif()
    # Check for sanitizer flags in various compile and link options
    set(san_flags "")
    set(san_options_to_search
      COMPILE_OPTIONS LINK_OPTIONS
      INTERFACE_LINK_OPTIONS INTERFACE_COMPILE_OPTIONS
    )
    # Transitive properties only exist as of CMake 3.30
    if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.30")
      set(san_options_to_search
        ${san_options_to_search}
        TRANSITIVE_LINK_PROPERTIES
        TRANSITIVE_COMPILE_PROPERTIES
      )
    endif()
    # create a list of all dependent targets and scan those for dependencies on sanitizers
    set(all_deps "${target}")
    get_target_property(deps ${target} LINK_LIBRARIES)
    if(deps AND NOT deps STREQUAL "deps-NOTFOUND")
      foreach(dep ${deps})
        if(NOT "${dep}" IN_LIST all_deps AND TARGET "${dep}")
          list(APPEND all_deps "${dep}")
        endif()
      endforeach()
    endif()
    foreach(tgt ${all_deps})
      # Check target type
      get_target_property(target_type ${tgt} TYPE)
      foreach(prop ${san_options_to_search})
        # Skip non-interface properties for INTERFACE_LIBRARY targets
        # (querying them would raise a CMake error)
        if(target_type STREQUAL "INTERFACE_LIBRARY")
          if(NOT prop MATCHES "^INTERFACE_")
            continue()
          endif()
        endif()
        get_target_property(options ${tgt} ${prop})
        if(options)
          foreach(opt ${options})
            # Capture the value list of -fsanitize=...; stop at '>' so the
            # flag can also be matched inside generator expressions
            if(opt MATCHES "-fsanitize=([^ >]+)")
              list(APPEND san_flags "${CMAKE_MATCH_1}")
            endif()
          endforeach()
        endif()
      endforeach()
    endforeach()
    list(REMOVE_DUPLICATES san_flags)
    # Parse sanitizer flags: normalize the various spellings (e.g. both
    # "address" and "asan") to canonical runtime-library names
    foreach(flag ${san_flags})
      string(REPLACE "\"" "" flag "${flag}")
      string(REPLACE "," ";" san_list "${flag}")
      foreach(san ${san_list})
        if(san MATCHES "^(address|asan)$")
          list(APPEND detected_san "asan")
        elseif(san MATCHES "^(thread|tsan)$")
          list(APPEND detected_san "tsan")
        elseif(san MATCHES "^(realtime)$")
          list(APPEND detected_san "rtsan")
        elseif(san MATCHES "^(undefined|ubsan)$")
          list(APPEND detected_san "ubsan")
        endif()
      endforeach()
    endforeach()
  endforeach()
  if (detected_san)
    set(libs "")
    foreach(san ${detected_san})
      set(san_libname "")
      # Compiler-specific file name of the sanitizer runtime library
      if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
        if(APPLE)
          set(san_libname "libclang_rt.${san}_osx_dynamic.dylib")
        else()
          set(san_libname "libclang_rt.${san}.so")
        endif()
      else()
        set(san_libname "lib${san}.so")
      endif()
      # Get the full path using a file name query
      execute_process(
        COMMAND ${CMAKE_CXX_COMPILER} -print-file-name=${san_libname}
        RESULT_VARIABLE san_success
        OUTPUT_VARIABLE san_libpath
        OUTPUT_STRIP_TRAILING_WHITESPACE
      )
      if(NOT san_success EQUAL 0)
        message(FATAL_ERROR "Error querying ${san_libname}: ${san_success}")
      endif()
      # Check if a real path was returned (and not just echoing back the input)
      if(NOT san_libpath OR (san_libpath STREQUAL san_libname))
        continue()
      endif()
      # Read the file content and turn into a single-line string
      file(READ "${san_libpath}" san_libdata LIMIT 1024)
      string(REPLACE "\n" " " san_libdata "${san_libdata}")
      if(san_libdata MATCHES "INPUT[ \t]*\\([ \t]*([^ \t)]+)")
        # If this is a linker script with INPUT directive, extract the path
        list(APPEND libs "${CMAKE_MATCH_1}")
      else()
        # Use the original library path
        list(APPEND libs "${san_libpath}")
      endif()
    endforeach()
    # Set platform-specific environment variable
    string(REPLACE ";" ":" libs_str "${libs}")
    if(APPLE)
      set(${env_var} "DYLD_INSERT_LIBRARIES=${libs_str}" PARENT_SCOPE)
    else()
      set(${env_var} "LD_PRELOAD=${libs_str}" PARENT_SCOPE)
    endif()
  else()
    set(${env_var} "" PARENT_SCOPE)
  endif()
endfunction()
# On macOS, it's quite tricky to get the actual path of the Python executable
# which is often hidden behind several layers of shims. We need this path to
# inject sanitizers.
# Resolve the actual Python binary path into the NB_PY_PATH cache variable.
# On macOS a helper script unwraps interpreter shims; elsewhere the detected
# Python_EXECUTABLE is used directly. Does nothing if NB_PY_PATH is set.
function(nanobind_resolve_python_path)
  # Already resolved during a previous configure run -- nothing to do.
  if (DEFINED NB_PY_PATH)
    return()
  endif()
  if (APPLE)
    execute_process(
      COMMAND ${Python_EXECUTABLE} "${NB_DIR}/cmake/darwin-python-path.py"
      RESULT_VARIABLE rv
      OUTPUT_VARIABLE NB_PY_PATH
      OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    if (NOT rv EQUAL 0)
      message(FATAL_ERROR "Could not query Python binary path")
    endif()
  else()
    set(NB_PY_PATH "${Python_EXECUTABLE}")
  endif()
  # Persist the result across CMake runs.
  set(NB_PY_PATH ${NB_PY_PATH} CACHE STRING "" FORCE)
endfunction()
# ---------------------------------------------------------------------------
# Convenient CMake frontend for nanobind's stub generator
# ---------------------------------------------------------------------------
# Wrapper around nanobind's stub generator (stubgen.py). By default creates a
# build-time custom target `name` that writes .pyi stub file(s) for MODULE;
# with INSTALL_TIME, emits an install(CODE ...) rule that runs the generator
# during installation instead.
function (nanobind_add_stub name)
  cmake_parse_arguments(PARSE_ARGV 1 ARG "VERBOSE;INCLUDE_PRIVATE;EXCLUDE_DOCSTRINGS;EXCLUDE_VALUES;INSTALL_TIME;RECURSIVE;EXCLUDE_FROM_ALL" "MODULE;COMPONENT;PATTERN_FILE;OUTPUT_PATH" "PYTHON_PATH;DEPENDS;MARKER_FILE;OUTPUT")
  # Locate stubgen.py in both source-checkout and installed-package layouts
  if (EXISTS ${NB_DIR}/src/stubgen.py)
    set(NB_STUBGEN "${NB_DIR}/src/stubgen.py")
  elseif (EXISTS ${NB_DIR}/stubgen.py)
    set(NB_STUBGEN "${NB_DIR}/stubgen.py")
  else()
    message(FATAL_ERROR "nanobind_add_stub(): could not locate 'stubgen.py'!")
  endif()
  # Translate keyword options into stubgen command-line flags
  if (NOT ARG_VERBOSE)
    list(APPEND NB_STUBGEN_ARGS -q)
  else()
    set(NB_STUBGEN_EXTRA USES_TERMINAL)
  endif()
  if (ARG_INCLUDE_PRIVATE)
    list(APPEND NB_STUBGEN_ARGS -P)
  endif()
  if (ARG_EXCLUDE_DOCSTRINGS)
    list(APPEND NB_STUBGEN_ARGS -D)
  endif()
  if (ARG_EXCLUDE_VALUES)
    list(APPEND NB_STUBGEN_ARGS --exclude-values)
  endif()
  if (ARG_RECURSIVE)
    list(APPEND NB_STUBGEN_ARGS -r)
  endif()
  # Extra sys.path entries needed to import the module
  foreach (PYTHON_PATH IN LISTS ARG_PYTHON_PATH)
    list(APPEND NB_STUBGEN_ARGS -i "${PYTHON_PATH}")
  endforeach()
  if (ARG_PATTERN_FILE)
    list(APPEND NB_STUBGEN_ARGS -p "${ARG_PATTERN_FILE}")
  endif()
  # Marker files are both passed to stubgen and declared as build outputs
  if (ARG_MARKER_FILE)
    foreach (MARKER_FILE IN LISTS ARG_MARKER_FILE)
      list(APPEND NB_STUBGEN_ARGS -M "${MARKER_FILE}")
      list(APPEND NB_STUBGEN_OUTPUTS "${MARKER_FILE}")
    endforeach()
  endif()
  if (NOT ARG_MODULE)
    message(FATAL_ERROR "nanobind_add_stub(): a 'MODULE' argument must be specified!")
  else()
    list(APPEND NB_STUBGEN_ARGS -m "${ARG_MODULE}")
  endif()
  list(LENGTH ARG_OUTPUT OUTPUT_LEN)
  # Some sanity checks
  if (ARG_RECURSIVE)
    if (NOT ARG_INSTALL_TIME)
      if ((OUTPUT_LEN EQUAL 0) AND NOT ARG_OUTPUT_PATH)
        message(FATAL_ERROR "nanobind_add_stub(): either 'OUTPUT' or 'OUTPUT_PATH' must be specified when 'RECURSIVE' is set!")
      endif()
    endif()
  else()
    if ((OUTPUT_LEN EQUAL 0) AND NOT ARG_INSTALL_TIME)
      message(FATAL_ERROR "nanobind_add_stub(): an 'OUTPUT' argument must be specified.")
    endif()
    if ((OUTPUT_LEN GREATER 0) AND ARG_OUTPUT_PATH)
      message(FATAL_ERROR "nanobind_add_stub(): 'OUTPUT' and 'OUTPUT_PATH' can only be specified together when 'RECURSIVE' is set!")
    endif()
    if (OUTPUT_LEN GREATER 1)
      message(FATAL_ERROR "nanobind_add_stub(): specifying more than one 'OUTPUT' requires that 'RECURSIVE' is set!")
    endif()
  endif()
  if (ARG_OUTPUT_PATH)
    list(APPEND NB_STUBGEN_ARGS -O "${ARG_OUTPUT_PATH}")
  endif()
  foreach (OUTPUT IN LISTS ARG_OUTPUT)
    # In recursive mode, outputs are declared but not passed via '-o'
    if (NOT ARG_RECURSIVE)
      list(APPEND NB_STUBGEN_ARGS -o "${OUTPUT}")
    endif()
    list(APPEND NB_STUBGEN_OUTPUTS "${OUTPUT}")
  endforeach()
  file(TO_CMAKE_PATH ${Python_EXECUTABLE} NB_Python_EXECUTABLE)
  set(NB_STUBGEN_CMD "${NB_Python_EXECUTABLE}" "${NB_STUBGEN}" ${NB_STUBGEN_ARGS})
  if (NOT WIN32)
    # Pass sanitizer flags to nanobind if needed
    nanobind_sanitizer_preload_env(NB_STUBGEN_ENV ${ARG_DEPENDS})
    if (NB_STUBGEN_ENV)
      nanobind_resolve_python_path()
      if (NB_STUBGEN_ENV MATCHES asan)
        list(APPEND NB_STUBGEN_ENV "ASAN_OPTIONS=detect_leaks=0")
      endif()
      # Re-launch via 'cmake -E env' so the preload variables take effect
      set(NB_STUBGEN_CMD ${CMAKE_COMMAND} -E env "${NB_STUBGEN_ENV}" "${NB_PY_PATH}" "${NB_STUBGEN}" ${NB_STUBGEN_ARGS})
    endif()
  endif()
  if (NOT ARG_INSTALL_TIME)
    add_custom_command(
      OUTPUT ${NB_STUBGEN_OUTPUTS}
      COMMAND ${NB_STUBGEN_CMD}
      WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
      DEPENDS ${ARG_DEPENDS} "${NB_STUBGEN}" "${ARG_PATTERN_FILE}"
      ${NB_STUBGEN_EXTRA}
    )
    add_custom_target(${name} ALL DEPENDS ${NB_STUBGEN_OUTPUTS})
  else()
    set(NB_STUBGEN_EXTRA "")
    if (ARG_COMPONENT)
      list(APPEND NB_STUBGEN_EXTRA COMPONENT ${ARG_COMPONENT})
    endif()
    if (ARG_EXCLUDE_FROM_ALL)
      list(APPEND NB_STUBGEN_EXTRA EXCLUDE_FROM_ALL)
    endif()
    # \${CMAKE_INSTALL_PREFIX} has same effect as $<INSTALL_PREFIX>
    # This is for compatibility with CMake < 3.27.
    # For more info: https://github.com/wjakob/nanobind/issues/420#issuecomment-1971353531
    install(CODE "set(CMD \"${NB_STUBGEN_CMD}\")\nexecute_process(\n  COMMAND \$\{CMD\}\n  WORKING_DIRECTORY \"\${CMAKE_INSTALL_PREFIX}\"\n)" ${NB_STUBGEN_EXTRA})
  endif()
endfunction()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
docs/conf.py | Python | #!/usr/bin/env python3
#
import os
import re
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx_copybutton",
    "sphinxcontrib.rsvgconverter",
    "sphinxcontrib.moderncmakedomain",
    "sphinx.ext.intersphinx",
]
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "nanobind"
copyright = "2023, Wenzel Jakob"
author = "Wenzel Jakob"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Read the version out of include/nanobind/nanobind.h so the documentation
# cannot drift from the single source of truth in the C++ headers.
VERSION_REGEX = re.compile(
    r"^\s*#\s*define\s+NB_VERSION_([A-Z]+)\s+(.*)$", re.MULTILINE)
this_directory = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
with open(os.path.join(this_directory, "include/nanobind/nanobind.h")) as f:
    # Maps 'MAJOR'/'MINOR'/'PATCH' to the respective #define'd values
    matches = dict(VERSION_REGEX.findall(f.read()))
    version = "{MAJOR}.{MINOR}.{PATCH}".format(**matches)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [".build", "release.rst"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<version> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# html_css_files = [ ]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "nanobind_doc"
# -- Options for LaTeX output ---------------------------------------------
latex_engine = "pdflatex"
latex_elements = {
    'papersize': 'a4paper',
    'pointsize': '10pt',
    "classoptions": ",openany,oneside",
    # Declare symbols used in the docs that pdflatex cannot handle natively
    "preamble": r"""
\usepackage{MnSymbol}
\DeclareUnicodeCharacter{25CB}{\ensuremath{\circ}}
\DeclareUnicodeCharacter{25CF}{\ensuremath{\bullet}}
\DeclareUnicodeCharacter{21B5}{\ensuremath{\rhookswarrow}}
\DeclareUnicodeCharacter{2194}{\ensuremath{\leftrightarrow}}
""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "nanobind.tex", "nanobind Documentation", author, "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'nanobind-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# Code snippets without an explicit language directive are highlighted as C++.
primary_domain = "cpp"
highlight_language = "cpp"
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
// Example/benchmark function: returns the sum of five mixed-width integer
// arguments and a float.
float test_0000(uint16_t a, int32_t b, uint32_t c, int64_t d, uint64_t e,
                float f) {
    // Accumulate pairwise; 'auto' keeps the exact mixed-type promotion order
    // of the original single chained expression.
    auto s1 = a + b;
    auto s2 = s1 + c;
    auto s3 = s2 + d;
    auto s4 = s3 + e;
    return s4 + f;
}
// Example/benchmark struct holding five mixed-width integers and a float,
// with a constructor and a method that sums all members (the member-function
// counterpart of test_0000 above).
struct Struct0 {
    uint16_t a;
    int32_t b;
    uint32_t c;
    int64_t d;
    uint64_t e;
    float f;

    Struct0(uint16_t a_, int32_t b_, uint32_t c_, int64_t d_, uint64_t e_,
            float f_)
        : a(a_), b(b_), c(c_), d(d_), e(e_), f(f_) {}

    // Sum of all members (result promoted to float).
    float sum() const { return a + b + c + d + e + f; }
};
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/eigen/dense.h | C/C++ Header | /*
nanobind/eigen/dense.h: type casters for dense Eigen
vectors and matrices
Copyright (c) 2023 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/ndarray.h>
#include <Eigen/Core>
static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 1),
"Eigen matrix support in nanobind requires Eigen >= 3.3.1");
NAMESPACE_BEGIN(NB_NAMESPACE)
/// Function argument types that are compatible with various array flavors.
/// The fully dynamic stride type lets these accept any memory layout.
using DStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename T> using DRef = Eigen::Ref<T, 0, DStride>;
template <typename T> using DMap = Eigen::Map<T, 0, DStride>;
NAMESPACE_BEGIN(detail)
/// Determine the number of dimensions of the given Eigen type (1: vector, 2: matrix)
template <typename T>
constexpr int ndim_v = bool(T::IsVectorAtCompileTime) ? 1 : 2;
/// Extract the compile-time strides of the given Eigen type.
/// Base case: plain matrices/arrays ("stride 0" == contiguous, see below).
template <typename T> struct stride {
    using type = Eigen::Stride<0, 0>;
};
/// Specialization: Eigen::Map carries its stride type as a template parameter
template <typename T, int Options, typename StrideType> struct stride<Eigen::Map<T, Options, StrideType>> {
    using type = StrideType;
};
/// Specialization: Eigen::Ref carries its stride type as a template parameter
template <typename T, int Options, typename StrideType> struct stride<Eigen::Ref<T, Options, StrideType>> {
    using type = StrideType;
};
template <typename T> using stride_t = typename stride<T>::type;
/** \brief Identify types with a contiguous memory representation.
 *
 * This includes all specializations of ``Eigen::Matrix``/``Eigen::Array`` and
 * certain specializations of ``Eigen::Map`` and ``Eigen::Ref``. Note: Eigen
 * interprets a compile-time stride of 0 as contiguous.
 */
template <typename T>
constexpr bool is_contiguous_v =
    (stride_t<T>::InnerStrideAtCompileTime == 0 ||
     stride_t<T>::InnerStrideAtCompileTime == 1) &&
    (ndim_v<T> == 1 || stride_t<T>::OuterStrideAtCompileTime == 0 ||
     (stride_t<T>::OuterStrideAtCompileTime != Eigen::Dynamic &&
      int(stride_t<T>::OuterStrideAtCompileTime) == int(T::InnerSizeAtCompileTime)));
/// Identify types with a static or dynamic layout that support contiguous storage
template <typename T>
constexpr bool can_map_contiguous_memory_v =
    (stride_t<T>::InnerStrideAtCompileTime == 0 ||
     stride_t<T>::InnerStrideAtCompileTime == 1 ||
     stride_t<T>::InnerStrideAtCompileTime == Eigen::Dynamic) &&
    (ndim_v<T> == 1 || stride_t<T>::OuterStrideAtCompileTime == 0 ||
     stride_t<T>::OuterStrideAtCompileTime == Eigen::Dynamic ||
     int(stride_t<T>::OuterStrideAtCompileTime) == int(T::InnerSizeAtCompileTime));
/* This type alias builds the most suitable 'ndarray' for the given Eigen type.
   In particular, it
   - matches the underlying scalar type
   - matches the number of dimensions (i.e. whether the type is a vector/matrix)
   - matches the shape (if the row/column count is known at compile time)
   - matches the in-memory ordering when the Eigen type is contiguous.
   This is helpful because type_caster<ndarray<..>> will then perform the
   necessary conversion steps (if given incompatible input) to enable data
   exchange with Eigen.
   A limitation of this approach is that ndarray does not support compile-time
   strides besides c_contig and f_contig. If an Eigen type requires
   non-contiguous strides (at compile-time) and we are given an ndarray with
   unsuitable strides (at run-time), type casting will fail. Note, however, that
   this is rather unusual, since the default stride type of Eigen::Map requires
   contiguous memory, and the one of Eigen::Ref requires a contiguous inner
   stride, while handling any outer stride.
*/
template <typename T, typename Scalar = typename T::Scalar>
using array_for_eigen_t = ndarray<
    Scalar,
    numpy,
    std::conditional_t<
        ndim_v<T> == 1,
        shape<T::SizeAtCompileTime>,
        shape<T::RowsAtCompileTime,
              T::ColsAtCompileTime>>,
    std::conditional_t<
        is_contiguous_v<T>,
        std::conditional_t<
            ndim_v<T> == 1 || T::IsRowMajor,
            c_contig,
            f_contig>,
        unused>>;
/// Any kind of Eigen class
template <typename T> constexpr bool is_eigen_v = is_base_of_template_v<T, Eigen::EigenBase>;
/// Detects Eigen::Array, Eigen::Matrix, etc.
template <typename T> constexpr bool is_eigen_plain_v = is_base_of_template_v<T, Eigen::PlainObjectBase>;
/// Detect Eigen::SparseMatrix
template <typename T> constexpr bool is_eigen_sparse_v = is_base_of_template_v<T, Eigen::SparseMatrixBase>;
/// Detects expression templates (any Eigen type that is not plain, sparse, or Map-based)
template <typename T> constexpr bool is_eigen_xpr_v =
    is_eigen_v<T> && !is_eigen_plain_v<T> && !is_eigen_sparse_v<T> &&
    !std::is_base_of_v<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>;
/// Type caster for plain, dense Eigen types (Eigen::Matrix / Eigen::Array).
/// Python -> C++ always copies the data into a fresh Eigen object; C++ ->
/// Python copies, moves, or references depending on the rv_policy.
template <typename T>
struct type_caster<T, enable_if_t<is_eigen_plain_v<T> &&
                                  is_ndarray_scalar_v<typename T::Scalar>>> {
    using Scalar = typename T::Scalar;
    using NDArray = array_for_eigen_t<T>;
    using NDArrayCaster = make_caster<NDArray>;
    NB_TYPE_CASTER(T, NDArrayCaster::Name)
    /// Python -> C++: delegate to the ndarray caster, then copy into `value`
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        // We're in any case making a copy, so non-writable inputs are also okay
        using NDArrayConst = array_for_eigen_t<T, const typename T::Scalar>;
        make_caster<NDArrayConst> caster;
        if (!caster.from_python(src, flags & ~(uint8_t)cast_flags::accepts_none, cleanup))
            return false;
        const NDArrayConst &array = caster.value;
        if constexpr (ndim_v<T> == 1)
            value.resize(array.shape(0));
        else
            value.resize(array.shape(0), array.shape(1));
        // The layout is contiguous & compatible thanks to array_for_eigen_t<T>
        memcpy(value.data(), array.data(), array.size() * sizeof(Scalar));
        return true;
    }
    /// C++ -> Python entry point; dereferences pointer arguments before
    /// forwarding to from_cpp_internal()
    template <typename T2>
    static handle from_cpp(T2 &&v, rv_policy policy, cleanup_list *cleanup) noexcept {
        policy = infer_policy<T2>(policy);
        if constexpr (std::is_pointer_v<T2>)
            return from_cpp_internal((const T &) *v, policy, cleanup);
        else
            return from_cpp_internal((const T &) v, policy, cleanup);
    }
    static handle from_cpp_internal(const T &v, rv_policy policy, cleanup_list *cleanup) noexcept {
        // Collect run-time shape/stride information for the ndarray wrapper
        size_t shape[ndim_v<T>];
        int64_t strides[ndim_v<T>];
        if constexpr (ndim_v<T> == 1) {
            shape[0] = v.size();
            strides[0] = v.innerStride();
        } else {
            shape[0] = v.rows();
            shape[1] = v.cols();
            strides[0] = v.rowStride();
            strides[1] = v.colStride();
        }
        void *ptr = (void *) v.data();
        if (policy == rv_policy::move) {
            // Don't bother moving when the data is static or occupies <1KB
            if ((T::SizeAtCompileTime != Eigen::Dynamic ||
                 (size_t) v.size() < (1024 / sizeof(Scalar))))
                policy = rv_policy::copy;
        }
        object owner;
        if (policy == rv_policy::move) {
            // Move into a heap allocation whose lifetime is managed by a capsule
            T *temp = new T((T&&) v);
            owner = capsule(temp, [](void *p) noexcept { delete (T *) p; });
            ptr = temp->data();
            policy = rv_policy::reference;
        } else if (policy == rv_policy::reference_internal && cleanup->self()) {
            // Tie the array's lifetime to the Python 'self' object via `owner`
            owner = borrow(cleanup->self());
            policy = rv_policy::reference;
        }
        object o = steal(NDArrayCaster::from_cpp(
            NDArray(ptr, ndim_v<T>, shape, owner, strides),
            policy, cleanup));
        return o.release();
    }
};
/// Caster for Eigen expression templates: forwards to the caster of the
/// corresponding plain Eigen::Array type (C++ -> Python direction only).
template <typename T>
struct type_caster<T, enable_if_t<is_eigen_xpr_v<T> &&
                                  is_ndarray_scalar_v<typename T::Scalar>>> {
    using Array = Eigen::Array<typename T::Scalar, T::RowsAtCompileTime,
                               T::ColsAtCompileTime>;
    using Caster = make_caster<Array>;
    static constexpr auto Name = Caster::Name;
    template <typename T_> using Cast = T;
    template <typename T_> static constexpr bool can_cast() { return true; }
    /// Generating an expression template from a Python object is, of course, not possible
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete;
    template <typename T2>
    static handle from_cpp(T2 &&v, rv_policy policy, cleanup_list *cleanup) noexcept {
        return Caster::from_cpp(std::forward<T2>(v), policy, cleanup);
    }
};
/** \brief Type caster for ``Eigen::Map<T>``
  The ``Eigen::Map<..>`` type exists to efficiently access memory provided by a
  caller. Given that, the nanobind type caster refuses to turn incompatible
  inputs into a ``Eigen::Map<T>`` when this would require an implicit
  conversion.
*/
template <typename T, int Options, typename StrideType>
struct type_caster<Eigen::Map<T, Options, StrideType>,
                   enable_if_t<is_eigen_plain_v<T> &&
                               is_ndarray_scalar_v<typename T::Scalar>>> {
    using Map = Eigen::Map<T, Options, StrideType>;
    // Propagate const-ness of the mapped type to the ndarray scalar type
    using NDArray =
        array_for_eigen_t<Map, std::conditional_t<std::is_const_v<T>,
                                                  const typename Map::Scalar,
                                                  typename Map::Scalar>>;
    using NDArrayCaster = type_caster<NDArray>;
    static constexpr auto Name = NDArrayCaster::Name;
    template <typename T_> using Cast = Map;
    template <typename T_> static constexpr bool can_cast() { return true; }
    // Underlying ndarray caster; holds the array while the Map is in use
    NDArrayCaster caster;
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        // Disable implicit conversions
        return from_python_(src, flags & ~(uint8_t)cast_flags::convert, cleanup);
    }
    bool from_python_(handle src, uint8_t flags, cleanup_list* cleanup) noexcept {
        if (!caster.from_python(src, flags & ~(uint8_t)cast_flags::accepts_none, cleanup))
            return false;
        // Check for memory layout compatibility of non-contiguous 'Map' types
        if constexpr (!is_contiguous_v<Map>) {
            // Dynamic inner strides support any input, check the fixed case
            if constexpr (StrideType::InnerStrideAtCompileTime != Eigen::Dynamic) {
                // A compile-time stride of 0 implies "contiguous" ..
                int64_t is_expected = StrideType::InnerStrideAtCompileTime == 0
                                          ? 1 /* .. and equals 1 for the inner stride */
                                          : StrideType::InnerStrideAtCompileTime,
                        is_actual = caster.value.stride(
                            (ndim_v<T> != 1 && T::IsRowMajor) ? 1 : 0);
                if (is_expected != is_actual)
                    return false;
            }
            // Analogous check for the outer strides
            if constexpr (ndim_v<T> == 2 && StrideType::OuterStrideAtCompileTime != Eigen::Dynamic) {
                int64_t os_expected = StrideType::OuterStrideAtCompileTime == 0
                                          ? caster.value.shape(T::IsRowMajor ? 1 : 0)
                                          : StrideType::OuterStrideAtCompileTime,
                        os_actual = caster.value.stride(T::IsRowMajor ? 0 : 1);
                if (os_expected != os_actual)
                    return false;
            }
        }
        return true;
    }
    static handle from_cpp(const Map &v, rv_policy policy, cleanup_list *cleanup) noexcept {
        size_t shape[ndim_v<T>];
        int64_t strides[ndim_v<T>];
        if constexpr (ndim_v<T> == 1) {
            shape[0] = v.size();
            strides[0] = v.innerStride();
        } else {
            shape[0] = v.rows();
            shape[1] = v.cols();
            strides[0] = v.rowStride();
            strides[1] = v.colStride();
        }
        // Expose the mapped memory without copying; 'automatic' policies decay
        // to 'reference' since the Map accesses caller-provided memory
        return NDArrayCaster::from_cpp(
            NDArray((void *) v.data(), ndim_v<T>, shape, handle(), strides),
            (policy == rv_policy::automatic ||
             policy == rv_policy::automatic_reference)
                ? rv_policy::reference
                : policy,
            cleanup);
    }
    /// Build a StrideType instance from the ndarray's run-time strides
    StrideType strides() const {
        constexpr int IS = StrideType::InnerStrideAtCompileTime,
                      OS = StrideType::OuterStrideAtCompileTime;
        int64_t inner = caster.value.stride(0),
                outer;
        if constexpr (ndim_v<T> == 1)
            outer = caster.value.shape(0);
        else
            outer = caster.value.stride(1);
        (void) inner; (void) outer;
        if constexpr (ndim_v<T> == 2 && T::IsRowMajor)
            std::swap(inner, outer);
        // Eigen may expect a stride of 0 to avoid an assertion failure
        if constexpr (IS == 0)
            inner = 0;
        // Starting from numpy 2.4, dl_tensors' stride field is *always* set (for ndim > 0).
        // This also includes when shape=(0,0), when numpy reports the stride to be zero.
        // This creates an incompatibility with Eigen compile-time vectors, which expect
        // runtime and compile-time strides to be identical (e.g. for Eigen::VectorXi, equal to 1).
        if (ndim_v<T> == 1 && caster.value.shape(0) == 0)
            inner = IS;
        if constexpr (OS == 0)
            outer = 0;
        if constexpr (std::is_same_v<StrideType, Eigen::InnerStride<IS>>)
            return StrideType(inner);
        else if constexpr (std::is_same_v<StrideType, Eigen::OuterStride<OS>>)
            return StrideType(outer);
        else
            return StrideType(outer, inner);
    }
    /// Construct the Eigen::Map viewing the ndarray's memory
    operator Map() {
        NDArray &t = caster.value;
        if constexpr (ndim_v<T> == 1)
            return Map(t.data(), t.shape(0), strides());
        else
            return Map(t.data(), t.shape(0), t.shape(1), strides());
    }
};
/** \brief Caster for Eigen::Ref<T>

    Compared to the ``Eigen::Map<T>`` type caster above, the reference caster
    accepts a wider set of inputs when it is used in *constant reference* mode
    (i.e., ``Eigen::Ref<const T>``). In this case, it performs stride conversions
    (except for unusual non-contiguous strides) as well as conversions of the
    underlying scalar type (if implicit conversions are enabled).

    For non-constant references, the caster matches that of ``Eigen::Map<T>`` and
    requires an input with the expected layout (so that changes can propagate to
    the caller).
*/
template <typename T, int Options, typename StrideType>
struct type_caster<Eigen::Ref<T, Options, StrideType>,
                   enable_if_t<is_eigen_plain_v<T> &&
                               is_ndarray_scalar_v<typename T::Scalar>>> {
    using Ref = Eigen::Ref<T, Options, StrideType>;

    /// Potentially convert strides/dtype when casting constant references
    static constexpr bool MaybeConvert =
        std::is_const_v<T> &&
        // Restrict to contiguous 'T' (limitation in Eigen, see PR #215)
        can_map_contiguous_memory_v<Ref>;

    /// ndarray type matching ``Ref`` (const scalars when ``T`` is const)
    using NDArray =
        array_for_eigen_t<Ref, std::conditional_t<std::is_const_v<T>,
                                                  const typename Ref::Scalar,
                                                  typename Ref::Scalar>>;

    using NDArrayCaster = type_caster<NDArray>;

    /// Eigen::Map<T> caster with fixed strides
    using Map = Eigen::Map<T, Options, StrideType>;
    using MapCaster = make_caster<Map>;

    // Extended version taking arbitrary strides
    using DMap = Eigen::Map<const T, Options, DStride>;
    using DMapCaster = make_caster<DMap>;

    /**
     * The constructor of ``Ref<const T>`` uses one of two strategies
     * depending on the input. It may either
     *
     * 1. Create a copy ``Ref<const T>::m_object`` (owned by Ref), or
     * 2. Reference the existing input (non-owned).
     *
     * When the value below is ``true``, then it is guaranteed that
     * ``Ref(<DMap instance>)`` owns the underlying data.
     */
    static constexpr bool DMapConstructorOwnsData =
        !Eigen::internal::traits<Ref>::template match<DMap>::type::value;

    static constexpr auto Name =
        const_name<MaybeConvert>(DMapCaster::Name, MapCaster::Name);

    template <typename T_> using Cast = Ref;
    template <typename T_> static constexpr bool can_cast() { return true; }

    MapCaster caster;
    struct Empty { };
    // ``dcaster`` (which may own a converted copy) only exists when MaybeConvert
    std::conditional_t<MaybeConvert, DMapCaster, Empty> dcaster;

    /// Try a zero-copy map first; fall back to a converting copy if permitted
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        // Try a direct cast without implicit conversion first
        if (caster.from_python(src, flags, cleanup))
            return true;

        // Potentially convert strides/dtype when casting constant references
        if constexpr (MaybeConvert) {
            /* Generating an implicit copy requires some object to assume
               ownership. During a function call, ``dcaster`` can serve that
               role (this case is detected by checking whether ``flags`` has
               the ``manual`` flag set). When used in other situations (e.g.
               ``nb::cast()``), the created ``Eigen::Ref<..>`` must take
               ownership of the copy. This is only guaranteed to work if
               DMapConstructorOwnsData.

               If neither of these is possible, we disable implicit
               conversions. */
            if ((flags & (uint8_t) cast_flags::manual) &&
                !DMapConstructorOwnsData)
                flags &= ~(uint8_t) cast_flags::convert;

            if (dcaster.from_python_(src, flags, cleanup))
                return true;
        }

        return false;
    }

    /// Expose the referenced memory to Python without copying
    static handle from_cpp(const Ref &v, rv_policy policy, cleanup_list *cleanup) noexcept {
        // Copied from the Eigen::Map caster
        size_t shape[ndim_v<T>];
        int64_t strides[ndim_v<T>];

        if constexpr (ndim_v<T> == 1) {
            shape[0] = v.size();
            strides[0] = v.innerStride();
        } else {
            shape[0] = v.rows();
            shape[1] = v.cols();
            strides[0] = v.rowStride();
            strides[1] = v.colStride();
        }

        return NDArrayCaster::from_cpp(
            NDArray((void *) v.data(), ndim_v<T>, shape, handle(), strides),
            (policy == rv_policy::automatic ||
             policy == rv_policy::automatic_reference)
                ? rv_policy::reference
                : policy,
            cleanup);
    }

    /// Produce the ``Eigen::Ref``; prefer the converted copy when one was made
    operator Ref() {
        if constexpr (MaybeConvert) {
            if (dcaster.caster.value.is_valid())
                return Ref(dcaster.operator DMap());
        }

        return Ref(caster.operator Map());
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/eigen/sparse.h | C/C++ Header | /*
nanobind/eigen/sparse.h: type casters for sparse Eigen matrices
Copyright (c) 2023 Henri Menke and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/ndarray.h>
#include <nanobind/eigen/dense.h>
#include <Eigen/SparseCore>
#include <memory>
#include <type_traits>
#include <utility>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Detect Eigen::SparseMatrix proper (excludes sparse map/view types, which
/// derive from ``SparseMapBase``)
template <typename T> constexpr bool is_eigen_sparse_matrix_v =
    is_eigen_sparse_v<T> &&
    !std::is_base_of_v<Eigen::SparseMapBase<T, Eigen::ReadOnlyAccessors>, T>;
/// Caster for Eigen::SparseMatrix <-> scipy.sparse csr_matrix/csc_matrix
template <typename T> struct type_caster<T, enable_if_t<is_eigen_sparse_matrix_v<T>>> {
    using Scalar = typename T::Scalar;
    using StorageIndex = typename T::StorageIndex;
    using Index = typename T::Index;
    using SparseMap = Eigen::Map<T>;

    static_assert(std::is_same_v<T, Eigen::SparseMatrix<Scalar, T::Options, StorageIndex>>,
                  "nanobind: Eigen sparse caster only implemented for matrices");

    static constexpr bool RowMajor = T::IsRowMajor;

    // 1-D ndarray views of the three CSR/CSC buffers (data, indices, indptr)
    using ScalarNDArray = ndarray<numpy, Scalar, shape<-1>>;
    using StorageIndexNDArray = ndarray<numpy, StorageIndex, shape<-1>>;

    using ScalarCaster = make_caster<ScalarNDArray>;
    using StorageIndexCaster = make_caster<StorageIndexNDArray>;

    NB_TYPE_CASTER(T, const_name<RowMajor>("scipy.sparse.csr_matrix[",
                                           "scipy.sparse.csc_matrix[")
                          + make_caster<Scalar>::Name + const_name("]"))

    ScalarCaster data_caster;
    StorageIndexCaster indices_caster, indptr_caster;

    /// Convert a scipy.sparse matrix (coercing to csr/csc as needed) into an
    /// ``Eigen::SparseMatrix`` by copying from the mapped buffers.
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        object obj = borrow(src);
        try {
            object matrix_type =
                module_::import_("scipy.sparse")
                    .attr(RowMajor ? "csr_matrix" : "csc_matrix");
            // Convert any other scipy.sparse format to the one Eigen expects
            if (!obj.type().is(matrix_type))
                obj = matrix_type(obj);

            // Eigen requires sorted inner indices within each outer slot
            if (!cast<bool>(obj.attr("has_sorted_indices")))
                obj.attr("sort_indices")();

            if (object data_o = obj.attr("data");
                !data_caster.from_python(data_o, flags, cleanup))
                return false;

            if (object indices_o = obj.attr("indices");
                !indices_caster.from_python(indices_o, flags, cleanup))
                return false;

            if (object indptr_o = obj.attr("indptr");
                !indptr_caster.from_python(indptr_o, flags, cleanup))
                return false;

            object shape_o = obj.attr("shape");
            if (len(shape_o) != 2)
                return false;

            Index rows = cast<Index>(shape_o[0]),
                  cols = cast<Index>(shape_o[1]),
                  nnz = cast<Index>(obj.attr("nnz"));

            // Assigning from the map copies the Python buffers into ``value``
            value = SparseMap(rows, cols, nnz,
                              indptr_caster.value.data(),
                              indices_caster.value.data(),
                              data_caster.value.data());

            return true;
        } catch (const python_error &) {
            return false;
        }
    }

    /// Rvalue overload: upgrade 'automatic' policies to a move
    static handle from_cpp(T &&v, rv_policy policy, cleanup_list *cleanup) noexcept {
        if (policy == rv_policy::automatic ||
            policy == rv_policy::automatic_reference)
            policy = rv_policy::move;

        return from_cpp((const T &) v, policy, cleanup);
    }

    /// Generic overload handling both pointer and reference arguments
    template <typename T2>
    static handle from_cpp(T2 &&v, rv_policy policy, cleanup_list *cleanup) noexcept {
        policy = infer_policy<T2>(policy);
        if constexpr (std::is_pointer_v<T2>)
            return from_cpp_internal((const T &) *v, policy, cleanup);
        else
            return from_cpp_internal((const T &) v, policy, cleanup);
    }

    /// Wrap the matrix's buffers as ndarrays and hand them to scipy.sparse.
    /// For rv_policy::move, a heap copy owned by a capsule keeps them alive.
    static handle from_cpp_internal(const T &v, rv_policy policy, cleanup_list *) noexcept {
        if (!v.isCompressed()) {
            PyErr_SetString(PyExc_ValueError,
                            "nanobind: unable to return an Eigen sparse matrix that is not in a compressed format. "
                            "Please call `.makeCompressed()` before returning the value on the C++ end.");
            return handle();
        }

        object matrix_type;
        try {
            matrix_type = module_::import_("scipy.sparse").attr(RowMajor ? "csr_matrix" : "csc_matrix");
        } catch (python_error &e) {
            e.restore();
            return handle();
        }

        const Index rows = v.rows(), cols = v.cols();
        const size_t data_shape[] = { (size_t) v.nonZeros() };
        const size_t outer_indices_shape[] = { (size_t) ((RowMajor ? rows : cols) + 1) };

        T *src = std::addressof(const_cast<T &>(v));
        object owner;
        if (policy == rv_policy::move) {
            // NOTE(review): ``v`` is a const lvalue here, so ``std::move(v)``
            // binds to the copy constructor rather than the move constructor;
            // confirm whether a move via const_cast was intended.
            src = new T(std::move(v));
            owner = capsule(src, [](void *p) noexcept { delete (T *) p; });
        }

        ScalarNDArray data(src->valuePtr(), 1, data_shape, owner);
        StorageIndexNDArray outer_indices(src->outerIndexPtr(), 1, outer_indices_shape, owner);
        StorageIndexNDArray inner_indices(src->innerIndexPtr(), 1, data_shape, owner);

        try {
            return matrix_type(nanobind::make_tuple(
                                   std::move(data), std::move(inner_indices), std::move(outer_indices)),
                               nanobind::make_tuple(rows, cols))
                .release();
        } catch (python_error &e) {
            e.restore();
            return handle();
        }
    }
};
/// Caster for Eigen::Map<Eigen::SparseMatrix>, still needs to be implemented.
template <typename T>
struct type_caster<Eigen::Map<T>, enable_if_t<is_eigen_sparse_matrix_v<T>>> {
using Scalar = typename T::Scalar;
using StorageIndex = typename T::StorageIndex;
using Index = typename T::Index;
using SparseMap = Eigen::Map<T>;
using Map = Eigen::Map<T>;
using SparseMatrixCaster = type_caster<T>;
static constexpr bool RowMajor = T::IsRowMajor;
using ScalarNDArray = ndarray<numpy, Scalar, shape<-1>>;
using StorageIndexNDArray = ndarray<numpy, StorageIndex, shape<-1>>;
using ScalarCaster = make_caster<ScalarNDArray>;
using StorageIndexCaster = make_caster<StorageIndexNDArray>;
static constexpr auto Name = const_name<RowMajor>("scipy.sparse.csr_matrix[",
"scipy.sparse.csc_matrix[")
+ make_caster<Scalar>::Name + const_name("]");
template <typename T_> using Cast = Map;
template <typename T_> static constexpr bool can_cast() { return true; }
ScalarCaster data_caster;
StorageIndexCaster indices_caster, indptr_caster;
Index rows, cols, nnz;
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
flags = ~(uint8_t) cast_flags::convert;
try {
object matrix_type =
module_::import_("scipy.sparse")
.attr(RowMajor ? "csr_matrix" : "csc_matrix");
if (!src.type().is(matrix_type))
return false;
if (!cast<bool>(src.attr("has_sorted_indices")))
src.attr("sort_indices")();
if (object data_o = src.attr("data");
!data_caster.from_python(data_o, flags, cleanup))
return false;
if (object indices_o = src.attr("indices");
!indices_caster.from_python(indices_o, flags, cleanup))
return false;
if (object indptr_o = src.attr("indptr");
!indptr_caster.from_python(indptr_o, flags, cleanup))
return false;
object shape_o = src.attr("shape");
if (len(shape_o) != 2)
return false;
rows = cast<Index>(shape_o[0]);
cols = cast<Index>(shape_o[1]);
nnz = cast<Index>(src.attr("nnz"));
} catch (const python_error &) {
return false;
}
return true;
}
static handle from_cpp(const Map &v, rv_policy, cleanup_list *) noexcept {
if (!v.isCompressed()) {
PyErr_SetString(
PyExc_ValueError,
"nanobind: unable to return an Eigen sparse matrix that is not "
"in a compressed format. Please call `.makeCompressed()` "
"before returning the value on the C++ end.");
return handle();
}
object matrix_type;
try {
matrix_type = module_::import_("scipy.sparse")
.attr(RowMajor ? "csr_matrix" : "csc_matrix");
const Index rows = v.rows(), cols = v.cols();
const size_t data_shape[] = { (size_t) v.nonZeros() };
const size_t outer_indices_shape[] = {
(size_t) ((RowMajor ? rows : cols) + 1)
};
ScalarNDArray data((void *) v.valuePtr(), 1, data_shape);
StorageIndexNDArray
outer_indices((void *) v.outerIndexPtr(), 1, outer_indices_shape),
inner_indices((void *) v.innerIndexPtr(), 1, data_shape);
return matrix_type(nanobind::make_tuple(
cast(data, rv_policy::reference),
cast(inner_indices, rv_policy::reference),
cast(outer_indices, rv_policy::reference)),
nanobind::make_tuple(rows, cols))
.release();
} catch (python_error &e) {
e.restore();
return handle();
}
};
operator Map() {
return SparseMap(rows, cols, nnz,
indptr_caster.value.data(),
indices_caster.value.data(),
data_caster.value.data());
}
};
/// Caster for Eigen::Ref<Eigen::SparseMatrix>, still needs to be implemented
/// (both conversion directions are deliberately declared as deleted so that
/// accidental use fails at compile time rather than at run time)
template <typename T, int Options>
struct type_caster<Eigen::Ref<T, Options>, enable_if_t<is_eigen_sparse_matrix_v<T>>> {
    using Ref = Eigen::Ref<T, Options>;
    using Map = Eigen::Map<T, Options>;
    using MapCaster = make_caster<Map>;
    static constexpr auto Name = MapCaster::Name;
    template <typename T_> using Cast = Ref;
    template <typename T_> static constexpr bool can_cast() { return true; }

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete;

    static handle from_cpp(const Ref &v, rv_policy policy, cleanup_list *cleanup) noexcept = delete;
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/eval.h | C/C++ Header | /*
nanobind/eval.h: Support for evaluating Python expressions and
statements from strings
Adapted by Nico Schlömer from pybind11's eval.h.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
/// Compilation mode passed to ``Py_CompileString`` (CPython start tokens)
enum eval_mode {
    // Evaluate a string containing an isolated expression
    eval_expr = Py_eval_input,

    // Evaluate a string containing a single statement. Returns \c none
    eval_single_statement = Py_single_input,

    // Evaluate a string containing a sequence of statements. Returns \c none
    eval_statements = Py_file_input
};
/// Compile and evaluate ``expr`` in the given global/local scopes. When no
/// local scope is provided, the global scope doubles as the local one.
/// Raises the active Python exception (as a C++ exception) on failure.
template <eval_mode start = eval_expr>
object eval(const str &expr, handle global = handle(), handle local = handle()) {
    if (!local.is_valid())
        local = global;

    /* Compile first, then evaluate: PyRun_String would do both at once,
       but it is not part of the stable ABI. */
    object code = steal(Py_CompileString(expr.c_str(), "<string>", start));
    if (!code.is_valid())
        raise_python_error();

    object result = steal(PyEval_EvalCode(code.ptr(), global.ptr(), local.ptr()));
    if (!result.is_valid())
        raise_python_error();

    return result;
}
/// Overload for string literals. A literal that starts with a newline is
/// treated as a raw string: its common leading whitespace is stripped via
/// Python's ``textwrap.dedent`` before evaluation.
template <eval_mode start = eval_expr, size_t N>
object eval(const char (&s)[N], handle global = handle(), handle local = handle()) {
    if (s[0] == '\n') {
        object dedented = module_::import_("textwrap").attr("dedent")(s);
        return eval<start>(str(dedented), global, local);
    }
    return eval<start>(str(s), global, local);
}
/// Execute a sequence of Python statements; the result is discarded
inline void exec(const str &expr, handle global = handle(), handle local = handle()) {
    eval<eval_statements>(expr, global, local);
}
/// Literal overload of exec(); raw strings are dedented (see eval() above)
template <size_t N>
void exec(const char (&s)[N], handle global = handle(), handle local = handle()) {
    eval<eval_statements>(s, global, local);
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/intrusive/counter.h | C/C++ Header | /*
nanobind/intrusive/counter.h: Intrusive reference counting sample
implementation.
Intrusive reference counting is a simple solution for various lifetime and
ownership-related issues that can arise in Python bindings of C++ code. The
implementation here represents one of many ways in which intrusive
reference counting can be realized and is included for convenience.
The code in this file is designed to be truly minimal: it depends neither
on Python, nanobind, nor the STL. This enables its use in small projects
with a 100% optional Python interface.
    Two sections of nanobind's documentation discuss intrusive reference
counting in general:
- https://nanobind.readthedocs.io/en/latest/ownership.html
- https://nanobind.readthedocs.io/en/latest/ownership_adv.html
Comments below are specific to this sample implementation.
Copyright (c) 2023 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <cstdint>
// Override this definition to specify DLL export/import declarations
#if !defined(NB_INTRUSIVE_EXPORT)
# define NB_INTRUSIVE_EXPORT
#endif
#if !defined(Py_PYTHON_H)
/* While the implementation below does not directly depend on Python, the
PyObject type occurs in a few function interfaces (in a fully opaque
manner). The lines below forward-declare it. */
extern "C" {
struct _object;
typedef _object PyObject;
};
#endif
#if !defined(NAMESPACE_BEGIN)
# define NAMESPACE_BEGIN(name) namespace name {
#endif
#if !defined(NAMESPACE_END)
# define NAMESPACE_END(name) }
#endif
NAMESPACE_BEGIN(nanobind)
/** \brief Simple intrusive reference counter.
*
* Intrusive reference counting is a simple solution for various lifetime and
* ownership-related issues that can arise in Python bindings of C++ code. The
* implementation here represents one of many ways in which intrusive reference
* counting can be realized and is included for convenience.
*
* The ``intrusive_counter`` class represents an atomic counter that can be
* increased (via ``inc_ref()``) or decreased (via ``dec_ref()``). When the
* counter reaches zero, the object should be deleted, which ``dec_ref()``
* indicates by returning ``true``.
*
* In addition to this simple counting mechanism, ownership of the object can
* also be transferred to Python (via ``set_self_py()``). In this case,
* subsequent calls to ``inc_ref()`` and ``dec_ref()`` modify the reference
* count of the underlying Python object. The ``intrusive_counter`` class
* supports both cases using only ``sizeof(void*)`` bytes of storage.
*
* To incorporate intrusive reference counting into your own project, you would
* usually add an ``intrusive_counter``-typed member to the base class of an
* object hierarchy and expose it as follows:
*
* ```cpp
* #include <nanobind/intrusive/counter.h>
*
* class Object {
* public:
* void inc_ref() noexcept { m_ref_count.inc_ref(); }
* bool dec_ref() noexcept { return m_ref_count.dec_ref(); }
*
* // Important: must declare virtual destructor
* virtual ~Object() = default;
*
* void set_self_py(PyObject *self) noexcept {
* m_ref_count.set_self_py(self);
* }
*
* private:
* nb::intrusive_counter m_ref_count;
* };
*
* // Convenience function for increasing the reference count of an instance
* inline void inc_ref(Object *o) noexcept {
* if (o)
* o->inc_ref();
* }
*
* // Convenience function for decreasing the reference count of an instance
* // and potentially deleting it when the count reaches zero
* inline void dec_ref(Object *o) noexcept {
* if (o && o->dec_ref())
* delete o;
* }
* ```
*
* Alternatively, you could also inherit from ``intrusive_base``, which obviates
* the need for all of the above declarations:
*
* ```cpp
* class Object : public intrusive_base {
* public:
* // ...
* };
* ```
*
* When binding the base class in Python, you must indicate to nanobind that
* this type uses intrusive reference counting and expose the ``set_self_py``
* member. This must only be done once, as the attribute is automatically
* inherited by subclasses.
*
* ```cpp
* nb::class_<Object>(
* m, "Object",
* nb::intrusive_ptr<Object>(
* [](Object *o, PyObject *po) noexcept { o->set_self_py(po); }));
* ```
*
* Also, somewhere in your binding initialization code, you must call
*
* ```cpp
* nb::intrusive_init(
* [](PyObject *o) noexcept {
* nb::gil_scoped_acquire guard;
* Py_INCREF(o);
* },
* [](PyObject *o) noexcept {
* nb::gil_scoped_acquire guard;
* Py_DECREF(o);
* });
* ```
*
* For this all to compile, a single one of your .cpp files must include this
* header file from somewhere as follows:
*
* ```cpp
* #include <nanobind/intrusive/counter.inl>
* ```
*
* Calling the ``inc_ref()`` and ``dec_ref()`` members many times throughout
* the code can quickly become tedious. Nanobind also ships with a ``ref<T>``
* RAII helper class to help with this.
*
* ```cpp
* #include <nanobind/intrusive/ref.h>
*
* {
 *    ref<MyObject> x = new MyObject(); // <-- assignment to ref<..> automatically calls inc_ref()
* x->func(); // ref<..> can be used like a normal pointer
* } // <-- Destruction of ref<..> calls dec_ref(), deleting the instance in this example.
* ```
*
* When the file ``nanobind/intrusive/ref.h`` is included following
* ``nanobind/nanobind.h``, it also exposes a custom type caster to bind
* functions taking or returning ``ref<T>``-typed values.
*/
struct NB_INTRUSIVE_EXPORT intrusive_counter {
public:
    intrusive_counter() noexcept = default;

    // The counter value is not affected by copy/move assignment/construction
    intrusive_counter(const intrusive_counter &) noexcept { }
    intrusive_counter(intrusive_counter &&) noexcept { }
    intrusive_counter &operator=(const intrusive_counter &) noexcept { return *this; }
    intrusive_counter &operator=(intrusive_counter &&) noexcept { return *this; }

    // The four member functions below are implemented out-of-line; one
    // translation unit must include ``nanobind/intrusive/counter.inl``
    // (see the class documentation above).

    /// Increase the object's reference count
    void inc_ref() const noexcept;

    /// Decrease the object's reference count, return ``true`` if it should be deallocated
    bool dec_ref() const noexcept;

    /// Return the Python object associated with this instance (or NULL)
    PyObject *self_py() const noexcept;

    /// Set the Python object associated with this instance
    void set_self_py(PyObject *self) noexcept;

protected:
    /**
     * \brief Mutable counter. Note that the value ``1`` actually encodes
     * a zero reference count (see the file ``counter.inl`` for details).
     */
    mutable uintptr_t m_state = 1;
};

static_assert(
    sizeof(intrusive_counter) == sizeof(void *),
    "The intrusive_counter class should always have the same size as a pointer.");
/// Reference-counted base type of an object hierarchy. All member functions
/// simply forward to the embedded ``intrusive_counter``.
class NB_INTRUSIVE_EXPORT intrusive_base {
public:
    /// Increase the object's reference count
    void inc_ref() const noexcept { m_ref_count.inc_ref(); }

    /// Decrease the object's reference count, return ``true`` if it should be deallocated
    bool dec_ref() const noexcept { return m_ref_count.dec_ref(); }

    /// Set the Python object associated with this instance
    void set_self_py(PyObject *self) noexcept { m_ref_count.set_self_py(self); }

    /// Return the Python object associated with this instance (or NULL)
    PyObject *self_py() const noexcept { return m_ref_count.self_py(); }

    /// Virtual destructor (required so deletion through a base pointer works)
    virtual ~intrusive_base() = default;

private:
    mutable intrusive_counter m_ref_count;
};
/**
 * \brief Increase the reference count of an intrusively reference-counted
 * object ``o`` if ``o`` is non-NULL.
 */
inline void inc_ref(const intrusive_base *o) noexcept {
    if (!o)
        return;
    o->inc_ref();
}
/**
 * \brief Decrease the reference count and potentially delete an intrusively
 * reference-counted object ``o`` if ``o`` is non-NULL.
 */
inline void dec_ref(const intrusive_base *o) noexcept {
    if (!o)
        return;
    if (o->dec_ref())
        delete o;
}
/**
* \brief Install Python reference counting handlers
*
* The ``intrusive_counter`` class is designed so that the dependency on Python is
* *optional*: the code compiles in ordinary C++ projects, in which case the
* Python reference counting functionality will simply not be used.
*
* Python binding code must invoke ``intrusive_init`` once to supply two
* functions that increase and decrease the reference count of a Python object,
* while ensuring that the GIL is held.
*/
extern NB_INTRUSIVE_EXPORT
void intrusive_init(void (*intrusive_inc_ref_py)(PyObject *) noexcept,
void (*intrusive_dec_ref_py)(PyObject *) noexcept);
NAMESPACE_END(nanobind)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/intrusive/ref.h | C/C++ Header | /*
nanobind/intrusive/ref.h: This file defines the ``ref<T>`` RAII scoped
reference counting helper class.
When included following ``nanobind/nanobind.h``, the code below also
exposes a custom type caster to bind functions taking or returning
``ref<T>``-typed values.
Copyright (c) 2023 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "counter.h"
NAMESPACE_BEGIN(nanobind)
/**
 * \brief RAII scoped reference counting helper class
 *
 * ``ref`` is a simple RAII wrapper class that encapsulates a pointer to an
 * instance with intrusive reference counting.
 *
 * It takes care of increasing and decreasing the reference count as needed and
 * deleting the instance when the count reaches zero.
 *
 * For this to work, compatible functions ``inc_ref()`` and ``dec_ref()`` must
 * be defined before including this file. Default implementations for
 * subclasses of the type ``intrusive_base`` are already provided as part of the
 * file ``counter.h``.
 */
template <typename T> class ref {
public:
    /// Create a null reference
    ref() = default;

    /// Construct a reference from a pointer
    ref(T *ptr) : m_ptr(ptr) { inc_ref((intrusive_base *) m_ptr); }

    /// Copy a reference, increases the reference count
    ref(const ref &r) : m_ptr(r.m_ptr) { inc_ref((intrusive_base *) m_ptr); }

    /// Move a reference without changing the reference count
    ref(ref &&r) noexcept : m_ptr(r.m_ptr) { r.m_ptr = nullptr; }

    /// Destroy this reference
    ~ref() { dec_ref((intrusive_base *) m_ptr); }

    /// Move-assign another reference into this one
    ref &operator=(ref &&r) noexcept {
        dec_ref((intrusive_base *) m_ptr);
        m_ptr = r.m_ptr;
        r.m_ptr = nullptr;
        return *this;
    }

    /// Copy-assign another reference into this one
    /// (increasing before decreasing makes self-assignment safe)
    ref &operator=(const ref &r) {
        inc_ref((intrusive_base *) r.m_ptr);
        dec_ref((intrusive_base *) m_ptr);
        m_ptr = r.m_ptr;
        return *this;
    }

    /// Overwrite this reference with a pointer to another object
    /// (increasing before decreasing makes self-assignment safe)
    ref &operator=(T *ptr) {
        inc_ref((intrusive_base *) ptr);
        dec_ref((intrusive_base *) m_ptr);
        m_ptr = ptr;
        return *this;
    }

    /// Clear the currently stored reference
    void reset() {
        dec_ref((intrusive_base *) m_ptr);
        m_ptr = nullptr;
    }

    /// Compare this reference with another reference
    bool operator==(const ref &r) const { return m_ptr == r.m_ptr; }

    /// Compare this reference with another reference
    bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; }

    /// Compare this reference with a pointer
    bool operator==(const T *ptr) const { return m_ptr == ptr; }

    /// Compare this reference with a pointer
    bool operator!=(const T *ptr) const { return m_ptr != ptr; }

    /// Access the object referenced by this reference
    T *operator->() { return m_ptr; }

    /// Access the object referenced by this reference
    const T *operator->() const { return m_ptr; }

    /// Return a C++ reference to the referenced object
    T &operator*() { return *m_ptr; }

    /// Return a const C++ reference to the referenced object
    const T &operator*() const { return *m_ptr; }

    /// Return a pointer to the referenced object
    operator T *() { return m_ptr; }

    /// Return a const pointer to the referenced object
    operator const T *() const { return m_ptr; }

    /// Return a pointer to the referenced object
    T *get() { return m_ptr; }

    /// Return a const pointer to the referenced object
    const T *get() const { return m_ptr; }

private:
    T *m_ptr = nullptr;
};
// Register a type caster for ``ref<T>`` if nanobind was previously #included
#if defined(NB_VERSION_MAJOR)
NAMESPACE_BEGIN(detail)
/// Type caster that lets bound functions take/return ``ref<T>`` values
template <typename T> struct type_caster<nanobind::ref<T>> {
    using Caster = make_caster<T>;
    static constexpr bool IsClass = true;
    NB_TYPE_CASTER(ref<T>, Caster::Name)

    /// Cast the Python object to ``T *`` and wrap it in a ``ref<T>``
    /// (the ref constructor increases the intrusive reference count)
    bool from_python(handle src, uint8_t flags,
                     cleanup_list *cleanup) noexcept {
        Caster caster;
        if (!caster.from_python(src, flags, cleanup))
            return false;

        value = Value(caster.operator T *());
        return true;
    }

    /// Convert a ``ref<T>`` to Python. If the instance already has an
    /// associated Python object (``self_py()``), return that object directly
    /// instead of creating a new wrapper.
    static handle from_cpp(const ref<T> &value, rv_policy policy,
                           cleanup_list *cleanup) noexcept {
        if constexpr (std::is_base_of_v<intrusive_base, T>)
            if (policy != rv_policy::copy && policy != rv_policy::move && value.get())
                if (PyObject* obj = value->self_py())
                    return handle(obj).inc_ref();

        return Caster::from_cpp(value.get(), policy, cleanup);
    }
};
NAMESPACE_END(detail)
#endif
NAMESPACE_END(nanobind)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/make_iterator.h | C/C++ Header | /*
nanobind/make_iterator.h: nb::make_[key,value_]iterator()
This implementation is a port from pybind11 with minimal adjustments.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <nanobind/stl/pair.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/* There are a large number of apparently unused template arguments because
   each combination requires a separate nb::class_ registration. */
template <typename Access, rv_policy Policy, typename Iterator,
          typename Sentinel, typename ValueType, typename... Extra>
struct iterator_state {
    Iterator it;         // current position
    Sentinel end;        // past-the-end sentinel
    bool first_or_done;  // true before the first increment and after exhaustion
};
// Strip rvalue references: maps T&& -> T, leaves all other types unchanged
template <typename T>
struct remove_rvalue_ref { using type = T; };
template <typename T>
struct remove_rvalue_ref<T&&> { using type = T; };
// Note: these helpers take the iterator by non-const reference because some
// iterators in the wild can't be dereferenced when const.

/// Accessor yielding the dereferenced element itself
template <typename Iterator> struct iterator_access {
    using result_type = decltype(*std::declval<Iterator &>());
    result_type operator()(Iterator &it) const { return *it; }
};
/// Accessor yielding the ``.first`` member of a pair-valued iterator
template <typename Iterator> struct iterator_key_access {
    // Note double parens in decltype((...)) to capture the value category
    // as well. This will be lvalue if the iterator's operator* returned an
    // lvalue reference, and xvalue if the iterator's operator* returned an
    // object (or rvalue reference but that's unlikely). decltype of an xvalue
    // produces T&&, but we want to return a value T from operator() in that
    // case, in order to avoid creating a Python object that references a
    // C++ temporary. Thus, pass the result through remove_rvalue_ref.
    using result_type = typename remove_rvalue_ref<
        decltype(((*std::declval<Iterator &>()).first))>::type;
    result_type operator()(Iterator &it) const { return (*it).first; }
};
/// Accessor yielding the ``.second`` member of a pair-valued iterator
/// (same value-category handling as iterator_key_access above)
template <typename Iterator> struct iterator_value_access {
    using result_type = typename remove_rvalue_ref<
        decltype(((*std::declval<Iterator &>()).second))>::type;
    result_type operator()(Iterator &it) const { return (*it).second; }
};
/// Shared implementation behind make_iterator / make_key_iterator /
/// make_value_iterator: registers (once) a Python type wrapping the C++
/// iterator state and returns an instance of it.
template <typename Access, rv_policy Policy, typename Iterator,
          typename Sentinel, typename ValueType, typename... Extra>
typed<iterator, ValueType> make_iterator_impl(handle scope, const char *name,
                                              Iterator first, Sentinel last,
                                              Extra &&...extra) {
    using State = iterator_state<Access, Policy, Iterator, Sentinel, ValueType, Extra...>;

    static_assert(
        !detail::is_base_caster_v<detail::make_caster<ValueType>> ||
        detail::is_copy_constructible_v<ValueType> ||
        (Policy != rv_policy::automatic_reference &&
         Policy != rv_policy::copy),
        "make_iterator_impl(): the generated __next__ would copy elements, so the "
        "element type must be copy-constructible");

    {
        // Register the State binding at most once; the mutex guards the
        // check-and-register sequence (relevant for free-threaded builds)
        static ft_mutex mu;
        ft_lock_guard lock(mu);
        if (!type<State>().is_valid()) {
            class_<State>(scope, name)
                .def("__iter__", [](handle h) { return h; })
                .def("__next__",
                     [](State &s) -> ValueType {
                         // Advance on every call except the very first; the
                         // 'first_or_done' flag doubles as the exhaustion marker
                         if (!s.first_or_done)
                             ++s.it;
                         else
                             s.first_or_done = false;

                         if (s.it == s.end) {
                             s.first_or_done = true;
                             throw stop_iteration();
                         }

                         return Access()(s.it);
                     },
                     std::forward<Extra>(extra)...,
                     Policy);
        }
    }

    return borrow<typed<iterator, ValueType>>(cast(State{
        std::forward<Iterator>(first), std::forward<Sentinel>(last), true }));
}
NAMESPACE_END(detail)
/// Makes a python iterator from a first and past-the-end C++ InputIterator.
/// The trailing SFINAE parameter requires that ``Iterator == Sentinel`` is a
/// valid expression, disambiguating this overload from the container variant.
template <rv_policy Policy = rv_policy::automatic_reference,
          typename Iterator,
          typename Sentinel,
          typename ValueType = typename detail::iterator_access<Iterator>::result_type,
          typename... Extra,
          typename = decltype(std::declval<Iterator>() == std::declval<Sentinel>())>
auto make_iterator(handle scope, const char *name, Iterator first, Sentinel last, Extra &&...extra) {
    return detail::make_iterator_impl<detail::iterator_access<Iterator>, Policy,
                                      Iterator, Sentinel, ValueType, Extra...>(
        scope, name, std::forward<Iterator>(first),
        std::forward<Sentinel>(last), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the keys (`.first`) of an iterator over pairs from a
/// first and past-the-end InputIterator.
/// Variant of make_iterator() that yields only the ``.first`` member of each
/// dereferenced pair (see the comment above, which documents the overload).
template <rv_policy Policy = rv_policy::automatic_reference, typename Iterator,
          typename Sentinel,
          typename KeyType =
              typename detail::iterator_key_access<Iterator>::result_type,
          typename... Extra>
auto make_key_iterator(handle scope, const char *name, Iterator first,
                       Sentinel last, Extra &&...extra) {
    return detail::make_iterator_impl<detail::iterator_key_access<Iterator>,
                                      Policy, Iterator, Sentinel, KeyType,
                                      Extra...>(
        scope, name, std::forward<Iterator>(first),
        std::forward<Sentinel>(last), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the values (`.second`) of an iterator over pairs from a
/// first and past-the-end InputIterator.
/// Variant of make_iterator() that yields only the ``.second`` member of each
/// dereferenced pair (see the comment above, which documents the overload).
template <rv_policy Policy = rv_policy::automatic_reference,
          typename Iterator,
          typename Sentinel,
          typename ValueType = typename detail::iterator_value_access<Iterator>::result_type,
          typename... Extra>
auto make_value_iterator(handle scope, const char *name, Iterator first, Sentinel last, Extra &&...extra) {
    return detail::make_iterator_impl<detail::iterator_value_access<Iterator>,
                                      Policy, Iterator, Sentinel, ValueType,
                                      Extra...>(
        scope, name, std::forward<Iterator>(first),
        std::forward<Sentinel>(last), std::forward<Extra>(extra)...);
}
/// Makes an iterator over values of a container supporting `std::begin()`/`std::end()`
template <rv_policy Policy = rv_policy::automatic_reference, typename Type,
          typename... Extra,
          typename = decltype(std::begin(std::declval<Type&>()))>
auto make_iterator(handle scope, const char *name, Type &value,
                   Extra &&...extra) {
    // Forward to the iterator-pair overload above.
    return make_iterator<Policy>(scope, name, std::begin(value),
                                 std::end(value), std::forward<Extra>(extra)...);
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nanobind.h | C/C++ Header | /*
nanobind/nanobind.h: Main include file for core nanobind components
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#if __cplusplus < 201703L && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L)
# error The nanobind library requires C++17!
#endif
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable: 4702) // unreachable code (e.g. when binding a noreturn function)
// The next two lines disable warnings that are "just noise" according to Stephan T. Lavavej (a MSFT STL maintainer)
# pragma warning(disable: 4275) // non dll-interface class 'std::exception' used as base for dll-interface class [..]
# pragma warning(disable: 4251) // [..] needs to have a dll-interface to be used by clients of class [..]
#endif
#define NB_VERSION_MAJOR 2
#define NB_VERSION_MINOR 11
#define NB_VERSION_PATCH 1
#define NB_VERSION_DEV 1 // A value > 0 indicates a development release
// Core C++ headers that nanobind depends on
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <exception>
#include <stdexcept>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include <new>
// Implementation. The nb_*.h files should only be included through nanobind.h
// IWYU pragma: begin_exports
#include "nb_python.h"
#include "nb_defs.h"
#include "nb_enums.h"
#include "nb_traits.h"
#include "nb_tuple.h"
#include "nb_lib.h"
#include "nb_descr.h"
#include "nb_types.h"
#include "nb_accessor.h"
#include "nb_error.h"
#include "nb_attr.h"
#include "nb_cast.h"
#include "nb_misc.h"
#include "nb_call.h"
#include "nb_func.h"
#include "nb_class.h"
// IWYU pragma: end_exports
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_accessor.h | C/C++ Header | /*
nanobind/nb_accessor.h: Accessor helper class for .attr(), operator[]
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Declares an in-place operator (e.g. 'operator+=') inside the accessor class
#define NB_DECL_ACCESSOR_OP_I(name) \
    template <typename T> accessor& name(const api<T> &o);
// Implements the corresponding operator out of line: evaluate the binary
// Python operation 'op' on the current value, then store the result back
// through the Impl policy.
#define NB_IMPL_ACCESSOR_OP_I(name, op) \
    template <typename Impl> template <typename T> \
    accessor<Impl>& accessor<Impl>::name(const api<T> &o) { \
        PyObject *res = obj_op_2(ptr(), o.derived().ptr(), op); \
        Impl::set(m_base, m_key, res); \
        return *this; \
    }
// Result of '.attr()' / 'operator[]'. The read is deferred: 'Impl::get()' only
// runs when ptr() is called, and the result is stored in 'm_cache'.
// Assignment writes through via 'Impl::set()'. 'm_base' is an unowned pointer
// (no incref is performed in the constructor).
template <typename Impl> class accessor : public api<accessor<Impl>> {
    // nanobind::del() needs access to the private del() member below
    template <typename T> friend void nanobind::del(accessor<T> &);
    template <typename T> friend void nanobind::del(accessor<T> &&);
public:
    static constexpr auto Name = const_name("object");
    template <typename Key>
    accessor(handle obj, Key &&key)
        : m_base(obj.ptr()), m_key(std::move(key)) { }
    accessor(const accessor &) = delete;
    accessor(accessor &&) = delete;
    ~accessor() {
        // Policies whose get() stores an owned reference must release it here
        if constexpr (Impl::cache_dec_ref)
            Py_XDECREF(m_cache);
    }
    template <typename T> accessor& operator=(T &&value);
    template <typename T, enable_if_t<std::is_base_of_v<object, T>> = 0>
    operator T() const { return borrow<T>(ptr()); }
    // Fetch the referenced item/attribute; result is cached in 'm_cache'
    NB_INLINE PyObject *ptr() const {
        Impl::get(m_base, m_key, &m_cache);
        return m_cache;
    }
    NB_INLINE handle base() const { return m_base; }
    NB_INLINE object key() const { return steal(Impl::key(m_key)); }
    NB_DECL_ACCESSOR_OP_I(operator+=)
    NB_DECL_ACCESSOR_OP_I(operator-=)
    NB_DECL_ACCESSOR_OP_I(operator*=)
    NB_DECL_ACCESSOR_OP_I(operator/=)
    NB_DECL_ACCESSOR_OP_I(operator%=)
    NB_DECL_ACCESSOR_OP_I(operator|=)
    NB_DECL_ACCESSOR_OP_I(operator&=)
    NB_DECL_ACCESSOR_OP_I(operator^=)
    NB_DECL_ACCESSOR_OP_I(operator<<=)
    NB_DECL_ACCESSOR_OP_I(operator>>=)
private:
    NB_INLINE void del () { Impl::del(m_base, m_key); }
private:
    PyObject *m_base;
    mutable PyObject *m_cache{nullptr};
    typename Impl::key_type m_key;
};
// Accessor implementation policies. Each provides: 'key_type' (C++ form of the
// lookup key), 'cache_dec_ref' (does get() store an owned reference that
// ~accessor() must release?), plus get()/set() and -- where supported --
// del()/key() operations.
// Attribute access keyed by a C string ('obj.attr("name")')
struct str_attr {
    static constexpr bool cache_dec_ref = true;
    using key_type = const char *;
    NB_INLINE static void get(PyObject *obj, const char *key, PyObject **cache) {
        detail::getattr_or_raise(obj, key, cache);
    }
    NB_INLINE static void set(PyObject *obj, const char *key, PyObject *v) {
        setattr(obj, key, v);
    }
    NB_INLINE static PyObject *key(const char *key) {
        return PyUnicode_InternFromString(key);
    }
};
// Attribute access keyed by a Python object
struct obj_attr {
    static constexpr bool cache_dec_ref = true;
    using key_type = handle;
    NB_INLINE static void get(PyObject *obj, handle key, PyObject **cache) {
        detail::getattr_or_raise(obj, key.ptr(), cache);
    }
    NB_INLINE static void set(PyObject *obj, handle key, PyObject *v) {
        setattr(obj, key.ptr(), v);
    }
    NB_INLINE static PyObject *key(handle key) {
        // Return a new reference to the existing key object
        Py_INCREF(key.ptr());
        return key.ptr();
    }
};
// Item access ('obj["key"]') keyed by a C string
struct str_item {
    static constexpr bool cache_dec_ref = true;
    using key_type = const char *;
    NB_INLINE static void get(PyObject *obj, const char *key, PyObject **cache) {
        detail::getitem_or_raise(obj, key, cache);
    }
    NB_INLINE static void set(PyObject *obj, const char *key, PyObject *v) {
        setitem(obj, key, v);
    }
    NB_INLINE static void del(PyObject *obj, const char *key) {
        delitem(obj, key);
    }
};
// Item access keyed by a Python object
struct obj_item {
    static constexpr bool cache_dec_ref = true;
    using key_type = handle;
    NB_INLINE static void get(PyObject *obj, handle key, PyObject **cache) {
        detail::getitem_or_raise(obj, key.ptr(), cache);
    }
    NB_INLINE static void set(PyObject *obj, handle key, PyObject *v) {
        setitem(obj, key.ptr(), v);
    }
    NB_INLINE static void del(PyObject *obj, handle key) {
        delitem(obj, key.ptr());
    }
};
// Item access keyed by an integer index (generic sequence protocol)
struct num_item {
    static constexpr bool cache_dec_ref = true;
    using key_type = Py_ssize_t;
    NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) {
        detail::getitem_or_raise(obj, index, cache);
    }
    NB_INLINE static void set(PyObject *obj, Py_ssize_t index, PyObject *v) {
        setitem(obj, index, v);
    }
    NB_INLINE static void del(PyObject *obj, Py_ssize_t index) {
        delitem(obj, index);
    }
};
// Specialized integer-item access for 'nb::list' using the PyList_* API. In
// free-threaded builds, PyList_GetItemRef returns a new (owned) reference,
// hence cache_dec_ref is enabled there.
struct num_item_list {
#if defined(Py_GIL_DISABLED)
    static constexpr bool cache_dec_ref = true;
#else
    static constexpr bool cache_dec_ref = false;
#endif
    using key_type = Py_ssize_t;
    NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) {
#if defined(Py_GIL_DISABLED)
        *cache = PyList_GetItemRef(obj, index);
#else
        *cache = NB_LIST_GET_ITEM(obj, index);
#endif
    }
    NB_INLINE static void set(PyObject *obj, Py_ssize_t index, PyObject *v) {
#if defined(Py_LIMITED_API) || defined(NB_FREE_THREADED)
        // PyList_SetItem steals a reference to 'v' and releases the old entry
        Py_INCREF(v);
        PyList_SetItem(obj, index, v);
#else
        // Fast path: swap the slot directly and drop the old reference
        PyObject *old = NB_LIST_GET_ITEM(obj, index);
        Py_INCREF(v);
        NB_LIST_SET_ITEM(obj, index, v);
        Py_DECREF(old);
#endif
    }
    NB_INLINE static void del(PyObject *obj, Py_ssize_t index) {
        delitem(obj, index);
    }
};
// Specialized integer-item access for 'nb::tuple' (read-only; no ownership is
// taken of the cached entry)
struct num_item_tuple {
    static constexpr bool cache_dec_ref = false;
    using key_type = Py_ssize_t;
    NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) {
        *cache = NB_TUPLE_GET_ITEM(obj, index);
    }
    template <typename...Ts> static void set(Ts...) {
        static_assert(false_v<Ts...>, "tuples are immutable!");
    }
};
// Out-of-line definitions of the accessor factory functions declared in api<D>
template <typename D> accessor<obj_attr> api<D>::attr(handle key) const {
    return { derived(), borrow(key) };
}
template <typename D> accessor<str_attr> api<D>::attr(const char *key) const {
    return { derived(), key };
}
// Convenience accessor for the '__doc__' attribute
template <typename D> accessor<str_attr> api<D>::doc() const {
    return { derived(), "__doc__" };
}
template <typename D> accessor<obj_item> api<D>::operator[](handle key) const {
    return { derived(), borrow(key) };
}
template <typename D> accessor<str_item> api<D>::operator[](const char *key) const {
    return { derived(), key };
}
template <typename D>
template <typename T, enable_if_t<std::is_arithmetic_v<T>>>
accessor<num_item> api<D>::operator[](T index) const {
    return { derived(), (Py_ssize_t) index };
}
// Instantiate the in-place operators, mapping each to the corresponding
// CPython number-protocol entry point
NB_IMPL_ACCESSOR_OP_I(operator+=, PyNumber_InPlaceAdd)
NB_IMPL_ACCESSOR_OP_I(operator%=, PyNumber_InPlaceRemainder)
NB_IMPL_ACCESSOR_OP_I(operator-=, PyNumber_InPlaceSubtract)
NB_IMPL_ACCESSOR_OP_I(operator*=, PyNumber_InPlaceMultiply)
NB_IMPL_ACCESSOR_OP_I(operator/=, PyNumber_InPlaceTrueDivide)
NB_IMPL_ACCESSOR_OP_I(operator|=, PyNumber_InPlaceOr)
NB_IMPL_ACCESSOR_OP_I(operator&=, PyNumber_InPlaceAnd)
NB_IMPL_ACCESSOR_OP_I(operator^=, PyNumber_InPlaceXor)
NB_IMPL_ACCESSOR_OP_I(operator<<=,PyNumber_InPlaceLshift)
NB_IMPL_ACCESSOR_OP_I(operator>>=,PyNumber_InPlaceRshift)
NAMESPACE_END(detail)
// 'list' and 'tuple' provide specialized integer indexing via the policies above
template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>>>
detail::accessor<detail::num_item_list> list::operator[](T index) const {
    return { derived(), (Py_ssize_t) index };
}
template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>>>
detail::accessor<detail::num_item_tuple> tuple::operator[](T index) const {
    return { derived(), (Py_ssize_t) index };
}
// Forwards to the Python-level 'str.format' method
template <typename... Args> str str::format(Args&&... args) const {
    return steal<str>(
        derived().attr("format")((detail::forward_t<Args>) args...).release());
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_attr.h | C/C++ Header | /*
nanobind/nb_attr.h: Annotations for function and class declarations
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
// Annotation: scope (e.g. a module) into which a binding should be installed
struct scope {
    PyObject *value;
    NB_INLINE scope(handle value) : value(value.ptr()) {}
};
// Annotation: name under which a binding is exposed in Python
struct name {
    const char *value;
    NB_INLINE name(const char *value) : value(value) {}
};
struct arg_v;
struct arg_locked;
struct arg_locked_v;
// Basic function argument descriptor (no default value, not locked)
struct arg {
    NB_INLINE constexpr explicit arg(const char *name = nullptr) : name_(name), signature_(nullptr) { }
    // operator= can be used to provide a default value
    template <typename T> NB_INLINE arg_v operator=(T &&value) const;
    // Mutators that don't change default value or locked state
    NB_INLINE arg &noconvert(bool value = true) {
        convert_ = !value;
        return *this;
    }
    NB_INLINE arg &none(bool value = true) {
        none_ = value;
        return *this;
    }
    // Custom signature text for this argument (stored in arg_data::signature)
    NB_INLINE arg &sig(const char *value) {
        signature_ = value;
        return *this;
    }
    // After lock(), this argument is locked
    NB_INLINE arg_locked lock();
    const char *name_, *signature_;
    uint8_t convert_{ true };  // allow implicit conversion (default: yes)
    bool none_{ false };       // accept 'None' for this argument (default: no)
};
// Function argument descriptor with default value (not locked)
struct arg_v : arg {
    object value;
    NB_INLINE arg_v(const arg &base, object &&value)
        : arg(base), value(std::move(value)) {}
private:
    // Inherited mutators would slice off the default, and are not generally needed
    using arg::noconvert;
    using arg::none;
    using arg::sig;
    using arg::lock;
};
// Function argument descriptor that is locked (no default value)
struct arg_locked : arg {
    NB_INLINE constexpr explicit arg_locked(const char *name = nullptr) : arg(name) { }
    NB_INLINE constexpr explicit arg_locked(const arg &base) : arg(base) { }
    // operator= can be used to provide a default value
    template <typename T> NB_INLINE arg_locked_v operator=(T &&value) const;
    // Mutators must be respecified in order to not slice off the locked status
    NB_INLINE arg_locked &noconvert(bool value = true) {
        convert_ = !value;
        return *this;
    }
    NB_INLINE arg_locked &none(bool value = true) {
        none_ = value;
        return *this;
    }
    NB_INLINE arg_locked &sig(const char *value) {
        signature_ = value;
        return *this;
    }
    // Redundant extra lock() is allowed
    NB_INLINE arg_locked &lock() { return *this; }
};
// Function argument descriptor that is potentially locked and has a default value
struct arg_locked_v : arg_locked {
    object value;
    NB_INLINE arg_locked_v(const arg_locked &base, object &&value)
        : arg_locked(base), value(std::move(value)) {}
private:
    // Inherited mutators would slice off the default, and are not generally needed
    using arg_locked::noconvert;
    using arg_locked::none;
    using arg_locked::sig;
    using arg_locked::lock;
};
// Out-of-line so that 'arg_locked' is complete at the point of definition
NB_INLINE arg_locked arg::lock() { return arg_locked{*this}; }
// Annotation: RAII guard type(s) instantiated around calls to the bound function
template <typename... Ts> struct call_guard {
    using type = detail::tuple<Ts...>;
};
// Empty tag types used as class/function binding annotations (the file header
// comment applies; their effects are implemented in the class_/def machinery)
struct dynamic_attr {};
struct is_weak_referenceable {};
struct is_method {};
struct is_implicit {};
struct is_operator {};
struct is_arithmetic {};
struct is_flag {};
struct is_final {};
struct is_generic {};
struct kw_only {};
struct lock_self {};
struct never_destruct {};
// Annotation: tie the lifetime of the 'Patient' argument to the 'Nurse'.
// Indices are 1-based argument positions; 0 denotes the return value (see
// process_postcall at the end of this file).
template <size_t /* Nurse */, size_t /* Patient */> struct keep_alive {};
template <typename T> struct supplement {};
// Annotation: intrusive reference counting; 'set_self_py' is a callback that
// associates a C++ instance of T with its Python object
template <typename T> struct intrusive_ptr {
    intrusive_ptr(void (*set_self_py)(T *, PyObject *) noexcept)
        : set_self_py(set_self_py) { }
    void (*set_self_py)(T *, PyObject *) noexcept;
};
// Annotation: extra PyType_Slot entries for a bound type
struct type_slots {
    type_slots (const PyType_Slot *value) : value(value) { }
    const PyType_Slot *value;
};
// Annotation: callback that may append PyType_Slot entries at type-creation time
struct type_slots_callback {
    using cb_t = void (*)(const detail::type_init_data *t,
                          PyType_Slot *&slots, size_t max_slots) noexcept;
    type_slots_callback(cb_t callback) : callback(callback) { }
    cb_t callback;
};
// Annotation: custom function signature override
struct sig {
    const char *value;
    sig(const char *value) : value(value) { }
};
struct is_getter { };
// Annotation: invoke Policy::precall/postcall hooks around the function call
// (see process_precall/process_postcall below)
template <typename Policy> struct call_policy final {};
NAMESPACE_BEGIN(literals)
// User-defined literal: "x"_a is shorthand for nb::arg("x")
constexpr arg operator""_a(const char *name, size_t) { return arg(name); }
NAMESPACE_END(literals)
NAMESPACE_BEGIN(detail)
/// Flags characterizing a bound function (stored in func_data_prelim_base::flags)
enum class func_flags : uint32_t {
    /* Low 3 bits reserved for return value policy */
    /// Did the user specify a name for this function, or is it anonymous?
    has_name = (1 << 4),
    /// Did the user specify a scope in which this function should be installed?
    has_scope = (1 << 5),
    /// Did the user specify a docstring?
    has_doc = (1 << 6),
    /// Did the user specify nb::arg/arg_v annotations for all arguments?
    has_args = (1 << 7),
    /// Does the function signature contain an *args-style argument?
    has_var_args = (1 << 8),
    /// Does the function signature contain an *kwargs-style argument?
    has_var_kwargs = (1 << 9),
    /// Is this function a method of a class?
    is_method = (1 << 10),
    /// Is this function a method called __init__? (automatically generated)
    is_constructor = (1 << 11),
    /// Can this constructor be used to perform an implicit conversion?
    is_implicit = (1 << 12),
    /// Is this function an arithmetic operator?
    is_operator = (1 << 13),
    /// When the function is GCed, do we need to call func_data_prelim::free_capture?
    has_free = (1 << 14),
    /// Should the func_new() call return a new reference?
    return_ref = (1 << 15),
    /// Does this overload specify a custom function signature (for docstrings, typing)
    has_signature = (1 << 16),
    /// Does this function potentially modify the elements of the PyObject*[] array
    /// representing its arguments? (nb::keep_alive() or call_policy annotations)
    can_mutate_args = (1 << 17)
};
/// Per-argument flags passed to type casters' from_python()
enum cast_flags : uint8_t {
    // Enable implicit conversions (code assumes this has value 1, don't reorder..)
    convert = (1 << 0),
    // Passed to the 'self' argument in a constructor call (__init__)
    construct = (1 << 1),
    // Indicates that the function dispatcher should accept 'None' arguments
    accepts_none = (1 << 2),
    // Indicates that this cast is performed by nb::cast or nb::try_cast.
    // This implies that objects added to the cleanup list may be
    // released immediately after the caster's final output value is
    // obtained, i.e., before it is used.
    manual = (1 << 3)
};
/// Per-argument metadata record (one per annotated function argument)
struct arg_data {
    const char *name;       // argument name from the nb::arg annotation (may be null)
    const char *signature;  // custom signature text from arg::sig() (may be null)
    PyObject *name_py;      // Python form of 'name'; not initialized in this header
    PyObject *value;        // default value (set by arg_v/arg_locked_v), else null
    uint8_t flag;           // combination of cast_flags bits
};
/// Fixed-size part of the preliminary function record assembled at binding time
struct func_data_prelim_base {
    // A small amount of space to capture data used by the function/closure
    void *capture[3];
    // Callback to clean up the 'capture' field
    void (*free_capture)(void *);
    /// Implementation of the function call
    PyObject *(*impl)(void *, PyObject **, uint8_t *, rv_policy,
                      cleanup_list *);
    /// Function signature description
    const char *descr;
    /// C++ types referenced by 'descr'
    const std::type_info **descr_types;
    /// Supplementary flags
    uint32_t flags;
    /// Total number of parameters accepted by the C++ function; nb::args
    /// and nb::kwargs parameters are counted as one each. If the
    /// 'has_args' flag is set, then there is one arg_data structure
    /// for each of these.
    uint16_t nargs;
    /// Number of parameters to the C++ function that may be filled from
    /// Python positional arguments without additional ceremony.
    /// nb::args and nb::kwargs parameters are not counted in this total, nor
    /// are any parameters after nb::args or after a nb::kw_only annotation.
    /// The parameters counted here may be either named (nb::arg("name")) or
    /// unnamed (nb::arg()). If unnamed, they are effectively positional-only.
    /// nargs_pos is always <= nargs.
    uint16_t nargs_pos;
    // ------- Extra fields -------
    const char *name;
    const char *doc;
    PyObject *scope;
};
/// Full record: fixed part plus one arg_data per annotated argument
template<size_t Size> struct func_data_prelim : func_data_prelim_base {
    arg_data args[Size];
};
/// Zero-argument specialization (avoids a zero-length array member)
template<> struct func_data_prelim<0> : func_data_prelim_base {};
// func_extra_apply(): each overload consumes one function annotation and
// records its effect in the preliminary function record 'f'. 'index' tracks
// the current slot in 'f.args' and is advanced only by nb::arg-style
// annotations.
template <typename F>
NB_INLINE void func_extra_apply(F &f, const name &name, size_t &) {
    f.name = name.value;
    f.flags |= (uint32_t) func_flags::has_name;
}
template <typename F>
NB_INLINE void func_extra_apply(F &f, const scope &scope, size_t &) {
    f.scope = scope.value;
    f.flags |= (uint32_t) func_flags::has_scope;
}
// A custom signature reuses the 'name' field to carry the signature string
template <typename F>
NB_INLINE void func_extra_apply(F &f, const sig &s, size_t &) {
    f.flags |= (uint32_t) func_flags::has_signature;
    f.name = s.value;
}
// A bare C string annotation is interpreted as the docstring
template <typename F>
NB_INLINE void func_extra_apply(F &f, const char *doc, size_t &) {
    f.doc = doc;
    f.flags |= (uint32_t) func_flags::has_doc;
}
template <typename F>
NB_INLINE void func_extra_apply(F &f, is_method, size_t &) {
    f.flags |= (uint32_t) func_flags::is_method;
}
template <typename F>
NB_INLINE void func_extra_apply(F &, is_getter, size_t &) { }
template <typename F>
NB_INLINE void func_extra_apply(F &f, is_implicit, size_t &) {
    f.flags |= (uint32_t) func_flags::is_implicit;
}
template <typename F>
NB_INLINE void func_extra_apply(F &f, is_operator, size_t &) {
    f.flags |= (uint32_t) func_flags::is_operator;
}
// Store the return value policy in the low 3 bits of 'flags'
template <typename F>
NB_INLINE void func_extra_apply(F &f, rv_policy pol, size_t &) {
    f.flags = (f.flags & ~0b111) | (uint16_t) pol;
}
template <typename F>
NB_INLINE void func_extra_apply(F &, std::nullptr_t, size_t &) { }
// nb::arg annotation: fill in the arg_data slot at 'index' and advance it
template <typename F>
NB_INLINE void func_extra_apply(F &f, const arg &a, size_t &index) {
    uint8_t flag = 0;
    if (a.none_)
        flag |= (uint8_t) cast_flags::accepts_none;
    if (a.convert_)
        flag |= (uint8_t) cast_flags::convert;
    arg_data &arg = f.args[index];
    arg.flag |= flag;
    arg.name = a.name_;
    arg.signature = a.signature_;
    arg.value = nullptr;
    index++;
}
// arg_locked will select the arg overload; the locking is added statically
// in nb_func.h
// nb::arg = default: same as above, but also record the default value
template <typename F>
NB_INLINE void func_extra_apply(F &f, const arg_v &a, size_t &index) {
    arg_data &ad = f.args[index];
    func_extra_apply(f, (const arg &) a, index);
    ad.value = a.value.ptr();
}
template <typename F>
NB_INLINE void func_extra_apply(F &f, const arg_locked_v &a, size_t &index) {
    arg_data &ad = f.args[index];
    func_extra_apply(f, (const arg_locked &) a, index);
    ad.value = a.value.ptr();
}
template <typename F>
NB_INLINE void func_extra_apply(F &, kw_only, size_t &) {}
template <typename F>
NB_INLINE void func_extra_apply(F &, lock_self, size_t &) {}
template <typename F, typename... Ts>
NB_INLINE void func_extra_apply(F &, call_guard<Ts...>, size_t &) {}
// keep_alive/call_policy hooks may modify the argument array -> mark the record
template <typename F, size_t Nurse, size_t Patient>
NB_INLINE void func_extra_apply(F &f, nanobind::keep_alive<Nurse, Patient>, size_t &) {
    f.flags |= (uint32_t) func_flags::can_mutate_args;
}
template <typename F, typename Policy>
NB_INLINE void func_extra_apply(F &f, call_policy<Policy>, size_t &) {
    f.flags |= (uint32_t) func_flags::can_mutate_args;
}
/// Compile-time aggregation of properties implied by the annotation list 'Ts...'
/// (primary template: defaults for an empty list)
template <typename... Ts> struct func_extra_info {
    using call_guard = void;
    static constexpr bool pre_post_hooks = false;
    static constexpr size_t nargs_locked = 0;
};
// Annotations without special compile-time effect are skipped
template <typename T, typename... Ts> struct func_extra_info<T, Ts...>
    : func_extra_info<Ts...> { };
template <typename... Cs, typename... Ts>
struct func_extra_info<call_guard<Cs...>, Ts...> : func_extra_info<Ts...> {
    static_assert(std::is_same_v<typename func_extra_info<Ts...>::call_guard, void>,
                  "call_guard<> can only be specified once!");
    using call_guard = nanobind::call_guard<Cs...>;
};
// keep_alive and call_policy require the pre/postcall hooks below
template <size_t Nurse, size_t Patient, typename... Ts>
struct func_extra_info<nanobind::keep_alive<Nurse, Patient>, Ts...> : func_extra_info<Ts...> {
    static constexpr bool pre_post_hooks = true;
};
template <typename Policy, typename... Ts>
struct func_extra_info<call_policy<Policy>, Ts...> : func_extra_info<Ts...> {
    static constexpr bool pre_post_hooks = true;
};
// Each locked argument (and lock_self) increases the lock count
template <typename... Ts>
struct func_extra_info<arg_locked, Ts...> : func_extra_info<Ts...> {
    static constexpr size_t nargs_locked = 1 + func_extra_info<Ts...>::nargs_locked;
};
template <typename... Ts>
struct func_extra_info<lock_self, Ts...> : func_extra_info<Ts...> {
    static constexpr size_t nargs_locked = 1 + func_extra_info<Ts...>::nargs_locked;
};
// No-op fallback used when no hook-bearing annotation is present
NB_INLINE void process_precall(PyObject **, size_t, detail::cleanup_list *, void *) { }
// call_policy: run the policy's precall hook before dispatch
template <size_t NArgs, typename Policy>
NB_INLINE void
process_precall(PyObject **args, std::integral_constant<size_t, NArgs> nargs,
                detail::cleanup_list *cleanup, call_policy<Policy> *) {
    Policy::precall(args, nargs, cleanup);
}
// No-op fallback used when no hook-bearing annotation is present
NB_INLINE void process_postcall(PyObject **, size_t, PyObject *, void *) { }
// keep_alive: after the call, tie the patient's lifetime to the nurse
// (index 0 refers to the return value, 1..NArgs to the arguments)
template <size_t NArgs, size_t Nurse, size_t Patient>
NB_INLINE void
process_postcall(PyObject **args, std::integral_constant<size_t, NArgs>,
                 PyObject *result, nanobind::keep_alive<Nurse, Patient> *) {
    static_assert(Nurse != Patient,
                  "keep_alive with the same argument as both nurse and patient "
                  "doesn't make sense");
    static_assert(Nurse <= NArgs && Patient <= NArgs,
                  "keep_alive template parameters must be in the range "
                  "[0, number of C++ function arguments]");
    keep_alive(Nurse == 0 ? result : args[Nurse - 1],
               Patient == 0 ? result : args[Patient - 1]);
}
// call_policy: run the policy's postcall hook on the produced result
template <size_t NArgs, typename Policy>
NB_INLINE void
process_postcall(PyObject **args, std::integral_constant<size_t, NArgs> nargs,
                 PyObject *&result, call_policy<Policy> *) {
    // result_guard avoids leaking a reference to the return object
    // if postcall throws an exception
    object result_guard = steal(result);
    Policy::postcall(args, nargs, result);
    result_guard.release();
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_call.h | C/C++ Header | /*
nanobind/nb_call.h: Functionality for calling Python functions from C++
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable: 6255) // _alloca indicates failure by raising a stack overflow exception
#endif
// Wrapper marking a handle for '**kwargs'-style expansion in a call
class kwargs_proxy : public handle {
public:
    explicit kwargs_proxy(handle h) : handle(h) { }
};
// Wrapper marking a handle for '*args'-style expansion; applying '*' once more
// yields a kwargs_proxy
class args_proxy : public handle {
public:
    explicit args_proxy(handle h) : handle(h) { }
    kwargs_proxy operator*() const { return kwargs_proxy(*this); }
};
// '*h' on any api object produces an args_proxy for use in call expressions
template <typename Derived>
args_proxy api<Derived>::operator*() const {
    return args_proxy(derived().ptr());
}
/// Implementation detail of api<T>::operator() (call operator): tally how many
/// positional ('nargs') and keyword ('nkwargs') slots one call argument needs.
template <typename T>
NB_INLINE void call_analyze(size_t &nargs, size_t &nkwargs, const T &value) {
    using D = std::decay_t<T>;
    static_assert(!std::is_base_of_v<arg_locked, D>,
                  "nb::arg().lock() may be used only when defining functions, "
                  "not when calling them");

    if constexpr (std::is_same_v<D, kwargs_proxy>)
        nkwargs += len(value);   // '**kwargs' expansion: one slot per entry
    else if constexpr (std::is_same_v<D, args_proxy>)
        nargs += len(value);     // '*args' expansion: one slot per element
    else if constexpr (std::is_same_v<D, arg_v>)
        ++nkwargs;               // named argument ("x"_a = value)
    else
        ++nargs;                 // ordinary positional argument

    (void) nargs; (void) nkwargs; (void) value;
}
/// Implementation detail of api<T>::operator() (call operator): write one call
/// argument into the vectorcall argument array 'args' and (for keyword
/// arguments) the keyword-name tuple 'kwnames'. References stored into 'args'
/// are owned (released into the array); 'kwargs_offset' is the total number of
/// positional slots, after which keyword values are placed.
template <rv_policy policy, typename T>
NB_INLINE void call_init(PyObject **args, PyObject *kwnames, size_t &nargs,
                         size_t &nkwargs, const size_t kwargs_offset,
                         T &&value) {
    using D = std::decay_t<T>;
    if constexpr (std::is_same_v<D, arg_v>) {
        // Named argument: value goes after all positionals, name into 'kwnames'
        args[kwargs_offset + nkwargs] = value.value.release().ptr();
        NB_TUPLE_SET_ITEM(kwnames, nkwargs++,
                          PyUnicode_InternFromString(value.name_));
    } else if constexpr (std::is_same_v<D, args_proxy>) {
        // '*args' expansion: append each element as a positional argument
        for (size_t i = 0, l = len(value); i < l; ++i)
            args[nargs++] = borrow(value[i]).release().ptr();
    } else if constexpr (std::is_same_v<D, kwargs_proxy>) {
        // '**kwargs' expansion: copy each (key, value) pair of the dict
        PyObject *key, *entry;
        Py_ssize_t pos = 0;
#if defined(NB_FREE_THREADED)
        // Hold the dictionary's lock while iterating in free-threaded builds
        ft_object_guard guard(value);
#endif
        while (PyDict_Next(value.ptr(), &pos, &key, &entry)) {
            // PyDict_Next returns borrowed references; take ownership here
            Py_INCREF(key); Py_INCREF(entry);
            args[kwargs_offset + nkwargs] = entry;
            NB_TUPLE_SET_ITEM(kwnames, nkwargs++, key);
        }
    } else {
        // Ordinary positional argument: convert C++ -> Python via a type caster
        args[nargs++] =
            make_caster<T>::from_cpp((forward_t<T>) value, policy, nullptr).ptr();
    }
    (void) args; (void) kwnames; (void) nargs;
    (void) nkwargs; (void) kwargs_offset;
}
// Performs the vectorcall. For method calls, slot 0 of 'args' receives 'self'
// and 'base' carries the method name; otherwise 'base' is the callable and
// slot 0 is left for the callee (PY_VECTORCALL_ARGUMENTS_OFFSET).
#define NB_DO_VECTORCALL() \
    PyObject *base, **args_p; \
    if constexpr (method_call) { \
        base = derived().key().release().ptr(); \
        args[0] = derived().base().inc_ref().ptr(); \
        args_p = args; \
        nargs++; \
    } else { \
        base = derived().inc_ref().ptr(); \
        args[0] = nullptr; \
        args_p = args + 1; \
    } \
    nargs |= PY_VECTORCALL_ARGUMENTS_OFFSET; \
    return steal(obj_vectorcall(base, args_p, nargs, kwnames, method_call))
// Call operator: converts each C++ argument to Python and performs a
// vectorcall. Accessors obtained via '.attr()' enable the method-call form.
template <typename Derived>
template <rv_policy policy, typename... Args>
object api<Derived>::operator()(Args &&...args_) const {
    // Calling through an attribute accessor -> use the vectorcall method-call form
    static constexpr bool method_call =
        std::is_same_v<Derived, accessor<obj_attr>> ||
        std::is_same_v<Derived, accessor<str_attr>>;
    if constexpr (((std::is_same_v<Args, arg_v> ||
                    std::is_same_v<Args, args_proxy> ||
                    std::is_same_v<Args, kwargs_proxy>) || ...)) {
        // Complex call with keyword arguments, *args/**kwargs expansion, etc.
        size_t nargs = 0, nkwargs = 0, nargs2 = 0, nkwargs2 = 0;
        // Determine storage requirements for positional and keyword args
        (call_analyze(nargs, nkwargs, (const Args &) args_), ...);
        // Allocate memory on the stack (one extra slot reserved at index 0)
        PyObject **args =
            (PyObject **) alloca((nargs + nkwargs + 1) * sizeof(PyObject *));
        PyObject *kwnames =
            nkwargs ? PyTuple_New((Py_ssize_t) nkwargs) : nullptr;
        // Fill 'args' and 'kwnames' variables
        (call_init<policy>(args + 1, kwnames, nargs2, nkwargs2, nargs,
                           (forward_t<Args>) args_), ...);
        NB_DO_VECTORCALL();
    } else {
        // Simple version with only positional arguments
        PyObject *args[sizeof...(Args) + 1], *kwnames = nullptr;
        size_t nargs = 0;
        ((args[1 + nargs++] =
              detail::make_caster<Args>::from_cpp(
                  (detail::forward_t<Args>) args_, policy, nullptr)
                  .ptr()),
         ...);
        NB_DO_VECTORCALL();
    }
}
#undef NB_DO_VECTORCALL
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_cast.h | C/C++ Header | /*
nanobind/nb_cast.h: Type caster interface and essential type casters
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
// Boilerplate for simple type casters: declares the stored 'Value' and its
// signature descriptor 'Name', the preferred cast type, a pointer-flavored
// from_cpp() overload (maps nullptr to None), and the extraction operators
// that hand out the stored value.
#define NB_TYPE_CASTER(Value_, descr) \
    using Value = Value_; \
    static constexpr auto Name = descr; \
    template <typename T_> using Cast = movable_cast_t<T_>; \
    template <typename T_> static constexpr bool can_cast() { return true; } \
    template <typename T_, \
              enable_if_t<std::is_same_v<std::remove_cv_t<T_>, Value>> = 0> \
    static handle from_cpp(T_ *p, rv_policy policy, cleanup_list *list) { \
        if (!p) \
            return none().release(); \
        return from_cpp(*p, policy, list); \
    } \
    explicit operator Value*() { return &value; } \
    explicit operator Value&() { return (Value &) value; } \
    explicit operator Value&&() { return (Value &&) value; } \
    Value value;
// Opt a type out of nanobind's built-in casters by routing it through the
// regular class-binding base caster instead
#define NB_MAKE_OPAQUE(...) \
    namespace nanobind::detail { \
    template <> class type_caster<__VA_ARGS__> \
        : public type_caster_base<__VA_ARGS__> { }; }
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/**
* Type casters expose a member 'Cast<T>' which users of a type caster must
* query to determine what the caster actually can (and prefers) to produce.
* The convenience alias ``cast_t<T>`` defined below performs this query for a
* given type ``T``.
*
* Often ``cast_t<T>`` is simply equal to ``T`` or ``T&``. More significant
* deviations are also possible, which could be due to one of the following
* two reasons:
*
* 1. Efficiency: most STL type casters create a local copy (``value`` member)
* of the value being cast. The caller should move this value to its
* intended destination instead of making further copies along the way.
* Consequently, ``cast_t<std::vector<T>>`` yields ``cast_t<std::vector<T>>
* &&`` to enable such behavior.
*
* 2. STL pairs may contain references, and such pairs aren't
* default-constructible. The STL pair caster therefore cannot create a local
* copy and must construct the pair on the fly, which in turns means that it
* cannot return references. Therefore, ``cast_t<const std::pair<T1, T2>&>``
* yields ``std::pair<T1, T2>``.
*/
/// Ask a type caster what flavors of a type it can actually produce -- may be different from 'T'
template <typename T> using cast_t = typename make_caster<T>::template Cast<T>;
/// This is a default choice for the 'Cast' type alias described above. It
/// prefers to return rvalue references to allow the caller to move the object.
/// (Pointers stay pointers, lvalue refs stay lvalue refs, all else becomes &&.)
template <typename T>
using movable_cast_t =
    std::conditional_t<is_pointer_v<T>, intrinsic_t<T> *,
                       std::conditional_t<std::is_lvalue_reference_v<T>,
                                          intrinsic_t<T> &, intrinsic_t<T> &&>>;
/// This version is more careful about what the caller actually requested and
/// only moves when this was explicitly requested. It is the default for the
/// base type caster (i.e., types bound via ``nanobind::class_<..>``)
/// (Pointers stay pointers, rvalue refs stay &&, all else becomes an lvalue ref.)
template <typename T>
using precise_cast_t =
    std::conditional_t<is_pointer_v<T>, intrinsic_t<T> *,
                       std::conditional_t<std::is_rvalue_reference_v<T>,
                                          intrinsic_t<T> &&, intrinsic_t<T> &>>;
/// Many type casters delegate to another caster using the pattern:
/// ~~~ .cc
/// bool from_python(handle src, uint8_t flags, cleanup_list *cl) noexcept {
/// SomeCaster c;
/// if (!c.from_python(src, flags, cl)) return false;
/// /* do something with */ c.operator T();
/// return true;
/// }
/// ~~~
/// This function adjusts the flags to avoid issues where the resulting T object
/// refers into storage that will dangle after SomeCaster is destroyed, and
/// causes a static assertion failure if that's not sufficient. Use it like:
/// ~~~ .cc
/// if (!c.from_python(src, flags_for_local_caster<T>(flags), cl))
/// return false;
/// ~~~
/// where the template argument T is the type you plan to extract.
// Adjust 'flags' for a delegating cast producing a T that may alias the inner
// caster's storage (see the usage comment above this function)
template <typename T>
NB_INLINE uint8_t flags_for_local_caster(uint8_t flags) noexcept {
    using Caster = make_caster<T>;
    constexpr bool is_ref = std::is_pointer_v<T> || std::is_reference_v<T>;
    if constexpr (is_base_caster_v<Caster>) {
        if constexpr (is_ref) {
            /* References/pointers to a type produced by implicit conversions
               refer to storage owned by the cleanup_list. In a nb::cast() call,
               that storage will be released before the reference can be used;
               to prevent dangling, don't allow implicit conversions there. */
            if (flags & ((uint8_t) cast_flags::manual))
                flags &= ~((uint8_t) cast_flags::convert);
        }
    } else {
        /* Any pointer produced by a non-base caster will generally point
           into storage owned by the caster, which won't live long enough.
           Exception: the 'char' caster produces a result that points to
           storage owned by the incoming Python 'str' object, so it's OK. */
        static_assert(!is_ref || std::is_same_v<T, const char*> ||
                      (std::is_pointer_v<T> && std::is_constructible_v<T*, Caster>),
                      "nanobind generally cannot produce objects that "
                      "contain interior pointers T* (or references T&) if "
                      "the pointee T is not handled by nanobind's regular "
                      "class binding mechanism. For example, you can write "
                      "a function that accepts int*, or std::vector<int>, "
                      "but not std::vector<int*>.");
    }
    return flags;
}
/// Type caster for all builtin arithmetic types (integers of any width and
/// signedness, plus float/double). Conversion from Python dispatches to the
/// width-specific detail::load_* helpers; conversion to Python produces a
/// Python 'int' or 'float'.
template <typename T>
struct type_caster<T, enable_if_t<std::is_arithmetic_v<T> && !is_std_char_v<T>>> {
    NB_INLINE bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept {
        if constexpr (std::is_floating_point_v<T>) {
            if constexpr (std::is_same_v<T, double>) {
                return detail::load_f64(src.ptr(), flags, &value);
            } else if constexpr (std::is_same_v<T, float>) {
                return detail::load_f32(src.ptr(), flags, &value);
            } else {
                // Other floating point types (e.g. long double): go through
                // 'double' and then narrow to T
                double d;
                if (!detail::load_f64(src.ptr(), flags, &d))
                    return false;
                T result = (T) d;
                // Accept the narrowed value if conversions are allowed, if it
                // round-trips exactly, or if both values are NaN (NaN never
                // compares equal to itself, hence the 'x != x' idiom)
                if ((flags & (uint8_t) cast_flags::convert)
                        || (double) result == d
                        || (result != result && d != d)) {
                    value = result;
                    return true;
                }
                return false;
            }
        } else {
            // Integer types: pick the loader matching T's size and signedness
            if constexpr (std::is_signed_v<T>) {
                if constexpr (sizeof(T) == 8)
                    return detail::load_i64(src.ptr(), flags, (int64_t *) &value);
                else if constexpr (sizeof(T) == 4)
                    return detail::load_i32(src.ptr(), flags, (int32_t *) &value);
                else if constexpr (sizeof(T) == 2)
                    return detail::load_i16(src.ptr(), flags, (int16_t *) &value);
                else
                    return detail::load_i8(src.ptr(), flags, (int8_t *) &value);
            } else {
                if constexpr (sizeof(T) == 8)
                    return detail::load_u64(src.ptr(), flags, (uint64_t *) &value);
                else if constexpr (sizeof(T) == 4)
                    return detail::load_u32(src.ptr(), flags, (uint32_t *) &value);
                else if constexpr (sizeof(T) == 2)
                    return detail::load_u16(src.ptr(), flags, (uint16_t *) &value);
                else
                    return detail::load_u8(src.ptr(), flags, (uint8_t *) &value);
            }
        }
    }
    NB_INLINE static handle from_cpp(T src, rv_policy, cleanup_list *) noexcept {
        if constexpr (std::is_floating_point_v<T>) {
            return PyFloat_FromDouble((double) src);
        } else {
            // Use the narrowest CPython constructor that can hold the value
            if constexpr (std::is_signed_v<T>) {
                if constexpr (sizeof(T) <= sizeof(long))
                    return PyLong_FromLong((long) src);
                else
                    return PyLong_FromLongLong((long long) src);
            } else {
                if constexpr (sizeof(T) <= sizeof(unsigned long))
                    return PyLong_FromUnsignedLong((unsigned long) src);
                else
                    return PyLong_FromUnsignedLongLong((unsigned long long) src);
            }
        }
    }
    NB_TYPE_CASTER(T, const_name<std::is_integral_v<T>>("int", "float"))
};
/// Type caster for C++ enumeration types bound via nb::enum_<..>. Both
/// directions delegate to runtime helpers keyed on the enum's type_info;
/// values are transported through a 64-bit integer.
template <typename T>
struct type_caster<T, enable_if_t<std::is_enum_v<T>>> {
    NB_INLINE bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept {
        int64_t result;
        bool rv = enum_from_python(&typeid(T), src.ptr(), &result, flags);
        // Note: 'value' is assigned unconditionally; it is only meaningful
        // when 'rv' is true
        value = (T) result;
        return rv;
    }
    NB_INLINE static handle from_cpp(T src, rv_policy, cleanup_list *) noexcept {
        return enum_from_cpp(&typeid(T), (int64_t) src);
    }
    NB_TYPE_CASTER(T, const_name<T>())
};
/// Caster for the placeholder 'void_type' (used to represent a 'None' return)
template <> struct type_caster<void_type> {
    static constexpr auto Name = const_name("None");
};
/// Caster for raw 'void *' pointers, transported as opaque "nb_handle"
/// capsules. Python 'None' maps to/from a null pointer.
template <> struct type_caster<void> {
    template <typename T_> using Cast = void *;
    template <typename T_> static constexpr bool can_cast() { return true; }
    using Value = void*;
    static constexpr auto Name = const_name(NB_TYPING_CAPSULE);
    explicit operator void *() { return value; }
    Value value;
    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        if (src.is_none()) {
            value = nullptr;
            return true;
        } else {
            // Only capsules carrying the "nb_handle" tag are accepted
            value = PyCapsule_GetPointer(src.ptr(), "nb_handle");
            if (!value) {
                // PyCapsule_GetPointer raised; clear the error and reject
                PyErr_Clear();
                return false;
            }
            return true;
        }
    }
    static handle from_cpp(void *ptr, rv_policy, cleanup_list *) noexcept {
        if (ptr)
            return PyCapsule_New(ptr, "nb_handle", nullptr);
        else
            return none().release();
    }
};
/// Base for casters of types whose only Python representation is 'None'
template <typename T> struct none_caster {
    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        if (src.is_none())
            return true;
        return false;
    }
    static handle from_cpp(T, rv_policy, cleanup_list *) noexcept {
        return none().release();
    }
    NB_TYPE_CASTER(T, const_name("None"))
};
/// 'std::nullptr_t' is exactly such a type: it maps to/from Python 'None'
template <> struct type_caster<std::nullptr_t> : none_caster<std::nullptr_t> { };
template <> struct type_caster<bool> {
bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
if (src.ptr() == Py_True) {
value = true;
return true;
} else if (src.ptr() == Py_False) {
value = false;
return true;
} else {
return false;
}
}
static handle from_cpp(bool src, rv_policy, cleanup_list *) noexcept {
return handle(src ? Py_True : Py_False).inc_ref();
}
NB_TYPE_CASTER(bool, const_name("bool"))
};
/// Type caster handling both 'char' (a single character) and 'const char *'
/// (a UTF-8 string). The stored pointer refers to storage owned by the
/// incoming Python 'str' object, which is why returning it is safe (see
/// flags_for_local_caster above).
template <> struct type_caster<char> {
    using Value = const char *;
    Value value;
    Py_ssize_t size;  // length of the UTF-8 string, excluding the terminator
    static constexpr auto Name = const_name("str");
    // 'char *' requests produce 'const char *'; plain 'char' requests
    // produce a single character
    template <typename T_>
    using Cast = std::conditional_t<is_pointer_v<T_>, const char *, char>;
    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        value = PyUnicode_AsUTF8AndSize(src.ptr(), &size);
        if (!value) {
            // Not a 'str' (or not encodable); clear the raised error
            PyErr_Clear();
            return false;
        }
        return true;
    }
    static handle from_cpp(const char *value, rv_policy,
                           cleanup_list *) noexcept {
        if (value == nullptr) {
            // A null C string maps to Python 'None'
            PyObject* result = Py_None;
            Py_INCREF(result);
            return result;
        }
        return PyUnicode_FromString(value);
    }
    static handle from_cpp(char value, rv_policy, cleanup_list *) noexcept {
        return PyUnicode_FromStringAndSize(&value, 1);
    }
    template <typename T_>
    NB_INLINE bool can_cast() const noexcept {
        // A single 'char' can only be extracted from a length-1 string
        return std::is_pointer_v<T_> || (value && size == 1);
    }
    explicit operator const char *() { return value; }
    explicit operator char() {
        if (can_cast<char>())
            return value[0];
        else
            throw next_overload();
    }
};
/// Caster that extracts both a C++ pointer and the originating Python handle
/// (used e.g. by __init__ bindings that must know the Python 'self' object)
template <typename T> struct type_caster<pointer_and_handle<T>> {
    using Caster = make_caster<T>;
    using T2 = pointer_and_handle<T>;
    NB_TYPE_CASTER(T2, Caster::Name)
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        Caster c;
        // Delegate to the underlying caster; the adjusted flags prevent it
        // from producing a pointer into soon-to-be-released temporary storage
        if (!c.from_python(src, flags_for_local_caster<T*>(flags), cleanup) ||
            !c.template can_cast<T*>())
            return false;
        value.h = src;
        value.p = c.operator T*();
        return true;
    }
};
/// Caster for the 'fallback' tag type: accepts any object, but only during
/// the second (implicit-conversion) pass of overload resolution
template <> struct type_caster<fallback> {
    NB_TYPE_CASTER(fallback, const_name("object"))
    bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept {
        if (!(flags & (uint8_t) cast_flags::convert))
            return false;
        value = src;
        return true;
    }
};
/// Customization point for the base portion of a 'typed<..>' signature name;
/// defaults to the underlying caster's name
template <typename T> struct typed_base_name {
    static constexpr auto Name = type_caster<T>::Name;
};
// Base case: typed<T, Ts...> renders as T[Ts...]
template <typename T, typename... Ts> struct typed_name {
    static constexpr auto Name =
        typed_base_name<intrinsic_t<T>>::Name + const_name("[") +
        concat(const_name<std::is_same_v<Ts, ellipsis>>(const_name("..."),
                                                        make_caster<Ts>::Name)...) + const_name("]");
};
// typed<object, T> or typed<handle, T> renders as T, rather than as
// the nonsensical object[T]
template <typename T> struct typed_name<object, T> {
    static constexpr auto Name = make_caster<T>::Name;
};
template <typename T> struct typed_name<handle, T> {
    static constexpr auto Name = make_caster<T>::Name;
};
// typed<callable, R(Args...)> renders as Callable[[Args...], R]
template <typename R, typename... Args>
struct typed_name<callable, R(Args...)> {
    // A C++ 'void' return renders as 'None' via the void_type placeholder
    using Ret = std::conditional_t<std::is_void_v<R>, void_type, R>;
    static constexpr auto Name =
        const_name("collections.abc.Callable[[") +
        concat(make_caster<Args>::Name...) + const_name("], ") +
        make_caster<Ret>::Name + const_name("]");
};
// typed<callable, R(...)> renders as Callable[..., R]
template <typename R>
struct typed_name<callable, R(...)> {
    using Ret = std::conditional_t<std::is_void_v<R>, void_type, R>;
    static constexpr auto Name =
        const_name("collections.abc.Callable[..., ") +
        make_caster<Ret>::Name + const_name("]");
};
/// Caster for 'typed<T, Ts...>': behaves exactly like the caster for T but
/// renders the richer signature produced by typed_name above
template <typename T, typename... Ts> struct type_caster<typed<T, Ts...>> {
    using Caster = make_caster<T>;
    using Typed = typed<T, Ts...>;
    NB_TYPE_CASTER(Typed, (typed_name<T, Ts...>::Name))
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        Caster caster;
        if (!caster.from_python(src, flags_for_local_caster<T>(flags), cleanup) ||
            !caster.template can_cast<T>())
            return false;
        value = caster.operator cast_t<T>();
        return true;
    }
    static handle from_cpp(const Value &src, rv_policy policy, cleanup_list *cleanup) noexcept {
        return Caster::from_cpp(src, policy, cleanup);
    }
};
/// Caster for nanobind's own wrapper types (handle, object, list, dict, ...):
/// anything deriving from api_tag that is not a 'typed<..>' wrapper
template <typename T>
struct type_caster<T, enable_if_t<std::is_base_of_v<detail::api_tag, T> && !T::nb_typed>> {
public:
    NB_TYPE_CASTER(T, T::Name)
    type_caster() : value(nullptr, ::nanobind::detail::steal_t()) { }
    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        if (!isinstance<T>(src))
            return false;
        // 'object' subclasses own a reference; bare 'handle' does not
        if constexpr (std::is_base_of_v<object, T>)
            value = borrow<T>(src);
        else
            value = src;
        return true;
    }
    static handle from_cpp(T&& src, rv_policy, cleanup_list *) noexcept {
        // Rvalue: steal the reference from an owning wrapper when possible
        if constexpr (std::is_base_of_v<object, T>)
            return src.release();
        else
            return src.inc_ref();
    }
    static handle from_cpp(const T &src, rv_policy, cleanup_list *) noexcept {
        return src.inc_ref();
    }
};
/// Map the 'automatic' return value policies to a concrete policy based on
/// the C++ return type T: raw pointers transfer ownership (or reference),
/// lvalue references are copied, and plain values are moved. Already-concrete
/// policies pass through unchanged (except that referencing policies make no
/// sense for a by-value return and also become moves).
template <typename T> NB_INLINE rv_policy infer_policy(rv_policy policy) {
    if constexpr (is_pointer_v<T>) {
        switch (policy) {
            case rv_policy::automatic:           return rv_policy::take_ownership;
            case rv_policy::automatic_reference: return rv_policy::reference;
            default:                             return policy;
        }
    } else if constexpr (std::is_lvalue_reference_v<T>) {
        const bool automatic = policy == rv_policy::automatic ||
                               policy == rv_policy::automatic_reference;
        return automatic ? rv_policy::copy : policy;
    } else {
        switch (policy) {
            case rv_policy::automatic:
            case rv_policy::automatic_reference:
            case rv_policy::reference:
            case rv_policy::reference_internal:
                return rv_policy::move;
            default:
                return policy;
        }
    }
}
/// Opt-in customization point: specialize 'type_hook<T>' with a static
/// 'get(T*)' returning a std::type_info* to override the dynamic type used
/// when converting T instances to Python
template <typename T, typename SFINAE = int> struct type_hook : std::false_type { };
/// The caster used for all types bound via nb::class_<..>. Conversions go
/// through the nb_type_get/nb_type_put runtime machinery.
template <typename Type_> struct type_caster_base : type_caster_base_tag {
    using Type = Type_;
    static constexpr auto Name = const_name<Type>();
    template <typename T> using Cast = precise_cast_t<T>;
    NB_INLINE bool from_python(handle src, uint8_t flags,
                               cleanup_list *cleanup) noexcept {
        return nb_type_get(&typeid(Type), src.ptr(), flags, cleanup,
                           (void **) &value);
    }
    template <typename T>
    NB_INLINE static handle from_cpp(T &&value, rv_policy policy,
                                     cleanup_list *cleanup) noexcept {
        // Normalize value/reference/pointer inputs to a plain pointer
        Type *ptr;
        if constexpr (is_pointer_v<T>)
            ptr = (Type *) value;
        else
            ptr = (Type *) &value;
        policy = infer_policy<T>(policy);
        const std::type_info *type = &typeid(Type);
        constexpr bool has_type_hook =
            !std::is_base_of_v<std::false_type, type_hook<Type>>;
        if constexpr (has_type_hook)
            type = type_hook<Type>::get(ptr);
        if constexpr (!std::is_polymorphic_v<Type>) {
            return nb_type_put(type, ptr, policy, cleanup);
        } else {
            // For polymorphic types, also pass the dynamic type so the
            // runtime can locate the most-derived bound class
            const std::type_info *type_p =
                (!has_type_hook && ptr) ? &typeid(*ptr) : nullptr;
            return nb_type_put_p(type, type_p, ptr, policy, cleanup);
        }
    }
    template <typename T_>
    bool can_cast() const noexcept {
        // A null result is fine for pointer requests ('None' -> nullptr),
        // but not for references
        return std::is_pointer_v<T_> || (value != nullptr);
    }
    operator Type*() { return value; }
    operator Type&() {
        raise_next_overload_if_null(value);
        return *value;
    }
    operator Type&&() {
        raise_next_overload_if_null(value);
        return (Type &&) *value;
    }
private:
    Type *value;
};
/// Unless specialized elsewhere, every type uses the base caster above
template <typename Type, typename SFINAE>
struct type_caster : type_caster_base<Type> { };
/// Implementation of nb::cast<T>(handle): convert a Python object to a C++
/// value, raising on failure. 'Convert' selects whether implicit conversions
/// are permitted.
template <bool Convert, typename T>
T cast_impl(handle h) {
    using Caster = detail::make_caster<T>;
    // A returned reference/pointer would usually refer into the type_caster
    // object, which will be destroyed before the returned value can be used,
    // so we prohibit it by default, with two exceptions that we know are safe:
    //
    // - If we're casting to a bound object type, the returned pointer points
    //   into storage owned by that object, not the type caster. Note this is
    //   only safe if we don't allow implicit conversions, because the pointer
    //   produced after an implicit conversion points into storage owned by
    //   a temporary object in the cleanup list, and we have to release those
    //   temporaries before we return.
    //
    // - If we're casting to const char*, the caster was provided by nanobind,
    //   and we know it will only accept Python 'str' objects, producing
    //   a pointer to storage owned by that object.
    constexpr bool is_ref = std::is_reference_v<T> || std::is_pointer_v<T>;
    static_assert(
        !is_ref ||
        is_base_caster_v<Caster> ||
        std::is_same_v<const char *, T>,
        "nanobind::cast(): cannot return a reference to a temporary.");
    Caster caster;
    bool rv;
    if constexpr (Convert && !is_ref) {
        // Release the values in the cleanup list only after we
        // initialize the return object, since the initialization
        // might access those temporaries.
        struct raii_cleanup {
            cleanup_list list{nullptr};
            ~raii_cleanup() { list.release(); }
        } cleanup;
        rv = caster.from_python(h.ptr(),
                                ((uint8_t) cast_flags::convert) |
                                ((uint8_t) cast_flags::manual),
                                &cleanup.list);
        if (!rv)
            detail::raise_python_or_cast_error();
        return caster.operator cast_t<T>();
    } else {
        // No implicit conversions -> no temporaries -> no cleanup list needed
        rv = caster.from_python(h.ptr(), (uint8_t) cast_flags::manual, nullptr);
        if (!rv)
            detail::raise_python_or_cast_error();
        return caster.operator cast_t<T>();
    }
}
/// Implementation of nb::try_cast<T>(): like cast_impl above, but reports
/// failure via the boolean return value instead of raising, and writes the
/// result into 'out' only on success
template <bool Convert, typename T>
bool try_cast_impl(handle h, T &out) noexcept {
    using Caster = detail::make_caster<T>;
    // See comments in cast_impl above
    constexpr bool is_ref = std::is_reference_v<T> || std::is_pointer_v<T>;
    static_assert(
        !is_ref ||
        is_base_caster_v<Caster> ||
        std::is_same_v<const char *, T>,
        "nanobind::try_cast(): cannot return a reference to a temporary.");
    Caster caster;
    bool rv;
    if constexpr (Convert && !is_ref) {
        cleanup_list cleanup(nullptr);
        rv = caster.from_python(h.ptr(),
                                ((uint8_t) cast_flags::convert) |
                                ((uint8_t) cast_flags::manual),
                                &cleanup) &&
             caster.template can_cast<T>();
        if (rv) {
            out = caster.operator cast_t<T>();
        }
        cleanup.release(); // 'from_python' is 'noexcept', so this always runs
    } else {
        rv = caster.from_python(h.ptr(), (uint8_t) cast_flags::manual, nullptr) &&
             caster.template can_cast<T>();
        if (rv) {
            out = caster.operator cast_t<T>();
        }
    }
    return rv;
}
NAMESPACE_END(detail)
/// Convert a Python object to the C++ type 'T'. Raises on failure. When
/// 'convert' is true (the default), implicit conversions are attempted.
template <typename T, typename Derived>
NB_INLINE T cast(const detail::api<Derived> &value, bool convert = true) {
    if constexpr (std::is_same_v<T, void>) {
        // cast<void>() is a no-op; the value is intentionally ignored
        (void) value; (void) convert;
        return;
    } else {
        if (convert)
            return detail::cast_impl<true, T>(value);
        else
            return detail::cast_impl<false, T>(value);
    }
}
/// Non-throwing variant of cast(): writes to 'out' and returns success/failure
template <typename T, typename Derived>
NB_INLINE bool try_cast(const detail::api<Derived> &value, T &out, bool convert = true) noexcept {
    if (convert)
        return detail::try_cast_impl<true, T>(value, out);
    else
        return detail::try_cast_impl<false, T>(value, out);
}
/// Convert a C++ value to a Python object; raises on failure
template <typename T>
object cast(T &&value, rv_policy policy = rv_policy::automatic_reference) {
    handle h = detail::make_caster<T>::from_cpp((detail::forward_t<T>) value,
                                                policy, nullptr);
    if (!h.is_valid())
        detail::raise_python_or_cast_error();
    return steal(h);
}
/// Variant of the above that supports a 'parent' object, which is needed by
/// policies such as rv_policy::reference_internal
template <typename T>
object cast(T &&value, rv_policy policy, handle parent) {
    detail::cleanup_list cleanup(parent.ptr());
    handle h = detail::make_caster<T>::from_cpp((detail::forward_t<T>) value,
                                                policy, &cleanup);
    cleanup.release();
    if (!h.is_valid())
        detail::raise_python_or_cast_error();
    return steal(h);
}
/// Look up an existing Python object for 'value' without creating one
/// (rv_policy::none); the result may be an invalid/null object
template <typename T> object find(const T &value) noexcept {
    return steal(detail::make_caster<T>::from_cpp(value, rv_policy::none, nullptr));
}
/// Build a Python tuple from the given C++ arguments, converting each via its
/// type caster. Conversion failures are detected afterwards by tuple_check.
template <rv_policy policy = rv_policy::automatic, typename... Args>
tuple make_tuple(Args &&...args) {
    tuple result = steal<tuple>(PyTuple_New((Py_ssize_t) sizeof...(Args)));
    size_t nargs = 0;
    PyObject *o = result.ptr();
    // Fold expression: converts and stores each argument left-to-right
    (NB_TUPLE_SET_ITEM(o, nargs++,
                       detail::make_caster<Args>::from_cpp(
                           (detail::forward_t<Args>) args, policy, nullptr)
                           .ptr()),
     ...);
    // Raises if any element conversion produced a null pointer
    detail::tuple_check(o, sizeof...(Args));
    return result;
}
/// Bind a default value to a named argument annotation: nb::arg("x") = 42
template <typename T> arg_v arg::operator=(T &&value) const {
    return arg_v(*this, cast((detail::forward_t<T>) value));
}
/// Same, for the lock-requesting argument annotation variant
template <typename T> arg_locked_v arg_locked::operator=(T &&value) const {
    return arg_locked_v(*this, cast((detail::forward_t<T>) value));
}
/// Assignment through an accessor (obj.attr("x") = v, obj[k] = v, ...):
/// convert the C++ value and store it via the accessor implementation
template <typename Impl> template <typename T>
detail::accessor<Impl>& detail::accessor<Impl>::operator=(T &&value) {
    object result = cast((detail::forward_t<T>) value);
    Impl::set(m_base, m_key, result.ptr());
    return *this;
}
/// Append a converted value to a Python list; raises on failure
template <typename T> void list::append(T &&value) {
    object o = nanobind::cast((detail::forward_t<T>) value);
    if (PyList_Append(m_ptr, o.ptr()))
        raise_python_error();
}
/// Insert a converted value at the given position; raises on failure
template <typename T> void list::insert(Py_ssize_t index, T &&value) {
    object o = nanobind::cast((detail::forward_t<T>) value);
    if (PyList_Insert(m_ptr, index, o.ptr()))
        raise_python_error();
}
template <typename T> bool dict::contains(T&& key) const {
object o = nanobind::cast((detail::forward_t<T>) key);
int rv = PyDict_Contains(m_ptr, o.ptr());
if (rv == -1)
raise_python_error();
return rv == 1;
}
template <typename T> bool set::contains(T&& key) const {
object o = nanobind::cast((detail::forward_t<T>) key);
int rv = PySet_Contains(m_ptr, o.ptr());
if (rv == -1)
raise_python_error();
return rv == 1;
}
template <typename T> void set::add(T&& key) {
object o = nanobind::cast((detail::forward_t<T>) key);
int rv = PySet_Add(m_ptr, o.ptr());
if (rv == -1)
raise_python_error();
}
template <typename T> bool set::discard(T &&value) {
object o = nanobind::cast((detail::forward_t<T>) value);
int rv = PySet_Discard(m_ptr, o.ptr());
if (rv < 0)
raise_python_error();
return rv == 1;
}
template <typename T> bool frozenset::contains(T&& key) const {
object o = nanobind::cast((detail::forward_t<T>) key);
int rv = PySet_Contains(m_ptr, o.ptr());
if (rv == -1)
raise_python_error();
return rv == 1;
}
template <typename T> bool mapping::contains(T&& key) const {
object o = nanobind::cast((detail::forward_t<T>) key);
int rv = PyMapping_HasKey(m_ptr, o.ptr());
if (rv == -1)
raise_python_error();
return rv == 1;
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_class.h | C/C++ Header | /*
nanobind/nb_class.h: Functionality for binding C++ classes/structs
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Flags about a type that persist throughout its lifetime.
/// These occupy the low bits of type_data::flags; bits 19 and up are
/// reserved for the creation-time flags in 'type_init_flags' below.
enum class type_flags : uint32_t {
    /// Does the type provide a C++ destructor?
    is_destructible          = (1 << 0),
    /// Does the type provide a C++ copy constructor?
    is_copy_constructible    = (1 << 1),
    /// Does the type provide a C++ move constructor?
    is_move_constructible    = (1 << 2),
    /// Is the 'destruct' field of the type_data structure set?
    has_destruct             = (1 << 4),
    /// Is the 'copy' field of the type_data structure set?
    has_copy                 = (1 << 5),
    /// Is the 'move' field of the type_data structure set?
    has_move                 = (1 << 6),
    /// Internal: does the type maintain a list of implicit conversions?
    has_implicit_conversions = (1 << 7),
    /// Is this a python type that extends a bound C++ type?
    is_python_type           = (1 << 8),
    /// This type does not permit subclassing from Python
    is_final                 = (1 << 9),
    /// Instances of this type support dynamic attribute assignment
    has_dynamic_attr         = (1 << 10),
    /// The class uses an intrusive reference counting approach
    intrusive_ptr            = (1 << 11),
    /// Is this a class that inherits from enable_shared_from_this?
    /// If so, type_data::keep_shared_from_this_alive is also set.
    has_shared_from_this     = (1 << 12),
    /// Instances of this type can be referenced by 'weakref'
    is_weak_referenceable    = (1 << 13),
    /// A custom signature override was specified
    has_signature            = (1 << 14),
    /// The class implements __class_getitem__ similar to typing.Generic
    is_generic               = (1 << 15),
    /// Does the type implement a custom __new__ operator?
    has_new                  = (1 << 16),
    /// Does the type implement a custom __new__ operator that can take no args
    /// (except the type object)?
    has_nullary_new          = (1 << 17)
    // One more bit available without needing a larger reorganization
};
/// Flags about a type that are only relevant when it is being created.
/// These are currently stored in type_data::flags alongside the type_flags
/// for more efficient memory layout, but could move elsewhere if we run
/// out of flags.
enum class type_init_flags : uint32_t {
    /// Is the 'supplement' field of the type_init_data structure set?
    has_supplement           = (1 << 19),
    /// Is the 'doc' field of the type_init_data structure set?
    has_doc                  = (1 << 20),
    /// Is the 'base' field of the type_init_data structure set?
    has_base                 = (1 << 21),
    /// Is the 'base_py' field of the type_init_data structure set?
    has_base_py              = (1 << 22),
    /// This type provides extra PyType_Slot fields
    has_type_slots           = (1 << 23),
    // Mask covering all of the creation-time flag bits above
    all_init_flags           = (0x1f << 19)
};
// See internals.h
struct nb_alias_chain;
// Implicit conversions for C++ type bindings, used in type_data below
struct implicit_t {
    // Null-terminated list of C++ source types convertible to this type
    const std::type_info **cpp;
    // Null-terminated list of Python-side conversion predicates
    bool (**py)(PyTypeObject *, PyObject *, cleanup_list *) noexcept;
};
// Forward and reverse mappings for enumerations, used in type_data below
struct enum_tbl_t {
    void *fwd;  // name -> value direction (opaque table pointer)
    void *rev;  // value -> name direction (opaque table pointer)
};
/// Information about a type that persists throughout its lifetime
struct type_data {
    uint32_t size;          // sizeof() of the bound C++ type
    uint32_t align : 8;     // alignof() of the bound C++ type
    uint32_t flags : 24;    // combination of type_flags / type_init_flags bits
    const char *name;
    const std::type_info *type;
    PyTypeObject *type_py;
    nb_alias_chain *alias_chain;
#if defined(Py_LIMITED_API)
    PyObject* (*vectorcall)(PyObject *, PyObject * const*, size_t, PyObject *);
#endif
    void *init; // Constructor nb_func
    // Lifecycle callbacks; only valid when the corresponding
    // type_flags::has_* bit is set in 'flags'
    void (*destruct)(void *);
    void (*copy)(void *, const void *);
    void (*move)(void *, void *) noexcept;
    union {
        implicit_t implicit; // for C++ type bindings
        enum_tbl_t enum_tbl; // for enumerations
    };
    void (*set_self_py)(void *, PyObject *) noexcept;
    bool (*keep_shared_from_this_alive)(PyObject *) noexcept;
    uint32_t dictoffset;      // byte offset of the instance __dict__ slot
    uint32_t weaklistoffset;  // byte offset of the instance weakref slot
};
/// Information about a type that is only relevant when it is being created
struct type_init_data : type_data {
    PyObject *scope;
    const std::type_info *base;
    PyTypeObject *base_py;
    const char *doc;
    const PyType_Slot *type_slots;
    size_t supplement;  // requested supplement size in bytes
};
/// The following overload set applies the optional annotations passed to
/// nb::class_<..> (base class handle, docstring, type slots, etc.) to the
/// type_init_data record that describes the type being created.
NB_INLINE void type_extra_apply(type_init_data &t, const handle &h) {
    t.flags |= (uint32_t) type_init_flags::has_base_py;
    t.base_py = (PyTypeObject *) h.ptr();
}
NB_INLINE void type_extra_apply(type_init_data &t, const char *doc) {
    t.flags |= (uint32_t) type_init_flags::has_doc;
    t.doc = doc;
}
NB_INLINE void type_extra_apply(type_init_data &t, type_slots c) {
    t.flags |= (uint32_t) type_init_flags::has_type_slots;
    t.type_slots = c.value;
}
template <typename T>
NB_INLINE void type_extra_apply(type_init_data &t, intrusive_ptr<T> ip) {
    t.flags |= (uint32_t) type_flags::intrusive_ptr;
    t.set_self_py = (void (*)(void *, PyObject *) noexcept) ip.set_self_py;
}
NB_INLINE void type_extra_apply(type_init_data &t, is_final) {
    t.flags |= (uint32_t) type_flags::is_final;
}
NB_INLINE void type_extra_apply(type_init_data &t, dynamic_attr) {
    t.flags |= (uint32_t) type_flags::has_dynamic_attr;
}
NB_INLINE void type_extra_apply(type_init_data & t, is_weak_referenceable) {
    t.flags |= (uint32_t) type_flags::is_weak_referenceable;
}
NB_INLINE void type_extra_apply(type_init_data & t, is_generic) {
    t.flags |= (uint32_t) type_flags::is_generic;
}
NB_INLINE void type_extra_apply(type_init_data & t, const sig &s) {
    t.flags |= (uint32_t) type_flags::has_signature;
    t.name = s.value;
}
NB_INLINE void type_extra_apply(type_init_data &, never_destruct) {
    // intentionally empty
}
template <typename T>
NB_INLINE void type_extra_apply(type_init_data &t, supplement<T>) {
    static_assert(std::is_trivially_default_constructible_v<T>,
                  "The supplement must be a POD (plain old data) type");
    static_assert(alignof(T) <= alignof(void *),
                  "The alignment requirement of the supplement is too high.");
    // A supplement implies 'final': the extra storage cannot be subclassed
    t.flags |= (uint32_t) type_init_flags::has_supplement | (uint32_t) type_flags::is_final;
    t.supplement = sizeof(T);
}
/// Flags describing the behavior of a bound enumeration
enum class enum_flags : uint32_t {
    /// Is this an arithmetic enumeration?
    is_arithmetic = (1 << 1),
    /// Is the number type underlying the enumeration signed?
    is_signed     = (1 << 2),
    /// Is the underlying enumeration type Flag?
    is_flag       = (1 << 3)
};
/// Creation-time record describing a bound enumeration
struct enum_init_data {
    const std::type_info *type;
    PyObject *scope;
    const char *name;
    const char *docstr;
    uint32_t flags;  // combination of enum_flags bits
};
/// Overload set applying nb::enum_<..> annotations to enum_init_data
NB_INLINE void enum_extra_apply(enum_init_data &e, is_arithmetic) {
    e.flags |= (uint32_t) enum_flags::is_arithmetic;
}
NB_INLINE void enum_extra_apply(enum_init_data &e, is_flag) {
    e.flags |= (uint32_t) enum_flags::is_flag;
}
NB_INLINE void enum_extra_apply(enum_init_data &e, const char *doc) {
    e.docstr = doc;
}
/// Catch-all overload that rejects any other annotation at compile time
template <typename T>
NB_INLINE void enum_extra_apply(enum_init_data &, T) {
    static_assert(
        std::is_void_v<T>,
        "Invalid enum binding annotation. The implementation of "
        "enums changed nanobind 2.0.0: only nb::is_arithmetic and "
        "docstrings can be passed since this change.");
}
/// Placement-copy-construct a 'T' from 'src' into the raw buffer 'dst'
template <typename T> void wrap_copy(void *dst, const void *src) {
    new ((T *) dst) T(*(const T *) src);
}
/// Placement-move-construct a 'T' out of 'src' into the raw buffer 'dst'
template <typename T> void wrap_move(void *dst, void *src) noexcept {
    new ((T *) dst) T(std::move(*(T *) src));
}
/// Invoke the destructor of the 'T' stored at 'value' (does not free memory)
template <typename T> void wrap_destruct(void *value) noexcept {
    ((T *) value)->~T();
}
/// Compile-time search: scan the parameter pack 'Ts...' for the first type
/// satisfying the binary predicate 'Pred<T, Tv>'; falls back to 'T' itself
/// when no candidate matches
template <typename, template <typename, typename> typename, typename...>
struct extract;
template <typename T, template <typename, typename> typename Pred>
struct extract<T, Pred> {
    using type = T;
};
template <typename T, template <typename, typename> typename Pred,
          typename Tv, typename... Ts>
struct extract<T, Pred, Tv, Ts...> {
    using type = std::conditional_t<
        Pred<T, Tv>::value,
        Tv,
        typename extract<T, Pred, Ts...>::type
    >;
};
// Predicates used with 'extract' to locate the alias/base class annotations
// among the extra template arguments of nb::class_<..>
template <typename T, typename Arg> using is_alias = std::is_base_of<T, Arg>;
template <typename T, typename Arg> using is_base = std::is_base_of<Arg, T>;
enum op_id : int;
enum op_type : int;
struct undefined_t;
template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t> struct op_;
// The header file include/nanobind/stl/detail/traits.h extends this type trait
template <typename T, typename SFINAE = int>
struct is_copy_constructible : std::is_copy_constructible<T> { };
template <typename T>
constexpr bool is_copy_constructible_v = is_copy_constructible<T>::value;
NAMESPACE_END(detail)
// Low level access to nanobind type objects
inline bool type_check(handle h) { return detail::nb_type_check(h.ptr()); }
inline size_t type_size(handle h) { return detail::nb_type_size(h.ptr()); }
inline size_t type_align(handle h) { return detail::nb_type_align(h.ptr()); }
inline const std::type_info& type_info(handle h) { return *detail::nb_type_info(h.ptr()); }
template <typename T>
inline T &type_supplement(handle h) { return *(T *) detail::nb_type_supplement(h.ptr()); }
inline str type_name(handle h) { return steal<str>(detail::nb_type_name(h.ptr())); }
// Low level access to nanobind instance objects
inline bool inst_check(handle h) { return type_check(h.type()); }
inline str inst_name(handle h) {
    return steal<str>(detail::nb_inst_name(h.ptr()));
}
// Allocate an (uninitialized) instance of the given nanobind type
inline object inst_alloc(handle h) {
    return steal(detail::nb_inst_alloc((PyTypeObject *) h.ptr()));
}
// Allocate a zero-initialized instance of the given nanobind type
inline object inst_alloc_zero(handle h) {
    return steal(detail::nb_inst_alloc_zero((PyTypeObject *) h.ptr()));
}
// Wrap an existing C++ object 'p'; the instance assumes ownership
inline object inst_take_ownership(handle h, void *p) {
    return steal(detail::nb_inst_take_ownership((PyTypeObject *) h.ptr(), p));
}
// Wrap an existing C++ object 'p' without ownership; 'parent' (if given)
// is kept alive for the lifetime of the new instance
inline object inst_reference(handle h, void *p, handle parent = handle()) {
    return steal(detail::nb_inst_reference((PyTypeObject *) h.ptr(), p, parent.ptr()));
}
inline void inst_zero(handle h) { detail::nb_inst_zero(h.ptr()); }
// Update the (ready, destruct) state bits of an instance
inline void inst_set_state(handle h, bool ready, bool destruct) {
    detail::nb_inst_set_state(h.ptr(), ready, destruct);
}
// Query the (ready, destruct) state bits of an instance
inline std::pair<bool, bool> inst_state(handle h) {
    return detail::nb_inst_state(h.ptr());
}
inline void inst_mark_ready(handle h) { inst_set_state(h, true, true); }
inline bool inst_ready(handle h) { return inst_state(h).first; }
inline void inst_destruct(handle h) { detail::nb_inst_destruct(h.ptr()); }
inline void inst_copy(handle dst, handle src) { detail::nb_inst_copy(dst.ptr(), src.ptr()); }
inline void inst_move(handle dst, handle src) { detail::nb_inst_move(dst.ptr(), src.ptr()); }
inline void inst_replace_copy(handle dst, handle src) { detail::nb_inst_replace_copy(dst.ptr(), src.ptr()); }
inline void inst_replace_move(handle dst, handle src) { detail::nb_inst_replace_move(dst.ptr(), src.ptr()); }
// Access the C++ payload embedded in a nanobind instance
template <typename T> T *inst_ptr(handle h) { return (T *) detail::nb_inst_ptr(h.ptr()); }
// Fetch a slot from a (possibly limited-API) Python type object
inline void *type_get_slot(handle h, int slot_id) {
#if NB_TYPE_GET_SLOT_IMPL
    return detail::type_get_slot((PyTypeObject *) h.ptr(), slot_id);
#else
    return PyType_GetSlot((PyTypeObject *) h.ptr(), slot_id);
#endif
}
/// CRTP base for objects that can be passed to class_::def(..) and apply a
/// batch of definitions to the class (e.g. nb::init<..> below)
template <typename Visitor> struct def_visitor {
  protected:
    // Ensure def_visitor<T> can only be derived from, not constructed
    // directly
    def_visitor() {
        static_assert(std::is_base_of_v<def_visitor, Visitor>,
                      "def_visitor uses CRTP: def_visitor<T> should be "
                      "a base of T");
    }
};
/// Annotation binding a constructor taking the given argument types
template <typename... Args> struct init : def_visitor<init<Args...>> {
    template <typename T, typename... Ts> friend class class_;
    NB_INLINE init() {}
  private:
    template <typename Class, typename... Extra>
    NB_INLINE static void execute(Class &cl, const Extra&... extra) {
        using Type = typename Class::Type;
        using Alias = typename Class::Alias;
        cl.def(
            "__init__",
            [](pointer_and_handle<Type> v, Args... args) {
                // When an alias (trampoline) class exists, construct the
                // plain Type only for instances that are NOT Python-derived
                if constexpr (!std::is_same_v<Type, Alias> &&
                              std::is_constructible_v<Type, Args...>) {
                    if (!detail::nb_inst_python_derived(v.h.ptr())) {
                        new (v.p) Type{ (detail::forward_t<Args>) args... };
                        return;
                    }
                }
                new ((void *) v.p) Alias{ (detail::forward_t<Args>) args... };
            },
            extra...);
    }
};
/// Annotation binding a single-argument constructor that is also registered
/// as an implicit conversion from 'Arg' to the bound type
template <typename Arg> struct init_implicit : def_visitor<init_implicit<Arg>> {
    template <typename T, typename... Ts> friend class class_;
    NB_INLINE init_implicit() { }
  private:
    template <typename Class, typename... Extra>
    NB_INLINE static void execute(Class &cl, const Extra&... extra) {
        using Type = typename Class::Type;
        using Alias = typename Class::Alias;
        cl.def(
            "__init__",
            [](pointer_and_handle<Type> v, Arg arg) {
                // Same Type-vs-Alias dispatch as nb::init<..> above
                if constexpr (!std::is_same_v<Type, Alias> &&
                              std::is_constructible_v<Type, Arg>) {
                    if (!detail::nb_inst_python_derived(v.h.ptr())) {
                        new ((Type *) v.p) Type{ (detail::forward_t<Arg>) arg };
                        return;
                    }
                }
                new ((Alias *) v.p) Alias{ (detail::forward_t<Arg>) arg };
            }, is_implicit(), extra...);
        using Caster = detail::make_caster<Arg>;
        // For non-class argument types, additionally register a conversion
        // predicate so overload resolution can perform the implicit cast
        if constexpr (!detail::is_class_caster_v<Caster>) {
            detail::implicitly_convertible(
                [](PyTypeObject *, PyObject *src,
                   detail::cleanup_list *cleanup) noexcept -> bool {
                    return Caster().from_python(
                        src, detail::cast_flags::convert, cleanup);
                },
                &typeid(Type));
        }
    }
};
namespace detail {
// This is 'inline' so we can define it in a header and not pay
// for it if unused, and also 'noinline' so we don't generate
// multiple copies and produce code bloat.
//
// Called when a user-provided __new__ overload is registered: wraps
// nanobind's built-in zero-argument __new__ (so it participates in overload
// resolution instead of being replaced), or raises if the registration order
// would make the built-in overload unreachable.
NB_NOINLINE inline void wrap_base_new(handle cls, bool do_wrap) {
    if (PyCFunction_Check(cls.attr("__new__").ptr())) {
        if (do_wrap) {
            cpp_function_def(
                [](handle type) {
                    if (!type_check(type))
                        throw cast_error();
                    return inst_alloc(type);
                },
                scope(cls), name("__new__"));
        }
    } else {
        if (!do_wrap) {
            // We already defined the wrapper, so this zero-arg overload
            // would be unreachable. Raise an error rather than hiding it.
            raise("nanobind: %s must define its zero-argument __new__ "
                  "before any other overloads", type_name(cls).c_str());
        }
    }
}
// Call policy that ensures __new__ returns an instance of the correct
// Python type, even when deriving from the C++ class in Python
struct new_returntype_fixup_policy {
    static inline void precall(PyObject **, size_t,
                               detail::cleanup_list *) {}
    NB_NOINLINE static inline void postcall(PyObject **args, size_t,
                                            PyObject *&ret) {
        handle type_requested = args[0];
        if (ret == nullptr || !type_requested.is_type())
            return; // something strange about this call; don't meddle
        handle type_created = Py_TYPE(ret);
        if (type_created.is(type_requested))
            return; // already created the requested type so no fixup needed
        if (type_check(type_created) &&
            PyType_IsSubtype((PyTypeObject *) type_requested.ptr(),
                             (PyTypeObject *) type_created.ptr()) &&
            type_info(type_created) == type_info(type_requested)) {
            // The new_ constructor returned an instance of a bound type T.
            // The user wanted an instance of some python subclass S of T.
            // Since both wrap the same C++ type, we can satisfy the request
            // by returning a pyobject of type S that wraps a C++ T*, and
            // handling the lifetimes by having that pyobject keep the
            // already-created T pyobject alive.
            object wrapper = inst_reference(type_requested,
                                            inst_ptr<void>(ret),
                                            /* parent = */ ret);
            handle(ret).dec_ref();
            ret = wrapper.release().ptr();
        }
    }
};
}
/// def_visitor that binds a custom __new__ function (primary template;
/// specialized below on the callable's deduced signature)
template <typename Func, typename Sig = detail::function_signature_t<Func>>
struct new_;

template <typename Func, typename Return, typename... Args>
struct new_<Func, Return(Args...)> : def_visitor<new_<Func, Return(Args...)>> {
    std::remove_reference_t<Func> func;

    new_(Func &&f) : func((detail::forward_t<Func>) f) {}

    template <typename Class, typename... Extra>
    NB_INLINE void execute(Class &cl, const Extra&... extra) {
        // If this is the first __new__ overload we're defining, then wrap
        // nanobind's built-in __new__ so we overload with it instead of
        // replacing it; this is important for pickle support.
        // We can't do this if the user-provided __new__ takes no
        // arguments, because it would make an ambiguous overload set.
        constexpr size_t num_defaults =
            ((std::is_same_v<Extra, arg_v> ||
              std::is_same_v<Extra, arg_locked_v>) + ... + 0);
        constexpr size_t num_varargs =
            ((std::is_same_v<detail::intrinsic_t<Args>, args> ||
              std::is_same_v<detail::intrinsic_t<Args>, kwargs>) + ... + 0);
        detail::wrap_base_new(cl, sizeof...(Args) > num_defaults + num_varargs);

        // Discard the class argument Python passes to __new__ before
        // forwarding the remaining arguments to the user's callable
        auto wrapper = [func_ = (detail::forward_t<Func>) func](handle, Args... args) {
            return func_((detail::forward_t<Args>) args...);
        };

        auto policy = call_policy<detail::new_returntype_fixup_policy>();
        if constexpr ((std::is_base_of_v<arg, Extra> || ...)) {
            // If any argument annotations are specified, add another for the
            // extra class argument that we don't forward to Func, so visible
            // arg() annotations stay aligned with visible function arguments.
            cl.def_static("__new__", std::move(wrapper), arg("cls"), extra...,
                          policy);
        } else {
            cl.def_static("__new__", std::move(wrapper), extra..., policy);
        }
        // Provide a matching no-op __init__ so Python's object construction
        // protocol succeeds after the custom __new__ ran
        cl.def("__init__", [](handle, Args...) {}, extra...);
    }
};
template <typename Func> new_(Func&& f) -> new_<Func>;
/// Annotation wrapper: restrict the enclosed extra argument so that it is
/// applied only to the *setter* half of a read/write property definition.
template <typename Value> struct for_setter {
    Value value;
    for_setter(const Value &v) : value(v) { }
};

/// Annotation wrapper: restrict the enclosed extra argument so that it is
/// applied only to the *getter* half of a read/write property definition.
template <typename Value> struct for_getter {
    Value value;
    for_getter(const Value &v) : value(v) { }
};

// Deduction guides: decay the wrapped type so the annotation owns a copy
template <typename Value> for_getter(Value) -> for_getter<std::decay_t<Value>>;
template <typename Value> for_setter(Value) -> for_setter<std::decay_t<Value>>;

namespace detail {
    // Select annotations destined for the getter: pass plain annotations
    // through, unwrap for_getter, and drop for_setter (returns nullptr)
    template <typename A> auto filter_getter(const A &a) { return a; }
    template <typename A> auto filter_getter(const for_getter<A> &a) { return a.value; }
    template <typename A> std::nullptr_t filter_getter(const for_setter<A> &) { return nullptr; }
    // Select annotations destined for the setter: pass plain annotations
    // through, unwrap for_setter, and drop for_getter (returns nullptr)
    template <typename A> auto filter_setter(const A &a) { return a; }
    template <typename A> auto filter_setter(const for_setter<A> &a) { return a.value; }
    template <typename A> std::nullptr_t filter_setter(const for_getter<A> &) { return nullptr; }
}
/// Binds the C++ type 'T' as a new Python type. Optional extra template
/// arguments 'Ts...' may specify a base class and/or an alias (trampoline)
/// type; they are extracted below into 'Base' and 'Alias'.
template <typename T, typename... Ts>
class class_ : public object {
public:
    NB_OBJECT_DEFAULT(class_, object, "type", PyType_Check)
    using Type = T;
    using Base = typename detail::extract<T, detail::is_base, Ts...>::type;
    using Alias = typename detail::extract<T, detail::is_alias, Ts...>::type;

    // Instance size/alignment are stored in 32/8-bit fields of the type record
    static_assert(sizeof(Alias) < (((uint64_t) 1) << 32), "Instance size is too big!");
    static_assert(alignof(Alias) < (1 << 8), "Instance alignment is too big!");
    static_assert(
        sizeof...(Ts) == !std::is_same_v<Base, T> + !std::is_same_v<Alias, T>,
        "nanobind::class_<> was invoked with extra arguments that could not be handled");

    static_assert(
        detail::is_base_caster_v<detail::make_caster<Type>>,
        "You attempted to bind a type that is already intercepted by a type "
        "caster. Having both at the same time is not allowed. Are you perhaps "
        "binding an STL type, while at the same time including a matching "
        "type caster from <nanobind/stl/*>? Or did you perhaps forget to "
        "declare NB_MAKE_OPAQUE(..) to specifically disable the type caster "
        "catch-all for a specific type? Please review the documentation "
        "to learn about the difference between bindings and type casters.");

    /// Create the Python type object: collect size/alignment, base/alias
    /// info, copy/move/destruct capabilities, and any extra annotations
    /// into a type_init_data record, then hand it to nb_type_new().
    template <typename... Extra>
    NB_INLINE class_(handle scope, const char *name, const Extra &... extra) {
        detail::type_init_data d;

        d.flags = 0;
        d.align = (uint8_t) alignof(Alias);
        d.size = (uint32_t) sizeof(Alias);
        d.name = name;
        d.scope = scope.ptr();
        d.type = &typeid(T);

        if constexpr (!std::is_same_v<Base, T>) {
            d.base = &typeid(Base);
            d.flags |= (uint32_t) detail::type_init_flags::has_base;
        }

        // Record copy/move constructibility; only non-trivial operations
        // need an out-of-line wrapper function pointer
        if constexpr (detail::is_copy_constructible_v<T>) {
            d.flags |= (uint32_t) detail::type_flags::is_copy_constructible;

            if constexpr (!std::is_trivially_copy_constructible_v<T>) {
                d.flags |= (uint32_t) detail::type_flags::has_copy;
                d.copy = detail::wrap_copy<T>;
            }
        }

        if constexpr (std::is_move_constructible<T>::value) {
            d.flags |= (uint32_t) detail::type_flags::is_move_constructible;

            if constexpr (!std::is_trivially_move_constructible_v<T>) {
                d.flags |= (uint32_t) detail::type_flags::has_move;
                d.move = detail::wrap_move<T>;
            }
        }

        // The never_destruct annotation suppresses destructor registration
        constexpr bool has_never_destruct = (std::is_same_v<Extra, never_destruct> || ...);
        if constexpr (std::is_destructible_v<T> && !has_never_destruct) {
            d.flags |= (uint32_t) detail::type_flags::is_destructible;

            if constexpr (!std::is_trivially_destructible_v<T>) {
                d.flags |= (uint32_t) detail::type_flags::has_destruct;
                d.destruct = detail::wrap_destruct<T>;
            }
        }

        if constexpr (detail::has_shared_from_this_v<T>) {
            d.flags |= (uint32_t) detail::type_flags::has_shared_from_this;
            d.keep_shared_from_this_alive = [](PyObject *self) noexcept {
                // weak_from_this().lock() is equivalent to shared_from_this(),
                // except that it returns an empty shared_ptr instead of
                // throwing an exception if there is no active shared_ptr
                // for this object. (Added in C++17.)
                if (auto sp = inst_ptr<T>(self)->weak_from_this().lock()) {
                    detail::keep_alive(self, new auto(std::move(sp)),
                                       [](void *p) noexcept {
                                           delete (decltype(sp) *) p;
                                       });
                    return true;
                }
                return false;
            };
        }

        (detail::type_extra_apply(d, extra), ...);

        m_ptr = detail::nb_type_new(&d);
    }

    /// Bind the callable 'f' as a method named 'name_'
    template <typename Func, typename... Extra>
    NB_INLINE class_ &def(const char *name_, Func &&f, const Extra &... extra) {
        cpp_function_def<T>((detail::forward_t<Func>) f, scope(*this),
                            name(name_), is_method(), extra...);
        return *this;
    }

    /// Apply a def_visitor (e.g. init<>, init_implicit<>, new_) to this class
    template <typename Visitor, typename... Extra>
    NB_INLINE class_ &def(def_visitor<Visitor> &&arg, const Extra &... extra) {
        static_cast<Visitor&&>(arg).execute(*this, extra...);
        return *this;
    }

    /// Bind the callable 'f' as a static method named 'name_'
    template <typename Func, typename... Extra>
    NB_INLINE class_ &def_static(const char *name_, Func &&f,
                                 const Extra &... extra) {
        static_assert(
            !std::is_member_function_pointer_v<Func>,
            "def_static(...) called with a non-static member function pointer");
        cpp_function_def((detail::forward_t<Func>) f, scope(*this), name(name_),
                         extra...);
        return *this;
    }

    /// Bind a read/write property backed by getter/setter callables.
    /// Pass nullptr for either to omit it; for_getter()/for_setter()
    /// wrappers route extra annotations to one side only.
    template <typename Getter, typename Setter, typename... Extra>
    NB_INLINE class_ &def_prop_rw(const char *name_, Getter &&getter,
                                  Setter &&setter, const Extra &...extra) {
        object get_p, set_p;

        if constexpr (!std::is_same_v<Getter, std::nullptr_t>)
            get_p = cpp_function<T>((detail::forward_t<Getter>) getter,
                                    is_method(), is_getter(),
                                    rv_policy::reference_internal,
                                    detail::filter_getter(extra)...);

        if constexpr (!std::is_same_v<Setter, std::nullptr_t>)
            set_p = cpp_function<T>((detail::forward_t<Setter>) setter,
                                    is_method(), detail::filter_setter(extra)...);

        detail::property_install(m_ptr, name_, get_p.ptr(), set_p.ptr());
        return *this;
    }

    /// Static variant of def_prop_rw(); getters return by rv_policy::reference
    template <typename Getter, typename Setter, typename... Extra>
    NB_INLINE class_ &def_prop_rw_static(const char *name_, Getter &&getter,
                                         Setter &&setter,
                                         const Extra &...extra) {
        object get_p, set_p;

        if constexpr (!std::is_same_v<Getter, std::nullptr_t>)
            get_p = cpp_function((detail::forward_t<Getter>) getter, is_getter(),
                                 rv_policy::reference,
                                 detail::filter_getter(extra)...);

        if constexpr (!std::is_same_v<Setter, std::nullptr_t>)
            set_p = cpp_function((detail::forward_t<Setter>) setter,
                                 detail::filter_setter(extra)...);

        detail::property_install_static(m_ptr, name_, get_p.ptr(), set_p.ptr());
        return *this;
    }

    /// Bind a read-only property (def_prop_rw with a null setter)
    template <typename Getter, typename... Extra>
    NB_INLINE class_ &def_prop_ro(const char *name_, Getter &&getter,
                                  const Extra &...extra) {
        return def_prop_rw(name_, getter, nullptr, extra...);
    }

    /// Bind a read-only static property
    template <typename Getter, typename... Extra>
    NB_INLINE class_ &def_prop_ro_static(const char *name_,
                                         Getter &&getter,
                                         const Extra &...extra) {
        return def_prop_rw_static(name_, getter, nullptr, extra...);
    }

    /// Expose the member 'p' as a read/write attribute
    template <typename C, typename D, typename... Extra>
    NB_INLINE class_ &def_rw(const char *name, D C::*p,
                             const Extra &...extra) {
        // Unions never satisfy is_base_of, thus the is_same alternative
        static_assert(std::is_base_of_v<C, T> || std::is_same_v<C, T>,
                      "def_rw() requires a (base) class member!");

        // Bound types are assigned from a const reference; caster-handled
        // types may be moved into place
        using Q =
            std::conditional_t<detail::is_base_caster_v<detail::make_caster<D>>,
                               const D &, D &&>;

        def_prop_rw(name,
            [p](const T &c) -> const D & { return c.*p; },
            [p](T &c, Q value) { c.*p = (Q) value; },
            extra...);

        return *this;
    }

    /// Expose the static/global variable '*p' as a read/write attribute
    template <typename D, typename... Extra>
    NB_INLINE class_ &def_rw_static(const char *name, D *p,
                                    const Extra &...extra) {
        using Q =
            std::conditional_t<detail::is_base_caster_v<detail::make_caster<D>>,
                               const D &, D &&>;

        def_prop_rw_static(name,
            [p](handle) -> const D & { return *p; },
            [p](handle, Q value) { *p = (Q) value; }, extra...);

        return *this;
    }

    /// Expose the member 'p' as a read-only attribute
    template <typename C, typename D, typename... Extra>
    NB_INLINE class_ &def_ro(const char *name, D C::*p,
                             const Extra &...extra) {
        // Unions never satisfy is_base_of, thus the is_same alternative
        static_assert(std::is_base_of_v<C, T> || std::is_same_v<C, T>,
                      "def_ro() requires a (base) class member!");

        def_prop_ro(name,
            [p](const T &c) -> const D & { return c.*p; }, extra...);

        return *this;
    }

    /// Expose the static/global variable '*p' as a read-only attribute
    template <typename D, typename... Extra>
    NB_INLINE class_ &def_ro_static(const char *name, D *p,
                                    const Extra &...extra) {
        def_prop_ro_static(name,
            [p](handle) -> const D & { return *p; }, extra...);

        return *this;
    }

    /// Bind an operator (created via the nanobind/operators.h helpers)
    template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
    class_ &def(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {
        op.execute(*this, extra...);
        return *this;
    }

    /// Bind an operator whose result is cast back to the bound type
    template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
    class_ & def_cast(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {
        op.execute_cast(*this, extra...);
        return *this;
    }
};
/// Binds the C++ enumeration 'T' as a Python enum-like type
template <typename T> class enum_ : public object {
public:
    static_assert(std::is_enum_v<T>, "nanobind::enum_<> requires an enumeration type!");

    using Base = class_<T>;
    using Underlying = std::underlying_type_t<T>;

    /// Create the Python enumeration type; signedness of the underlying
    /// integer type is recorded so values round-trip correctly
    template <typename... Extra>
    NB_INLINE enum_(handle scope, const char *name, const Extra &... extra) {
        detail::enum_init_data ed { };
        ed.type = &typeid(T);
        ed.scope = scope.ptr();
        ed.name = name;
        ed.flags = std::is_signed_v<Underlying>
                       ? (uint32_t) detail::enum_flags::is_signed
                       : 0;
        (detail::enum_extra_apply(ed, extra), ...);
        m_ptr = detail.enum_create(&ed);
    }

    /// Add an enumeration entry with an optional docstring
    NB_INLINE enum_ &value(const char *name, T value, const char *doc = nullptr) {
        detail::enum_append(m_ptr, name, (int64_t) value, doc);
        return *this;
    }

    /// Export all entries into the enclosing scope (C-style enum access)
    NB_INLINE enum_ &export_values() { detail::enum_export(m_ptr); return *this; }

    /// Bind the callable 'f' as a method of the enumeration type
    template <typename Func, typename... Extra>
    NB_INLINE enum_ &def(const char *name_, Func &&f, const Extra &... extra) {
        cpp_function_def<T>((detail::forward_t<Func>) f, scope(*this),
                            name(name_), is_method(), extra...);
        return *this;
    }

    /// Bind the callable 'f' as a static method of the enumeration type
    template <typename Func, typename... Extra>
    NB_INLINE enum_ &def_static(const char *name_, Func &&f,
                                const Extra &... extra) {
        static_assert(
            !std::is_member_function_pointer_v<Func>,
            "def_static(...) called with a non-static member function pointer");
        cpp_function_def((detail::forward_t<Func>) f, scope(*this), name(name_),
                         extra...);
        return *this;
    }

    /// Bind a read/write property; see class_::def_prop_rw for details
    template <typename Getter, typename Setter, typename... Extra>
    NB_INLINE enum_ &def_prop_rw(const char *name_, Getter &&getter,
                                 Setter &&setter, const Extra &...extra) {
        object get_p, set_p;

        if constexpr (!std::is_same_v<Getter, std::nullptr_t>)
            get_p = cpp_function<T>((detail::forward_t<Getter>) getter,
                                    is_method(), is_getter(),
                                    rv_policy::reference_internal,
                                    detail::filter_getter(extra)...);

        if constexpr (!std::is_same_v<Setter, std::nullptr_t>)
            set_p = cpp_function<T>((detail::forward_t<Setter>) setter,
                                    is_method(), detail::filter_setter(extra)...);

        detail::property_install(m_ptr, name_, get_p.ptr(), set_p.ptr());
        return *this;
    }

    /// Bind a read-only property (def_prop_rw with a null setter)
    template <typename Getter, typename... Extra>
    NB_INLINE enum_ &def_prop_ro(const char *name_, Getter &&getter,
                                 const Extra &...extra) {
        return def_prop_rw(name_, getter, nullptr, extra...);
    }
};
/// Register an implicit Source -> Target conversion that function dispatch
/// may apply when a bound 'Target' parameter receives a 'Source' value
template <typename Source, typename Target> void implicitly_convertible() {
    if constexpr (!std::is_same_v<Source, Target>) {
        using Caster = detail::make_caster<Source>;

        // NOTE(review): is_base_caster_v is applied to 'Target' directly
        // here rather than to make_caster<Target> — confirm intentional.
        static_assert(
            !std::is_enum_v<Target> || !detail::is_base_caster_v<Target>,
            "implicitly_convertible(): 'Target' cannot be an enumeration "
            "unless it is opaque.");

        if constexpr (detail::is_base_caster_v<Caster>) {
            // Both endpoints are bound types: record the C++ type pair
            detail::implicitly_convertible(&typeid(Source), &typeid(Target));
        } else {
            // 'Source' is handled by a type caster: register a callback
            // that attempts the conversion at call time
            detail::implicitly_convertible(
                [](PyTypeObject *, PyObject *src,
                   detail::cleanup_list *cleanup) noexcept -> bool {
                    return Caster().from_python(src, detail::cast_flags::convert,
                                                cleanup);
                },
                &typeid(Target));
        }
    }
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_defs.h | C/C++ Header | /*
nanobind/nb_defs.h: Preprocessor definitions used by the project
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
// -- Basic token manipulation helpers --
#define NB_STRINGIFY(x) #x
#define NB_TOSTRING(x) NB_STRINGIFY(x)
#define NB_CONCAT(first, second) first##second
#define NB_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code

#if !defined(NAMESPACE_BEGIN)
#  define NAMESPACE_BEGIN(name) namespace name {
#endif

#if !defined(NAMESPACE_END)
#  define NAMESPACE_END(name) }
#endif

// -- Symbol visibility, inlining, and loop-unrolling attributes --
#if defined(_WIN32)
#  define NB_EXPORT __declspec(dllexport)
#  define NB_IMPORT __declspec(dllimport)
#  define NB_INLINE __forceinline
#  define NB_NOINLINE __declspec(noinline)
#  define NB_INLINE_LAMBDA
#  define NB_NOUNROLL
#else
#  define NB_EXPORT __attribute__ ((visibility("default")))
#  define NB_IMPORT NB_EXPORT
#  define NB_INLINE inline __attribute__((always_inline))
#  define NB_NOINLINE __attribute__((noinline))
#  if defined(__clang__)
#    define NB_INLINE_LAMBDA __attribute__((always_inline))
#    define NB_NOUNROLL _Pragma("nounroll")
#  else
#    define NB_INLINE_LAMBDA
#    if defined(__GNUC__)
#      define NB_NOUNROLL _Pragma("GCC unroll 0")
#    else
#      define NB_NOUNROLL
#    endif
#  endif
#endif

// Hide nanobind's own symbols on ELF platforms to avoid cross-extension
// symbol clashes
#if defined(__GNUC__) && !defined(_WIN32)
#  define NB_NAMESPACE nanobind __attribute__((visibility("hidden")))
#else
#  define NB_NAMESPACE nanobind
#endif

// -- Branch-prediction hints --
#if defined(__GNUC__)
#  define NB_UNLIKELY(x) __builtin_expect(bool(x), 0)
#  define NB_LIKELY(x)   __builtin_expect(bool(x), 1)
#else
#  define NB_LIKELY(x) x
#  define NB_UNLIKELY(x) x
#endif

// NB_CORE marks the libnanobind API: exported when building the shared
// library, imported when consuming it, empty for static builds
#if defined(NB_SHARED)
#  if defined(NB_BUILD)
#    define NB_CORE NB_EXPORT
#  else
#    define NB_CORE NB_IMPORT
#  endif
#else
#  define NB_CORE
#endif

#if !defined(NB_SHARED) && defined(__GNUC__) && !defined(_WIN32)
#  define NB_EXPORT_SHARED __attribute__ ((visibility("hidden")))
#else
#  define NB_EXPORT_SHARED
#endif

#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
#  define NB_HAS_U8STRING
#endif

// CapsuleType moved into 'types' in Python 3.13
#if PY_VERSION_HEX < 0x030D0000
#  define NB_TYPING_CAPSULE "typing_extensions.CapsuleType"
#else
#  define NB_TYPING_CAPSULE "types.CapsuleType"
#endif

// -- Container accessors: use the stable-ABI functions under the limited
// API, otherwise the faster unchecked macro forms --
#if defined(Py_LIMITED_API)
#  if PY_VERSION_HEX < 0x030C0000 || defined(PYPY_VERSION)
#    error "nanobind can target Python's limited API, but this requires CPython >= 3.12"
#  endif
#  define NB_TUPLE_GET_SIZE PyTuple_Size
#  define NB_TUPLE_GET_ITEM PyTuple_GetItem
#  define NB_TUPLE_SET_ITEM PyTuple_SetItem
#  define NB_LIST_GET_SIZE PyList_Size
#  define NB_LIST_GET_ITEM PyList_GetItem
#  define NB_LIST_SET_ITEM PyList_SetItem
#  define NB_DICT_GET_SIZE PyDict_Size
#  define NB_SET_GET_SIZE PySet_Size
#else
#  define NB_TUPLE_GET_SIZE PyTuple_GET_SIZE
#  define NB_TUPLE_GET_ITEM PyTuple_GET_ITEM
#  define NB_TUPLE_SET_ITEM PyTuple_SET_ITEM
#  define NB_LIST_GET_SIZE PyList_GET_SIZE
#  define NB_LIST_GET_ITEM PyList_GET_ITEM
#  define NB_LIST_SET_ITEM PyList_SET_ITEM
#  define NB_DICT_GET_SIZE PyDict_GET_SIZE
#  define NB_SET_GET_SIZE PySet_GET_SIZE
#endif

#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x07030a00
#  error "nanobind requires a newer PyPy version (>= 7.3.10)"
#endif

#if defined(NB_FREE_THREADED) && !defined(Py_GIL_DISABLED)
#  error "Free-threaded extensions require a free-threaded version of Python"
#endif

// Optional ABI domain tag used to isolate incompatible extension sets
#if defined(NB_DOMAIN)
#  define NB_DOMAIN_STR NB_TOSTRING(NB_DOMAIN)
#else
#  define NB_DOMAIN_STR nullptr
#endif

// Fall back to custom implementations of newer CPython APIs where needed
#if !defined(PYPY_VERSION)
#  if PY_VERSION_HEX < 0x030A0000
#    define NB_TYPE_GET_SLOT_IMPL 1 // Custom implementation of nb::type_get_slot
#  else
#    define NB_TYPE_GET_SLOT_IMPL 0
#  endif
#  if PY_VERSION_HEX < 0x030C0000
#    define NB_TYPE_FROM_METACLASS_IMPL 1 // Custom implementation of PyType_FromMetaclass
#  else
#    define NB_TYPE_FROM_METACLASS_IMPL 0
#  endif
#else
#  define NB_TYPE_FROM_METACLASS_IMPL 1
#  define NB_TYPE_GET_SLOT_IMPL 1
#endif

// Version check: compile-time normally, runtime under the limited API
#if defined(Py_LIMITED_API)
#  define NB_DYNAMIC_VERSION Py_Version
#else
#  define NB_DYNAMIC_VERSION PY_VERSION_HEX
#endif

// -- Module definition slot lists, built up by Python version/feature --
#define NB_MODULE_SLOTS_0 { 0, nullptr }

#if PY_VERSION_HEX < 0x030C0000
#  define NB_MODULE_SLOTS_1 NB_MODULE_SLOTS_0
#else
#  define NB_MODULE_SLOTS_1                                          \
     { Py_mod_multiple_interpreters,                                 \
       Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED },                 \
     NB_MODULE_SLOTS_0
#endif

#if !defined(NB_FREE_THREADED)
#  define NB_MODULE_SLOTS_2 NB_MODULE_SLOTS_1
#else
#  define NB_MODULE_SLOTS_2                                          \
     { Py_mod_gil, Py_MOD_GIL_NOT_USED },                            \
     NB_MODULE_SLOTS_1
#endif

// Delete copy constructor/assignment of the given class
#define NB_NONCOPYABLE(X) \
    X(const X &) = delete; \
    X &operator=(const X &) = delete;

// Helper macros to ensure macro arguments are expanded before token pasting/stringification
#define NB_MODULE_IMPL(name, variable) NB_MODULE_IMPL2(name, variable)

// Expands to a multi-phase module definition (PEP 489): an exec slot that
// runs the user's body with exception -> ImportError translation, plus the
// PyInit_<name> entry point
#define NB_MODULE_IMPL2(name, variable)                               \
    static void nanobind_##name##_exec_impl(nanobind::module_);       \
    static int nanobind_##name##_exec(PyObject *m) {                  \
        nanobind::detail::nb_module_exec(NB_DOMAIN_STR, m);           \
        try {                                                         \
            nanobind_##name##_exec_impl(                              \
                nanobind::borrow<nanobind::module_>(m));              \
            return 0;                                                 \
        } catch (nanobind::python_error &e) {                         \
            e.restore();                                              \
            nanobind::chain_error(                                    \
                PyExc_ImportError,                                    \
                "Encountered an error while initializing the extension."); \
        } catch (const std::exception &e) {                           \
            PyErr_SetString(PyExc_ImportError, e.what());             \
        }                                                             \
        return -1;                                                    \
    }                                                                 \
    static PyModuleDef_Slot nanobind_##name##_slots[] = {             \
        { Py_mod_exec, (void *) nanobind_##name##_exec },             \
        NB_MODULE_SLOTS_2                                             \
    };                                                                \
    static struct PyModuleDef nanobind_##name##_module = {            \
        PyModuleDef_HEAD_INIT, #name, nullptr, 0, nullptr,            \
        nanobind_##name##_slots, nullptr, nullptr,                    \
        nanobind::detail::nb_module_free                              \
    };                                                                \
    extern "C" [[maybe_unused]] NB_EXPORT PyObject *PyInit_##name(void); \
    extern "C" PyObject *PyInit_##name(void) {                        \
        return PyModuleDef_Init(&nanobind_##name##_module);           \
    }                                                                 \
    void nanobind_##name##_exec_impl(nanobind::module_ variable)

// Public entry point: NB_MODULE(name, m) { ... bindings using 'm' ... }
#define NB_MODULE(name, variable) NB_MODULE_IMPL(name, variable)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_descr.h | C/C++ Header | /*
nanobind/nb_descr.h: Constexpr string class for function signatures
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Helper type for concatenating type signatures at compile time
/// Compile-time string of length 'N' whose referenced C++ types 'Ts...' can
/// later be substituted into '%' placeholders of the signature
template <size_t N, typename... Ts>
struct descr {
    char text[N + 1]{'\0'};

    constexpr descr() = default;
    // Construct from a string literal of exactly N characters (+ NUL)
    constexpr descr(char const (&s)[N+1]) : descr(s, std::make_index_sequence<N>()) { }

    template <size_t... Is>
    constexpr descr(char const (&s)[N+1], std::index_sequence<Is...>) : text{s[Is]..., '\0'} { }

    // Construct from individual characters
    template <typename... Cs>
    constexpr descr(char c, Cs... cs) : text{c, static_cast<char>(cs)..., '\0'} { }

    constexpr size_t type_count() const { return sizeof...(Ts); }
    constexpr size_t size() const { return N; }

    /// Write pointers to the referenced std::type_info records into 'out',
    /// followed by a null terminator
    NB_INLINE void put_types(const std::type_info **out) const {
        size_t ctr = 0;
        ((out[ctr++] = &typeid(Ts)), ...);
        out[ctr++] = nullptr;
    }
};
// Concatenate two descriptors (implementation detail of operator+)
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>
constexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b,
                                                   std::index_sequence<Is1...>, std::index_sequence<Is2...>) {
    return {a.text[Is1]..., b.text[Is2]...};
}

/// Concatenation operator: joins text and merges the referenced type packs
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b) {
    return plus_impl(a, b, std::make_index_sequence<N1>(), std::make_index_sequence<N2>());
}

/// Wrap a string literal in a compile-time descriptor
template <size_t N>
constexpr descr<N - 1> const_name(char const(&text)[N]) { return descr<N - 1>(text); }
constexpr descr<0> const_name(char const(&)[1]) { return {}; }

// Compile-time decimal rendering of an integer, digit by digit
template <size_t Rem, size_t... Digits>
struct int_to_str : int_to_str<Rem / 10, Rem % 10, Digits...> {};
template <size_t... Digits> struct int_to_str<0, Digits...> {
    static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);
};

/// Single-character descriptor
constexpr auto const_name(char c) { return descr<1>(c); }

// Ternary description (like std::conditional)
template <bool B, size_t N1, size_t N2>
constexpr auto const_name(char const(&text1)[N1], char const(&text2)[N2]) {
    (void) text1; (void) text2;
    if constexpr(B)
        return const_name(text1);
    else
        return const_name(text2);
}

// Ternary selection between two pre-built descriptors
template <bool B, typename T1, typename T2>
constexpr auto const_name(const T1 &d1, const T2 &d2) {
    (void) d1; (void) d2;
    if constexpr (B)
        return d1;
    else
        return d2;
}

// Use a different name based on whether the parameter is used as input or output
template <size_t N1, size_t N2>
constexpr auto io_name(char const (&text1)[N1], char const (&text2)[N2]) {
    return const_name('@') + const_name(text1) + const_name('@') +
           const_name(text2) + const_name('@');
}
// Spell optional/union types: PEP 604 "X | Y" syntax on Python >= 3.10,
// typing.Optional/typing.Union spelling on older versions
#if PY_VERSION_HEX < 0x030A0000
template <typename T> constexpr auto optional_name(const T &v) {
    return const_name("typing.Optional[") + v + const_name("]");
}
template <typename... Ts> constexpr auto union_name(const Ts&... vs) {
    return const_name("typing.Union[") + concat(vs...) + const_name("]");
}
#else
template <typename T> constexpr auto optional_name(const T &v) {
    return v + const_name(" | None");
}
template <typename T> constexpr auto union_name(const T &v) {
    return v;
}
template <typename T1, typename T2, typename... Ts>
constexpr auto union_name(const T1 &v1, const T2 &v2, const Ts &...vs) {
    return v1 + const_name(" | ") + union_name(v2, vs...);
}
#endif

/// Descriptor containing the decimal rendering of 'Size' (at most 2 digits)
template <size_t Size>
auto constexpr const_name() -> std::remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
    return int_to_str<Size / 10, Size % 10>::digits;
}

/// Placeholder descriptor: '%' is later substituted by the name of 'Type'
template <typename Type> constexpr descr<1, Type> const_name() { return {'%'}; }
// concat(): join descriptors with ", " separators (e.g. for argument lists)
constexpr descr<0> concat() { return {}; }
constexpr descr<0> concat_maybe() { return {}; }

template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) { return descr; }

template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat_maybe(const descr<N, Ts...> &descr) { return descr; }

template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args)
    -> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {
    return d + const_name(", ") + concat(args...);
}

// concat_maybe(): like concat(), but empty descriptors are skipped so they
// do not produce spurious ", " separators
template <typename... Args>
constexpr auto concat_maybe(const descr<0> &, const descr<0> &, const Args &...args)
    -> decltype(concat_maybe(args...)) { return concat_maybe(args...); }

template <size_t N, typename... Ts, typename... Args>
constexpr auto concat_maybe(const descr<0> &, const descr<N, Ts...> &arg, const Args &...args)
    -> decltype(concat_maybe(arg, args...)) { return concat_maybe(arg, args...); }

template <size_t N, typename... Ts, typename... Args>
constexpr auto concat_maybe(const descr<N, Ts...> &arg, const descr<0> &, const Args &...args)
    -> decltype(concat_maybe(arg, args...)) { return concat_maybe(arg, args...); }

template <size_t N, size_t N2, typename... Ts, typename... Ts2, typename... Args,
          enable_if_t<N != 0 && N2 != 0> = 0>
constexpr auto concat_maybe(const descr<N, Ts...> &arg0, const descr<N2, Ts2...> &arg1, const Args &...args)
    -> decltype(concat(arg0, concat_maybe(arg1, args...))) {
    return concat(arg0, concat_maybe(arg1, args...));
}

/// Wrap a descriptor in '{...}' braces marking a bound type signature
template <size_t N, typename... Ts>
constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
    return const_name("{") + descr + const_name("}");
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_enums.h | C/C++ Header | /*
nanobind/nb_enums.h: enumerations used in nanobind (just rv_policy atm.)
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
// Approach used to cast a previously unknown C++ instance into a Python object
enum class rv_policy {
    automatic,           // default: take_ownership for pointers, move otherwise
    automatic_reference, // like automatic, but 'reference' for pointers
    take_ownership,      // Python assumes ownership of the returned pointer
    copy,                // copy-construct a Python-owned instance
    move,                // move-construct a Python-owned instance
    reference,           // reference the C++ object; caller keeps it alive
    reference_internal,  // reference + keep the 'self' argument alive
    none                 // only return already-registered instances
    /* Note to self: nb_func.h assumes that this value fits into 3 bits,
       hence no further policies can be added. */
};
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_error.h | C/C++ Header | /*
nanobind/nb_error.h: Python exception handling, binding of exceptions
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
/// RAII wrapper that temporarily clears any Python error state
// Python 3.12+ stores the error state as a single exception object
#if PY_VERSION_HEX >= 0x030C0000
struct error_scope {
    error_scope() { value = PyErr_GetRaisedException(); }
    ~error_scope() { PyErr_SetRaisedException(value); }
private:
    PyObject *value;
};
#else
// Older versions use a (type, value, traceback) triple
struct error_scope {
    error_scope() { PyErr_Fetch(&type, &value, &trace); }
    ~error_scope() { PyErr_Restore(type, value, trace); }
private:
    PyObject *type, *value, *trace;
};
#endif
/// Wraps a Python error state as a C++ exception
/// Captures the active Python error state as a C++ exception so it can
/// propagate through C++ frames and later be restored or reported
class NB_EXPORT python_error : public std::exception {
public:
    NB_EXPORT_SHARED python_error();
    NB_EXPORT_SHARED python_error(const python_error &);
    NB_EXPORT_SHARED python_error(python_error &&) noexcept;
    NB_EXPORT_SHARED ~python_error() override;

    /// Check whether the stored error matches the given exception type
    bool matches(handle exc) const noexcept {
#if PY_VERSION_HEX < 0x030C0000
        return PyErr_GivenExceptionMatches(m_type, exc.ptr()) != 0;
#else
        return PyErr_GivenExceptionMatches(m_value, exc.ptr()) != 0;
#endif
    }

    /// Move the error back into the Python domain. This may only be called
    /// once, and you should not reraise the exception in C++ afterward.
    NB_EXPORT_SHARED void restore() noexcept;

    /// Pass the error to Python's `sys.unraisablehook`, which prints
    /// a traceback to `sys.stderr` by default but may be overridden.
    /// The *context* should be some object whose repr() helps clarify where
    /// the error occurred. Like `.restore()`, this consumes the error and
    /// you should not reraise the exception in C++ afterward.
    void discard_as_unraisable(handle context) noexcept {
        restore();
        PyErr_WriteUnraisable(context.ptr());
    }

    void discard_as_unraisable(const char *context) noexcept {
        object context_s = steal(PyUnicode_FromString(context));
        discard_as_unraisable(context_s);
    }

    /// Exception value; on < 3.12 type/traceback are stored separately
    handle value() const { return m_value; }

#if PY_VERSION_HEX < 0x030C0000
    handle type() const { return m_type; }
    object traceback() const { return borrow(m_traceback); }
#else
    handle type() const { return value().type(); }
    object traceback() const { return steal(PyException_GetTraceback(m_value)); }
#endif

    [[deprecated]]
    object trace() const { return traceback(); }

    /// Renders the error message (lazily computed, cached in m_what)
    NB_EXPORT_SHARED const char *what() const noexcept override;

private:
#if PY_VERSION_HEX < 0x030C0000
    mutable PyObject *m_type = nullptr;
    mutable PyObject *m_value = nullptr;
    mutable PyObject *m_traceback = nullptr;
#else
    mutable PyObject *m_value = nullptr;
#endif
    mutable char *m_what = nullptr;
};
/// Thrown by nanobind::cast when casting fails
/// Thrown by nanobind::cast when casting fails
using cast_error = std::bad_cast;

// Python exception categories that C++ code can raise via builtin_exception
enum class exception_type {
    runtime_error, stop_iteration, index_error, key_error, value_error,
    type_error, buffer_error, import_error, attribute_error, next_overload
};

// Base interface used to expose common Python exceptions in C++
class NB_EXPORT builtin_exception : public std::runtime_error {
public:
    NB_EXPORT_SHARED builtin_exception(exception_type type, const char *what);
    NB_EXPORT_SHARED builtin_exception(builtin_exception &&) = default;
    NB_EXPORT_SHARED builtin_exception(const builtin_exception &) = default;
    NB_EXPORT_SHARED ~builtin_exception();
    NB_EXPORT_SHARED exception_type type() const { return m_type; }
private:
    exception_type m_type;
};

// Generates a factory function per exception category, e.g.
// 'throw nb::key_error("...")'
#define NB_EXCEPTION(name)                                            \
    inline builtin_exception name(const char *what = nullptr) {       \
        return builtin_exception(exception_type::name, what);         \
    }

NB_EXCEPTION(stop_iteration)
NB_EXCEPTION(index_error)
NB_EXCEPTION(key_error)
NB_EXCEPTION(value_error)
NB_EXCEPTION(type_error)
NB_EXCEPTION(buffer_error)
NB_EXCEPTION(import_error)
NB_EXCEPTION(attribute_error)
NB_EXCEPTION(next_overload)

#undef NB_EXCEPTION
/// Register a callback that translates in-flight C++ exceptions into
/// Python errors; 'payload' is passed through to the translator
inline void register_exception_translator(detail::exception_translator t,
                                          void *payload = nullptr) {
    detail::register_exception_translator(t, payload);
}

/// Create a new Python exception type bound to the C++ exception type 'T',
/// and register a translator that converts thrown 'T' instances into it
template <typename T>
class exception : public object {
    NB_OBJECT_DEFAULT(exception, object, "Exception", PyExceptionClass_Check)

    exception(handle scope, const char *name, handle base = PyExc_Exception)
        : object(detail::exception_new(scope.ptr(), name, base.ptr()),
                 detail::steal_t()) {
        detail::register_exception_translator(
            [](const std::exception_ptr &p, void *payload) {
                try {
                    std::rethrow_exception(p);
                } catch (T &e) {
                    // 'payload' is the Python exception type created above
                    PyErr_SetString((PyObject *) payload, e.what());
                }
            }, m_ptr);
    }
};

/// printf-style: raise 'type' while chaining the currently active error
NB_CORE void chain_error(handle type, const char *fmt, ...) noexcept;

/// Raise 'type' with 'e' attached as the __cause__ of the new error
[[noreturn]] NB_CORE void raise_from(python_error &e, handle type, const char *fmt, ...);
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_func.h | C/C++ Header | /*
nanobind/nb_func.h: Functionality for binding C++ functions/methods
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
template <typename Caster>
bool from_python_remember_conv(Caster &c, PyObject **args, uint8_t *args_flags,
cleanup_list *cleanup, size_t index) {
size_t size_before = cleanup->size();
if (!c.from_python(args[index], args_flags[index], cleanup))
return false;
// If an implicit conversion took place, update the 'args' array so that
// any keep_alive annotation or postcall hook can be aware of this change
size_t size_after = cleanup->size();
if (size_after != size_before)
args[index] = (*cleanup)[size_after - 1];
return true;
}
// Return the number of nb::arg and nb::arg_v types in the first I types Ts.
// Invoke with std::make_index_sequence<sizeof...(Ts)>() to provide
// an index pack 'Is' that parallels the types pack Ts.
template <size_t I, typename... Ts, size_t... Is>
constexpr size_t count_args_before_index(std::index_sequence<Is...>) {
    static_assert(sizeof...(Is) == sizeof...(Ts));
    // Fold expression: count every Ts[Is] deriving from nb::arg with Is < I
    return ((Is < I && std::is_base_of_v<arg, Ts>) + ... + 0);
}
#if defined(NB_FREE_THREADED)
// Walks the Extra... annotation pack in lock-step with the positional
// arguments and records handles of the (at most two) arguments that carry a
// lock annotation, so they can be guarded during the call.
struct ft_args_collector {
    PyObject **args;
    handle h1;        // first locked argument encountered (if any)
    handle h2;        // last locked argument encountered (if any)
    size_t index = 0; // current position within 'args'
    NB_INLINE explicit ft_args_collector(PyObject **a) : args(a) {}
    NB_INLINE void apply(arg_locked *) {
        if (h1.ptr() == nullptr)
            h1 = args[index];
        h2 = args[index];
        ++index;
    }
    NB_INLINE void apply(arg *) { ++index; }
    NB_INLINE void apply(...) {}
};
// RAII guard: enters a Python critical section on up to two objects for the
// duration of a function call in free-threaded CPython builds.
struct ft_args_guard {
    NB_INLINE void lock(const ft_args_collector& info) {
        PyCriticalSection2_Begin(&cs, info.h1.ptr(), info.h2.ptr());
    }
    ~ft_args_guard() {
        PyCriticalSection2_End(&cs);
    }
    PyCriticalSection2 cs;
};
#endif
// Placeholder guard used when no argument locking is required
struct no_guard {};
/// Core function-binding routine: wraps the C++ callable 'func' in a Python
/// function object (via nb_func_new). All compile-time processing of the
/// annotation pack 'Extra' (argument names/defaults, kw_only, call guards,
/// locking, docstrings, ...) happens here. 'Is' is an index pack paralleling
/// 'Args'. If ReturnRef is true, a new reference is returned to the caller.
template <bool ReturnRef, bool CheckGuard, typename Func, typename Return,
          typename... Args, size_t... Is, typename... Extra>
NB_INLINE PyObject *func_create(Func &&func, Return (*)(Args...),
                                std::index_sequence<Is...> is,
                                const Extra &...extra) {
    using Info = func_extra_info<Extra...>;
    // If a nb::call_guard<..> annotation is present, wrap the callable so the
    // guard is instantiated around the invocation, then recurse with
    // CheckGuard=false to avoid wrapping a second time.
    if constexpr (CheckGuard && !std::is_same_v<typename Info::call_guard, void>) {
        return func_create<ReturnRef, false>(
            [func = (forward_t<Func>) func](Args... args) NB_INLINE_LAMBDA -> Return {
                typename Info::call_guard::type g;
                (void) g;
                return func((forward_t<Args>) args...);
            },
            (Return(*)(Args...)) nullptr, is, extra...);
    }
    (void) is;
    // Detect locations of nb::args / nb::kwargs (if they exist).
    // Find the first and last occurrence of each; we'll later make sure these
    // match, in order to guarantee there's only one instance.
    static constexpr size_t
        args_pos_1 = index_1_v<std::is_same_v<intrinsic_t<Args>, args>...>,
        args_pos_n = index_n_v<std::is_same_v<intrinsic_t<Args>, args>...>,
        kwargs_pos_1 = index_1_v<std::is_same_v<intrinsic_t<Args>, kwargs>...>,
        kwargs_pos_n = index_n_v<std::is_same_v<intrinsic_t<Args>, kwargs>...>,
        nargs = sizeof...(Args);
    constexpr bool has_arg_defaults = (detail::has_arg_defaults_v<Args> || ... || false);
    // Determine the number of nb::arg/nb::arg_v annotations
    constexpr size_t nargs_provided =
        (std::is_base_of_v<arg, Extra> + ... + 0);
    constexpr bool is_method_det =
        (std::is_same_v<is_method, Extra> + ... + 0) != 0;
    constexpr bool is_getter_det =
        (std::is_same_v<is_getter, Extra> + ... + 0) != 0;
    constexpr bool has_arg_annotations = has_arg_defaults || (nargs_provided > 0 && !is_getter_det);
    // Determine the number of potentially-locked function arguments
    constexpr bool lock_self_det =
        (std::is_same_v<lock_self, Extra> + ... + 0) != 0;
    static_assert(Info::nargs_locked <= 2,
                  "At most two function arguments can be locked");
    static_assert(!(lock_self_det && !is_method_det),
                  "The nb::lock_self() annotation only applies to methods");
    // Detect location of nb::kw_only annotation, if supplied. As with args/kwargs
    // we find the first and last location and later verify they match each other.
    // Note this is an index in Extra... while args/kwargs_pos_* are indices in
    // Args... .
    constexpr size_t
        kwonly_pos_1 = index_1_v<std::is_same_v<kw_only, Extra>...>,
        kwonly_pos_n = index_n_v<std::is_same_v<kw_only, Extra>...>;
    // Arguments after nb::args are implicitly keyword-only even if there is no
    // nb::kw_only annotation
    constexpr bool explicit_kw_only = kwonly_pos_1 != sizeof...(Extra);
    constexpr bool implicit_kw_only = args_pos_1 + 1 < kwargs_pos_1;
    // A few compile-time consistency checks
    static_assert(args_pos_1 == args_pos_n && kwargs_pos_1 == kwargs_pos_n,
                  "Repeated use of nb::kwargs or nb::args in the function signature!");
    static_assert(!has_arg_annotations || has_arg_defaults || nargs_provided + is_method_det == nargs,
                  "The number of nb::arg annotations must match the argument count!");
    static_assert(kwargs_pos_1 == nargs || kwargs_pos_1 + 1 == nargs,
                  "nb::kwargs must be the last element of the function signature!");
    static_assert(args_pos_1 == nargs || args_pos_1 < kwargs_pos_1,
                  "nb::args must precede nb::kwargs if both are present!");
    static_assert(has_arg_annotations || (!implicit_kw_only && !explicit_kw_only),
                  "Keyword-only arguments must have names!");
    // Find the index in Args... of the first keyword-only parameter. Since
    // the 'self' parameter doesn't get a nb::arg annotation, we must adjust
    // by 1 for methods. Note that nargs_before_kw_only is only used if
    // a kw_only annotation exists (i.e., if explicit_kw_only is true);
    // the conditional is just to save the compiler some effort otherwise.
    constexpr size_t nargs_before_kw_only =
        explicit_kw_only
            ? is_method_det + count_args_before_index<kwonly_pos_1, Extra...>(
                  std::make_index_sequence<sizeof...(Extra)>())
            : nargs;
    (void) kwonly_pos_n;
    if constexpr (explicit_kw_only) {
        static_assert(kwonly_pos_1 == kwonly_pos_n,
                      "Repeated use of nb::kw_only annotation!");
        // If both kw_only and *args are specified, kw_only must be
        // immediately after the nb::arg for *args.
        static_assert(args_pos_1 == nargs || nargs_before_kw_only == args_pos_1 + 1,
                      "Arguments after nb::args are implicitly keyword-only; any "
                      "nb::kw_only() annotation must be positioned to reflect that!");
        // If both kw_only and **kwargs are specified, kw_only must be
        // before the nb::arg for **kwargs.
        static_assert(nargs_before_kw_only < kwargs_pos_1,
                      "Variadic nb::kwargs are implicitly keyword-only; any "
                      "nb::kw_only() annotation must be positioned to reflect that!");
    }
    // Collect function signature information for the docstring
    using cast_out = make_caster<
        std::conditional_t<std::is_void_v<Return>, void_type, Return>>;
    // Compile-time function signature
    static constexpr auto descr =
        const_name("(") +
        concat(type_descr(
            make_caster<remove_opt_mono_t<intrinsic_t<Args>>>::Name)...) +
        const_name(") -> ") + cast_out::Name;
    // std::type_info for all function arguments
    const std::type_info* descr_types[descr.type_count() + 1];
    descr.put_types(descr_types);
    // Auxiliary data structure to capture the provided function/closure
    struct capture {
        std::remove_reference_t<Func> func;
    };
    // The following temporary record will describe the function in detail
    func_data_prelim<has_arg_defaults ? (nargs - is_method_det) : nargs_provided> f;
    // Initialize argument flags. The first branch turns std::optional<> types
    // into implicit nb::none() annotations (skipping 'self' for methods).
    if constexpr (has_arg_defaults) {
        ((void)(Is < is_method_det ||
                (f.args[Is - is_method_det] = { nullptr, nullptr, nullptr, nullptr,
                       has_arg_defaults_v<Args> ? (uint8_t) cast_flags::accepts_none
                                                : (uint8_t) 0 }, true)), ...);
    } else if constexpr (nargs_provided > 0) {
        for (size_t i = 0; i < nargs_provided; ++i)
            f.args[i].flag = 0;
    }
    f.flags = (args_pos_1 < nargs ? (uint32_t) func_flags::has_var_args : 0) |
              (kwargs_pos_1 < nargs ? (uint32_t) func_flags::has_var_kwargs : 0) |
              (ReturnRef ? (uint32_t) func_flags::return_ref : 0) |
              (has_arg_annotations ? (uint32_t) func_flags::has_args : 0);
    /* Store captured function inside 'func_data_prelim' if there is space. Issues
       with aliasing are resolved via separate compilation of libnanobind. */
    if constexpr (sizeof(capture) <= sizeof(f.capture)) {
        // Small captures are placement-constructed into the inline buffer
        capture *cap = (capture *) f.capture;
        new (cap) capture{ (forward_t<Func>) func };
        if constexpr (!std::is_trivially_destructible_v<capture>) {
            f.flags |= (uint32_t) func_flags::has_free;
            f.free_capture = [](void *p) {
                ((capture *) p)->~capture();
            };
        }
    } else {
        // Larger captures go on the heap; always needs a deleter
        void **cap = (void **) f.capture;
        cap[0] = new capture{ (forward_t<Func>) func };
        f.flags |= (uint32_t) func_flags::has_free;
        f.free_capture = [](void *p) {
            delete (capture *) ((void **) p)[0];
        };
    }
    // Trampoline invoked on every Python -> C++ call: converts arguments,
    // invokes the captured callable, and converts the result back. Returns
    // NB_NEXT_OVERLOAD if any argument fails to convert.
    f.impl = [](void *p, PyObject **args, uint8_t *args_flags, rv_policy policy,
                cleanup_list *cleanup) NB_INLINE_LAMBDA -> PyObject * {
        (void) p; (void) args; (void) args_flags; (void) policy; (void) cleanup;
        const capture *cap;
        if constexpr (sizeof(capture) <= sizeof(f.capture))
            cap = (capture *) p;
        else
            cap = (capture *) ((void **) p)[0];
        tuple<make_caster<Args>...> in;
        (void) in;
#if defined(NB_FREE_THREADED)
        std::conditional_t<Info::nargs_locked != 0, ft_args_guard, no_guard> guard;
        if constexpr (Info::nargs_locked) {
            ft_args_collector collector{args};
            if constexpr (is_method_det) {
                if constexpr (lock_self_det)
                    collector.apply((arg_locked *) nullptr);
                else
                    collector.apply((arg *) nullptr);
            }
            (collector.apply((Extra *) nullptr), ...);
            guard.lock(collector);
        }
#endif
        if constexpr (Info::pre_post_hooks) {
            std::integral_constant<size_t, nargs> nargs_c;
            (process_precall(args, nargs_c, cleanup, (Extra *) nullptr), ...);
            if ((!from_python_remember_conv(in.template get<Is>(), args,
                                            args_flags, cleanup, Is) || ...))
                return NB_NEXT_OVERLOAD;
        } else {
            if ((!in.template get<Is>().from_python(args[Is], args_flags[Is],
                                                    cleanup) || ...))
                return NB_NEXT_OVERLOAD;
        }
        PyObject *result;
        if constexpr (std::is_void_v<Return>) {
#if defined(_WIN32) && !defined(__CUDACC__) // temporary workaround for an internal compiler error in MSVC
            cap->func(static_cast<cast_t<Args>>(in.template get<Is>())...);
#else
            cap->func(in.template get<Is>().operator cast_t<Args>()...);
#endif
            result = Py_None;
            Py_INCREF(result);
        } else {
#if defined(_WIN32) && !defined(__CUDACC__) // temporary workaround for an internal compiler error in MSVC
            result = cast_out::from_cpp(
                cap->func(static_cast<cast_t<Args>>(in.template get<Is>())...),
                policy, cleanup).ptr();
#else
            result = cast_out::from_cpp(
                cap->func((in.template get<Is>())
                              .operator cast_t<Args>()...),
                policy, cleanup).ptr();
#endif
        }
        if constexpr (Info::pre_post_hooks) {
            std::integral_constant<size_t, nargs> nargs_c;
            (process_postcall(args, nargs_c, result, (Extra *) nullptr), ...);
        }
        return result;
    };
    f.descr = descr.text;
    f.descr_types = descr_types;
    f.nargs = nargs;
    // Set nargs_pos to the number of C++ function parameters (Args...) that
    // can be filled from Python positional arguments in a one-to-one fashion.
    // This ends at:
    // - the location of the variadic *args parameter, if present; otherwise
    // - the location of the first keyword-only parameter, if any; otherwise
    // - the location of the variadic **kwargs parameter, if present; otherwise
    // - the end of the parameter list
    // It's correct to give *args priority over kw_only because we verified
    // above that kw_only comes afterward if both are present. It's correct
    // to give kw_only priority over **kwargs because we verified above that
    // kw_only comes before if both are present.
    f.nargs_pos = args_pos_1 < nargs ? args_pos_1 :
                  explicit_kw_only ? nargs_before_kw_only :
                  kwargs_pos_1 < nargs ? kwargs_pos_1 : nargs;
    // Fill remaining fields of 'f'
    size_t arg_index = 0;
    (func_extra_apply(f, extra, arg_index), ...);
    (void) arg_index;
    return nb_func_new(&f);
}
NAMESPACE_END(detail)
// The initial template parameter to cpp_function/cpp_function_def is
// used by class_ to ensure that member pointers are treated as members
// of the class being defined; other users can safely leave it at its
// default of void.
/// Construct a cpp_function object (returned as a new reference) from an
/// ordinary C++ function pointer
template <typename = void, typename Return, typename... Args, typename... Extra>
NB_INLINE object cpp_function(Return (*f)(Args...), const Extra&... extra) {
    return steal(detail::func_create<true, true>(
        f, f, std::make_index_sequence<sizeof...(Args)>(), extra...));
}
/// Bind a C++ function pointer; unlike cpp_function(), no reference to the
/// resulting Python function object is returned to the caller
template <typename = void, typename Return, typename... Args, typename... Extra>
NB_INLINE void cpp_function_def(Return (*f)(Args...), const Extra&... extra) {
    detail::func_create<false, true>(
        f, f, std::make_index_sequence<sizeof...(Args)>(), extra...);
}
/// Construct a cpp_function from a lambda function (pot. with internal state)
template <
    typename = void, typename Func, typename... Extra,
    detail::enable_if_t<detail::is_lambda_v<std::remove_reference_t<Func>>> = 0>
NB_INLINE object cpp_function(Func &&f, const Extra &...extra) {
    // Inspect operator() to recover the lambda's argument list and return type
    using am = detail::analyze_method<decltype(&std::remove_reference_t<Func>::operator())>;
    return steal(detail::func_create<true, true>(
        (detail::forward_t<Func>) f, (typename am::func *) nullptr,
        std::make_index_sequence<am::argc>(), extra...));
}
/// Bind a lambda function (pot. with internal state); unlike cpp_function(),
/// no reference to the resulting Python function object is returned
template <
    typename = void, typename Func, typename... Extra,
    detail::enable_if_t<detail::is_lambda_v<std::remove_reference_t<Func>>> = 0>
NB_INLINE void cpp_function_def(Func &&f, const Extra &...extra) {
    // Inspect operator() to recover the lambda's argument list and return type
    using am = detail::analyze_method<decltype(&std::remove_reference_t<Func>::operator())>;
    detail::func_create<false, true>(
        (detail::forward_t<Func>) f, (typename am::func *) nullptr,
        std::make_index_sequence<am::argc>(), extra...);
}
/// Construct a cpp_function from a class method (non-const)
template <typename Target = void,
          typename Return, typename Class, typename... Args, typename... Extra>
NB_INLINE object cpp_function(Return (Class::*f)(Args...), const Extra &...extra) {
    // 'Target' allows re-binding a base-class method to a derived type
    using T = std::conditional_t<std::is_void_v<Target>, Class, Target>;
    return steal(detail::func_create<true, true>(
        // Wrap the member pointer in a lambda taking 'self' explicitly
        [f](T *c, Args... args) NB_INLINE_LAMBDA -> Return {
            return (c->*f)((detail::forward_t<Args>) args...);
        },
        (Return(*)(T *, Args...)) nullptr,
        std::make_index_sequence<sizeof...(Args) + 1>(), extra...));
}
/// Bind a class method (non-const); unlike cpp_function(), no reference to
/// the resulting Python function object is returned
template <typename Target = void,
          typename Return, typename Class, typename... Args, typename... Extra>
NB_INLINE void cpp_function_def(Return (Class::*f)(Args...), const Extra &...extra) {
    using T = std::conditional_t<std::is_void_v<Target>, Class, Target>;
    detail::func_create<false, true>(
        [f](T *c, Args... args) NB_INLINE_LAMBDA -> Return {
            return (c->*f)((detail::forward_t<Args>) args...);
        },
        (Return(*)(T *, Args...)) nullptr,
        std::make_index_sequence<sizeof...(Args) + 1>(), extra...);
}
/// Construct a cpp_function from a class method (const)
template <typename Target = void,
          typename Return, typename Class, typename... Args, typename... Extra>
NB_INLINE object cpp_function(Return (Class::*f)(Args...) const, const Extra &...extra) {
    // 'Target' allows re-binding a base-class method to a derived type
    using T = std::conditional_t<std::is_void_v<Target>, Class, Target>;
    return steal(detail::func_create<true, true>(
        // Wrap the member pointer in a lambda taking 'self' explicitly
        [f](const T *c, Args... args) NB_INLINE_LAMBDA -> Return {
            return (c->*f)((detail::forward_t<Args>) args...);
        },
        (Return(*)(const T *, Args...)) nullptr,
        std::make_index_sequence<sizeof...(Args) + 1>(), extra...));
}
/// Bind a class method (const); unlike cpp_function(), no reference to the
/// resulting Python function object is returned
template <typename Target = void,
          typename Return, typename Class, typename... Args, typename... Extra>
NB_INLINE void cpp_function_def(Return (Class::*f)(Args...) const, const Extra &...extra) {
    using T = std::conditional_t<std::is_void_v<Target>, Class, Target>;
    detail::func_create<false, true>(
        [f](const T *c, Args... args) NB_INLINE_LAMBDA -> Return {
            return (c->*f)((detail::forward_t<Args>) args...);
        },
        (Return(*)(const T *, Args...)) nullptr,
        std::make_index_sequence<sizeof...(Args) + 1>(), extra...);
}
/// Bind the callable 'f' within this module under the name 'name_'; extra
/// annotations (argument names, docstrings, policies) are forwarded
template <typename Func, typename... Extra>
module_ &module_::def(const char *name_, Func &&f, const Extra &...extra) {
    cpp_function_def((detail::forward_t<Func>) f, scope(*this),
                     name(name_), extra...);
    return *this;
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_lib.h | C/C++ Header | /*
nanobind/nb_lib.h: Interface to libnanobind.so
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(dlpack)
// The version of DLPack that is supported by libnanobind
static constexpr uint32_t major_version = 1;
static constexpr uint32_t minor_version = 1;
// Forward declarations for types in ndarray.h (1)
struct dltensor;
struct dtype;
NAMESPACE_END(dlpack)
NAMESPACE_BEGIN(detail)
// Forward declarations for types in ndarray.h (2)
struct ndarray_handle;
struct ndarray_config;
/**
 * Helper class to clean temporaries created by function dispatch.
 * The first element serves a special role: it stores the 'self'
 * object of method calls (for rv_policy::reference_internal).
 */
struct NB_CORE cleanup_list {
public:
    // Number of entries stored inline before switching to heap storage
    static constexpr uint32_t Small = 6;
    // Construct a list whose permanent first entry is the 'self' argument
    cleanup_list(PyObject *self) :
        m_size{1},
        m_capacity{Small},
        m_data{m_local} {
        m_local[0] = self;
    }
    // NOTE(review): the destructor does not call release(); the owner is
    // apparently expected to call release() explicitly — confirm in callers.
    ~cleanup_list() = default;
    /// Append a single PyObject to the cleanup stack
    NB_INLINE void append(PyObject *value) noexcept {
        if (m_size >= m_capacity)
            expand();
        m_data[m_size++] = value;
    }
    /// Return the 'self' argument stored in slot 0
    NB_INLINE PyObject *self() const {
        return m_local[0];
    }
    /// Decrease the reference count of all appended objects
    void release() noexcept;
    /// Does the list contain any entries? (besides the 'self' argument)
    bool used() { return m_size != 1; }
    /// Return the size of the cleanup stack
    size_t size() const { return m_size; }
    /// Subscript operator
    PyObject *operator[](size_t index) const { return m_data[index]; }
protected:
    /// Out of memory, expand..
    void expand() noexcept;
protected:
    uint32_t m_size;          // number of used entries (incl. 'self')
    uint32_t m_capacity;      // number of allocated entries
    PyObject **m_data;        // points to m_local or to heap storage
    PyObject *m_local[Small]; // inline storage for the common case
};
// ========================================================================
/// Raise a runtime error with the given message
#if defined(__GNUC__)
__attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
[[noreturn]]
#endif
NB_CORE void raise(const char *fmt, ...);
/// Raise a type error with the given message
#if defined(__GNUC__)
__attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
[[noreturn]]
#endif
NB_CORE void raise_type_error(const char *fmt, ...);
/// Abort the process with a fatal error
#if defined(__GNUC__)
__attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
[[noreturn]]
#endif
NB_CORE void fail(const char *fmt, ...) noexcept;
/// Raise nanobind::python_error after an error condition was found
[[noreturn]] NB_CORE void raise_python_error();
/// Raise nanobind::next_overload
NB_CORE void raise_next_overload_if_null(void *p);
/// Raise nanobind::cast_error
[[noreturn]] NB_CORE void raise_python_or_cast_error();
// ========================================================================
NB_CORE void nb_module_exec(const char *domain, PyObject *m);
NB_CORE void nb_module_free(void *m);
// ========================================================================
/// Convert a Python object into a Python unicode string
NB_CORE PyObject *str_from_obj(PyObject *o);
/// Convert an UTF8 null-terminated C string into a Python unicode string
NB_CORE PyObject *str_from_cstr(const char *c);
/// Convert an UTF8 C string + size into a Python unicode string
NB_CORE PyObject *str_from_cstr_and_size(const char *c, size_t n);
// ========================================================================
/// Convert a Python object into a Python byte string
NB_CORE PyObject *bytes_from_obj(PyObject *o);
/// Convert an UTF8 null-terminated C string into a Python byte string
NB_CORE PyObject *bytes_from_cstr(const char *c);
/// Convert a memory region into a Python byte string
NB_CORE PyObject *bytes_from_cstr_and_size(const void *c, size_t n);
// ========================================================================
/// Convert a Python object into a Python byte array
NB_CORE PyObject *bytearray_from_obj(PyObject *o);
/// Convert a memory region into a Python byte array
NB_CORE PyObject *bytearray_from_cstr_and_size(const void *c, size_t n);
// ========================================================================
/// Convert a Python object into a Python boolean object
NB_CORE PyObject *bool_from_obj(PyObject *o);
/// Convert a Python object into a Python integer object
NB_CORE PyObject *int_from_obj(PyObject *o);
/// Convert a Python object into a Python floating point object
NB_CORE PyObject *float_from_obj(PyObject *o);
// ========================================================================
/// Convert a Python object into a Python list
NB_CORE PyObject *list_from_obj(PyObject *o);
/// Convert a Python object into a Python tuple
NB_CORE PyObject *tuple_from_obj(PyObject *o);
/// Convert a Python object into a Python set
NB_CORE PyObject *set_from_obj(PyObject *o);
/// Convert a Python object into a Python frozenset
NB_CORE PyObject *frozenset_from_obj(PyObject *o);
/// Convert a Python object into a Python memoryview
NB_CORE PyObject *memoryview_from_obj(PyObject *o);
// ========================================================================
/// Get an object attribute or raise an exception
NB_CORE PyObject *getattr(PyObject *obj, const char *key);
NB_CORE PyObject *getattr(PyObject *obj, PyObject *key);
/// Get an object attribute or return a default value (never raises)
NB_CORE PyObject *getattr(PyObject *obj, const char *key, PyObject *def) noexcept;
NB_CORE PyObject *getattr(PyObject *obj, PyObject *key, PyObject *def) noexcept;
/// Get an object attribute or raise an exception. Skip if 'out' is non-null
NB_CORE void getattr_or_raise(PyObject *obj, const char *key, PyObject **out);
NB_CORE void getattr_or_raise(PyObject *obj, PyObject *key, PyObject **out);
/// Set an object attribute or raise an exception
NB_CORE void setattr(PyObject *obj, const char *key, PyObject *value);
NB_CORE void setattr(PyObject *obj, PyObject *key, PyObject *value);
/// Delete an object attribute or raise an exception
NB_CORE void delattr(PyObject *obj, const char *key);
NB_CORE void delattr(PyObject *obj, PyObject *key);
// ========================================================================
/// Index into an object or raise an exception. Skip if 'out' is non-null
NB_CORE void getitem_or_raise(PyObject *obj, Py_ssize_t, PyObject **out);
NB_CORE void getitem_or_raise(PyObject *obj, const char *key, PyObject **out);
NB_CORE void getitem_or_raise(PyObject *obj, PyObject *key, PyObject **out);
/// Set an item or raise an exception
NB_CORE void setitem(PyObject *obj, Py_ssize_t, PyObject *value);
NB_CORE void setitem(PyObject *obj, const char *key, PyObject *value);
NB_CORE void setitem(PyObject *obj, PyObject *key, PyObject *value);
/// Delete an item or raise an exception
NB_CORE void delitem(PyObject *obj, Py_ssize_t);
NB_CORE void delitem(PyObject *obj, const char *key);
NB_CORE void delitem(PyObject *obj, PyObject *key);
// ========================================================================
/// Determine the length of a Python object
NB_CORE size_t obj_len(PyObject *o);
/// Try to roughly determine the length of a Python object
NB_CORE size_t obj_len_hint(PyObject *o) noexcept;
/// Obtain a string representation of a Python object
NB_CORE PyObject* obj_repr(PyObject *o);
/// Perform a comparison between Python objects and handle errors
NB_CORE bool obj_comp(PyObject *a, PyObject *b, int value);
/// Perform an unary operation on a Python object with error handling
NB_CORE PyObject *obj_op_1(PyObject *a, PyObject* (*op)(PyObject*));
/// Perform an unary operation on a Python object with error handling
NB_CORE PyObject *obj_op_2(PyObject *a, PyObject *b,
PyObject *(*op)(PyObject *, PyObject *));
// Perform a vector function call
NB_CORE PyObject *obj_vectorcall(PyObject *base, PyObject *const *args,
size_t nargsf, PyObject *kwnames,
bool method_call);
/// Create an iterator from 'o', raise an exception in case of errors
NB_CORE PyObject *obj_iter(PyObject *o);
/// Advance the iterator 'o', raise an exception in case of errors
NB_CORE PyObject *obj_iter_next(PyObject *o);
// ========================================================================
// Conversion validity check done by nb::make_tuple
NB_CORE void tuple_check(PyObject *tuple, size_t nargs);
// ========================================================================
// Append a single argument to a function call
NB_CORE void call_append_arg(PyObject *args, size_t &nargs, PyObject *value);
// Append a variable-length sequence of arguments to a function call
NB_CORE void call_append_args(PyObject *args, size_t &nargs, PyObject *value);
// Append a single keyword argument to a function call
NB_CORE void call_append_kwarg(PyObject *kwargs, const char *name, PyObject *value);
// Append a variable-length dictionary of keyword arguments to a function call
NB_CORE void call_append_kwargs(PyObject *kwargs, PyObject *value);
// ========================================================================
// If the given sequence has the size 'size', return a pointer to its contents.
// May produce a temporary.
NB_CORE PyObject **seq_get_with_size(PyObject *seq, size_t size,
PyObject **temp) noexcept;
// Like the above, but return the size instead of checking it.
NB_CORE PyObject **seq_get(PyObject *seq, size_t *size,
PyObject **temp) noexcept;
// ========================================================================
/// Create a new capsule object with a name
NB_CORE PyObject *capsule_new(const void *ptr, const char *name,
void (*cleanup)(void *) noexcept) noexcept;
// ========================================================================
// Forward declaration for type in nb_attr.h
struct func_data_prelim_base;
/// Create a Python function object for the given function record
NB_CORE PyObject *nb_func_new(const func_data_prelim_base *f) noexcept;
// ========================================================================
/// Create a Python type object for the given type record
struct type_init_data;
NB_CORE PyObject *nb_type_new(const type_init_data *c) noexcept;
/// Extract a pointer to a C++ type underlying a Python object, if possible
NB_CORE bool nb_type_get(const std::type_info *t, PyObject *o, uint8_t flags,
cleanup_list *cleanup, void **out) noexcept;
/// Cast a C++ type instance into a Python object
NB_CORE PyObject *nb_type_put(const std::type_info *cpp_type, void *value,
rv_policy rvp, cleanup_list *cleanup,
bool *is_new = nullptr) noexcept;
// Special version of nb_type_put for polymorphic classes
NB_CORE PyObject *nb_type_put_p(const std::type_info *cpp_type,
const std::type_info *cpp_type_p, void *value,
rv_policy rvp, cleanup_list *cleanup,
bool *is_new = nullptr) noexcept;
// Special version of 'nb_type_put' for unique pointers and ownership transfer
NB_CORE PyObject *nb_type_put_unique(const std::type_info *cpp_type,
void *value, cleanup_list *cleanup,
bool cpp_delete) noexcept;
// Special version of 'nb_type_put_unique' for polymorphic classes
NB_CORE PyObject *nb_type_put_unique_p(const std::type_info *cpp_type,
const std::type_info *cpp_type_p,
void *value, cleanup_list *cleanup,
bool cpp_delete) noexcept;
/// Try to relinquish ownership from Python object to a unique_ptr;
/// return true if successful, false if not. (Failure is only
/// possible if `cpp_delete` is true.)
NB_CORE bool nb_type_relinquish_ownership(PyObject *o, bool cpp_delete) noexcept;
/// Reverse the effects of nb_type_relinquish_ownership().
NB_CORE void nb_type_restore_ownership(PyObject *o, bool cpp_delete) noexcept;
/// Get a pointer to a user-defined 'extra' value associated with the nb_type t.
NB_CORE void *nb_type_supplement(PyObject *t) noexcept;
/// Check if the given python object represents a nanobind type
NB_CORE bool nb_type_check(PyObject *t) noexcept;
/// Return the size of the type wrapped by the given nanobind type object
NB_CORE size_t nb_type_size(PyObject *t) noexcept;
/// Return the alignment of the type wrapped by the given nanobind type object
NB_CORE size_t nb_type_align(PyObject *t) noexcept;
/// Return a unicode string representing the long-form name of the given type
NB_CORE PyObject *nb_type_name(PyObject *t) noexcept;
/// Return a unicode string representing the long-form name of object's type
NB_CORE PyObject *nb_inst_name(PyObject *o) noexcept;
/// Return the C++ type_info wrapped by the given nanobind type object
NB_CORE const std::type_info *nb_type_info(PyObject *t) noexcept;
/// Get a pointer to the instance data of a nanobind instance (nb_inst)
NB_CORE void *nb_inst_ptr(PyObject *o) noexcept;
/// Check if a Python type object wraps an instance of a specific C++ type
NB_CORE bool nb_type_isinstance(PyObject *obj, const std::type_info *t) noexcept;
/// Search for the Python type object associated with a C++ type
NB_CORE PyObject *nb_type_lookup(const std::type_info *t) noexcept;
/// Allocate an instance of type 't'
NB_CORE PyObject *nb_inst_alloc(PyTypeObject *t);
/// Allocate an zero-initialized instance of type 't'
NB_CORE PyObject *nb_inst_alloc_zero(PyTypeObject *t);
/// Allocate an instance of type 't' referencing the existing 'ptr'
NB_CORE PyObject *nb_inst_reference(PyTypeObject *t, void *ptr,
PyObject *parent);
/// Allocate an instance of type 't' taking ownership of the existing 'ptr'
NB_CORE PyObject *nb_inst_take_ownership(PyTypeObject *t, void *ptr);
/// Call the destructor of the given python object
NB_CORE void nb_inst_destruct(PyObject *o) noexcept;
/// Zero-initialize a POD type and mark it as ready + to be destructed upon GC
NB_CORE void nb_inst_zero(PyObject *o) noexcept;
/// Copy-construct 'dst' from 'src', mark it as ready and to be destructed (must have the same nb_type)
NB_CORE void nb_inst_copy(PyObject *dst, const PyObject *src) noexcept;
/// Move-construct 'dst' from 'src', mark it as ready and to be destructed (must have the same nb_type)
NB_CORE void nb_inst_move(PyObject *dst, const PyObject *src) noexcept;
/// Destruct 'dst', copy-construct 'dst' from 'src', mark ready and retain 'destruct' status (must have the same nb_type)
NB_CORE void nb_inst_replace_copy(PyObject *dst, const PyObject *src) noexcept;
/// Destruct 'dst', move-construct 'dst' from 'src', mark ready and retain 'destruct' status (must have the same nb_type)
NB_CORE void nb_inst_replace_move(PyObject *dst, const PyObject *src) noexcept;
/// Check if a particular instance uses a Python-derived type
NB_CORE bool nb_inst_python_derived(PyObject *o) noexcept;
/// Overwrite the instance's ready/destruct flags
NB_CORE void nb_inst_set_state(PyObject *o, bool ready, bool destruct) noexcept;
/// Query the 'ready' and 'destruct' flags of an instance
NB_CORE std::pair<bool, bool> nb_inst_state(PyObject *o) noexcept;
// ========================================================================
// Create and install a Python property object
NB_CORE void property_install(PyObject *scope, const char *name,
PyObject *getter, PyObject *setter) noexcept;
NB_CORE void property_install_static(PyObject *scope, const char *name,
PyObject *getter,
PyObject *setter) noexcept;
// ========================================================================
// Look up a Python-side override of the method 'name' for the C++ instance
// at 'ptr' of the given 'type' (used by trampoline classes).
// NOTE(review): 'pure' presumably marks pure-virtual methods that must have
// an override — confirm exact failure behavior against the implementation.
NB_CORE PyObject *get_override(void *ptr, const std::type_info *type,
                               const char *name, bool pure);
// ========================================================================
// Ensure that 'patient' cannot be GCed while 'nurse' is alive
NB_CORE void keep_alive(PyObject *nurse, PyObject *patient);
// Keep 'payload' alive until 'nurse' is GCed
NB_CORE void keep_alive(PyObject *nurse, void *payload,
void (*deleter)(void *) noexcept) noexcept;
// ========================================================================
/// Indicate to nanobind that an implicit constructor can convert 'src' -> 'dst'
NB_CORE void implicitly_convertible(const std::type_info *src,
const std::type_info *dst) noexcept;
/// Register a callback to check if implicit conversion to 'dst' is possible
NB_CORE void implicitly_convertible(bool (*predicate)(PyTypeObject *,
PyObject *,
cleanup_list *),
const std::type_info *dst) noexcept;
// ========================================================================
struct enum_init_data;
/// Create a new enumeration type
NB_CORE PyObject *enum_create(enum_init_data *) noexcept;
/// Append an entry to an enumeration
NB_CORE void enum_append(PyObject *tp, const char *name,
int64_t value, const char *doc) noexcept;
// Query an enumeration's Python object -> integer value map
NB_CORE bool enum_from_python(const std::type_info *, PyObject *, int64_t *,
uint8_t flags) noexcept;
// Query an enumeration's integer value -> Python object map
NB_CORE PyObject *enum_from_cpp(const std::type_info *, int64_t) noexcept;
/// Export enum entries to the parent scope
NB_CORE void enum_export(PyObject *tp);
// ========================================================================
/// Try to import a Python extension module, raises an exception upon failure
NB_CORE PyObject *module_import(const char *name);
/// Try to import a Python extension module, raises an exception upon failure
NB_CORE PyObject *module_import(PyObject *name);
/// Create a submodule of an existing module
NB_CORE PyObject *module_new_submodule(PyObject *base, const char *name,
const char *doc) noexcept;
// ========================================================================
// Try to import a reference-counted ndarray object via DLPack
NB_CORE ndarray_handle *ndarray_import(PyObject *o,
const ndarray_config *c,
bool convert,
cleanup_list *cleanup) noexcept;
// Describe a local ndarray object using a DLPack capsule
NB_CORE ndarray_handle *ndarray_create(void *data, size_t ndim,
const size_t *shape, PyObject *owner,
const int64_t *strides,
dlpack::dtype dtype, bool ro,
int device, int device_id,
char order);
/// Increase the reference count of the given ndarray object; returns a pointer
/// to the underlying DLTensor
NB_CORE dlpack::dltensor *ndarray_inc_ref(ndarray_handle *) noexcept;
/// Decrease the reference count of the given ndarray object
NB_CORE void ndarray_dec_ref(ndarray_handle *) noexcept;
/// Wrap a ndarray_handle* into a PyCapsule
NB_CORE PyObject *ndarray_export(ndarray_handle *, int framework,
rv_policy policy, cleanup_list *cleanup) noexcept;
/// Check if an object represents an ndarray
NB_CORE bool ndarray_check(PyObject *o) noexcept;
// ========================================================================
/// Print to stdout using Python
NB_CORE void print(PyObject *file, PyObject *str, PyObject *end);
// ========================================================================
typedef void (*exception_translator)(const std::exception_ptr &, void *);
NB_CORE void register_exception_translator(exception_translator translator,
void *payload);
NB_CORE PyObject *exception_new(PyObject *mod, const char *name,
PyObject *base);
// ========================================================================
// Try to convert a Python object into the indicated C++ scalar type, storing
// the result in '*out' and returning success/failure. The 'flags' bitfield
// tunes the conversion (presumably whether implicit conversion is permitted;
// confirm against the cast_flags definitions elsewhere in the library).
NB_CORE bool load_i8 (PyObject *o, uint8_t flags, int8_t *out) noexcept;
NB_CORE bool load_u8 (PyObject *o, uint8_t flags, uint8_t *out) noexcept;
NB_CORE bool load_i16(PyObject *o, uint8_t flags, int16_t *out) noexcept;
NB_CORE bool load_u16(PyObject *o, uint8_t flags, uint16_t *out) noexcept;
NB_CORE bool load_i32(PyObject *o, uint8_t flags, int32_t *out) noexcept;
NB_CORE bool load_u32(PyObject *o, uint8_t flags, uint32_t *out) noexcept;
NB_CORE bool load_i64(PyObject *o, uint8_t flags, int64_t *out) noexcept;
NB_CORE bool load_u64(PyObject *o, uint8_t flags, uint64_t *out) noexcept;
NB_CORE bool load_f32(PyObject *o, uint8_t flags, float *out) noexcept;
NB_CORE bool load_f64(PyObject *o, uint8_t flags, double *out) noexcept;
// ========================================================================
/// Increase the reference count of 'o', and check that the GIL is held
NB_CORE void incref_checked(PyObject *o) noexcept;
/// Decrease the reference count of 'o', and check that the GIL is held
NB_CORE void decref_checked(PyObject *o) noexcept;
// ========================================================================
// Global getters/setters for the leak- and implicit-conversion warning
// toggles (exposed publicly via the inline wrappers in nb_misc.h)
NB_CORE bool leak_warnings() noexcept;
NB_CORE bool implicit_cast_warnings() noexcept;
NB_CORE void set_leak_warnings(bool value) noexcept;
NB_CORE void set_implicit_cast_warnings(bool value) noexcept;
// ========================================================================
/// Check whether 'o' supports iteration
NB_CORE bool iterable_check(PyObject *o) noexcept;
// ========================================================================
/// Resolve a Python slice object against a sequence of length 'size',
/// writing normalized start/stop/step values and the resulting slice length
NB_CORE void slice_compute(PyObject *slice, Py_ssize_t size,
                           Py_ssize_t &start, Py_ssize_t &stop,
                           Py_ssize_t &step, size_t &slice_length);
// ========================================================================
/// C++ analog of Python's issubclass(a, b)
NB_CORE bool issubclass(PyObject *a, PyObject *b);
// ========================================================================
// repr() implementations for sequence- and mapping-style objects
NB_CORE PyObject *repr_list(PyObject *o);
NB_CORE PyObject *repr_map(PyObject *o);
/// Check whether the interpreter / nanobind internals are still usable
NB_CORE bool is_alive() noexcept;
#if NB_TYPE_GET_SLOT_IMPL
NB_CORE void *type_get_slot(PyTypeObject *t, int slot_id);
#endif
// Fetch d[k] as a new reference, raising instead of returning NULL on error
// -- NOTE(review): confirm exact failure semantics against the implementation
NB_CORE PyObject *dict_get_item_ref_or_fail(PyObject *d, PyObject *k);
// NOTE(review): presumably returns nanobind's ABI identification tag used to
// detect incompatible extension builds -- confirm against nb_internals
NB_CORE const char *abi_tag();
NAMESPACE_END(detail)
using detail::raise;
using detail::raise_type_error;
using detail::raise_python_error;
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_misc.h | C/C++ Header | /*
nanobind/nb_misc.h: Miscellaneous bits (GIL, etc.)
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
/// RAII guard that acquires the GIL on construction (PyGILState_Ensure) and
/// releases it on destruction (PyGILState_Release). Non-copyable; safe to use
/// from threads not previously known to the Python interpreter.
struct gil_scoped_acquire {
public:
    NB_NONCOPYABLE(gil_scoped_acquire)
    gil_scoped_acquire() noexcept : state(PyGILState_Ensure()) { }
    ~gil_scoped_acquire() { PyGILState_Release(state); }
private:
    const PyGILState_STATE state;
};
/// RAII guard that releases the GIL on construction (PyEval_SaveThread) and
/// re-acquires it on destruction (PyEval_RestoreThread). Wrap long-running
/// C++ sections that do not touch the Python API.
struct gil_scoped_release {
public:
    NB_NONCOPYABLE(gil_scoped_release)
    gil_scoped_release() noexcept : state(PyEval_SaveThread()) { }
    ~gil_scoped_release() { PyEval_RestoreThread(state); }
private:
    // Thread state saved by PyEval_SaveThread(), restored in the destructor
    PyThreadState *state;
};
/// Mutex abstraction for free-threaded ("ft") Python: wraps PyMutex on
/// free-threaded builds, and is a zero-cost no-op on GIL-protected builds
/// where the GIL already serializes access.
struct ft_mutex {
public:
    NB_NONCOPYABLE(ft_mutex)
    ft_mutex() = default;
#if !defined(NB_FREE_THREADED)
    // GIL build: mutual exclusion is implicit, so locking does nothing
    void lock() { }
    void unlock() { }
#else
    void lock() { PyMutex_Lock(&mutex); }
    void unlock() { PyMutex_Unlock(&mutex); }
private:
    PyMutex mutex { 0 };
#endif
};
/// Scoped lock holder for ft_mutex (analogous to std::lock_guard):
/// locks on construction, unlocks on destruction.
struct ft_lock_guard {
public:
    NB_NONCOPYABLE(ft_lock_guard)
    ft_lock_guard(ft_mutex &m) : m(m) { m.lock(); }
    ~ft_lock_guard() { m.unlock(); }
private:
    ft_mutex &m;
};
/// Scoped critical section tied to a single Python object. Uses the
/// PyCriticalSection API on free-threaded builds; a no-op otherwise.
struct ft_object_guard {
public:
    NB_NONCOPYABLE(ft_object_guard)
#if !defined(NB_FREE_THREADED)
    ft_object_guard(handle) { }
#else
    ft_object_guard(handle h) { PyCriticalSection_Begin(&cs, h.ptr()); }
    ~ft_object_guard() { PyCriticalSection_End(&cs); }
private:
    PyCriticalSection cs;
#endif
};
/// Scoped critical section spanning *two* Python objects (PyCriticalSection2
/// on free-threaded builds, which avoids lock-ordering deadlocks); a no-op on
/// GIL-protected builds.
struct ft_object2_guard {
public:
    NB_NONCOPYABLE(ft_object2_guard)
#if !defined(NB_FREE_THREADED)
    ft_object2_guard(handle, handle) { }
#else
    ft_object2_guard(handle h1, handle h2) { PyCriticalSection2_Begin(&cs, h1.ptr(), h2.ptr()); }
    ~ft_object2_guard() { PyCriticalSection2_End(&cs); }
private:
    PyCriticalSection2 cs;
#endif
};
/// Query/set whether nanobind warns about leaked instances and about implicit
/// type conversions. These simply forward to the shared internals.
inline bool leak_warnings() noexcept {
    return detail::leak_warnings();
}
inline bool implicit_cast_warnings() noexcept {
    return detail::implicit_cast_warnings();
}
inline void set_leak_warnings(bool value) noexcept {
    detail::set_leak_warnings(value);
}
inline void set_implicit_cast_warnings(bool value) noexcept {
    detail::set_implicit_cast_warnings(value);
}
/// Return the globals dictionary of the currently executing Python frame.
/// Raises if no frame is executing (e.g., when called outside of Python).
inline dict globals() {
    PyObject *p = PyEval_GetGlobals();
    if (!p)
        raise("nanobind::globals(): no frame is currently executing!");
    // PyEval_GetGlobals() returns a borrowed reference, hence borrow<>()
    return borrow<dict>(p);
}
/// Hash a Python object via PyObject_Hash(); raises python_error on failure.
inline Py_hash_t hash(handle h) {
    Py_hash_t rv = PyObject_Hash(h.ptr());
    // -1 is both the error sentinel and a potentially valid hash value;
    // disambiguate by also checking the Python error indicator
    if (rv == -1 && PyErr_Occurred())
        nanobind::raise_python_error();
    return rv;
}
/// Equivalent of Python's isinstance(inst, cls); raises python_error when the
/// underlying PyObject_IsInstance() call reports an error (-1).
inline bool isinstance(handle inst, handle cls) {
    int ret = PyObject_IsInstance(inst.ptr(), cls.ptr());
    if (ret == -1)
        nanobind::raise_python_error();
    return ret;
}
/// Check whether the interpreter / nanobind internals are still usable
/// (forwards to detail::is_alive())
inline bool is_alive() noexcept {
    return detail::is_alive();
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_python.h | C/C++ Header | /*
nanobind/nb_python.h: Include CPython headers while temporarily disabling
certain warnings. Also, disable dangerous preprocessor definitions.
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
#if defined(_MSC_VER)
# pragma warning(push)
# if defined(_DEBUG) && !defined(Py_DEBUG)
# define NB_DEBUG_MARKER
# undef _DEBUG
# endif
#endif
#include <Python.h>
#include <frameobject.h>
#include <pythread.h>
#include <structmember.h>
/* Python #defines overrides for all sorts of core functions, which
   tends to wreak havoc in C++ codebases that expect these to work
   like regular functions (potentially with several overloads) */
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef islower
# undef isspace
# undef isupper
# undef tolower
# undef toupper
#endif
#if defined(copysign)
# undef copysign
#endif
#if defined(setter)
# undef setter
#endif
#if defined(getter)
# undef getter
#endif
#if defined(_MSC_VER)
# if defined(NB_DEBUG_MARKER)
# define _DEBUG
# undef NB_DEBUG_MARKER
# endif
# pragma warning(pop)
#endif
#if PY_VERSION_HEX < 0x03090000
# error The nanobind library requires Python 3.9 (or newer)
#endif
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_traits.h | C/C++ Header | /*
nanobind/nb_traits.h: type traits for metaprogramming in nanobind
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
using ssize_t = std::make_signed_t<size_t>;
NAMESPACE_BEGIN(detail)
struct void_type { };
/// Metaprogramming helpers that scan a boolean parameter pack:
/// index_1 yields the index of the *first* 'true' entry, index_n the index of
/// the *last* one. If no entry is true, both evaluate to sizeof...(Bs).
template <bool... Bs> struct index_1;
template <bool... Bs> struct index_n;
template <> struct index_1<> { constexpr static size_t value = 0; };
template <> struct index_n<> { constexpr static size_t value = 0; };
template <bool B, bool... Bs> struct index_1<B, Bs...> {
    constexpr static size_t value_rec = index_1<Bs...>::value;
    // Head is true -> index 0; otherwise shift the recursive result by one
    constexpr static size_t value = B ? 0 : (value_rec + 1);
};
template <bool B, bool... Bs> struct index_n<B, Bs...> {
    constexpr static size_t value_rec = index_n<Bs...>::value;
    // If the tail contains a 'true' (value_rec < sizeof...(Bs)) point at it;
    // otherwise fall back to the head (index 0) if it is true
    constexpr static size_t value =
        (value_rec < sizeof...(Bs) || !B) ? (value_rec + 1) : 0;
};
template <bool... Bs> constexpr size_t index_1_v = index_1<Bs...>::value;
template <bool... Bs> constexpr size_t index_n_v = index_n<Bs...>::value;
/// Helper template to strip away type modifiers
template <typename T> struct intrinsic_type { using type = T; };
template <typename T> struct intrinsic_type<const T> { using type = typename intrinsic_type<T>::type; };
template <typename T> struct intrinsic_type<T*> { using type = typename intrinsic_type<T>::type; };
template <typename T> struct intrinsic_type<T&> { using type = typename intrinsic_type<T>::type; };
template <typename T> struct intrinsic_type<T&&> { using type = typename intrinsic_type<T>::type; };
template <typename T, size_t N> struct intrinsic_type<const T[N]> { using type = typename intrinsic_type<T>::type; };
template <typename T, size_t N> struct intrinsic_type<T[N]> { using type = typename intrinsic_type<T>::type; };
template <typename T> using intrinsic_t = typename intrinsic_type<T>::type;
// More relaxed pointer test
template <typename T>
constexpr bool is_pointer_v = std::is_pointer_v<std::remove_reference_t<T>>;
template <typename T, typename U>
using forwarded_type = std::conditional_t<std::is_lvalue_reference_v<T>,
std::remove_reference_t<U> &,
std::remove_reference_t<U> &&>;
/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically
/// used for forwarding a container's elements.
template <typename T, typename U> NB_INLINE forwarded_type<T, U> forward_like_(U &&u) {
return (forwarded_type<T, U>) u;
}
template <typename T>
constexpr bool is_std_char_v =
std::is_same_v<T, char>
#if defined(NB_HAS_U8STRING)
|| std::is_same_v<T, char8_t> /* std::u8string */
#endif
|| std::is_same_v<T, char16_t> ||
std::is_same_v<T, char32_t> || std::is_same_v<T, wchar_t>;
template <bool V> using enable_if_t = std::enable_if_t<V, int>;
/// Check if a function is a lambda function
template <typename T>
constexpr bool is_lambda_v = !std::is_function_v<T> && !std::is_pointer_v<T> &&
!std::is_member_pointer_v<T>;
/// Inspect the signature of a method call
template <typename T> struct analyze_method { };
template <typename Cls, typename Ret, typename... Args>
struct analyze_method<Ret (Cls::*)(Args...)> {
using func = Ret(Args...);
static constexpr size_t argc = sizeof...(Args);
};
template <typename Cls, typename Ret, typename... Args>
struct analyze_method<Ret (Cls::*)(Args...) noexcept> {
using func = Ret(Args...);
static constexpr size_t argc = sizeof...(Args);
};
template <typename Cls, typename Ret, typename... Args>
struct analyze_method<Ret (Cls::*)(Args...) const> {
using func = Ret(Args...);
static constexpr size_t argc = sizeof...(Args);
};
template <typename Cls, typename Ret, typename... Args>
struct analyze_method<Ret (Cls::*)(Args...) const noexcept> {
using func = Ret(Args...);
static constexpr size_t argc = sizeof...(Args);
};
template <typename F>
struct strip_function_object {
using type = typename analyze_method<decltype(&F::operator())>::func;
};
// Extracts the function signature from a function, function pointer or lambda.
template <typename Function, typename F = std::remove_reference_t<Function>>
using function_signature_t = std::conditional_t<
std::is_function_v<F>, F,
typename std::conditional_t<
std::is_pointer_v<F> || std::is_member_pointer_v<F>,
std::remove_pointer<F>,
strip_function_object<F>>::type>;
template <typename T>
using forward_t = std::conditional_t<std::is_lvalue_reference_v<T>, T, T &&>;
template <typename...> inline constexpr bool false_v = false;
template <typename... Args> struct overload_cast_impl {
template <typename Return>
constexpr auto operator()(Return (*pf)(Args...)) const noexcept
-> decltype(pf) { return pf; }
template <typename Return, typename Class>
constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept
-> decltype(pmf) { return pmf; }
template <typename Return, typename Class>
constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
-> decltype(pmf) { return pmf; }
};
/// Detector pattern
template <typename SFINAE, template <typename> typename Op, typename Arg>
struct detector : std::false_type { };
template <template <typename> typename Op, typename Arg>
struct detector<std::void_t<Op<Arg>>, Op, Arg>
: std::true_type { };
/* This template is used for docstring generation and specialized in
``stl/{variant,optional.h}`` to strip away std::optional and
``std::variant<std::monostate>`` in top-level argument types and
avoid redundancy when combined with nb::arg(...).none(). */
template <typename T> struct remove_opt_mono { using type = T; };
// Detect std::enable_shared_from_this without including <memory>
template <typename T>
auto has_shared_from_this_impl(T *ptr) ->
decltype(ptr->weak_from_this().lock().get(), std::true_type{});
std::false_type has_shared_from_this_impl(...);
template <typename T>
constexpr bool has_shared_from_this_v =
decltype(has_shared_from_this_impl((T *) nullptr))::value;
/// Base of all type casters for traditional bindings created via nanobind::class_<>
struct type_caster_base_tag {
static constexpr bool IsClass = true;
};
/// Check if a type caster represents traditional bindings created via nanobind::class_<>
template <typename Caster>
constexpr bool is_base_caster_v = std::is_base_of_v<type_caster_base_tag, Caster>;
template <typename T> using is_class_caster_test = std::enable_if_t<T::IsClass>;
/// Generalized version of the is_base_caster_v test that also accepts unique_ptr/shared_ptr
template <typename Caster>
constexpr bool is_class_caster_v = detail::detector<void, is_class_caster_test, Caster>::value;
// Primary template
template<typename T, typename = int>
struct is_complex : std::false_type {};
// Specialization if `T` is complex, i.e., `T` has a member type `value_type`,
// member functions `real()` and `imag()` that return such, and the size of
// `T` is twice that of `value_type`.
template<typename T>
struct is_complex<T, enable_if_t<std::is_same_v<
decltype(std::declval<T>().real()),
typename T::value_type>
&& std::is_same_v<
decltype(std::declval<T>().imag()),
typename T::value_type>
&& (sizeof(T) ==
2 * sizeof(typename T::value_type))>>
: std::true_type {};
/// True if the type `T` is a complete type representing a complex number.
template<typename T>
inline constexpr bool is_complex_v = is_complex<T>::value;
template <typename T>
struct has_arg_defaults : std::false_type {};
template <typename T>
constexpr bool has_arg_defaults_v = has_arg_defaults<intrinsic_t<T>>::value;
NAMESPACE_END(detail)
template <typename... Args>
static constexpr detail::overload_cast_impl<Args...> overload_cast = {};
static constexpr auto const_ = std::true_type{};
template <template<typename> class Op, typename Arg>
constexpr bool is_detected_v = detail::detector<void, Op, Arg>::value;
template <typename T>
using remove_opt_mono_t = typename detail::remove_opt_mono<T>::type;
template <template <typename> typename Base, typename T>
std::true_type is_base_of_template(const Base<T>*);
template <template <typename> typename Base>
std::false_type is_base_of_template(...);
template <typename T, template <typename> typename Base>
constexpr bool is_base_of_template_v =
decltype(is_base_of_template<Base>(std::declval<T *>()))::value;
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_tuple.h | C/C++ Header | /*
nanobind/nb_tuple.h: tiny self-contained tuple class
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/**
* \brief nanobind::tuple<...>: a tiny recursive tuple class
*
* std::tuple<...> is one of those STL types that just seems unnecessarily
* complex for typical usage. It pulls in a large amount of headers (22K LOC,
* 530 KiB with Clang/libc++) and goes through elaborate contortions to avoid a
* recursive definition. This is helpful when dealing with very large tuples
* (e.g. efficient compilation of std::get<1000>() in a tuple with 10K entries).
* When working with small tuples used to pass around a few arguments, a simple
* recursive definition compiles faster (generated code is identical).
*/
template <typename... Ts> struct tuple;
template <> struct tuple<> {
template <size_t> using type = void;
};
/// Recursive case: stores the head element 'value' and inherits the tail
template <typename T, typename... Ts> struct tuple<T, Ts...> : tuple<Ts...> {
    using Base = tuple<Ts...>;
    tuple() = default;
    tuple(const tuple &) = default;
    tuple(tuple &&) = default;
    tuple& operator=(tuple &&) = default;
    tuple& operator=(const tuple &) = default;
    // Element-wise constructor; SFINAE-restricted to first arguments that are
    // convertible to T so it does not hijack the copy/move constructors
    template <typename A,
              std::enable_if_t<std::is_convertible_v<A, T>, bool> = true,
              typename... As>
    NB_INLINE tuple(A &&a, As &&...as)
        : Base((forward_t<As>) as...), value((forward_t<A>) a) {}
    /// Access the I-th element (I == 0 is the head; otherwise recurse)
    template <size_t I> NB_INLINE auto& get() {
        if constexpr (I == 0)
            return value;
        else
            return Base::template get<I - 1>();
    }
    template <size_t I> NB_INLINE const auto& get() const {
        if constexpr (I == 0)
            return value;
        else
            return Base::template get<I - 1>();
    }
    /// Type of the I-th element
    template <size_t I>
    using type =
        std::conditional_t<I == 0, T, typename Base::template type<I - 1>>;
private:
    T value;
};
template <typename... Ts> tuple(Ts &&...) -> tuple<std::decay_t<Ts>...>;
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
// Support for C++17 structured bindings
template <typename... Ts>
struct std::tuple_size<nanobind::detail::tuple<Ts...>>
: std::integral_constant<size_t, sizeof...(Ts)> { };
template <size_t I, typename... Ts>
struct std::tuple_element<I, nanobind::detail::tuple<Ts...>> {
using type = typename nanobind::detail::tuple<Ts...>::template type<I>;
};
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/nb_types.h | C/C++ Header | /*
nanobind/nb_types.h: nb::dict/str/list/..: C++ wrappers for Python types
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
NAMESPACE_BEGIN(NB_NAMESPACE)
/// Macro defining functions/constructors for nanobind::handle subclasses
#define NB_OBJECT(Type, Parent, Str, Check) \
public: \
static constexpr auto Name = ::nanobind::detail::const_name(Str); \
NB_INLINE Type(handle h, ::nanobind::detail::borrow_t) \
: Parent(h, ::nanobind::detail::borrow_t{}) { } \
NB_INLINE Type(handle h, ::nanobind::detail::steal_t) \
: Parent(h, ::nanobind::detail::steal_t{}) { } \
NB_INLINE static bool check_(handle h) { \
return Check(h.ptr()); \
}
/// Like NB_OBJECT but allow null-initialization
#define NB_OBJECT_DEFAULT(Type, Parent, Str, Check) \
NB_OBJECT(Type, Parent, Str, Check) \
NB_INLINE Type() : Parent() {}
/// Helper macro to create detail::api comparison functions
#define NB_DECL_COMP(name) \
template <typename T2> NB_INLINE bool name(const api<T2> &o) const;
#define NB_IMPL_COMP(name, op) \
template <typename T1> template <typename T2> \
NB_INLINE bool api<T1>::name(const api<T2> &o) const { \
return detail::obj_comp(derived().ptr(), o.derived().ptr(), op); \
}
/// Helper macros to create detail::api unary operators
#define NB_DECL_OP_1(name) \
NB_INLINE object name() const;
#define NB_IMPL_OP_1(name, op) \
template <typename T> NB_INLINE object api<T>::name() const { \
return steal(detail::obj_op_1(derived().ptr(), op)); \
}
/// Helper macros to create detail::api binary operators
#define NB_DECL_OP_2(name) \
template <typename T2> NB_INLINE object name(const api<T2> &o) const;
#define NB_IMPL_OP_2(name, op) \
template <typename T1> template <typename T2> \
NB_INLINE object api<T1>::name(const api<T2> &o) const { \
return steal( \
detail::obj_op_2(derived().ptr(), o.derived().ptr(), op)); \
}
#define NB_DECL_OP_2_I(name) \
template <typename T2> NB_INLINE object name(const api<T2> &o);
#define NB_IMPL_OP_2_I(name, op) \
template <typename T1> template <typename T2> \
NB_INLINE object api<T1>::name(const api<T2> &o) { \
return steal( \
detail::obj_op_2(derived().ptr(), o.derived().ptr(), op)); \
}
#define NB_IMPL_OP_2_IO(name) \
template <typename T> NB_INLINE decltype(auto) name(const api<T> &o) { \
return operator=(handle::name(o)); \
}
// A few forward declarations
class object;
class handle;
class iterator;
template <typename T = object> NB_INLINE T borrow(handle h);
template <typename T = object> NB_INLINE T steal(handle h);
NAMESPACE_BEGIN(detail)
template <typename T, typename SFINAE = int> struct type_caster;
template <typename T> using make_caster = type_caster<intrinsic_t<T>>;
template <typename Impl> class accessor;
struct str_attr; struct obj_attr;
struct str_item; struct obj_item; struct num_item;
struct num_item_list; struct num_item_tuple;
class args_proxy; class kwargs_proxy;
struct borrow_t { };
struct steal_t { };
struct api_tag {
constexpr static bool nb_typed = false;
};
class dict_iterator;
struct fast_iterator;
// Standard operations provided by every nanobind object
template <typename Derived> class api : public api_tag {
public:
Derived &derived() { return static_cast<Derived &>(*this); }
const Derived &derived() const { return static_cast<const Derived &>(*this); }
NB_INLINE bool is(handle value) const;
NB_INLINE bool is_none() const { return derived().ptr() == Py_None; }
NB_INLINE bool is_type() const { return PyType_Check(derived().ptr()); }
NB_INLINE bool is_valid() const { return derived().ptr() != nullptr; }
NB_INLINE handle inc_ref() const &;
NB_INLINE handle dec_ref() const &;
iterator begin() const;
iterator end() const;
NB_INLINE handle type() const;
NB_INLINE operator handle() const;
accessor<obj_attr> attr(handle key) const;
accessor<str_attr> attr(const char *key) const;
accessor<str_attr> doc() const;
accessor<obj_item> operator[](handle key) const;
accessor<str_item> operator[](const char *key) const;
template <typename T, enable_if_t<std::is_arithmetic_v<T>> = 1>
accessor<num_item> operator[](T key) const;
args_proxy operator*() const;
template <rv_policy policy = rv_policy::automatic_reference,
typename... Args>
object operator()(Args &&...args) const;
NB_DECL_COMP(equal)
NB_DECL_COMP(not_equal)
NB_DECL_COMP(operator<)
NB_DECL_COMP(operator<=)
NB_DECL_COMP(operator>)
NB_DECL_COMP(operator>=)
NB_DECL_OP_1(operator-)
NB_DECL_OP_1(operator~)
NB_DECL_OP_2(operator+)
NB_DECL_OP_2(operator-)
NB_DECL_OP_2(operator*)
NB_DECL_OP_2(operator/)
NB_DECL_OP_2(operator%)
NB_DECL_OP_2(operator|)
NB_DECL_OP_2(operator&)
NB_DECL_OP_2(operator^)
NB_DECL_OP_2(operator<<)
NB_DECL_OP_2(operator>>)
NB_DECL_OP_2(floor_div)
NB_DECL_OP_2_I(operator+=)
NB_DECL_OP_2_I(operator-=)
NB_DECL_OP_2_I(operator*=)
NB_DECL_OP_2_I(operator/=)
NB_DECL_OP_2_I(operator%=)
NB_DECL_OP_2_I(operator|=)
NB_DECL_OP_2_I(operator&=)
NB_DECL_OP_2_I(operator^=)
NB_DECL_OP_2_I(operator<<=)
NB_DECL_OP_2_I(operator>>=)
};
NAMESPACE_END(detail)
// *WARNING*: nanobind regularly receives requests from users who run it
// through Clang-Tidy, or who compile with increased warnings levels, like
//
// -Wcast-qual, -Wsign-conversion, etc.
//
// (i.e., beyond -Wall -Wextra and /W4 that are currently already used)
//
// Their next step is to open a big pull request needed to silence all of
// the resulting messages. This comment is strategically placed here
// because the (PyObject *) casts below cast away the const qualifier and
// will almost certainly be flagged in this process.
//
// My policy on this is as follows: I am always happy to fix issues in the
// codebase. However, many of the resulting change requests are in the
// "ritual purification" category: things that cause churn, decrease
// readability, and which don't fix actual problems. It's a never-ending
// cycle because each new revision of such tooling adds further warnings
// and purification rites.
//
// So just to be clear: I do not wish to pepper this codebase with
// "const_cast" and #pragmas/comments to avoid warnings in external
// tooling just so those users can have a "silent" build. I don't think it
// is reasonable for them to impose their own style on this project.
//
// As a workaround it is likely possible to restrict the scope of style
// checks to particular C++ namespaces or source code locations.
class handle : public detail::api<handle> {
friend class python_error;
friend struct detail::str_attr;
friend struct detail::obj_attr;
friend struct detail::str_item;
friend struct detail::obj_item;
friend struct detail::num_item;
public:
static constexpr auto Name = detail::const_name("object");
handle() = default;
handle(const handle &) = default;
handle(handle &&) noexcept = default;
handle &operator=(const handle &) = default;
handle &operator=(handle &&) noexcept = default;
NB_INLINE handle(std::nullptr_t, detail::steal_t) : m_ptr(nullptr) { }
NB_INLINE handle(std::nullptr_t) : m_ptr(nullptr) { }
NB_INLINE handle(const PyObject *ptr) : m_ptr((PyObject *) ptr) { }
NB_INLINE handle(const PyTypeObject *ptr) : m_ptr((PyObject *) ptr) { }
const handle& inc_ref() const & noexcept {
#if defined(NDEBUG)
Py_XINCREF(m_ptr);
#else
detail::incref_checked(m_ptr);
#endif
return *this;
}
const handle& dec_ref() const & noexcept {
#if defined(NDEBUG)
Py_XDECREF(m_ptr);
#else
detail::decref_checked(m_ptr);
#endif
return *this;
}
NB_INLINE explicit operator bool() const { return m_ptr != nullptr; }
NB_INLINE PyObject *ptr() const { return m_ptr; }
NB_INLINE static bool check_(handle) { return true; }
protected:
PyObject *m_ptr = nullptr;
};
/// Reference-owning wrapper around 'handle': holds one reference to the
/// underlying PyObject and releases it on destruction.
class object : public handle {
public:
    static constexpr auto Name = detail::const_name("object");
    object() = default;
    object(const object &o) : handle(o) { inc_ref(); }
    object(object &&o) noexcept : handle(o) { o.m_ptr = nullptr; }
    ~object() { dec_ref(); }
    /// Wrap 'h' and increase its reference count (shared ownership)
    object(handle h, detail::borrow_t) : handle(h) { inc_ref(); }
    /// Wrap 'h' without touching its reference count (ownership transfer)
    object(handle h, detail::steal_t) : handle(h) { }
    /// Relinquish ownership: return the raw handle and clear this object
    /// (the caller becomes responsible for the reference)
    handle release() {
        handle temp(m_ptr);
        m_ptr = nullptr;
        return temp;
    }
    /// Drop the held reference and reset to a null handle
    void reset() {
        dec_ref();
        m_ptr = nullptr;
    }
    object& operator=(const object &o) {
        // Increment the source first so self-assignment is safe; the old
        // reference is only released at the end
        handle temp(m_ptr);
        o.inc_ref();
        m_ptr = o.m_ptr;
        temp.dec_ref();
        return *this;
    }
    object& operator=(object &&o) noexcept {
        handle temp(m_ptr);
        m_ptr = o.m_ptr;
        o.m_ptr = nullptr;
        temp.dec_ref();
        return *this;
    }
    // In-place operators rebind 'this' to the operation's result (Python
    // semantics); implemented via operator= in NB_IMPL_OP_2_IO
    NB_IMPL_OP_2_IO(operator+=)
    NB_IMPL_OP_2_IO(operator%=)
    NB_IMPL_OP_2_IO(operator-=)
    NB_IMPL_OP_2_IO(operator*=)
    NB_IMPL_OP_2_IO(operator/=)
    NB_IMPL_OP_2_IO(operator|=)
    NB_IMPL_OP_2_IO(operator&=)
    NB_IMPL_OP_2_IO(operator^=)
    NB_IMPL_OP_2_IO(operator<<=)
    NB_IMPL_OP_2_IO(operator>>=)
};
/// Create an owning 'T' from 'h', *incrementing* its reference count
template <typename T> NB_INLINE T borrow(handle h) {
    return { h, detail::borrow_t() };
}
/// Overload for expiring (rvalue) object subclasses: the reference is moved
/// out via release(), so no refcount change is needed
template <typename T = object, typename T2,
          std::enable_if_t<std::is_base_of_v<object, T2> && !std::is_lvalue_reference_v<T2>, int> = 0>
NB_INLINE T borrow(T2 &&o) {
    return { o.release(), detail::steal_t() };
}
/// Create an owning 'T' from 'h' *without* incrementing its reference count
/// (takes over a reference the caller already owns)
template <typename T> NB_INLINE T steal(handle h) {
    return { h, detail::steal_t() };
}
inline bool hasattr(handle h, const char *key) noexcept {
return PyObject_HasAttrString(h.ptr(), key);
}
inline bool hasattr(handle h, handle key) noexcept {
return PyObject_HasAttr(h.ptr(), key.ptr());
}
inline object getattr(handle h, const char *key) {
return steal(detail::getattr(h.ptr(), key));
}
inline object getattr(handle h, handle key) {
return steal(detail::getattr(h.ptr(), key.ptr()));
}
inline object getattr(handle h, const char *key, handle def) noexcept {
return steal(detail::getattr(h.ptr(), key, def.ptr()));
}
inline object getattr(handle h, handle key, handle value) noexcept {
return steal(detail::getattr(h.ptr(), key.ptr(), value.ptr()));
}
inline void setattr(handle h, const char *key, handle value) {
detail::setattr(h.ptr(), key, value.ptr());
}
inline void setattr(handle h, handle key, handle value) {
detail::setattr(h.ptr(), key.ptr(), value.ptr());
}
inline void delattr(handle h, const char *key) {
detail::delattr(h.ptr(), key);
}
inline void delattr(handle h, handle key) {
detail::delattr(h.ptr(), key.ptr());
}
class module_ : public object {
public:
NB_OBJECT(module_, object, "types.ModuleType", PyModule_CheckExact)
template <typename Func, typename... Extra>
module_ &def(const char *name_, Func &&f, const Extra &...extra);
static NB_INLINE module_ import_(const char *name) {
return steal<module_>(detail::module_import(name));
}
static NB_INLINE module_ import_(handle name) {
return steal<module_>(detail::module_import(name.ptr()));
}
NB_INLINE module_ def_submodule(const char *name,
const char *doc = nullptr) {
return steal<module_>(detail::module_new_submodule(m_ptr, name, doc));
}
};
/// Wrapper around PyCapsule: an opaque pointer with an optional name and
/// cleanup callback, typically used to tie a C/C++ resource's lifetime to a
/// Python object.
class capsule : public object {
    NB_OBJECT_DEFAULT(capsule, object, NB_TYPING_CAPSULE, PyCapsule_CheckExact)
    /// Create an unnamed capsule holding 'ptr'; 'cleanup' (if given) runs when
    /// the capsule is destroyed
    capsule(const void *ptr, void (*cleanup)(void *) noexcept = nullptr) {
        m_ptr = detail::capsule_new(ptr, nullptr, cleanup);
    }
    /// Create a named capsule holding 'ptr'
    capsule(const void *ptr, const char *name,
            void (*cleanup)(void *) noexcept = nullptr) {
        m_ptr = detail::capsule_new(ptr, name, cleanup);
    }
    const char *name() const { return PyCapsule_GetName(m_ptr); }
    // Queries using the capsule's own name, so the name always matches.
    // NOTE(review): unlike data(const char *), this variant performs no error
    // check -- confirm this asymmetry is intentional
    void *data() const { return PyCapsule_GetPointer(m_ptr, name()); }
    /// Retrieve the stored pointer, verifying the capsule name; raises on error
    void *data(const char *name) const {
        void *p = PyCapsule_GetPointer(m_ptr, name);
        if (!p && PyErr_Occurred())
            raise_python_error();
        return p;
    }
};
class bool_ : public object {
NB_OBJECT_DEFAULT(bool_, object, "bool", PyBool_Check)
explicit bool_(handle h)
: object(detail::bool_from_obj(h.ptr()), detail::borrow_t{}) { }
explicit bool_(bool value)
: object(value ? Py_True : Py_False, detail::borrow_t{}) { }
explicit operator bool() const {
return m_ptr == Py_True;
}
};
/// Wrapper around Python ``int`` objects
class int_ : public object {
    NB_OBJECT_DEFAULT(int_, object, "int", PyLong_Check)

    /// Convert an arbitrary Python object to an integer
    explicit int_(handle h)
        : object(detail::int_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Construct from a C++ arithmetic value. Floating-point inputs go
    /// through PyLong_FromDouble; integers use the nanobind type caster.
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 0>
    explicit int_(T value) {
        if constexpr (std::is_floating_point_v<T>)
            m_ptr = PyLong_FromDouble((double) value);
        else
            m_ptr = detail::type_caster<T>::from_cpp(value, rv_policy::copy, nullptr).ptr();
        if (!m_ptr)
            raise_python_error();
    }

    /// Convert back to a C++ arithmetic type; throws std::out_of_range
    /// when the value cannot be represented (e.g. overflow)
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 0>
    explicit operator T() const {
        detail::type_caster<T> tc;
        if (!tc.from_python(m_ptr, 0, nullptr))
            throw std::out_of_range("Conversion of nanobind::int_ failed");
        return tc.value;
    }
};
/// Wrapper around Python ``float`` objects
class float_ : public object {
    NB_OBJECT_DEFAULT(float_, object, "float", PyFloat_Check)

    /// Convert an arbitrary Python object to a float
    explicit float_(handle h)
        : object(detail::float_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Construct from a C++ double; raises on allocation failure
    explicit float_(double value)
        : object(PyFloat_FromDouble(value), detail::steal_t{}) {
        if (!m_ptr)
            raise_python_error();
    }

// Fast unchecked accessor when the full CPython ABI is available;
// limited-API builds must use the checked PyFloat_AsDouble call
#if !defined(Py_LIMITED_API)
    explicit operator double() const { return PyFloat_AS_DOUBLE(m_ptr); }
#else
    explicit operator double() const { return PyFloat_AsDouble(m_ptr); }
#endif
};
/// Wrapper around Python ``str`` objects
class str : public object {
    NB_OBJECT_DEFAULT(str, object, "str", PyUnicode_Check)

    /// Convert an arbitrary Python object to a string (like str(obj))
    explicit str(handle h)
        : object(detail::str_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Construct from a null-terminated UTF-8 C string
    explicit str(const char *s)
        : object(detail::str_from_cstr(s), detail::steal_t{}) { }

    /// Construct from a UTF-8 buffer with explicit length
    explicit str(const char *s, size_t n)
        : object(detail::str_from_cstr_and_size(s, n), detail::steal_t{}) { }

    /// Invoke Python's str.format() on this string; defined elsewhere
    template <typename... Args> str format(Args&&... args) const;

    /// Return a UTF-8 representation (buffer owned by the Python object)
    const char *c_str() const { return PyUnicode_AsUTF8AndSize(m_ptr, nullptr); }
};
/// Wrapper around Python ``bytes`` objects
class bytes : public object {
    NB_OBJECT_DEFAULT(bytes, object, "bytes", PyBytes_Check)

    /// Convert an arbitrary Python object to bytes
    explicit bytes(handle h)
        : object(detail::bytes_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Construct from a null-terminated C string
    explicit bytes(const char *s)
        : object(detail::bytes_from_cstr(s), detail::steal_t{}) { }

    /// Construct from an arbitrary buffer with explicit length
    explicit bytes(const void *s, size_t n)
        : object(detail::bytes_from_cstr_and_size(s, n), detail::steal_t{}) { }

    /// Pointer to the internal buffer (owned by the Python object)
    const char *c_str() const { return PyBytes_AsString(m_ptr); }
    const void *data() const { return (const void *) PyBytes_AsString(m_ptr); }

    /// Number of bytes in the buffer
    size_t size() const { return (size_t) PyBytes_Size(m_ptr); }
};
NAMESPACE_BEGIN(literals)
/// User-defined literal: ``"hello"_s`` creates a nanobind::str
inline str operator""_s(const char *s, size_t n) {
    return str(s, n);
}
NAMESPACE_END(literals)
/// Wrapper around Python ``bytearray`` objects (mutable byte buffer)
class bytearray : public object {
    NB_OBJECT(bytearray, object, "bytearray", PyByteArray_Check)

    /// Create an empty bytearray
    bytearray()
        : object(PyObject_CallNoArgs((PyObject *)&PyByteArray_Type), detail::steal_t{}) { }

    /// Convert an arbitrary Python object to a bytearray
    explicit bytearray(handle h)
        : object(detail::bytearray_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Construct from a buffer with explicit length (contents are copied)
    explicit bytearray(const void *s, size_t n)
        : object(detail::bytearray_from_cstr_and_size(s, n), detail::steal_t{}) { }

    /// Buffer accessors; the pointers are owned by the Python object and
    /// invalidated by resize()
    const char *c_str() const { return PyByteArray_AsString(m_ptr); }
    const void *data() const { return PyByteArray_AsString(m_ptr); }
    void *data() { return PyByteArray_AsString(m_ptr); }

    /// Number of bytes currently stored
    size_t size() const { return (size_t) PyByteArray_Size(m_ptr); }

    /// Resize the buffer in place; raises the Python error on failure
    void resize(size_t n) {
        if (PyByteArray_Resize(m_ptr, (Py_ssize_t) n) != 0)
            detail::raise_python_error();
    }
};
/// Wrapper around Python ``tuple`` objects
class tuple : public object {
    NB_OBJECT(tuple, object, "tuple", PyTuple_Check)

    /// Create an empty tuple
    tuple() : object(PyTuple_New(0), detail::steal_t()) { }

    /// Convert an arbitrary Python object (sequence) to a tuple
    explicit tuple(handle h)
        : object(detail::tuple_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Number of elements
    size_t size() const { return (size_t) NB_TUPLE_GET_SIZE(m_ptr); }

    /// Indexed element access returning an accessor proxy; defined elsewhere
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 1>
    detail::accessor<detail::num_item_tuple> operator[](T key) const;

// Direct-pointer iteration over the tuple's item array is only possible
// with full ABI access (not limited-API / PyPy)
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
    detail::fast_iterator begin() const;
    detail::fast_iterator end() const;
#endif

    bool empty() const { return size() == 0; }
};
/// Wrapper around Python ``type`` objects (metaclass instances)
class type_object : public object {
    NB_OBJECT_DEFAULT(type_object, object, "type", PyType_Check)
};
/// Wrapper around Python ``list`` objects
class list : public object {
    NB_OBJECT(list, object, "list", PyList_Check)

    /// Create an empty list
    list() : object(PyList_New(0), detail::steal_t()) { }

    /// Convert an arbitrary Python object (iterable) to a list
    explicit list(handle h)
        : object(detail::list_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Number of elements
    size_t size() const { return (size_t) NB_LIST_GET_SIZE(m_ptr); }

    /// Append / insert an element (caster-based, defined elsewhere)
    template <typename T> void append(T &&value);
    template <typename T> void insert(Py_ssize_t index, T &&value);

    /// Indexed element access returning an accessor proxy; defined elsewhere
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 1>
    detail::accessor<detail::num_item_list> operator[](T key) const;

    /// Remove all elements (implemented as assignment of an empty slice)
    void clear() {
        if (PyList_SetSlice(m_ptr, 0, PY_SSIZE_T_MAX, nullptr))
            raise_python_error();
    }

    /// Append all elements of 'h' (slice assignment past the end)
    void extend(handle h) {
        if (PyList_SetSlice(m_ptr, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, h.ptr()))
            raise_python_error();
    }

    /// In-place sort; raises on comparison failure
    void sort() {
        if (PyList_Sort(m_ptr))
            raise_python_error();
    }

    /// In-place reversal
    void reverse() {
        if (PyList_Reverse(m_ptr))
            raise_python_error();
    }

// Direct-pointer iteration requires full ABI access
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
    detail::fast_iterator begin() const;
    detail::fast_iterator end() const;
#endif

    bool empty() const { return size() == 0; }
};
/// Wrapper around Python ``dict`` objects
class dict : public object {
    NB_OBJECT(dict, object, "dict", PyDict_Check)

    /// Create an empty dictionary
    dict() : object(PyDict_New(), detail::steal_t()) { }

    /// Number of key/value pairs
    size_t size() const { return (size_t) NB_DICT_GET_SIZE(m_ptr); }

    /// Key/value pair iteration (see dict_iterator below)
    detail::dict_iterator begin() const;
    detail::dict_iterator end() const;

    /// Snapshot lists of keys / values / items
    list keys() const { return steal<list>(detail::obj_op_1(m_ptr, PyDict_Keys)); }
    list values() const { return steal<list>(detail::obj_op_1(m_ptr, PyDict_Values)); }
    list items() const { return steal<list>(detail::obj_op_1(m_ptr, PyDict_Items)); }

    /// Look up 'key', returning 'def' when absent (PyDict_GetItem yields a
    /// borrowed reference, which is promoted to an owned one via borrow())
    object get(handle key, handle def) const {
        PyObject *o = PyDict_GetItem(m_ptr, key.ptr());
        if (!o)
            o = def.ptr();
        return borrow(o);
    }

    /// Same as above, with the key given as a C string
    object get(const char *key, handle def) const {
        PyObject *o = PyDict_GetItemString(m_ptr, key);
        if (!o)
            o = def.ptr();
        return borrow(o);
    }

    /// Membership test (caster-based, defined elsewhere)
    template <typename T> bool contains(T&& key) const;

    /// Remove all entries
    void clear() { PyDict_Clear(m_ptr); }

    /// Merge the mapping 'h' into this dictionary (like dict.update)
    void update(handle h) {
        if (PyDict_Update(m_ptr, h.ptr()))
            raise_python_error();
    }

    bool empty() const { return size() == 0; }
};
/// Wrapper around Python ``set`` objects
class set : public object {
    NB_OBJECT(set, object, "set", PySet_Check)

    /// Create an empty set
    set() : object(PySet_New(nullptr), detail::steal_t()) { }

    /// Convert an arbitrary Python object (iterable) to a set
    explicit set(handle h)
        : object(detail::set_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Number of elements
    size_t size() const { return (size_t) NB_SET_GET_SIZE(m_ptr); }

    /// Membership test / insertion (caster-based, defined elsewhere)
    template <typename T> bool contains(T&& key) const;
    template <typename T> void add(T &&value);

    /// Remove all elements
    void clear() {
        if (PySet_Clear(m_ptr))
            raise_python_error();
    }

    /// Remove an element if present; defined elsewhere
    template <typename T> bool discard(T &&value);

    bool empty() const { return size() == 0; }
};
/// Wrapper around Python ``frozenset`` objects (immutable set)
class frozenset : public object {
    NB_OBJECT(frozenset, object, "frozenset", PyFrozenSet_Check)

    /// Create an empty frozenset
    frozenset() : object(PyFrozenSet_New(nullptr), detail::steal_t()) { }

    /// Convert an arbitrary Python object (iterable) to a frozenset
    explicit frozenset(handle h)
        : object(detail::frozenset_from_obj(h.ptr()), detail::steal_t{}) { }

    /// Number of elements
    size_t size() const { return (size_t) NB_SET_GET_SIZE(m_ptr); }

    /// Membership test (caster-based, defined elsewhere)
    template <typename T> bool contains(T&& key) const;

    bool empty() const { return size() == 0; }
};
/// Wrapper matching any object that satisfies the sequence protocol
class sequence : public object {
    NB_OBJECT_DEFAULT(sequence, object, "collections.abc.Sequence", PySequence_Check)
};

/// Wrapper matching any object that satisfies the mapping protocol
class mapping : public object {
    NB_OBJECT_DEFAULT(mapping, object, "collections.abc.Mapping", PyMapping_Check)

    /// Snapshot lists of keys / values / items via the abstract mapping API
    list keys() const { return steal<list>(detail::obj_op_1(m_ptr, PyMapping_Keys)); }
    list values() const { return steal<list>(detail::obj_op_1(m_ptr, PyMapping_Values)); }
    list items() const { return steal<list>(detail::obj_op_1(m_ptr, PyMapping_Items)); }

    /// Membership test (caster-based, defined elsewhere)
    template <typename T> bool contains(T&& key) const;
};
/// Marker type for a function's variadic positional arguments (*args)
class args : public tuple {
    NB_OBJECT_DEFAULT(args, tuple, "tuple", PyTuple_Check)
};

/// Marker type for a function's variadic keyword arguments (**kwargs)
class kwargs : public dict {
    NB_OBJECT_DEFAULT(kwargs, dict, "dict", PyDict_Check)
};
/// Wrapper around Python iterators, exposing a C++ input-iterator interface.
/// Exhaustion is signaled by comparing equal to iterator::sentinel().
class iterator : public object {
public:
    using difference_type = Py_ssize_t;
    using value_type = handle;
    using reference = const handle;
    using pointer = const handle *;

    NB_OBJECT_DEFAULT(iterator, object, "collections.abc.Iterator", PyIter_Check)

    /// Advance to the next element; m_value becomes invalid at exhaustion
    iterator& operator++() {
        m_value = steal(detail::obj_iter_next(m_ptr));
        return *this;
    }

    iterator operator++(int) {
        iterator rv = *this;
        m_value = steal(detail::obj_iter_next(m_ptr));
        return rv;
    }

    /// Dereference: fetches the first element lazily on initial access
    handle operator*() const {
        if (is_valid() && !m_value.is_valid())
            m_value = steal(detail::obj_iter_next(m_ptr));
        return m_value;
    }

    pointer operator->() const { operator*(); return &m_value; }

    /// Past-the-end sentinel (a default-constructed, invalid iterator)
    static iterator sentinel() { return {}; }

    // Comparison dereferences both sides, so the end-of-iteration state
    // (null m_value) compares equal to the sentinel
    friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }
    friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }

private:
    // Cached current element; mutable so operator*() can fetch lazily
    mutable object m_value;
};
/// Wrapper matching any object that can produce an iterator
class iterable : public object {
public:
    NB_OBJECT_DEFAULT(iterable, object, "collections.abc.Iterable", detail::iterable_check)
};
/// Retrieve the Python type object associated with a C++ class
template <typename T> handle type() noexcept {
    return detail::nb_type_lookup(&typeid(detail::intrinsic_t<T>));
}

/// Check whether 'h' is an instance of T. Three strategies depending on T:
/// wrapper types use their own check_(), bound C++ types use an RTTI lookup,
/// and everything else attempts a (non-converting) type-caster load.
template <typename T>
NB_INLINE bool isinstance(handle h) noexcept {
    if constexpr (std::is_base_of_v<handle, T>)
        return T::check_(h);
    else if constexpr (detail::is_base_caster_v<detail::make_caster<T>>)
        return detail::nb_type_isinstance(h.ptr(), &typeid(detail::intrinsic_t<T>));
    else
        return detail::make_caster<T>().from_python(h, 0, nullptr);
}

/// Check whether type 'h1' is a subclass of type 'h2'
NB_INLINE bool issubclass(handle h1, handle h2) {
    return detail::issubclass(h1.ptr(), h2.ptr());
}
/// Equivalent of Python's repr(obj)
NB_INLINE str repr(handle h) { return steal<str>(detail::obj_repr(h.ptr())); }

/// Generic len(obj); raises if the object has no length
NB_INLINE size_t len(handle h) { return detail::obj_len(h.ptr()); }

/// Length hint for iterables of unknown size (operator.length_hint)
NB_INLINE size_t len_hint(handle h) { return detail::obj_len_hint(h.ptr()); }

// Fast overloads for containers whose size macro avoids a protocol call
NB_INLINE size_t len(const tuple &t) { return (size_t) NB_TUPLE_GET_SIZE(t.ptr()); }
NB_INLINE size_t len(const list &l) { return (size_t) NB_LIST_GET_SIZE(l.ptr()); }
NB_INLINE size_t len(const dict &d) { return (size_t) NB_DICT_GET_SIZE(d.ptr()); }
NB_INLINE size_t len(const set &d) { return (size_t) NB_SET_GET_SIZE(d.ptr()); }
/// Equivalent of Python's print(), with optional 'end' string and 'file' target
inline void print(handle value, handle end = handle(), handle file = handle()) {
    detail::print(value.ptr(), end.ptr(), file.ptr());
}

/// Convenience overload that wraps a C string in a nanobind::str first
inline void print(const char *str, handle end = handle(), handle file = handle()) {
    print(nanobind::str(str), end, file);
}
/// Return an owned reference to the None singleton
inline object none() { return borrow(Py_None); }

/// Return the builtins dictionary of the current frame/interpreter
inline dict builtins() { return borrow<dict>(PyEval_GetBuiltins()); }

/// Equivalent of Python's iter(obj)
inline iterator iter(handle h) {
    return steal<iterator>(detail::obj_iter(h.ptr()));
}
/// Wrapper around Python ``slice`` objects
class slice : public object {
public:
    NB_OBJECT_DEFAULT(slice, object, "slice", PySlice_Check)

    /// Construct from arbitrary start/stop/step objects; raises on failure
    slice(handle start, handle stop, handle step) {
        m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr());
        if (!m_ptr)
            raise_python_error();
    }

    /// slice(stop) — start and step default to None
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 0>
    explicit slice(T stop) : slice(Py_None, int_(stop), Py_None) {}

    /// slice(start, stop) — step defaults to None
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 0>
    slice(T start, T stop) : slice(int_(start), int_(stop), Py_None) {}

    /// slice(start, stop, step)
    template <typename T, detail::enable_if_t<std::is_arithmetic_v<T>> = 0>
    slice(T start, T stop, T step) : slice(int_(start), int_(stop), int_(step)) {}

    /// Resolve the slice against a container of the given size, returning
    /// (start, stop, step, slice_length) with indices clamped into range
    detail::tuple<Py_ssize_t, Py_ssize_t, Py_ssize_t, size_t> compute(size_t size) const {
        Py_ssize_t start, stop, step;
        size_t slice_length;
        detail::slice_compute(m_ptr, (Py_ssize_t) size, start, stop, step, slice_length);
        return detail::tuple(start, stop, step, slice_length);
    }
};
/// Wrapper around Python ``memoryview`` objects
class memoryview : public object {
    NB_OBJECT(memoryview, object, "memoryview", PyMemoryView_Check)

    /// Create a memoryview over a buffer-providing object
    explicit memoryview(handle h)
        : object(detail::memoryview_from_obj(h.ptr()), detail::steal_t{}) { }
};
/// Wrapper for the ``Ellipsis`` singleton (checked by identity)
class ellipsis : public object {
    static bool is_ellipsis(PyObject *obj) { return obj == Py_Ellipsis; }

public:
    NB_OBJECT(ellipsis, object, "types.EllipsisType", is_ellipsis)
    ellipsis() : object(Py_Ellipsis, detail::borrow_t()) {}
};

/// Wrapper for the ``NotImplemented`` singleton (checked by identity)
class not_implemented : public object {
    static bool is_not_implemented(PyObject *obj) { return obj == Py_NotImplemented; }

public:
    NB_OBJECT(not_implemented, object, "types.NotImplementedType", is_not_implemented)
    not_implemented() : object(Py_NotImplemented, detail::borrow_t()) {}
};
/// Wrapper matching any callable Python object
class callable : public object {
public:
    NB_OBJECT(callable, object, "collections.abc.Callable", PyCallable_Check)
    using object::object;
};

/// Wrapper around Python weak references
class weakref : public object {
public:
    NB_OBJECT(weakref, object, "weakref.ReferenceType", PyWeakref_Check)

    /// Create a weak reference to 'obj' with an optional callback that is
    /// invoked when the referent is collected; raises on failure (e.g. for
    /// objects that do not support weak references)
    explicit weakref(handle obj, handle callback = {})
        : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), detail::steal_t{}) {
        if (!m_ptr)
            raise_python_error();
    }
};
/// Accepts any Python object; rendered as ``typing.Any`` in signatures
class any : public object {
public:
    using object::object;
    using object::operator=;
    static constexpr auto Name = detail::const_name("typing.Any");
};

/// A 'handle' that renders as T's type in signatures and type-checks as T,
/// without performing an actual conversion to T
template <typename T> class handle_t : public handle {
public:
    static constexpr auto Name = detail::make_caster<T>::Name;
    using handle::handle;
    using handle::operator=;
    handle_t(const handle &h) : handle(h) { }
    static bool check_(handle h) { return isinstance<T>(h); }
};

/// A 'handle' used as a catch-all overload parameter; rendered as ``object``
struct fallback : public handle {
public:
    static constexpr auto Name = detail::const_name("object");
    using handle::handle;
    using handle::operator=;
    fallback(const handle &h) : handle(h) { }
};
/// A 'type_object' restricted to subclasses of T; rendered as ``type[T]``
template <typename T> class type_object_t : public type_object {
public:
    static constexpr auto Name = detail::const_name("type[") +
                                 detail::make_caster<T>::Name +
                                 detail::const_name("]");

    using type_object::type_object;
    using type_object::operator=;

    /// Accept only type objects that are subtypes of T's bound Python type
    static bool check_(handle h) {
        return PyType_Check(h.ptr()) &&
               PyType_IsSubtype((PyTypeObject *) h.ptr(),
                                (PyTypeObject *) nanobind::type<T>().ptr());
    }
};
/// Wrapper that attaches extra type parameters to T for signature rendering
/// (e.g. typed<list, int> renders as list[int]); behaves exactly like T
template <typename T, typename...> class typed : public T {
public:
    constexpr static bool nb_typed = true;
    using T::T;
    using T::operator=;
    typed(const T& o) : T(o) {}
    typed(T&& o) : T(std::move(o)) {}
};

/// Pairs a C++ pointer with the Python handle that owns/wraps it
template <typename T> struct pointer_and_handle {
    T *p;
    handle h;
};
NAMESPACE_BEGIN(detail)

// Out-of-line implementations of the CRTP 'api<Derived>' mixin: each method
// delegates to the derived object's raw PyObject pointer.

/// Implicit conversion to a plain (non-owning) handle
template <typename Derived> NB_INLINE api<Derived>::operator handle() const {
    return derived().ptr();
}

/// Return the object's Python type
template <typename Derived> NB_INLINE handle api<Derived>::type() const {
    return (PyObject *) Py_TYPE(derived().ptr());
}

/// Manual reference count management (returns the handle for chaining)
template <typename Derived> NB_INLINE handle api<Derived>::inc_ref() const & {
    return operator handle().inc_ref();
}

template <typename Derived> NB_INLINE handle api<Derived>::dec_ref() const & {
    return operator handle().dec_ref();
}

/// Identity comparison (Python's ``is`` operator)
template <typename Derived>
NB_INLINE bool api<Derived>::is(handle value) const {
    return derived().ptr() == value.ptr();
}

/// Generic iteration support: begin() creates a Python iterator,
/// end() is the exhaustion sentinel
template <typename Derived> iterator api<Derived>::begin() const {
    return iter(*this);
}

template <typename Derived> iterator api<Derived>::end() const {
    return iterator::sentinel();
}
/// Iterator that walks a tuple/list's internal PyObject* item array directly
/// (only usable when the full CPython ABI is available)
struct fast_iterator {
    using value_type = handle;
    using reference = const value_type;
    using difference_type = std::ptrdiff_t;

    fast_iterator() = default;
    fast_iterator(PyObject **value) : value(value) { }

    fast_iterator& operator++() { value++; return *this; }
    fast_iterator operator++(int) { fast_iterator rv = *this; value++; return rv; }
    friend bool operator==(const fast_iterator &a, const fast_iterator &b) { return a.value == b.value; }
    friend bool operator!=(const fast_iterator &a, const fast_iterator &b) { return a.value != b.value; }

    /// Dereference yields a borrowed handle to the current element
    handle operator*() const { return *value; }

    // Current position within the container's item array
    PyObject **value;
};
/// Iterator over dict key/value pairs based on PyDict_Next. A position of
/// -1 marks the exhausted/end state, which is what operator== compares.
/// Non-copyable: on free-threaded builds it holds a critical section on the
/// dict for its entire lifetime.
class dict_iterator {
public:
    NB_NONCOPYABLE(dict_iterator)

    using value_type = std::pair<handle, handle>;
    using reference = const value_type;

    dict_iterator() = default;

    /// Begin iteration; immediately fetches the first pair (or sets pos=-1)
    dict_iterator(handle h) : h(h), pos(0) {
#if defined(NB_FREE_THREADED)
        // Lock the dict so concurrent mutation cannot invalidate PyDict_Next
        PyCriticalSection_Begin(&cs, h.ptr());
#endif
        increment();
    }

#if defined(NB_FREE_THREADED)
    ~dict_iterator() {
        if (h.ptr())
            PyCriticalSection_End(&cs);
    }
#endif

    dict_iterator& operator++() {
        increment();
        return *this;
    }

    /// Advance to the next pair; PyDict_Next returns 0 at exhaustion
    void increment() {
        if (PyDict_Next(h.ptr(), &pos, &key, &value) == 0)
            pos = -1;
    }

    /// Current (key, value) pair as borrowed handles
    value_type operator*() const { return { key, value }; }

    friend bool operator==(const dict_iterator &a, const dict_iterator &b) { return a.pos == b.pos; }
    friend bool operator!=(const dict_iterator &a, const dict_iterator &b) { return a.pos != b.pos; }

private:
    handle h;
    Py_ssize_t pos = -1;        // PyDict_Next cursor; -1 == end
    PyObject *key = nullptr;    // borrowed reference to current key
    PyObject *value = nullptr;  // borrowed reference to current value
#if defined(NB_FREE_THREADED)
    PyCriticalSection cs { };
#endif
};
// Instantiate rich comparisons for wrapper types via PyObject_RichCompare
NB_IMPL_COMP(equal, Py_EQ)
NB_IMPL_COMP(not_equal, Py_NE)
NB_IMPL_COMP(operator<, Py_LT)
NB_IMPL_COMP(operator<=, Py_LE)
NB_IMPL_COMP(operator>, Py_GT)
NB_IMPL_COMP(operator>=, Py_GE)

// Unary number-protocol operators
NB_IMPL_OP_1(operator-, PyNumber_Negative)
NB_IMPL_OP_1(operator~, PyNumber_Invert)

// Binary number-protocol operators (return a new object)
NB_IMPL_OP_2(operator+, PyNumber_Add)
NB_IMPL_OP_2(operator-, PyNumber_Subtract)
NB_IMPL_OP_2(operator*, PyNumber_Multiply)
NB_IMPL_OP_2(operator/, PyNumber_TrueDivide)
NB_IMPL_OP_2(operator%, PyNumber_Remainder)
NB_IMPL_OP_2(operator|, PyNumber_Or)
NB_IMPL_OP_2(operator&, PyNumber_And)
NB_IMPL_OP_2(operator^, PyNumber_Xor)
NB_IMPL_OP_2(operator<<, PyNumber_Lshift)
NB_IMPL_OP_2(operator>>, PyNumber_Rshift)
NB_IMPL_OP_2(floor_div, PyNumber_FloorDivide)

// In-place variants (PyNumber_InPlace*)
NB_IMPL_OP_2_I(operator+=, PyNumber_InPlaceAdd)
NB_IMPL_OP_2_I(operator%=, PyNumber_InPlaceRemainder)
NB_IMPL_OP_2_I(operator-=, PyNumber_InPlaceSubtract)
NB_IMPL_OP_2_I(operator*=, PyNumber_InPlaceMultiply)
NB_IMPL_OP_2_I(operator/=, PyNumber_InPlaceTrueDivide)
NB_IMPL_OP_2_I(operator|=, PyNumber_InPlaceOr)
NB_IMPL_OP_2_I(operator&=, PyNumber_InPlaceAnd)
NB_IMPL_OP_2_I(operator^=, PyNumber_InPlaceXor)
NB_IMPL_OP_2_I(operator<<=,PyNumber_InPlaceLshift)
NB_IMPL_OP_2_I(operator>>=,PyNumber_InPlaceRshift)

// The helper macros are implementation details; undefine them so they do
// not leak into user code that includes this header
#undef NB_DECL_COMP
#undef NB_IMPL_COMP
#undef NB_DECL_OP_1
#undef NB_IMPL_OP_1
#undef NB_DECL_OP_2
#undef NB_IMPL_OP_2
#undef NB_DECL_OP_2_I
#undef NB_IMPL_OP_2_I
#undef NB_IMPL_OP_2_IO
NAMESPACE_END(detail)
/// dict iteration: begin() wraps this dict, end() is the pos==-1 sentinel
inline detail::dict_iterator dict::begin() const { return { *this }; }
inline detail::dict_iterator dict::end() const { return { }; }

// Fast direct-pointer iteration over tuple/list item arrays; relies on the
// concrete PyTupleObject/PyListObject layout, hence unavailable under the
// limited API and PyPy
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
inline detail::fast_iterator tuple::begin() const {
    return ((PyTupleObject *) m_ptr)->ob_item;
}
inline detail::fast_iterator tuple::end() const {
    PyTupleObject *v = (PyTupleObject *) m_ptr;
    return v->ob_item + v->ob_base.ob_size;
}
inline detail::fast_iterator list::begin() const {
    return ((PyListObject *) m_ptr)->ob_item;
}
inline detail::fast_iterator list::end() const {
    PyListObject *v = (PyListObject *) m_ptr;
    return v->ob_item + v->ob_base.ob_size;
}
#endif

/// Equivalent of Python's ``del obj[key]`` / ``del obj.attr`` on an accessor
template <typename T> void del(detail::accessor<T> &a) { a.del(); }
template <typename T> void del(detail::accessor<T> &&a) { a.del(); }

NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/ndarray.h | C/C++ Header | /*
nanobind/ndarray.h: functionality to exchange n-dimensional arrays with
other array programming frameworks (NumPy, PyTorch, etc.)
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
The API below is based on the DLPack project
(https://github.com/dmlc/dlpack/blob/main/include/dlpack/dlpack.h)
*/
#pragma once
#include <nanobind/nanobind.h>
#include <initializer_list>
NAMESPACE_BEGIN(NB_NAMESPACE)
/// DLPack API/ABI data structures are part of a separate namespace.
NAMESPACE_BEGIN(dlpack)
/// Scalar type categories, mirroring DLPack's DLDataTypeCode values
enum class dtype_code : uint8_t {
    Int = 0, UInt = 1, Float = 2, Bfloat = 4, Complex = 5, Bool = 6,
    Float8_E3M4 = 7, Float8_E4M3 = 8, Float8_E4M3B11FNUZ = 9,
    Float8_E4M3FN = 10, Float8_E4M3FNUZ = 11, Float8_E5M2 = 12,
    Float8_E5M2FNUZ = 13, Float8_E8M0FNU = 14,
    Float6_E2M3FN = 15, Float6_E3M2FN = 16,
    Float4_E2M1FN = 17
};

/// Device on which an array's memory resides (mirrors DLDevice)
struct device {
    int32_t device_type = 0;
    int32_t device_id = 0;
};

/// Scalar type descriptor (mirrors DLDataType): category code, bit width,
/// and number of SIMD lanes (1 for ordinary scalars)
struct dtype {
    uint8_t code = 0;
    uint8_t bits = 0;
    uint16_t lanes = 0;

    constexpr bool operator==(const dtype &o) const {
        return code == o.code && bits == o.bits && lanes == o.lanes;
    }
    constexpr bool operator!=(const dtype &o) const { return !operator==(o); }
};

/// Plain-old-data tensor description (mirrors DLTensor). 'strides' is
/// expressed in elements; a nullptr value means C-contiguous per DLPack.
struct dltensor {
    void *data = nullptr;
    nanobind::dlpack::device device;
    int32_t ndim = 0;
    nanobind::dlpack::dtype dtype;
    int64_t *shape = nullptr;
    int64_t *strides = nullptr;
    uint64_t byte_offset = 0;
};
NAMESPACE_END(dlpack)
// Helper macros that stamp out tag types used as ndarray<...> template
// annotations. Each tag carries a compile-time 'value' plus a 'name' used
// when rendering Python signatures, and a marker constant that
// ndarray_config_t uses to dispatch on the annotation kind.
#define NB_FRAMEWORK(Name, Value, label)                                  \
    struct Name {                                                          \
        static constexpr auto name = detail::const_name(label);            \
        static constexpr int value = Value;                                \
        static constexpr bool is_framework = true;                         \
    }

#define NB_DEVICE(Name, Value)                                             \
    struct Name {                                                          \
        static constexpr auto name = detail::const_name("device='" #Name "'"); \
        static constexpr int value = Value;                                \
        static constexpr bool is_device_type = true;                       \
    }

#define NB_ORDER(Name, Value)                                              \
    struct Name {                                                          \
        static constexpr auto name = detail::const_name("order='" Value "'"); \
        static constexpr char value = Value[0];                            \
        static constexpr bool is_order = true;                             \
    }

// Memory-order annotations: C (row-major), Fortran (column-major), or any
NB_ORDER(c_contig, "C");
NB_ORDER(f_contig, "F");
NB_ORDER(any_contig, "A");

// Framework annotations controlling which Python array type is produced
NB_FRAMEWORK(no_framework, 0, "ndarray");
NB_FRAMEWORK(numpy, 1, "numpy.ndarray");
NB_FRAMEWORK(pytorch, 2, "torch.Tensor");
NB_FRAMEWORK(tensorflow, 3, "tensorflow.python.framework.ops.EagerTensor");
NB_FRAMEWORK(jax, 4, "jaxlib.xla_extension.DeviceArray");
NB_FRAMEWORK(cupy, 5, "cupy.ndarray");
NB_FRAMEWORK(memview, 6, "memoryview");
NB_FRAMEWORK(array_api, 7, "ArrayLike");

// Device-type annotations; the numeric values follow the DLPack device enum
NAMESPACE_BEGIN(device)
NB_DEVICE(none, 0); NB_DEVICE(cpu, 1); NB_DEVICE(cuda, 2);
NB_DEVICE(cuda_host, 3); NB_DEVICE(opencl, 4); NB_DEVICE(vulkan, 7);
NB_DEVICE(metal, 8); NB_DEVICE(rocm, 10); NB_DEVICE(rocm_host, 11);
NB_DEVICE(cuda_managed, 13); NB_DEVICE(oneapi, 14);
NAMESPACE_END(device)

#undef NB_FRAMEWORK
#undef NB_DEVICE
#undef NB_ORDER
/// Classifies a C++ scalar type into the (mutually exclusive) categories
/// used to build a DLPack dtype. May be specialized for custom scalar types.
template <typename T> struct ndarray_traits {
    static constexpr bool is_complex = detail::is_complex_v<T>;
    static constexpr bool is_float   = std::is_floating_point_v<T>;
    static constexpr bool is_bool    = std::is_same_v<std::remove_cv_t<T>, bool>;
    // 'bool' is integral in C++, so exclude it from the integer category
    static constexpr bool is_int     = std::is_integral_v<T> && !is_bool;
    static constexpr bool is_signed  = std::is_signed_v<T>;
};
NAMESPACE_BEGIN(detail)
/// Maps a C++ scalar type to its DLPack dtype descriptor and signature name.
/// Types that fall into none of the categories yield a zeroed dtype, which
/// is_ndarray_scalar_v (below) uses to reject them.
template <typename T, typename /* SFINAE */ = int> struct dtype_traits {
    using traits = ndarray_traits<T>;

    // Number of categories T matched; must be 0 or 1
    static constexpr int matches = traits::is_bool + traits::is_complex +
                                   traits::is_float + traits::is_int;

    static_assert(matches <= 1, "dtype matches multiple type categories!");

    // DLPack descriptor: (code, bits, lanes). Bugfix: the conditional
    // expressions are parenthesized before the casts — previously the casts
    // bound only to 'matches' (a cast binds tighter than ?:), leaving the
    // bits operand as a size_t that relied on list-initialization narrowing.
    static constexpr dlpack::dtype value{
        (uint8_t) ((traits::is_bool ? (int) dlpack::dtype_code::Bool : 0) +
                   (traits::is_complex ? (int) dlpack::dtype_code::Complex : 0) +
                   (traits::is_float ? (int) dlpack::dtype_code::Float : 0) +
                   (traits::is_int && traits::is_signed ? (int) dlpack::dtype_code::Int : 0) +
                   (traits::is_int && !traits::is_signed ? (int) dlpack::dtype_code::UInt : 0)),
        (uint8_t) (matches ? sizeof(T) * 8 : 0),
        (uint16_t) (matches ? 1 : 0)
    };

    // Signature name, e.g. "float32", "uint8", "complex128", or "bool"
    static constexpr auto name =
        const_name<traits::is_complex>("complex", "") +
        const_name<traits::is_int && traits::is_signed>("int", "") +
        const_name<traits::is_int && !traits::is_signed>("uint", "") +
        const_name<traits::is_float>("float", "") +
        const_name<traits::is_bool>(const_name("bool"), const_name<sizeof(T) * 8>());
};
/// 'void' acts as the "unspecified scalar type" marker: zeroed dtype,
/// empty signature name
template <> struct dtype_traits<void> {
    static constexpr dlpack::dtype value{ 0, 0, 0 };
    static constexpr auto name = descr<0>();
};

/// const-qualified scalars share the dtype of the underlying type
/// (constness is tracked separately as the read-only flag)
template <typename T> struct dtype_traits<const T> {
    static constexpr dlpack::dtype value = dtype_traits<T>::value;
    static constexpr auto name = dtype_traits<T>::name;
};
/// Compile-time shape annotation: each Is is either a fixed extent (>= 0)
/// or -1 meaning "any size" (rendered as '*' in the signature)
template <ssize_t... Is> struct shape {
    static constexpr auto name =
        const_name("shape=(") +
        concat(const_name<Is == -1>(const_name("*"),
                                    const_name<(size_t) Is>())...) + const_name(")");
    static_assert(
        ((Is >= 0 || Is == -1) && ...),
        "The arguments to nanobind::shape must either be positive or equal to -1"
    );

    /// Write the compile-time extents into a signed output array
    /// (-1 entries are written through unchanged)
    static void put(int64_t *out) {
        size_t ctr = 0;
        ((out[ctr++] = Is), ...);
    }

    /// Write the extents into an unsigned output array; aborts if any
    /// dimension is the wildcard -1, since that has no unsigned encoding
    static void put(size_t *out) {
        if constexpr (((Is == -1) || ...))
            detail::fail("Negative ndarray sizes are not allowed here!");
        size_t ctr = 0;
        ((out[ctr++] = (size_t) Is), ...);
    }
};
/// A type is a usable ndarray scalar iff dtype_traits produced a nonzero
/// bit width for it
template <typename T>
constexpr bool is_ndarray_scalar_v = dtype_traits<T>::value.bits != 0;

/// Expands an index sequence of length N into shape<-1, -1, ..., -1>,
/// i.e. "N dimensions of any size" (backs the ndim<N> alias)
template <typename> struct ndim_shape;
template <size_t... S> struct ndim_shape<std::index_sequence<S...>> {
    using type = shape<((void) S, -1)...>;
};
NAMESPACE_END(detail)
using detail::shape;

/// Annotation marking an ndarray parameter as read-only
struct ro { };

/// ndim<N>: shorthand for a shape with N wildcard dimensions
template <size_t N>
using ndim = typename detail::ndim_shape<std::make_index_sequence<N>>::type;

/// Obtain the DLPack dtype descriptor for a C++ scalar type
template <typename T> constexpr dlpack::dtype dtype() {
    return detail::dtype_traits<T>::value;
}
NAMESPACE_BEGIN(detail)
/// Sentinel type to initialize ndarray_config_t<>
struct unused {
    using type = void;
    static constexpr int value = 0;
    static constexpr auto name = descr<0>();
};

/// ndarray_config describes a requested array configuration at *runtime*;
/// default-constructed fields mean "no constraint"
struct ndarray_config {
    int device_type = 0;         // 0: any device
    char order = '\0';           // '\0': any memory order
    bool ro = false;             // true: read-only access requested
    dlpack::dtype dtype { };     // zeroed: any scalar type
    int32_t ndim = -1;           // -1: any dimensionality
    int64_t *shape = nullptr;    // nullptr: no per-dimension constraint

    ndarray_config() = default;

    /// Extract the runtime configuration from a compile-time
    /// ndarray_config_t instantiation (passed by value for deduction)
    template <typename T> ndarray_config(T)
        : device_type(T::DeviceType::value),
          order((char) T::Order::value),
          ro(std::is_const_v<typename T::Scalar>),
          dtype(nanobind::dtype<typename T::Scalar>()),
          ndim(T::N),
          shape(nullptr) { }
};
/// ndarray_config_t collects nd-array template parameters in a structured way.
/// Its "storage" is purely based on types members
template <typename /* SFINAE */ = int, typename...> struct ndarray_config_t;

/// Base case (no annotations left): every setting is at its default
template <> struct ndarray_config_t<int> {
    using Framework = no_framework;
    using Scalar = void;
    using Shape = unused;
    using Order = unused;
    using DeviceType = unused;
    static constexpr int32_t N = -1;   // -1: dimensionality unconstrained
};

// Template infrastructure to collect ndarray annotations and fail if duplicates are found
// Each partial specialization consumes the first annotation, records it, and
// recurses on the remainder; the SFINAE slot selects by annotation kind.

/// 'ro' marks the scalar type const (read-only access)
template <typename... Args> struct ndarray_config_t<int, ro, Args...> : ndarray_config_t<int, Args...> {
    using Scalar = std::add_const_t<typename ndarray_config_t<int, Args...>::Scalar>;
};

/// 'unused' sentinels are skipped
template <typename... Args> struct ndarray_config_t<int, unused, Args...> : ndarray_config_t<int, Args...> { };

/// A scalar type annotation; preserves constness contributed by 'ro'
template <typename Arg, typename... Args> struct ndarray_config_t<enable_if_t<is_ndarray_scalar_v<Arg>>, Arg, Args...> : ndarray_config_t<int, Args...> {
    using Scalar = std::conditional_t<
        std::is_const_v<typename ndarray_config_t<int, Args...>::Scalar>,
        std::add_const_t<Arg>, Arg>;
};

/// A device-type annotation (device::cpu, device::cuda, ...)
template <typename Arg, typename... Args> struct ndarray_config_t<enable_if_t<Arg::is_device_type>, Arg, Args...> : ndarray_config_t<int, Args...> {
    using DeviceType = Arg;
};

/// A framework annotation (numpy, pytorch, ...)
template <typename Arg, typename... Args> struct ndarray_config_t<enable_if_t<Arg::is_framework>, Arg, Args...> : ndarray_config_t<int, Args...> {
    using Framework = Arg;
};

/// A memory-order annotation (c_contig, f_contig, any_contig)
template <typename Arg, typename... Args> struct ndarray_config_t<enable_if_t<Arg::is_order>, Arg, Args...> : ndarray_config_t<int, Args...> {
    using Order = Arg;
};

/// A shape<...> annotation fixes the dimension count N
template <ssize_t... Is, typename... Args> struct ndarray_config_t<int, shape<Is...>, Args...> : ndarray_config_t<int, Args...> {
    using Shape = shape<Is...>;
    static constexpr int32_t N = sizeof...(Is);
};
NAMESPACE_END(detail)
/// Lightweight non-owning view with compile-time dimension count and
/// (optionally) compile-time contiguity, enabling efficient multi-dimensional
/// indexing. Created via ndarray::view(); does not manage any references.
template <typename Scalar, size_t Dim, char Order> struct ndarray_view {
    ndarray_view() = default;
    ndarray_view(const ndarray_view &) = default;
    ndarray_view(ndarray_view &&) = default;
    ndarray_view &operator=(const ndarray_view &) = default;
    ndarray_view &operator=(ndarray_view &&) noexcept = default;
    ~ndarray_view() noexcept = default;

    /// Element access: one index per dimension, combined with the strides
    /// (expressed in elements, not bytes)
    template <typename... Args> NB_INLINE Scalar &operator()(Args... indices) const {
        static_assert(
            sizeof...(Args) == Dim,
            "ndarray_view::operator(): invalid number of arguments");

        const int64_t indices_i64[] { (int64_t) indices... };
        int64_t offset = 0;
        for (size_t i = 0; i < Dim; ++i)
            offset += indices_i64[i] * m_strides[i];

        return *(m_data + offset);
    }

    size_t ndim() const { return Dim; }
    size_t shape(size_t i) const { return m_shape[i]; }
    int64_t stride(size_t i) const { return m_strides[i]; }
    Scalar *data() const { return m_data; }

private:
    template <typename...> friend class ndarray;

    template <size_t... I1, ssize_t... I2>
    ndarray_view(Scalar *data, const int64_t *shape, const int64_t *strides,
                 std::index_sequence<I1...>, nanobind::shape<I2...>)
        : m_data(data) {

        /* Initialize shape/strides with compile-time knowledge if
           available (to permit vectorization, loop unrolling, etc.) */
        ((m_shape[I1] = (I2 == -1) ? shape[I1] : (int64_t) I2), ...);
        ((m_strides[I1] = strides[I1]), ...);

        // When contiguity is known at compile time, recompute the strides
        // from the shape so they become compile-time-analyzable constants
        if constexpr (Order == 'F') {
            // Column-major: first axis is unit-stride
            m_strides[0] = 1;
            for (size_t i = 1; i < Dim; ++i)
                m_strides[i] = m_strides[i - 1] * m_shape[i - 1];
        } else if constexpr (Order == 'C') {
            // Row-major: last axis is unit-stride
            m_strides[Dim - 1] = 1;
            for (Py_ssize_t i = (Py_ssize_t) Dim - 2; i >= 0; --i)
                m_strides[i] = m_strides[i + 1] * m_shape[i + 1];
        }
    }

    Scalar *m_data = nullptr;
    int64_t m_shape[Dim] { };
    int64_t m_strides[Dim] { };
};
template <typename... Args> class ndarray {
public:
template <typename...> friend class ndarray;
using Config = detail::ndarray_config_t<int, Args...>;
using Scalar = typename Config::Scalar;
static constexpr bool ReadOnly = std::is_const_v<Scalar>;
static constexpr char Order = Config::Order::value;
static constexpr int DeviceType = Config::DeviceType::value;
using VoidPtr = std::conditional_t<ReadOnly, const void *, void *>;
ndarray() = default;
explicit ndarray(detail::ndarray_handle *handle) : m_handle(handle) {
if (handle)
m_dltensor = *detail::ndarray_inc_ref(handle);
}
template <typename... Args2>
explicit ndarray(const ndarray<Args2...> &other) : ndarray(other.m_handle) { }
ndarray(VoidPtr data,
size_t ndim,
const size_t *shape,
handle owner = { },
const int64_t *strides = nullptr,
dlpack::dtype dtype = nanobind::dtype<Scalar>(),
int device_type = DeviceType,
int device_id = 0,
char order = Order) {
m_handle = detail::ndarray_create(
(void *) data, ndim, shape, owner.ptr(), strides, dtype,
ReadOnly, device_type, device_id, order);
m_dltensor = *detail::ndarray_inc_ref(m_handle);
}
ndarray(VoidPtr data,
std::initializer_list<size_t> shape = { },
handle owner = { },
std::initializer_list<int64_t> strides = { },
dlpack::dtype dtype = nanobind::dtype<Scalar>(),
int device_type = DeviceType,
int device_id = 0,
char order = Order) {
size_t shape_size = shape.size();
if (strides.size() != 0 && strides.size() != shape_size)
detail::fail("ndarray(): shape and strides have incompatible size!");
size_t shape_buf[Config::N <= 0 ? 1 : Config::N];
const size_t *shape_ptr = shape.begin();
if constexpr (Config::N > 0) {
if (!shape_size) {
Config::Shape::put(shape_buf);
shape_size = Config::N;
shape_ptr = shape_buf;
}
} else {
(void) shape_buf;
}
m_handle = detail::ndarray_create(
(void *) data, shape_size, shape_ptr, owner.ptr(),
(strides.size() == 0) ? nullptr : strides.begin(), dtype,
ReadOnly, device_type, device_id, order);
m_dltensor = *detail::ndarray_inc_ref(m_handle);
}
~ndarray() {
detail::ndarray_dec_ref(m_handle);
}
ndarray(const ndarray &t) : m_handle(t.m_handle), m_dltensor(t.m_dltensor) {
detail::ndarray_inc_ref(m_handle);
}
ndarray(ndarray &&t) noexcept : m_handle(t.m_handle), m_dltensor(t.m_dltensor) {
t.m_handle = nullptr;
t.m_dltensor = dlpack::dltensor();
}
ndarray &operator=(ndarray &&t) noexcept {
detail::ndarray_dec_ref(m_handle);
m_handle = t.m_handle;
m_dltensor = t.m_dltensor;
t.m_handle = nullptr;
t.m_dltensor = dlpack::dltensor();
return *this;
}
ndarray &operator=(const ndarray &t) {
detail::ndarray_inc_ref(t.m_handle);
detail::ndarray_dec_ref(m_handle);
m_handle = t.m_handle;
m_dltensor = t.m_dltensor;
return *this;
}
dlpack::dtype dtype() const { return m_dltensor.dtype; }
size_t ndim() const { return (size_t) m_dltensor.ndim; }
size_t shape(size_t i) const { return (size_t) m_dltensor.shape[i]; }
int64_t stride(size_t i) const { return m_dltensor.strides[i]; }
const int64_t* shape_ptr() const { return m_dltensor.shape; }
const int64_t* stride_ptr() const { return m_dltensor.strides; }
bool is_valid() const { return m_handle != nullptr; }
int device_type() const { return (int) m_dltensor.device.device_type; }
int device_id() const { return (int) m_dltensor.device.device_id; }
detail::ndarray_handle *handle() const { return m_handle; }
size_t size() const {
size_t ret = is_valid();
for (size_t i = 0; i < ndim(); ++i)
ret *= shape(i);
return ret;
}
size_t itemsize() const { return ((size_t) dtype().bits + 7) / 8; }
size_t nbytes() const { return ((size_t) dtype().bits * size() + 7) / 8; }
Scalar *data() const {
return (Scalar *) ((uint8_t *) m_dltensor.data +
m_dltensor.byte_offset);
}
template <typename... Args2>
NB_INLINE auto& operator()(Args2... indices) const {
return *(Scalar *) ((uint8_t *) m_dltensor.data +
byte_offset(indices...));
}
/// Return a lightweight, fixed-rank view for efficient element access.
/// Annotations given to .view<..>() are merged with (and take precedence
/// over, being listed first) those of the parent ndarray type. Both a
/// scalar type and a shape<..>/ndim<..> annotation must be available;
/// otherwise compilation fails with the static_assert messages below.
template <typename... Args2> NB_INLINE auto view() const {
    using namespace detail;
    using Config2 = detail::ndarray_config_t<int, Args2..., Args...>;
    using Scalar2 = typename Config2::Scalar;
    // N is only meaningful when a shape/ndim annotation exists (Config2::N >= 0)
    constexpr size_t N = Config2::N >= 0 ? Config2::N : 0;

    constexpr bool has_scalar = !std::is_void_v<Scalar2>,
                   has_shape = Config2::N >= 0;

    static_assert(has_scalar,
        "To use the ndarray::view<..>() method, you must add a scalar type "
        "annotation (e.g. 'float') to the template parameters of the parent "
        "ndarray, or to the call to .view<..>()");

    static_assert(has_shape,
        "To use the ndarray::view<..>() method, you must add a shape<..> "
        "or ndim<..> annotation to the template parameters of the parent "
        "ndarray, or to the call to .view<..>()");

    // Guarded by 'if constexpr' so the error case still produces a
    // well-formed (if unusable) return type alongside the static_asserts
    if constexpr (has_scalar && has_shape) {
        using Result = ndarray_view<Scalar2, N, Config2::Order::value>;
        return Result((Scalar2 *) data(), shape_ptr(), stride_ptr(),
                      std::make_index_sequence<N>(),
                      typename Config2::Shape());
    } else {
        return nullptr;
    }
}

/// Convert to a Python object (defined out-of-line after the type caster)
auto cast(rv_policy rvp = rv_policy::automatic, class handle parent = {});
private:
    /// Compute the byte offset of the element addressed by `indices...`
    /// relative to the tensor's base data pointer. Strides are stored in
    /// elements, hence the final multiplication by sizeof(Scalar).
    template <typename... Args2>
    NB_INLINE int64_t byte_offset(Args2... indices) const {
        constexpr bool has_scalar = !std::is_void_v<Scalar>,
                       has_shape = Config::N != -1;

        static_assert(has_scalar,
            "To use ndarray::operator(), you must add a scalar type "
            "annotation (e.g. 'float') to the ndarray template parameters.");
        static_assert(has_shape,
            "To use ndarray::operator(), you must add a shape<> or "
            "ndim<> annotation to the ndarray template parameters.");

        if constexpr (has_scalar && has_shape) {
            static_assert(sizeof...(Args2) == (size_t) Config::N,
                          "ndarray::operator(): invalid number of arguments");

            // Fold expression: accumulate index * stride over all dimensions
            size_t counter = 0;
            int64_t index = 0;
            ((index += int64_t(indices) * m_dltensor.strides[counter++]), ...);

            return (int64_t) m_dltensor.byte_offset + index * sizeof(Scalar);
        } else {
            return 0;
        }
    }

    detail::ndarray_handle *m_handle = nullptr;  // shared refcounted handle (null when invalid)
    dlpack::dltensor m_dltensor;                 // cached copy of the DLPack metadata
};
/// Check whether the Python object `h` can be interpreted as an ndarray
inline bool ndarray_check(handle h) { return detail::ndarray_check(h.ptr()); }
NAMESPACE_BEGIN(detail)
/// Compile-time "dtype=<name>" fragment used in generated type signatures
template <typename T> struct dtype_name {
    static constexpr auto name = detail::const_name("dtype=") + dtype_traits<T>::name;
};
// No dtype annotation when the scalar type is unspecified (void)
template <> struct dtype_name<void> : unused { };
template <> struct dtype_name<const void> : unused { };

/// Emits "writable=False" for const scalar types, nothing otherwise
template <typename T> struct dtype_const_name {
    static constexpr auto name = const_name<std::is_const_v<T>>("writable=False", "");
};
/// Type caster translating between nb::ndarray<...> and Python array objects
template <typename... Args> struct type_caster<ndarray<Args...>> {
    using Config = detail::ndarray_config_t<int, Args...>;
    using Scalar = typename Config::Scalar;

    // Signature string, e.g. "numpy.ndarray[dtype=float32, shape=(*, 3)]",
    // assembled at compile time from the ndarray annotations
    NB_TYPE_CASTER(ndarray<Args...>,
                   Config::Framework::name +
                   const_name("[") +
                   concat_maybe(dtype_name<Scalar>::name,
                                Config::Shape::name,
                                Config::Order::name,
                                Config::DeviceType::name,
                                dtype_const_name<Scalar>::name) +
                   const_name("]"))

    /// Python -> C++: import `src` as an ndarray matching this configuration
    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        // 'None' maps to an invalid/empty ndarray when the argument permits it
        if (src.is_none() && flags & (uint8_t) cast_flags::accepts_none) {
            value = ndarray<Args...>();
            return true;
        }

        // Stack buffer for the expected shape (size 1 when no shape annotation)
        int64_t shape_buf[Config::N <= 0 ? 1 : Config::N];
        ndarray_config config{Config()};

        if constexpr (Config::N > 0) {
            Config::Shape::put(shape_buf);
            config.shape = shape_buf;
        } else {
            (void) shape_buf;
        }

        value = Value(ndarray_import(src.ptr(), &config,
                                     flags & (uint8_t) cast_flags::convert,
                                     cleanup));

        // Import failure yields an invalid handle
        return value.is_valid();
    }

    /// C++ -> Python: export the tensor in the annotated framework's format
    static handle from_cpp(const ndarray<Args...> &tensor, rv_policy policy,
                           cleanup_list *cleanup) noexcept {
        return ndarray_export(tensor.handle(), Config::Framework::value, policy, cleanup);
    }
};
/// Thin nb::object wrapper carrying the ndarray signature string, so that
/// ndarray::cast() results render with a precise type in stubs/docstrings
template <typename... Args>
class ndarray_object : public object {
public:
    using object::object;
    using object::operator=;
    static constexpr auto Name = type_caster<ndarray<Args...>>::Name;
};
NAMESPACE_END(detail)
/// Out-of-line definition of ndarray::cast(): convert to a Python object via
/// the regular casting machinery, wrapped in a signature-preserving handle type
template <typename... Args>
auto ndarray<Args...>::cast(rv_policy rvp, class handle parent) {
    return borrow<detail::ndarray_object<Args...>>(
        nanobind::cast(*this, rvp, parent));
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/operators.h | C/C++ Header | /*
nanobind/operators.h: convenience functionality for operator overloading
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Enumeration with all supported operator types
/// Enumeration with all supported operator types.
/// NOTE: the enumerator order is load-bearing — op_impl specializations are
/// keyed on these values — so entries must not be reordered.
enum op_id : int {
    op_add, op_sub, op_mul, op_div, op_mod, op_divmod, op_pow, op_lshift,
    op_rshift, op_and, op_xor, op_or, op_neg, op_pos, op_abs, op_invert,
    op_int, op_long, op_float, op_str, op_cmp, op_gt, op_ge, op_lt, op_le,
    op_eq, op_ne, op_iadd, op_isub, op_imul, op_idiv, op_imod, op_ilshift,
    op_irshift, op_iand, op_ixor, op_ior, op_complex, op_bool, op_nonzero,
    op_repr, op_truediv, op_itruediv, op_hash
};

/// Which side of a binary expression the bound class appears on
enum op_type : int {
    op_l, /* base type on left */
    op_r, /* base type on right */
    op_u  /* unary operator */
};

/// Placeholder standing in for the class currently being bound (nb::self)
struct self_t { };
[[maybe_unused]] static const self_t self = self_t();

/// Type for an unused type slot
struct undefined_t { };
/// base template of operator implementations
/// base template of operator implementations; specialized by the
/// NB_*_OPERATOR macros below for each supported operator
template <op_id, op_type, typename B, typename L, typename R> struct op_impl { };

/// Operator implementation generator: produced by expressions like
/// `nb::self + nb::self`, later consumed by class_<T>::def(...), which calls
/// execute()/execute_cast() to register the corresponding dunder method
template <op_id id, op_type ot, typename L, typename R> struct op_ {
    /// Register the operator; the return type is whatever `expr` yields
    template <typename Class, typename... Extra> void execute(Class &cl, const Extra&... extra) const {
        using Type = typename Class::Type;
        // Substitute the bound class for any 'self' placeholder
        using Lt = std::conditional_t<std::is_same_v<L, self_t>, Type, L>;
        using Rt = std::conditional_t<std::is_same_v<R, self_t>, Type, R>;
        using Op = op_impl<id, ot, Type, Lt, Rt>;
        cl.def(Op::name(), &Op::execute, is_operator(), Op::default_policy, extra...);
    }
    /// Same, but the operator's result is cast back to the bound class type
    template <typename Class, typename... Extra> void execute_cast(Class &cl, const Extra&... extra) const {
        using Type = typename Class::Type;
        using Lt = std::conditional_t<std::is_same_v<L, self_t>, Type, L>;
        using Rt = std::conditional_t<std::is_same_v<R, self_t>, Type, R>;
        using Op = op_impl<id, ot, Type, Lt, Rt>;
        cl.def(Op::name(), &Op::execute_cast, is_operator(), Op::default_policy, extra...);
    }
};
// Generates, for a binary operator:
//  - an op_impl for the class on the left  ("__<id>__")
//  - an op_impl for the class on the right ("__<rid>__", reflected form)
//  - the three overloads of `op` covering self⊕self, self⊕T, and T⊕self
// (No comments inside the macro body: a '//' before a '\' continuation
// would splice the following line into the comment.)
#define NB_BINARY_OPERATOR(id, rid, op, expr)                                             \
    template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
        static constexpr rv_policy default_policy = rv_policy::automatic;                 \
        static char const* name() { return "__" #id "__"; }                               \
        static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); }  \
        static B execute_cast(const L &l, const R &r) { return B(expr); }                 \
    };                                                                                    \
    template <typename B, typename L, typename R> struct op_impl<op_##id, op_r, B, L, R> { \
        static constexpr rv_policy default_policy = rv_policy::automatic;                 \
        static char const* name() { return "__" #rid "__"; }                              \
        static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); }  \
        static B execute_cast(const R &r, const L &l) { return B(expr); }                 \
    };                                                                                    \
    inline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) {        \
        return op_<op_##id, op_l, self_t, self_t>();                                      \
    }                                                                                     \
    template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {   \
        return op_<op_##id, op_l, self_t, T>();                                           \
    }                                                                                     \
    template <typename T> op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) {   \
        return op_<op_##id, op_r, T, self_t>();                                           \
    }

// Generates an in-place operator ("__iadd__" etc.). Only the self-on-the-left
// form exists; the result is returned with rv_policy::move.
#define NB_INPLACE_OPERATOR(id, op, expr)                                                 \
    template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
        static constexpr rv_policy default_policy = rv_policy::move;                      \
        static char const* name() { return "__" #id "__"; }                               \
        static auto execute(L &l, const R &r) -> decltype(expr) { return expr; }          \
        static B execute_cast(L &l, const R &r) { return B(expr); }                       \
    };                                                                                    \
    template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {   \
        return op_<op_##id, op_l, self_t, T>();                                           \
    }

// Generates a unary operator ("__neg__" etc.) taking only the self operand
#define NB_UNARY_OPERATOR(id, op, expr)                                                   \
    template <typename B, typename L> struct op_impl<op_##id, op_u, B, L, undefined_t> {  \
        static constexpr rv_policy default_policy = rv_policy::automatic;                 \
        static char const* name() { return "__" #id "__"; }                               \
        static auto execute(const L &l) -> decltype(expr) { return expr; }                \
        static B execute_cast(const L &l) { return B(expr); }                             \
    };                                                                                    \
    inline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) {                   \
        return op_<op_##id, op_u, self_t, undefined_t>();                                 \
    }
// Arithmetic, bitwise, and comparison operators (binary, with reflected forms)
NB_BINARY_OPERATOR(sub, rsub, operator-, l - r)
NB_BINARY_OPERATOR(add, radd, operator+, l + r)
NB_BINARY_OPERATOR(mul, rmul, operator*, l * r)
NB_BINARY_OPERATOR(truediv, rtruediv, operator/, l / r)
NB_BINARY_OPERATOR(mod, rmod, operator%, l % r)
NB_BINARY_OPERATOR(lshift, rlshift, operator<<, l << r)
NB_BINARY_OPERATOR(rshift, rrshift, operator>>, l >> r)
NB_BINARY_OPERATOR(and, rand, operator&, l & r)
NB_BINARY_OPERATOR(xor, rxor, operator^, l ^ r)
NB_BINARY_OPERATOR(or, ror, operator|, l | r)
// Comparisons: the "reflected" name is simply the mirrored comparison
NB_BINARY_OPERATOR(gt, lt, operator>, l > r)
NB_BINARY_OPERATOR(ge, le, operator>=, l >= r)
NB_BINARY_OPERATOR(lt, gt, operator<, l < r)
NB_BINARY_OPERATOR(le, ge, operator<=, l <= r)
NB_BINARY_OPERATOR(eq, eq, operator==, l == r)
NB_BINARY_OPERATOR(ne, ne, operator!=, l != r)
// In-place operators
NB_INPLACE_OPERATOR(iadd, operator+=, l += r)
NB_INPLACE_OPERATOR(isub, operator-=, l -= r)
NB_INPLACE_OPERATOR(imul, operator*=, l *= r)
NB_INPLACE_OPERATOR(itruediv, operator/=, l /= r)
NB_INPLACE_OPERATOR(imod, operator%=, l %= r)
NB_INPLACE_OPERATOR(ilshift, operator<<=, l <<= r)
NB_INPLACE_OPERATOR(irshift, operator>>=, l >>= r)
NB_INPLACE_OPERATOR(iand, operator&=, l &= r)
NB_INPLACE_OPERATOR(ixor, operator^=, l ^= r)
NB_INPLACE_OPERATOR(ior, operator|=, l |= r)
// Unary operators
NB_UNARY_OPERATOR(neg, operator-, -l)
NB_UNARY_OPERATOR(pos, operator+, +l)
NB_UNARY_OPERATOR(invert, operator~, (~l))
NB_UNARY_OPERATOR(bool, operator!, !!l)
NB_UNARY_OPERATOR(abs, abs, std::abs(l))
NB_UNARY_OPERATOR(hash, hash, std::hash<L>()(l))
#undef NB_BINARY_OPERATOR
#undef NB_INPLACE_OPERATOR
#undef NB_UNARY_OPERATOR
NAMESPACE_END(detail)
// Add named operators so that they are accessible via `nb::`.
using detail::self;
using detail::hash;
using detail::abs;
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/array.h | C/C++ Header | /*
nanobind/stl/array.h: type caster for std::array<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_array.h"
#include <array>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// std::array caster: delegates to the generic fixed-size sequence caster
template <typename Type, size_t Size> struct type_caster<std::array<Type, Size>>
 : array_caster<std::array<Type, Size>, Type, Size> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/bind_map.h | C/C++ Header | /*
nanobind/stl/bind_map.h: Automatic creation of bindings for map-style containers
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <nanobind/make_iterator.h>
#include <nanobind/operators.h>
#include <nanobind/stl/detail/traits.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Insert or overwrite the entry `key -> val` in `map`. Works even for
/// mapped types that are copy-constructible but not copy-assignable.
template <typename Map, typename Key, typename Value>
inline void map_set(Map &map, const Key &key, const Value &val) {
    if constexpr (detail::is_copy_assignable_v<Value>) {
        // Common case: assign in place via operator[]
        map[key] = val;
    } else {
        // Value is not copy-assignable: try to emplace; when the key is
        // already present, remove the stale entry and emplace again.
        auto [it, inserted] = map.emplace(key, val);
        if (!inserted) {
            map.erase(it);
            map.emplace(key, val);
        }
    }
}
NAMESPACE_END(detail)
/// Create Python bindings for the STL map-like container `Map` under the
/// given `scope` and `name`. Generates the standard dict-like protocol
/// (__len__, __getitem__, __setitem__, __delitem__, __contains__, iteration,
/// keys()/values()/items() views). `Policy` controls how __getitem__ returns
/// mapped values. Returns the (possibly pre-existing) class_ binding.
template <typename Map,
          rv_policy Policy = rv_policy::automatic_reference,
          typename... Args>
class_<Map> bind_map(handle scope, const char *name, Args &&...args) {
    using Key = typename Map::key_type;
    using Value = typename Map::mapped_type;
    using ValueRef = typename detail::iterator_value_access<
        typename Map::iterator>::result_type;

    static_assert(
        !detail::is_base_caster_v<detail::make_caster<Value>> ||
        detail::is_copy_constructible_v<Value> ||
        (Policy != rv_policy::automatic_reference &&
         Policy != rv_policy::copy),
        "bind_map(): the generated __getitem__ would copy elements, so the "
        "value type must be copy-constructible");

    handle cl_cur = type<Map>();
    if (cl_cur.is_valid()) {
        // Binding already exists, don't re-create
        return borrow<class_<Map>>(cl_cur);
    }

    auto cl = class_<Map>(scope, name, std::forward<Args>(args)...)
        .def(init<>(),
             "Default constructor")
        .def("__len__", [](const Map &m) { return m.size(); })
        .def("__bool__",
             [](const Map &m) { return !m.empty(); },
             "Check whether the map is nonempty")
        .def("__repr__",
             [](handle_t<Map> h) {
                 return steal<str>(detail::repr_map(h.ptr()));
             })
        .def("__contains__",
             [](const Map &m, const Key &k) { return m.find(k) != m.end(); })
        .def("__contains__", // fallback for incompatible types
             [](const Map &, handle) { return false; })
        // Iterating a map yields its keys, matching Python dict semantics
        .def("__iter__",
             [](Map &m) {
                 return make_key_iterator<Policy>(type<Map>(), "KeyIterator",
                                                  m.begin(), m.end());
             },
             keep_alive<0, 1>())
        .def("__getitem__",
             [](Map &m, const Key &k) -> ValueRef {
                 auto it = m.find(k);
                 if (it == m.end())
                     throw key_error();  // missing key -> Python KeyError
                 return (*it).second;
             }, Policy)
        .def("__delitem__",
             [](Map &m, const Key &k) {
                 auto it = m.find(k);
                 if (it == m.end())
                     throw key_error();
                 m.erase(it);
             })
        .def("clear", [](Map &m) { m.clear(); },
             "Remove all items");

    // Extra conveniences that require a copy-constructible map type
    if constexpr (detail::is_copy_constructible_v<Map>) {
        cl.def(init<const Map &>(), "Copy constructor");

        cl.def("__init__", [](Map *m, typed<dict, Key, Value> d) {
            new (m) Map();
            for (auto [k, v] : borrow<dict>(std::move(d)))
                m->emplace(cast<Key>(k), cast<Value>(v));
        }, "Construct from a dictionary");

        implicitly_convertible<dict, Map>();
    }

    // Assignment operator for copy-assignable/copy-constructible types
    if constexpr (detail::is_copy_assignable_v<Value> ||
                  detail::is_copy_constructible_v<Value>) {
        cl.def("__setitem__", [](Map &m, const Key &k, const Value &v) {
            detail::map_set<Map, Key, Value>(m, k, v);
        });

        cl.def("update", [](Map &m, const Map &m2) {
            for (auto &kv : m2)
                detail::map_set<Map, Key, Value>(m, kv.first, kv.second);
        },
        "Update the map with element from `arg`");
    }

    if constexpr (detail::is_equality_comparable_v<Map>) {
        cl.def(self == self, sig("def __eq__(self, arg: object, /) -> bool"))
          .def(self != self, sig("def __ne__(self, arg: object, /) -> bool"));
    }

    // Item, value, and key views: lightweight wrappers holding a reference
    // to the map; kept alive together with it via keep_alive<0, 1> below
    struct KeyView   { Map &map; };
    struct ValueView { Map &map; };
    struct ItemView  { Map &map; };

    class_<ItemView>(cl, "ItemView")
        .def("__len__", [](ItemView &v) { return v.map.size(); })
        .def("__iter__",
             [](ItemView &v) {
                 return make_iterator<Policy>(type<Map>(), "ItemIterator",
                                              v.map.begin(), v.map.end());
             },
             keep_alive<0, 1>());

    class_<KeyView>(cl, "KeyView")
        .def("__contains__", [](KeyView &v, const Key &k) { return v.map.find(k) != v.map.end(); })
        .def("__contains__", [](KeyView &, handle) { return false; })
        .def("__len__", [](KeyView &v) { return v.map.size(); })
        .def("__iter__",
             [](KeyView &v) {
                 return make_key_iterator<Policy>(type<Map>(), "KeyIterator",
                                                  v.map.begin(), v.map.end());
             },
             keep_alive<0, 1>());

    class_<ValueView>(cl, "ValueView")
        .def("__len__", [](ValueView &v) { return v.map.size(); })
        .def("__iter__",
             [](ValueView &v) {
                 return make_value_iterator<Policy>(type<Map>(), "ValueIterator",
                                                    v.map.begin(), v.map.end());
             },
             keep_alive<0, 1>());

    cl.def("keys", [](Map &m) { return new KeyView{m}; }, keep_alive<0, 1>(),
           "Returns an iterable view of the map's keys.");
    cl.def("values", [](Map &m) { return new ValueView{m}; }, keep_alive<0, 1>(),
           "Returns an iterable view of the map's values.");
    cl.def("items", [](Map &m) { return new ItemView{m}; }, keep_alive<0, 1>(),
           "Returns an iterable view of the map's items.");

    return cl;
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/bind_vector.h | C/C++ Header | /*
nanobind/stl/bind_vector.h: Automatic creation of bindings for vector-style containers
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <nanobind/operators.h>
#include <nanobind/make_iterator.h>
#include <nanobind/stl/detail/traits.h>
#include <vector>
#include <algorithm>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Normalize a (possibly negative, Python-style) index into the range
/// [0, n); raises a Python IndexError when it falls outside the container.
inline size_t wrap(Py_ssize_t i, size_t n) {
    Py_ssize_t pos = (i < 0) ? i + (Py_ssize_t) n : i;
    if (pos < 0 || (size_t) pos >= n)
        throw index_error();
    return (size_t) pos;
}
/// Specialization for std::vector<bool>, whose iterator dereferences to a
/// proxy object rather than a reference; yields plain bool by value instead
template <> struct iterator_access<typename std::vector<bool>::iterator> {
    using result_type = bool;
    result_type operator()(typename std::vector<bool>::iterator &it) const { return *it; }
};
NAMESPACE_END(detail)
/// Create Python bindings for the STL vector-like container `Vector` under
/// the given `scope` and `name`. Generates the standard list protocol
/// (__len__, indexing with negative-index support, slicing, append/insert/
/// pop/extend/remove/count, iteration). `Policy` controls how __getitem__
/// returns elements. Returns the (possibly pre-existing) class_ binding.
template <typename Vector,
          rv_policy Policy = rv_policy::automatic_reference,
          typename... Args>
class_<Vector> bind_vector(handle scope, const char *name, Args &&...args) {
    using ValueRef = typename detail::iterator_access<typename Vector::iterator>::result_type;
    using Value = std::decay_t<ValueRef>;

    static_assert(
        !detail::is_base_caster_v<detail::make_caster<Value>> ||
        detail::is_copy_constructible_v<Value> ||
        (Policy != rv_policy::automatic_reference &&
         Policy != rv_policy::copy),
        "bind_vector(): the generated __getitem__ would copy elements, so the "
        "element type must be copy-constructible");

    handle cl_cur = type<Vector>();
    if (cl_cur.is_valid()) {
        // Binding already exists, don't re-create
        return borrow<class_<Vector>>(cl_cur);
    }

    auto cl = class_<Vector>(scope, name, std::forward<Args>(args)...)
        .def(init<>(), "Default constructor")
        .def("__len__", [](const Vector &v) { return v.size(); })
        .def("__bool__",
             [](const Vector &v) { return !v.empty(); },
             "Check whether the vector is nonempty")
        .def("__repr__",
             [](handle_t<Vector> h) {
                 return steal<str>(detail::repr_list(h.ptr()));
             })
        .def("__iter__",
             [](Vector &v) {
                 return make_iterator<Policy>(type<Vector>(), "Iterator",
                                              v.begin(), v.end());
             }, keep_alive<0, 1>())
        .def("__getitem__",
             [](Vector &v, Py_ssize_t i) -> ValueRef {
                 // detail::wrap() handles negative indices / bounds checking
                 return v[detail::wrap(i, v.size())];
             }, Policy)
        .def("clear", [](Vector &v) { v.clear(); },
             "Remove all items from list.");

    // Mutating operations that require copy-constructible elements
    if constexpr (detail::is_copy_constructible_v<Value>) {
        cl.def(init<const Vector &>(),
               "Copy constructor");

        cl.def("__init__", [](Vector *v, typed<iterable, Value> seq) {
            new (v) Vector();
            v->reserve(len_hint(seq));  // best-effort pre-allocation
            for (handle h : seq)
                v->push_back(cast<Value>(h));
        }, "Construct from an iterable object");

        implicitly_convertible<iterable, Vector>();

        cl.def("append",
               [](Vector &v, const Value &value) { v.push_back(value); },
               "Append `arg` to the end of the list.")
          .def("insert",
               [](Vector &v, Py_ssize_t i, const Value &x) {
                   if (i < 0)
                       i += (Py_ssize_t) v.size();
                   // insert() accepts i == size() (append), unlike wrap()
                   if (i < 0 || (size_t) i > v.size())
                       throw index_error();
                   v.insert(v.begin() + i, x);
               },
               "Insert object `arg1` before index `arg0`.")
          .def("pop",
               [](Vector &v, Py_ssize_t i) {
                   size_t index = detail::wrap(i, v.size());
                   Value result = std::move(v[index]);
                   v.erase(v.begin() + index);
                   return result;
               },
               arg("index") = -1,
               "Remove and return item at `index` (default last).")
          .def("extend",
               [](Vector &v, const Vector &src) {
                   v.insert(v.end(), src.begin(), src.end());
               },
               "Extend `self` by appending elements from `arg`.")
          .def("__setitem__",
               [](Vector &v, Py_ssize_t i, const Value &value) {
                   v[detail::wrap(i, v.size())] = value;
               })
          .def("__delitem__",
               [](Vector &v, Py_ssize_t i) {
                   v.erase(v.begin() + detail::wrap(i, v.size()));
               })
          // Slice read: build a new vector with the selected elements
          .def("__getitem__",
               [](const Vector &v, const slice &slice) -> Vector * {
                   auto [start, stop, step, length] = slice.compute(v.size());
                   auto *seq = new Vector();
                   seq->reserve(length);

                   for (size_t i = 0; i < length; ++i) {
                       seq->push_back(v[start]);
                       start += step;
                   }

                   return seq;
               })
          // Slice write: lengths must match (no insertion/removal)
          .def("__setitem__",
               [](Vector &v, const slice &slice, const Vector &value) {
                   auto [start, stop, step, length] = slice.compute(v.size());

                   if (length != value.size())
                       throw index_error(
                           "The left and right hand side of the slice "
                           "assignment have mismatched sizes!");

                   for (size_t i = 0; i < length; ++i) {
                       v[start] = value[i];
                       start += step;
                   }
               })
          // Slice delete: normalize to an ascending range, then erase
          .def("__delitem__",
               [](Vector &v, const slice &slice) {
                   auto [start, stop, step, length] = slice.compute(v.size());

                   if (length == 0)
                       return;

                   stop = start + (length - 1) * step;
                   if (start > stop) {
                       std::swap(start, stop);
                       step = -step;
                   }

                   if (step == 1) {
                       // Contiguous range: single erase call
                       v.erase(v.begin() + start, v.begin() + stop + 1);
                   } else {
                       // Strided range: erase back-to-front so earlier
                       // positions remain valid
                       for (size_t i = 0; i < length; ++i) {
                           v.erase(v.begin() + stop);
                           stop -= step;
                       }
                   }
               });
    }

    // Search-style operations that require operator== on the element type
    if constexpr (detail::is_equality_comparable_v<Value>) {
        cl.def(self == self, sig("def __eq__(self, arg: object, /) -> bool"))
          .def(self != self, sig("def __ne__(self, arg: object, /) -> bool"))
          .def("__contains__",
               [](const Vector &v, const Value &x) {
                   return std::find(v.begin(), v.end(), x) != v.end();
               })
          .def("__contains__", // fallback for incompatible types
               [](const Vector &, handle) { return false; })
          .def("count",
               [](const Vector &v, const Value &x) {
                   return std::count(v.begin(), v.end(), x);
               }, "Return number of occurrences of `arg`.")
          .def("remove",
               [](Vector &v, const Value &x) {
                   auto p = std::find(v.begin(), v.end(), x);
                   if (p != v.end())
                       v.erase(p);
                   else
                       throw value_error();
               },
               "Remove first occurrence of `arg`.");
    }
    return cl;
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/chrono.h | C/C++ Header | /*
nanobind/stl/chrono.h: conversion between std::chrono and python's datetime
Copyright (c) 2023 Hudson River Trading LLC <opensource@hudson-trading.com> and
Trent Houliston <trent@houliston.me> and
Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#if !defined(__STDC_WANT_LIB_EXT1__)
#define __STDC_WANT_LIB_EXT1__ 1 // for localtime_s
#endif
#include <time.h>
#include <chrono>
#include <cmath>
#include <ctime>
#include <limits>
#include <nanobind/stl/detail/chrono.h>
// Casts a std::chrono type (either a duration or a time_point) to/from
// Python timedelta objects, or from a Python float representing seconds.
/// Caster shared by std::chrono::duration and non-system-clock time_points.
/// Python side: datetime.timedelta, or a float interpreted as seconds
/// (input only; output is always a timedelta).
template <typename type> class duration_caster {
public:
    using rep = typename type::rep;
    using period = typename type::period;
    using duration_t = std::chrono::duration<rep, period>;

    bool from_python(handle src, uint8_t /*flags*/, cleanup_list*) noexcept {
        namespace ch = std::chrono;
        if (!src) return false;

        // support for signed 25 bits is required by the standard
        using days = ch::duration<int_least32_t, std::ratio<86400>>;

        // If invoked with datetime.delta object, unpack it
        int dd, ss, uu;
        try {
            if (unpack_timedelta(src.ptr(), &dd, &ss, &uu)) {
                value = type(ch::duration_cast<duration_t>(
                    days(dd) + ch::seconds(ss) + ch::microseconds(uu)));
                return true;
            }
        } catch (python_error& e) {
            // Conversion machinery must not leak Python exceptions here
            e.discard_as_unraisable(src.ptr());
            return false;
        }

        // If invoked with a float we assume it is seconds and convert
        int is_float;
#if defined(Py_LIMITED_API)
        // PyFloat_Check is unavailable under the limited API
        is_float = PyType_IsSubtype(Py_TYPE(src.ptr()), &PyFloat_Type);
#else
        is_float = PyFloat_Check(src.ptr());
#endif
        if (is_float) {
            value = type(ch::duration_cast<duration_t>(
                ch::duration<double>(PyFloat_AsDouble(src.ptr()))));
            return true;
        }

        return false;
    }

    // If this is a duration just return it back
    static const duration_t& get_duration(const duration_t& src) {
        return src;
    }

    // If this is a time_point get the time_since_epoch
    template <typename Clock>
    static duration_t get_duration(
            const std::chrono::time_point<Clock, duration_t>& src) {
        return src.time_since_epoch();
    }

    static handle from_cpp(const type& src, rv_policy, cleanup_list*) noexcept {
        namespace ch = std::chrono;

        // Use overloaded function to get our duration from our source
        // Works out if it is a duration or time_point and get the duration
        auto d = get_duration(src);

        // Declare these special duration types so the conversions happen with the correct primitive types (int)
        using dd_t = ch::duration<int, std::ratio<86400>>;
        using ss_t = ch::duration<int, std::ratio<1>>;
        using us_t = ch::duration<int, std::micro>;

        // Decompose into days / seconds / microseconds for pack_timedelta()
        auto dd = ch::duration_cast<dd_t>(d);
        auto subd = d - dd;
        auto ss = ch::duration_cast<ss_t>(subd);
        auto us = ch::duration_cast<us_t>(subd - ss);
        return pack_timedelta(dd.count(), ss.count(), us.count());
    }

    NB_TYPE_CASTER(type, io_name("datetime.timedelta | float",
                                 "datetime.timedelta"))
};
// SFINAE probes: overload resolution picks the decltype version (true_type)
// only when the platform actually provides a callable localtime_s /
// localtime_r with the given argument order; otherwise the variadic
// fallback (false_type) is chosen. Evaluated via decltype only — never called.
template <class... Args>
auto can_localtime_s(Args*... args) ->
    decltype((localtime_s(args...), std::true_type{}));
std::false_type can_localtime_s(...);

template <class... Args>
auto can_localtime_r(Args*... args) ->
    decltype((localtime_r(args...), std::true_type{}));
std::false_type can_localtime_r(...);
/// Portable, thread-safe wrapper over the platform's reentrant localtime
/// variant. Dispatches at compile time between C11 localtime_s, Microsoft's
/// localtime_s (swapped parameter order, errno_t return), and POSIX
/// localtime_r. Returns `buf` on success, nullptr on failure.
template <class Time, class Buf>
inline std::tm *localtime_thread_safe(const Time *time, Buf *buf) {
    if constexpr (decltype(can_localtime_s(time, buf))::value) {
        // C11 localtime_s
        std::tm* ret = localtime_s(time, buf);
        return ret;
    } else if constexpr (decltype(can_localtime_s(buf, time))::value) {
        // Microsoft localtime_s (with parameters switched and errno_t return)
        int ret = localtime_s(buf, time);
        return ret == 0 ? buf : nullptr;
    } else {
        static_assert(decltype(can_localtime_r(time, buf))::value,
                      "<nanobind/stl/chrono.h> type caster requires "
                      "that your C library support localtime_r or localtime_s");
        // POSIX localtime_r
        std::tm* ret = localtime_r(time, buf);
        return ret;
    }
}
// Cast between times on the system clock and datetime.datetime instances
// (also supports datetime.date and datetime.time for Python->C++ conversions)
/// Caster between std::chrono::system_clock time points and
/// timezone-naive datetime.datetime objects in *local* time.
/// Python -> C++ additionally accepts datetime.date (midnight) and
/// datetime.time (on Jan 1, 1970), per unpack_datetime().
template <typename Duration>
class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
public:
    using type = std::chrono::time_point<std::chrono::system_clock, Duration>;

    bool from_python(handle src, uint8_t /*flags*/, cleanup_list*) noexcept {
        namespace ch = std::chrono;

        if (!src)
            return false;

        std::tm cal;
        ch::microseconds msecs;
        int yy, mon, dd, hh, min, ss, uu;
        try {
            if (!unpack_datetime(src.ptr(), &yy, &mon, &dd,
                                 &hh, &min, &ss, &uu)) {
                return false;  // not a date/time/datetime object
            }
        } catch (python_error& e) {
            e.discard_as_unraisable(src.ptr());
            return false;
        }
        // Fill a std::tm and let mktime() interpret it as local time
        cal.tm_sec = ss;
        cal.tm_min = min;
        cal.tm_hour = hh;
        cal.tm_mday = dd;
        cal.tm_mon = mon - 1;      // tm months are 0-based
        cal.tm_year = yy - 1900;   // tm years are offsets from 1900
        cal.tm_isdst = -1;         // let mktime determine DST
        msecs = ch::microseconds(uu);

        value = ch::time_point_cast<Duration>(
            ch::system_clock::from_time_t(std::mktime(&cal)) + msecs);
        return true;
    }

    static handle from_cpp(const type& src, rv_policy, cleanup_list*) noexcept {
        namespace ch = std::chrono;

        // Get out microseconds, and make sure they are positive, to
        // avoid bug in eastern hemisphere time zones
        // (cfr. https://github.com/pybind/pybind11/issues/2417). Note
        // that if us_t is 32 bits and we get a time_point that also
        // has a 32-bit time_since_epoch (perhaps because it's
        // measuring time in minutes or something), then writing `src
        // - us` below can lead to overflow based on how common_type
        // is defined on durations. Defining us_t to store 64-bit
        // microseconds works around this.
        using us_t = ch::duration<std::int64_t, std::micro>;
        auto us = ch::duration_cast<us_t>(src.time_since_epoch() %
                                          ch::seconds(1));
        if (us.count() < 0)
            us += ch::seconds(1);

        // Subtract microseconds BEFORE `system_clock::to_time_t`, because:
        // > If std::time_t has lower precision, it is implementation-defined
        //   whether the value is rounded or truncated.
        // (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
        std::time_t tt = ch::system_clock::to_time_t(
            ch::time_point_cast<ch::system_clock::duration>(src - us));

        std::tm localtime;
        if (!localtime_thread_safe(&tt, &localtime)) {
            PyErr_Format(PyExc_ValueError,
                         "Unable to represent system_clock in local time; "
                         "got time_t %ld", static_cast<std::int64_t>(tt));
            return handle();
        }
        return pack_datetime(localtime.tm_year + 1900,
                             localtime.tm_mon + 1,
                             localtime.tm_mday,
                             localtime.tm_hour,
                             localtime.tm_min,
                             localtime.tm_sec,
                             (int) us.count());
    }
    NB_TYPE_CASTER(type, io_name("datetime.datetime | datetime.date | datetime.time",
                                 "datetime.datetime"))
};
// Other clocks that are not the system clock are not measured as
// datetime.datetime objects since they are not measured on calendar
// time. So instead we just make them timedeltas; or if they have
// passed us a time as a float, we convert that.
// Non-system-clock time_points are not tied to calendar time, so they (and
// all plain durations) round-trip through the timedelta-based caster above
template <typename Clock, typename Duration>
class type_caster<std::chrono::time_point<Clock, Duration>>
    : public duration_caster<std::chrono::time_point<Clock, Duration>> {};

template <typename Rep, typename Period>
class type_caster<std::chrono::duration<Rep, Period>>
    : public duration_caster<std::chrono::duration<Rep, Period>> {};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/complex.h | C/C++ Header | /*
nanobind/stl/complex.h: type caster for std::complex<...>
Copyright (c) 2023 Degottex Gilles and Wenzel Jakob
Copyright (c) 2025 High Performance Kernels LLC
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <complex>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Defined in the nanobind core library: extract a complex value from a
// Python object, honoring the 'convert' flag
NB_CORE bool load_cmplx(PyObject*, uint8_t flags, std::complex<double> *out) noexcept;

/// Caster between std::complex<T> and Python 'complex' objects
template <typename T> struct type_caster<std::complex<T>> {
    NB_TYPE_CASTER(std::complex<T>, const_name("complex"))

    bool from_python(handle src, uint8_t flags, cleanup_list*) noexcept {
        std::complex<double> cmplx;
        if (!load_cmplx(src.ptr(), flags, &cmplx))
            return false;

        if constexpr (std::is_same_v<T, double>) {
            // No narrowing possible: accept directly
            value = cmplx;
            return true;
        } else {
            T re = (T) cmplx.real();
            T im = (T) cmplx.imag();
            // Accept when implicit conversion is allowed, or when the
            // narrowing to T is lossless for both components. The
            // `x != x` comparisons are NaN checks: NaN never round-trips
            // through `==`, so NaN inputs are explicitly allowed through.
            if ((flags & (uint8_t) cast_flags::convert)
                || (((double) re == cmplx.real()
                     || (re != re && cmplx.real() != cmplx.real()))
                    && ((double) im == cmplx.imag()
                        || (im != im && cmplx.imag() != cmplx.imag())))) {
                value = std::complex<T>(re, im);
                return true;
            }
            return false;
        }
    }

    static handle from_cpp(const std::complex<T>& value, rv_policy,
                           cleanup_list*) noexcept {
        return PyComplex_FromDoubles((double) value.real(),
                                     (double) value.imag());
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/chrono.h | C/C++ Header | /*
nanobind/stl/chrono.h: conversion between std::chrono and python's datetime
Copyright (c) 2023 Hudson River Trading LLC <opensource@hudson-trading.com>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
// Functions for working with objects in the Python 'datetime' module,
// used by the std::chrono type caster in <nanobind/stl/chrono.h>.
// This is pretty straightforward except on Limited API builds.
// Note that while PyPy does provide <datetime.h>, it implements
// the macro-like calls there (PyDateTime_DATE_GET_HOUR, etc) as full
// function calls that can fail. We use the limited-API logic on PyPy
// in order to be able to handle errors better.
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
# include <datetime.h>
#endif
#if defined(__GNUC__)
// warning: warning: declaration of '...' with attribute 'noinline' follows inline declaration
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wattributes"
#endif
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Unpack a datetime.timedelta object into integer days, seconds, and
// microseconds. Returns true if successful, false if `o` is not a timedelta,
// or throws nb::python_error if something else went wrong.
bool unpack_timedelta(PyObject *o, int *days, int *secs, int *usecs);
// Unpack a datetime.date, datetime.time, or datetime.datetime object into
// integer year, month, day, hour, minute, second, and microsecond fields.
// Time objects will be considered to represent that time on Jan 1, 1970.
// Date objects will be considered to represent midnight on that date.
// Returns true if succesful, false if `o` is not a date, time, or datetime,
// or throws nb::python_error if something else went wrong.
bool unpack_datetime(PyObject *o, int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *usec);
// Create a datetime.timedelta object from integer days, seconds, and
// microseconds. Returns a new reference, or nullptr and sets the
// Python error indicator on error.
PyObject* pack_timedelta(int days, int secs, int usecs) noexcept;
// Create a timezone-naive datetime.datetime object from its components.
// Returns a new reference, or nullptr and sets the Python error indicator
// on error.
PyObject* pack_datetime(int year, int month, int day,
int hour, int minute, int second,
int usec) noexcept;
// Note: Several of the functions defined in this header are marked
// 'inline' for linkage purposes (since they might be in multiple
// translation units and the linker should pick one) but NB_NOINLINE
// because we don't want the bloat of actually inlining them. They are
// defined in this header instead of in the built nanobind library in
// order to avoid increasing the library size for users who don't care
// about datetimes.
#if defined(Py_LIMITED_API) || defined(PYPY_VERSION)
/// Lazily-initialized cache of the four 'datetime' module types, used on
/// Limited API / PyPy builds where the <datetime.h> capsule API is unavailable.
struct datetime_types_t {
    // Types defined by the datetime module
    handle datetime;
    handle time;
    handle date;
    handle timedelta;

    // Ensure that the above four handles point to valid Python objects.
    // If unable, throw nb::python_error.
    void ensure_ready() {
        // 'datetime' doubles as the initialization flag: all four handles
        // are populated together below.
        if (datetime.is_valid())
            return;

        object mod = module_::import_("datetime");
        object datetime_o = mod.attr("datetime");
        object time_o = mod.attr("time");
        object date_o = mod.attr("date");
        object timedelta_o = mod.attr("timedelta");

        // Leak references to these datetime types. We could improve upon
        // this by storing them in the internals structure and decref'ing
        // in internals_cleanup(), but it doesn't seem worthwhile for
        // something this fundamental. We can't store nb::object in this
        // structure because it might be destroyed after the Python
        // interpreter has finalized.
        datetime = datetime_o.release();
        time = time_o.release();
        date = date_o.release();
        timedelta = timedelta_o.release();
    }
};

// Single shared cache instance (C++17 inline variable, one per program)
inline datetime_types_t datetime_types;
// Set *dest to the integer value of getattr(o, name). Returns true
// on success, false and sets the Python error indicator on failure.
// The attribute value must be a Python integer object; other types
// of numbers are not supported.
NB_NOINLINE inline bool set_from_int_attr(int *dest, PyObject *o,
                                          const char *name) noexcept {
    PyObject *value = PyObject_GetAttrString(o, name);
    if (!value)
        return false; // attribute lookup failed; Python error already set

    long lval = PyLong_AsLong(value);
    // -1 is PyLong_AsLong's error sentinel; disambiguate via PyErr_Occurred()
    if (lval == -1 && PyErr_Occurred()) {
        Py_DECREF(value);
        return false;
    }
    // 'long' may be wider than 'int' (e.g. LP64); reject values that would
    // silently truncate in the narrowing cast below
    if (lval < std::numeric_limits<int>::min() ||
        lval > std::numeric_limits<int>::max()) {
        PyErr_Format(PyExc_OverflowError,
                     "%R attribute '%s' (%R) does not fit in an int",
                     o, name, value);
        Py_DECREF(value);
        return false;
    }
    Py_DECREF(value);
    *dest = static_cast<int>(lval);
    return true;
}
// Limited-API variant: identifies timedelta instances via a subtype check
// against the cached 'datetime.timedelta' type and reads the components as
// ordinary Python attributes (see unpack_timedelta contract above).
NB_NOINLINE inline bool unpack_timedelta(PyObject *o, int *days,
                                         int *secs, int *usecs) {
    datetime_types.ensure_ready();
    if (PyType_IsSubtype(Py_TYPE(o),
                         (PyTypeObject *) datetime_types.timedelta.ptr())) {
        // Any attribute failure becomes a thrown python_error, matching the
        // "throws on other errors" part of the contract
        if (!set_from_int_attr(days, o, "days") ||
            !set_from_int_attr(secs, o, "seconds") ||
            !set_from_int_attr(usecs, o, "microseconds")) {
            raise_python_error();
        }
        return true;
    }
    return false; // not a timedelta
}
// Limited-API variant of unpack_datetime: checks datetime first (it is a
// subclass of date, so the order of the checks matters), then date, then
// time, reading components via attribute access.
NB_NOINLINE inline bool unpack_datetime(PyObject *o,
                                        int *year, int *month, int *day,
                                        int *hour, int *minute, int *second,
                                        int *usec) {
    datetime_types.ensure_ready();
    if (PyType_IsSubtype(Py_TYPE(o),
                         (PyTypeObject *) datetime_types.datetime.ptr())) {
        if (!set_from_int_attr(usec, o, "microsecond") ||
            !set_from_int_attr(second, o, "second") ||
            !set_from_int_attr(minute, o, "minute") ||
            !set_from_int_attr(hour, o, "hour") ||
            !set_from_int_attr(day, o, "day") ||
            !set_from_int_attr(month, o, "month") ||
            !set_from_int_attr(year, o, "year")) {
            raise_python_error();
        }
        return true;
    }
    if (PyType_IsSubtype(Py_TYPE(o),
                         (PyTypeObject *) datetime_types.date.ptr())) {
        // A date is treated as midnight on that day
        *usec = *second = *minute = *hour = 0;
        if (!set_from_int_attr(day, o, "day") ||
            !set_from_int_attr(month, o, "month") ||
            !set_from_int_attr(year, o, "year")) {
            raise_python_error();
        }
        return true;
    }
    if (PyType_IsSubtype(Py_TYPE(o),
                         (PyTypeObject *) datetime_types.time.ptr())) {
        // A bare time is anchored to Jan 1, 1970 (see contract above)
        *day = 1;
        *month = 1;
        *year = 1970;
        if (!set_from_int_attr(usec, o, "microsecond") ||
            !set_from_int_attr(second, o, "second") ||
            !set_from_int_attr(minute, o, "minute") ||
            !set_from_int_attr(hour, o, "hour")) {
            raise_python_error();
        }
        return true;
    }
    return false; // not a datetime, date, or time
}
// Limited-API variant of pack_timedelta: constructs the object by calling
// datetime.timedelta(days, secs, usecs). Returns a new reference, or nullptr
// with the Python error indicator set (restored from the caught exception).
inline PyObject* pack_timedelta(int days, int secs, int usecs) noexcept {
    try {
        datetime_types.ensure_ready();
        return datetime_types.timedelta(days, secs, usecs).release().ptr();
    } catch (python_error& e) {
        e.restore(); // re-establish the Python error indicator
        return nullptr;
    }
}
// Limited-API variant of pack_datetime: constructs a timezone-naive
// datetime.datetime by calling the cached type. Returns a new reference, or
// nullptr with the Python error indicator set.
inline PyObject* pack_datetime(int year, int month, int day,
                               int hour, int minute, int second,
                               int usec) noexcept {
    try {
        datetime_types.ensure_ready();
        return datetime_types.datetime(
            year, month, day, hour, minute, second, usec).release().ptr();
    } catch (python_error& e) {
        e.restore(); // re-establish the Python error indicator
        return nullptr;
    }
}
#else // !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
// Fast-path variant using the <datetime.h> capsule API (regular CPython
// builds). The accessor macros cannot fail, so no extra error handling is
// needed beyond importing the capsule once.
NB_NOINLINE inline bool unpack_timedelta(PyObject *o, int *days,
                                         int *secs, int *usecs) {
    // PyDateTime_IMPORT initializes the PyDateTimeAPI capsule on first use
    if (!PyDateTimeAPI) {
        PyDateTime_IMPORT;
        if (!PyDateTimeAPI)
            raise_python_error();
    }

    if (PyDelta_Check(o)) {
        *days = PyDateTime_DELTA_GET_DAYS(o);
        *secs = PyDateTime_DELTA_GET_SECONDS(o);
        *usecs = PyDateTime_DELTA_GET_MICROSECONDS(o);
        return true;
    }
    return false; // not a timedelta
}
// Fast-path variant of unpack_datetime using the <datetime.h> capsule API.
// datetime is checked before date since datetime subclasses date.
NB_NOINLINE inline bool unpack_datetime(PyObject *o,
                                        int *year, int *month, int *day,
                                        int *hour, int *minute, int *second,
                                        int *usec) {
    // Lazily initialize the PyDateTimeAPI capsule
    if (!PyDateTimeAPI) {
        PyDateTime_IMPORT;
        if (!PyDateTimeAPI)
            raise_python_error();
    }

    if (PyDateTime_Check(o)) {
        *usec = PyDateTime_DATE_GET_MICROSECOND(o);
        *second = PyDateTime_DATE_GET_SECOND(o);
        *minute = PyDateTime_DATE_GET_MINUTE(o);
        *hour = PyDateTime_DATE_GET_HOUR(o);
        *day = PyDateTime_GET_DAY(o);
        *month = PyDateTime_GET_MONTH(o);
        *year = PyDateTime_GET_YEAR(o);
        return true;
    }
    if (PyDate_Check(o)) {
        // A date is treated as midnight on that day
        *usec = 0;
        *second = 0;
        *minute = 0;
        *hour = 0;
        *day = PyDateTime_GET_DAY(o);
        *month = PyDateTime_GET_MONTH(o);
        *year = PyDateTime_GET_YEAR(o);
        return true;
    }
    if (PyTime_Check(o)) {
        *usec = PyDateTime_TIME_GET_MICROSECOND(o);
        *second = PyDateTime_TIME_GET_SECOND(o);
        *minute = PyDateTime_TIME_GET_MINUTE(o);
        *hour = PyDateTime_TIME_GET_HOUR(o);
        // A bare time is anchored to Jan 1, 1970 (see contract above)
        *day = 1;
        *month = 1;
        *year = 1970;
        return true;
    }
    return false; // not a datetime, date, or time
}
// Fast-path variant of pack_timedelta: returns a new reference via
// PyDelta_FromDSU, or nullptr with the error indicator set.
inline PyObject* pack_timedelta(int days, int secs, int usecs) noexcept {
    if (!PyDateTimeAPI) {
        PyDateTime_IMPORT; // sets the error indicator on failure
        if (!PyDateTimeAPI)
            return nullptr;
    }
    return PyDelta_FromDSU(days, secs, usecs);
}
// Fast-path variant of pack_datetime: returns a new reference via
// PyDateTime_FromDateAndTime, or nullptr with the error indicator set.
inline PyObject* pack_datetime(int year, int month, int day,
                               int hour, int minute, int second,
                               int usec) noexcept {
    if (!PyDateTimeAPI) {
        PyDateTime_IMPORT; // sets the error indicator on failure
        if (!PyDateTimeAPI)
            return nullptr;
    }
    return PyDateTime_FromDateAndTime(year, month, day,
                                      hour, minute, second, usec);
}
#endif // !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
//
#if defined(__GNUC__)
# pragma GCC diagnostic pop
#endif
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/nb_array.h | C/C++ Header | #pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Base caster for fixed-size array-like containers (e.g. std::array).
/// Python -> C++ requires a sequence of exactly `Size` elements;
/// C++ -> Python produces a Python list.
template <typename Array, typename Entry, size_t Size> struct array_caster {
    NB_TYPE_CASTER(Array, io_name("collections.abc.Sequence", "list") +
                              const_name("[") + make_caster<Entry>::Name +
                              const_name("]"))

    using Caster = make_caster<Entry>;

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        PyObject *temp;

        /* Will initialize 'temp' (NULL in the case of a failure.) */
        PyObject **o = seq_get_with_size(src.ptr(), Size, &temp);

        Caster caster;
        bool success = o != nullptr;

        flags = flags_for_local_caster<Entry>(flags);

        if (success) {
            // Convert each element in place; abort on the first failure
            for (size_t i = 0; i < Size; ++i) {
                if (!caster.from_python(o[i], flags, cleanup) ||
                    !caster.template can_cast<Entry>()) {
                    success = false;
                    break;
                }

                value[i] = caster.operator cast_t<Entry>();
            }
        }

        Py_XDECREF(temp); // release the temporary created by seq_get_with_size
        return success;
    }

    template <typename T>
    static handle from_cpp(T &&src, rv_policy policy, cleanup_list *cleanup) {
        object ret = steal(PyList_New(Size));

        if (ret.is_valid()) {
            Py_ssize_t index = 0;

            for (auto &value : src) {
                // forward_like_ preserves the value category of 'src'
                handle h = Caster::from_cpp(forward_like_<T>(value), policy, cleanup);

                if (!h.is_valid()) {
                    ret.reset(); // element conversion failed; drop partial list
                    break;
                }

                // Steals the reference held by 'h'
                NB_LIST_SET_ITEM(ret.ptr(), index++, h.ptr());
            }
        }

        return ret.release();
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/nb_dict.h | C/C++ Header | /*
nanobind/stl/detail/nb_dict.h: base class of dict casters
Copyright (c) 2022 Matej Ferencevic and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Base caster for map-like containers (std::map, std::unordered_map, ...).
/// Python -> C++ accepts any object supporting the mapping protocol;
/// C++ -> Python produces a Python dict.
template <typename Dict, typename Key, typename Val> struct dict_caster {
    NB_TYPE_CASTER(Dict, io_name("collections.abc.Mapping", "dict") +
                             const_name("[") + make_caster<Key>::Name +
                             const_name(", ") + make_caster<Val>::Name +
                             const_name("]"))

    using KeyCaster = make_caster<Key>;
    using ValCaster = make_caster<Val>;

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        value.clear();

        // Snapshot the mapping as a list of (key, value) tuples
        PyObject *items = PyMapping_Items(src.ptr());
        if (items == nullptr) {
            PyErr_Clear(); // not a mapping -> conversion simply fails
            return false;
        }

        // 'items' is safe to access without locking and reference counting, it
        // is unique to this thread
        Py_ssize_t size = NB_LIST_GET_SIZE(items);
        bool success = size >= 0;

        uint8_t flags_key = flags_for_local_caster<Key>(flags),
                flags_val = flags_for_local_caster<Val>(flags);

        KeyCaster key_caster;
        ValCaster val_caster;
        for (Py_ssize_t i = 0; i < size; ++i) {
            // Borrowed references into the private 'items' list
            PyObject *item = NB_LIST_GET_ITEM(items, i);
            PyObject *key = NB_TUPLE_GET_ITEM(item, 0);
            PyObject *val = NB_TUPLE_GET_ITEM(item, 1);

            if (!key_caster.from_python(key, flags_key, cleanup) ||
                !key_caster.template can_cast<Key>()) {
                success = false;
                break;
            }

            if (!val_caster.from_python(val, flags_val, cleanup) ||
                !val_caster.template can_cast<Val>()) {
                success = false;
                break;
            }

            value.emplace(key_caster.operator cast_t<Key>(),
                          val_caster.operator cast_t<Val>());
        }

        Py_DECREF(items);

        return success;
    }

    template <typename T>
    static handle from_cpp(T &&src, rv_policy policy, cleanup_list *cleanup) {
        dict ret;

        if (ret.is_valid()) {
            for (auto &item : src) {
                // Convert key and value, preserving the value category of 'src'
                object k = steal(KeyCaster::from_cpp(
                    forward_like_<T>(item.first), policy, cleanup));
                object e = steal(ValCaster::from_cpp(
                    forward_like_<T>(item.second), policy, cleanup));

                if (!k.is_valid() || !e.is_valid() ||
                    PyDict_SetItem(ret.ptr(), k.ptr(), e.ptr()) != 0) {
                    ret.reset(); // abort and discard the partial dict
                    break;
                }
            }
        }

        return ret.release();
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/nb_list.h | C/C++ Header | /*
nanobind/stl/detail/nb_list.h: base class of list casters
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Base caster for sequence-like containers (std::vector, std::list, ...).
/// Python -> C++ accepts any sequence; C++ -> Python produces a list.
template <typename List, typename Entry> struct list_caster {
    NB_TYPE_CASTER(List, io_name("collections.abc.Sequence", "list") +
                             const_name("[") + make_caster<Entry>::Name +
                             const_name("]"))

    using Caster = make_caster<Entry>;

    // Detection idiom: does 'List' provide a reserve() member?
    template <typename T> using has_reserve = decltype(std::declval<T>().reserve(0));

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        size_t size;
        PyObject *temp;

        /* Will initialize 'size' and 'temp'. All return values and
           return parameters are zero/NULL in the case of a failure. */
        PyObject **o = seq_get(src.ptr(), &size, &temp);

        value.clear();
        // Pre-allocate when the container supports it (e.g. std::vector)
        if constexpr (is_detected_v<has_reserve, List>)
            value.reserve(size);

        Caster caster;
        bool success = o != nullptr;

        flags = flags_for_local_caster<Entry>(flags);

        for (size_t i = 0; i < size; ++i) {
            if (!caster.from_python(o[i], flags, cleanup) ||
                !caster.template can_cast<Entry>()) {
                success = false;
                break;
            }

            value.push_back(caster.operator cast_t<Entry>());
        }

        Py_XDECREF(temp); // release the temporary created by seq_get
        return success;
    }

    template <typename T>
    static handle from_cpp(T &&src, rv_policy policy, cleanup_list *cleanup) {
        object ret = steal(PyList_New(src.size()));

        if (ret.is_valid()) {
            Py_ssize_t index = 0;

            for (auto &&value : src) {
                handle h = Caster::from_cpp(forward_like_<T>(value), policy, cleanup);

                if (!h.is_valid()) {
                    ret.reset(); // element conversion failed; drop partial list
                    break;
                }

                // Steals the reference held by 'h'
                NB_LIST_SET_ITEM(ret.ptr(), index++, h.ptr());
            }
        }

        return ret.release();
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/nb_optional.h | C/C++ Header | /*
nanobind/stl/optional.h: type caster for std::optional<...>
Copyright (c) 2022 Yoshiki Matsuda and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Base caster for optional-like types: Python 'None' maps to an empty
/// optional, any other value is converted via the contained type's caster.
template <typename Optional, typename T = typename Optional::value_type>
struct optional_caster {
    using Caster = make_caster<T>;

    NB_TYPE_CASTER(Optional, optional_name(Caster::Name))

    bool from_python(handle src, uint8_t flags, cleanup_list* cleanup) noexcept {
        if (src.is_none()) {
            value.reset(); // None -> empty optional
            return true;
        }

        Caster caster;
        if (!caster.from_python(src, flags_for_local_caster<T>(flags), cleanup) ||
            !caster.template can_cast<T>())
            return false;

        value.emplace(caster.operator cast_t<T>());
        return true;
    }

    template <typename T_>
    static handle from_cpp(T_ &&value, rv_policy policy, cleanup_list *cleanup) noexcept {
        if (!value)
            return none().release(); // empty optional -> None
        return Caster::from_cpp(forward_like_<T_>(*value), policy, cleanup);
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/nb_set.h | C/C++ Header | /*
nanobind/stl/detail/nb_set.h: base class of set casters
Copyright (c) 2022 Raymond Yun Fei and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Base caster for set-like containers (std::set, std::unordered_set, ...).
/// Python -> C++ accepts any iterable; C++ -> Python produces a Python set.
template <typename Set, typename Key> struct set_caster {
    NB_TYPE_CASTER(Set, io_name("collections.abc.Set", "set") +
                            const_name("[") + make_caster<Key>::Name +
                            const_name("]"))

    using Caster = make_caster<Key>;

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        value.clear();

        PyObject* iter = PyObject_GetIter(src.ptr());
        if (!iter) {
            PyErr_Clear(); // not iterable -> conversion simply fails
            return false;
        }

        bool success = true;
        Caster key_caster;
        PyObject *key;

        flags = flags_for_local_caster<Key>(flags);

        while ((key = PyIter_Next(iter)) != nullptr) {
            success &= (key_caster.from_python(key, flags, cleanup) &&
                        key_caster.template can_cast<Key>());
            Py_DECREF(key); // PyIter_Next returned a new reference

            if (!success)
                break;

            value.emplace(key_caster.operator cast_t<Key>());
        }

        // PyIter_Next returns NULL both on exhaustion and on error;
        // distinguish the two via the error indicator
        if (PyErr_Occurred()) {
            PyErr_Clear();
            success = false;
        }

        Py_DECREF(iter);

        return success;
    }

    template <typename T>
    static handle from_cpp(T &&src, rv_policy policy, cleanup_list *cleanup) {
        object ret = steal(PySet_New(nullptr));

        if (ret.is_valid()) {
            for (auto& key : src) {
                object k = steal(
                    Caster::from_cpp(forward_like_<T>(key), policy, cleanup));

                if (!k.is_valid() || PySet_Add(ret.ptr(), k.ptr()) != 0) {
                    ret.reset(); // abort and discard the partial set
                    break;
                }
            }
        }

        return ret.release();
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/detail/traits.h | C/C++ Header | /*
nanobind/stl/detail/traits.h: detail::is_copy_constructible<T>
partial overloads for STL types
Adapted from pybind11.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/* The builtin `std::is_copy_constructible` type trait merely checks whether
a copy constructor is present and returns `true` even when the this copy
constructor cannot be compiled. This a problem for older STL types like
`std::vector<T>` when `T` is noncopyable. The alternative below recurses
into STL types to work around this problem. */
// Recursive specialization: a container (detected via the value_type/
// reference member aliases) is deemed copy-constructible only if its
// element type is.
template <typename T>
struct is_copy_constructible<
    T, enable_if_t<
           std::is_same_v<typename T::value_type &, typename T::reference> &&
           std::is_copy_constructible_v<T> &&
           !std::is_same_v<T, typename T::value_type>>> {
    static constexpr bool value =
        is_copy_constructible<typename T::value_type>::value;
};

// std::pair is copy-constructible <=> both constituents are copy-constructible
template <typename T1, typename T2>
struct is_copy_constructible<std::pair<T1, T2>> {
    static constexpr bool value =
        is_copy_constructible<T1>::value &&
        is_copy_constructible<T2>::value;
};

// Analogous template for checking copy-assignability
template <typename T, typename SFINAE = int>
struct is_copy_assignable : std::is_copy_assignable<T> { };

// Container specialization: recurse into the element type
template <typename T>
struct is_copy_assignable<T,
    enable_if_t<std::is_copy_assignable_v<T> &&
                std::is_same_v<typename T::value_type &,
                               typename T::reference>>> {
    static constexpr bool value = is_copy_assignable<typename T::value_type>::value;
};

// std::pair is copy-assignable <=> both constituents are
template <typename T1, typename T2>
struct is_copy_assignable<std::pair<T1, T2>> {
    static constexpr bool value =
        is_copy_assignable<T1>::value &&
        is_copy_assignable<T2>::value;
};

// Convenience variable template
template <typename T>
constexpr bool is_copy_assignable_v = is_copy_assignable<T>::value;

// Analogous template for checking comparability
template <typename T> using comparable_test = decltype(std::declval<T>() == std::declval<T>());

template <typename T, typename SFINAE = int>
struct is_equality_comparable {
    static constexpr bool value = is_detected_v<comparable_test, T>;
};

// Container specialization: recurse into the element type
template <typename T>
struct is_equality_comparable<T, enable_if_t<is_detected_v<comparable_test, T> &&
                                             std::is_same_v<typename T::value_type &,
                                                            typename T::reference>>> {
    static constexpr bool value = is_equality_comparable<typename T::value_type>::value;
};

// std::pair is equality-comparable <=> both constituents are
template <typename T1, typename T2>
struct is_equality_comparable<std::pair<T1, T2>> {
    static constexpr bool value =
        is_equality_comparable<T1>::value &&
        is_equality_comparable<T2>::value;
};

// Convenience variable template
template <typename T>
constexpr bool is_equality_comparable_v = is_equality_comparable<T>::value;
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/filesystem.h | C/C++ Header | /*
nanobind/stl/filesystem.h: type caster for std::filesystem
Copyright (c) 2023 Qingnan Zhou and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <filesystem>
#include <string>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Caster between std::filesystem::path and Python path objects.
/// C++ -> Python yields a pathlib.Path; Python -> C++ accepts anything
/// implementing the os.PathLike protocol (or a str/bytes path).
template <>
struct type_caster<std::filesystem::path> {
    static handle from_cpp(const std::filesystem::path &path, rv_policy,
                           cleanup_list *) noexcept {
        // Decode the native path representation (char or wchar_t based,
        // depending on platform) into a Python str
        str py_str = to_py_str(path.native());
        if (py_str.is_valid()) {
            try {
                return module_::import_("pathlib")
                    .attr("Path")(py_str)
                    .release();
            } catch (python_error &e) {
                e.restore(); // propagate via the Python error indicator
            }
        }
        return handle(); // failure -> null handle
    }

    template <typename Char = typename std::filesystem::path::value_type>
    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        bool success = false;

        /* PyUnicode_FSConverter and PyUnicode_FSDecoder normally take care of
           calling PyOS_FSPath themselves, but that's broken on PyPy (see PyPy
           issue #3168) so we do it ourselves instead. */
        PyObject *buf = PyOS_FSPath(src.ptr());
        if (buf) {
            PyObject *native = nullptr;
            if constexpr (std::is_same_v<Char, char>) {
                // Narrow (POSIX) paths: convert to a bytes object
                if (PyUnicode_FSConverter(buf, &native)) {
                    if (char* s = PyBytes_AsString(native)) {
                        value = s; // Points to internal buffer, no need to free
                        success = true;
                    }
                }
            } else {
                // Wide (Windows) paths: convert to a str, then to wchar_t*
                if (PyUnicode_FSDecoder(buf, &native)) {
                    if (wchar_t *s = PyUnicode_AsWideCharString(native, nullptr)) {
                        value = s;
                        PyMem_Free(s); // New string, must free
                        success = true;
                    }
                }
            }
            Py_DECREF(buf);
            Py_XDECREF(native);
        }
        if (!success)
            PyErr_Clear(); // failed conversion must not leave an error set
        return success;
    }

    NB_TYPE_CASTER(std::filesystem::path, io_name("str | os.PathLike", "pathlib.Path"))

private:
    // Decode a narrow native path using the filesystem default encoding
    static str to_py_str(const std::string &s) {
        return steal<str>(
            PyUnicode_DecodeFSDefaultAndSize(s.c_str(), (Py_ssize_t) s.size()));
    }

    // Decode a wide native path directly from wchar_t data
    static str to_py_str(const std::wstring &s) {
        return steal<str>(
            PyUnicode_FromWideChar(s.c_str(), (Py_ssize_t) s.size()));
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/function.h | C/C++ Header | /*
nanobind/stl/function.h: type caster for std::function<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <functional>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Owning wrapper around a Python callable, stored inside std::function.
/// Holds a strong reference to 'f'. Copy and destruction may run on threads
/// that do not hold the GIL, so those paths acquire it before touching the
/// reference count; the converting constructor is invoked from a type caster
/// context and adjusts the count directly.
struct pyfunc_wrapper {
    PyObject *f; // strong reference (null after being moved from)

    explicit pyfunc_wrapper(PyObject *f) : f(f) {
        Py_INCREF(f);
    }

    pyfunc_wrapper(pyfunc_wrapper &&w) noexcept : f(w.f) {
        w.f = nullptr; // ownership transferred; no refcount change needed
    }

    pyfunc_wrapper(const pyfunc_wrapper &w) : f(w.f) {
        if (f) {
            gil_scoped_acquire acq;
            Py_INCREF(f);
        }
    }

    ~pyfunc_wrapper() {
        if (f) {
            gil_scoped_acquire acq;
            Py_DECREF(f);
        }
    }

    // Assignment is intentionally unsupported. Fix: the deleted copy
    // assignment previously declared its parameter by value
    // ('const pyfunc_wrapper'); a deleted function is never called, so
    // behavior is identical, but the conventional const-reference form is
    // used here for consistency with the move overload.
    pyfunc_wrapper &operator=(const pyfunc_wrapper &) = delete;
    pyfunc_wrapper &operator=(pyfunc_wrapper &&) = delete;
};
/// Caster between std::function and Python callables. A Python callable is
/// wrapped so it can be invoked from C++ (re-acquiring the GIL); a
/// std::function that originated in Python is unwrapped back to the original
/// object instead of being re-wrapped.
template <typename Return, typename... Args>
struct type_caster<std::function<Return(Args...)>> {
    using ReturnCaster = make_caster<
        std::conditional_t<std::is_void_v<Return>, void_type, Return>>;

    NB_TYPE_CASTER(std::function <Return(Args...)>,
                   const_name("collections.abc.Callable[[") +
                       concat(make_caster<Args>::Name...) + const_name("], ") +
                       ReturnCaster::Name + const_name("]"))

    // Callable wrapper stored inside the std::function; its type also serves
    // as the tag used by from_cpp() to recognize round-tripped functions
    struct pyfunc_wrapper_t : pyfunc_wrapper {
        using pyfunc_wrapper::pyfunc_wrapper;

        Return operator()(Args... args) const {
            gil_scoped_acquire acq; // may be invoked from a non-Python thread
            return cast<Return>(handle(f)((forward_t<Args>) args...));
        }
    };

    bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept {
        // None maps to an empty std::function, but only in implicit
        // conversion mode
        if (src.is_none())
            return flags & cast_flags::convert;

        if (!PyCallable_Check(src.ptr()))
            return false;

        value = pyfunc_wrapper_t(src.ptr());
        return true;
    }

    static handle from_cpp(const Value &value, rv_policy rvp,
                           cleanup_list *) noexcept {
        // If the std::function wraps a Python callable, return it directly
        const pyfunc_wrapper_t *wrapper = value.template target<pyfunc_wrapper_t>();
        if (wrapper)
            return handle(wrapper->f).inc_ref();

        if (rvp == rv_policy::none)
            return handle();

        if (!value)
            return none().release(); // empty std::function -> None

        // Otherwise expose the C++ callable to Python via cpp_function
        return cpp_function(value).release();
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/list.h | C/C++ Header | /*
nanobind/stl/list.h: type caster for std::list<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_list.h"
#include <list>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// std::list support: delegates entirely to the generic sequence caster
template <typename Type, typename Alloc> struct type_caster<std::list<Type, Alloc>>
 : list_caster<std::list<Type, Alloc>, Type> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/map.h | C/C++ Header | /*
nanobind/stl/map.h: type caster for std::map<...>
Copyright (c) 2022 Matej Ferencevic and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_dict.h"
#include <map>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// std::map support: delegates entirely to the generic mapping caster
template <typename Key, typename T, typename Compare, typename Alloc>
struct type_caster<std::map<Key, T, Compare, Alloc>>
    : dict_caster<std::map<Key, T, Compare, Alloc>, Key, T> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/optional.h | C/C++ Header | /*
nanobind/stl/optional.h: type caster for std::optional<...>
Copyright (c) 2022 Yoshiki Matsuda and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_optional.h"
#include <optional>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Strip std::optional when computing the underlying "monomorphic" type
template <typename T> struct remove_opt_mono<std::optional<T>>
    : remove_opt_mono<T> { };

// std::optional support: delegates to the generic optional caster
template <typename T>
struct type_caster<std::optional<T>> : optional_caster<std::optional<T>> {};

// std::nullopt_t behaves like None
template <> struct type_caster<std::nullopt_t> : none_caster<std::nullopt_t> { };

// Optional parameters may carry a default value in generated signatures
template <typename T>
struct has_arg_defaults<std::optional<T>> : std::true_type {};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/pair.h | C/C++ Header | /*
nanobind/stl/pair.h: type caster for std::pair<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <utility>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Caster between std::pair and 2-element Python tuples/sequences.
template <typename T1, typename T2> struct type_caster<std::pair<T1, T2>> {
    using Value = std::pair<T1, T2>;

    // Sub type casters
    using Caster1 = make_caster<T1>;
    using Caster2 = make_caster<T2>;

    /// This caster constructs instances on the fly (otherwise it would not be
    /// able to handle pairs containing references). Because of this, only the
    /// `operator Value()` cast operator is implemented below, and the type
    /// alias below informs users of this class of this fact.
    template <typename T> using Cast = Value;

    // Value name for docstring generation
    static constexpr auto Name =
        const_name("tuple[") + concat(Caster1::Name, Caster2::Name) + const_name("]");

    /// Python -> C++ caster, populates `caster1` and `caster2` upon success
    bool from_python(handle src, uint8_t flags,
                     cleanup_list *cleanup) noexcept {
        PyObject *temp; // always initialized by the following line
        PyObject **o = seq_get_with_size(src.ptr(), 2, &temp);

        bool success = o &&
                       caster1.from_python(o[0], flags, cleanup) &&
                       caster2.from_python(o[1], flags, cleanup);

        Py_XDECREF(temp); // release the temporary created by seq_get_with_size
        return success;
    }

    // Pointer overload: a null pointer converts to None
    template <typename T>
    static handle from_cpp(T *value, rv_policy policy, cleanup_list *cleanup) {
        if (!value)
            return none().release();
        return from_cpp(*value, policy, cleanup);
    }

    // C++ -> Python: build a 2-tuple from the two converted elements
    template <typename T>
    static handle from_cpp(T &&value, rv_policy policy,
                           cleanup_list *cleanup) noexcept {
        object o1 = steal(
            Caster1::from_cpp(forward_like_<T>(value.first), policy, cleanup));
        if (!o1.is_valid())
            return {};

        object o2 = steal(
            Caster2::from_cpp(forward_like_<T>(value.second), policy, cleanup));
        if (!o2.is_valid())
            return {};

        PyObject *r = PyTuple_New(2);
        // Steals the references held by 'o1' / 'o2'
        NB_TUPLE_SET_ITEM(r, 0, o1.release().ptr());
        NB_TUPLE_SET_ITEM(r, 1, o2.release().ptr());
        return r;
    }

    // Both element casters must be able to produce their target type
    template <typename T>
    bool can_cast() const noexcept {
        return caster1.template can_cast<T1>() && caster2.template can_cast<T2>();
    }

    /// Return the constructed tuple by copying from the sub-casters
    explicit operator Value() {
        return Value(caster1.operator cast_t<T1>(),
                     caster2.operator cast_t<T2>());
    }

    Caster1 caster1;
    Caster2 caster2;
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/set.h | C/C++ Header | /*
nanobind/stl/set.h: type caster for std::set<...>
Copyright (c) 2022 Raymond Yun Fei and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_set.h"
#include <set>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// std::set support: delegates entirely to the generic set caster
template <typename Key, typename Compare, typename Alloc>
struct type_caster<std::set<Key, Compare, Alloc>>
    : set_caster<std::set<Key, Compare, Alloc>, Key> {
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/shared_ptr.h | C/C++ Header | /*
nanobind/stl/shared_ptr.h: Type caster for std::shared_ptr<T>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <memory>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// shared_ptr deleter that reduces the reference count of a Python object
// shared_ptr deleter that reduces the reference count of a Python object
struct py_deleter {
    // Invoked when the shared_ptr's refcount reaches zero; the pointee
    // argument is ignored because 'o' owns the underlying storage
    void operator()(void *) noexcept {
        // Don't run the deleter if the interpreter has been shut down
        if (!is_alive())
            return;
        gil_scoped_acquire guard; // may run on a non-Python thread
        Py_DECREF(o);
    }

    PyObject *o; // strong reference released by operator()
};
/**
* Create a std::shared_ptr for `ptr` that owns a reference to the Python
* object `h`; if `ptr` is non-null, then the refcount of `h` is incremented
* before creating the shared_ptr and decremented by its deleter.
*
* Usually this is instantiated with T = void, to reduce template bloat.
* But if the pointee type uses enable_shared_from_this, we instantiate
* with T = that type, in order to allow its internal weak_ptr to share
* ownership with the shared_ptr we're creating.
*
* The next two functions are simultaneously marked as 'inline' (to avoid
* linker errors) and 'NB_NOINLINE' (to avoid them being inlined into every
* single shared_ptr type_caster, which would enlarge the binding size)
*/
// See the block comment above: builds a shared_ptr whose deleter drops a
// reference to the Python object 'h'. A null 'ptr' yields an empty
// shared_ptr without touching h's refcount.
template <typename T>
inline NB_NOINLINE std::shared_ptr<T>
shared_from_python(T *ptr, handle h) noexcept {
    if (ptr)
        return std::shared_ptr<T>(ptr, py_deleter{ h.inc_ref().ptr() });
    else
        return std::shared_ptr<T>(nullptr);
}
// Tie the lifetime of a C++ shared_ptr to a Python object 'o': a heap copy
// of the shared_ptr is registered via keep_alive and destroyed (releasing
// its shared ownership) when 'o' is garbage collected.
inline NB_NOINLINE void shared_from_cpp(std::shared_ptr<void> &&ptr,
                                        PyObject *o) noexcept {
    keep_alive(o, new std::shared_ptr<void>(std::move(ptr)),
               [](void *p) noexcept { delete (std::shared_ptr<void> *) p; });
}
/// Caster for std::shared_ptr<T>, where T must be bound via nanobind's class
/// binding mechanism. Python -> C++ produces a shared_ptr that keeps the
/// Python object alive; C++ -> Python ties the shared_ptr's ownership to the
/// returned Python instance.
template <typename T> struct type_caster<std::shared_ptr<T>> {
    static constexpr bool IsClass = true;
    using Caster = make_caster<T>;
    using Td = std::decay_t<T>;

    NB_TYPE_CASTER(std::shared_ptr<T>, Caster::Name)

    static_assert(is_base_caster_v<Caster>,
                  "Conversion of ``shared_ptr<T>`` requires that ``T`` is "
                  "handled by nanobind's regular class binding mechanism. "
                  "However, a type caster was registered to intercept this "
                  "particular type, which is not allowed.");

    bool from_python(handle src, uint8_t flags,
                     cleanup_list *cleanup) noexcept {
        // First convert to a raw pointer via the regular class caster
        Caster caster;
        if (!caster.from_python(src, flags, cleanup))
            return false;

        Td *ptr = caster.operator Td *();
        if constexpr (has_shared_from_this_v<T>) {
            if (ptr) {
                if (auto sp = ptr->weak_from_this().lock()) {
                    // There is already a C++ shared_ptr for this object. Use it.
                    value = std::static_pointer_cast<T>(std::move(sp));
                    return true;
                }
            }
            // Otherwise create a new one. Use shared_from_python<T>(...)
            // so that future calls to ptr->shared_from_this() can share
            // ownership with it.
            value = shared_from_python(ptr, src);
        } else {
            // Instantiate shared_from_python with T = void to reduce
            // template bloat (see the comment above shared_from_python)
            value = std::static_pointer_cast<T>(
                shared_from_python(static_cast<void *>(ptr), src));
        }

        return true;
    }

    static handle from_cpp(const Value &value, rv_policy,
                           cleanup_list *cleanup) noexcept {
        bool is_new = false;
        handle result;

        Td *ptr = (Td *) value.get();
        const std::type_info *type = &typeid(Td);

        // Allow a user-provided type_hook to refine the reported type
        constexpr bool has_type_hook =
            !std::is_base_of_v<std::false_type, type_hook<Td>>;
        if constexpr (has_type_hook)
            type = type_hook<Td>::get(ptr);

        if constexpr (!std::is_polymorphic_v<Td>) {
            result = nb_type_put(type, ptr, rv_policy::reference,
                                 cleanup, &is_new);
        } else {
            // For polymorphic types, also pass the dynamic type so the most
            // derived bound class can be selected
            const std::type_info *type_p =
                (!has_type_hook && ptr) ? &typeid(*ptr) : nullptr;
            result = nb_type_put_p(type, type_p, ptr, rv_policy::reference,
                                   cleanup, &is_new);
        }

        // Only a freshly created Python instance needs to co-own the
        // shared_ptr; an existing instance already holds one
        if (is_new) {
            std::shared_ptr<void> pp;
            if constexpr (std::is_const_v<T>)
                pp = std::static_pointer_cast<void>(std::const_pointer_cast<Td>(value));
            else
                pp = std::static_pointer_cast<void>(value);
            shared_from_cpp(std::move(pp), result.ptr());
        }

        return result;
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/string.h | C/C++ Header | /*
nanobind/stl/string.h: type caster for std::string
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <string>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Convert between Python ``str`` and ``std::string`` (UTF-8 encoded)
template <> struct type_caster<std::string> {
    NB_TYPE_CASTER(std::string, const_name("str"))

    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        Py_ssize_t len;
        const char *utf8 = PyUnicode_AsUTF8AndSize(src.ptr(), &len);
        if (utf8) {
            value.assign(utf8, (size_t) len);
            return true;
        }
        // Not a 'str' / encoding failure: clear the error and signal failure
        PyErr_Clear();
        return false;
    }

    static handle from_cpp(const std::string &value, rv_policy,
                           cleanup_list *) noexcept {
        return PyUnicode_FromStringAndSize(value.data(), value.size());
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/string_view.h | C/C++ Header | /*
nanobind/stl/string_view.h: type caster for std::string_view
Copyright (c) 2022 Qingnan Zhou and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <string_view>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Convert between Python ``str`` and ``std::string_view``
template <> struct type_caster<std::string_view> {
    NB_TYPE_CASTER(std::string_view, const_name("str"))

    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        Py_ssize_t len;
        const char *utf8 = PyUnicode_AsUTF8AndSize(src.ptr(), &len);
        if (utf8) {
            // The view aliases the str's cached UTF-8 buffer, which CPython
            // keeps alive as long as the Python object itself exists
            value = std::string_view(utf8, (size_t) len);
            return true;
        }
        PyErr_Clear();
        return false;
    }

    static handle from_cpp(std::string_view value, rv_policy,
                           cleanup_list *) noexcept {
        return PyUnicode_FromStringAndSize(value.data(), value.size());
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/tuple.h | C/C++ Header | /*
nanobind/stl/tuple.h: type caster for std::tuple<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <tuple>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Type caster mapping std::tuple<Ts...> to/from a Python tuple of length N
template <typename... Ts> struct type_caster<std::tuple<Ts...>> {
    // N1 avoids declaring a zero-length array in from_cpp_impl when N == 0
    static constexpr size_t N = sizeof...(Ts),
                            N1 = N > 0 ? N : 1;

    using Value = std::tuple<Ts...>;
    using Indices = std::make_index_sequence<N>;

    static constexpr auto Name =
        const_name("tuple[") +
        const_name<N == 0>(const_name("()"), concat(make_caster<Ts>::Name...)) +
        const_name("]");

    /// This caster constructs instances on the fly (otherwise it would not be
    /// able to handle tuples containing references_). Because of this, only the
    /// `operator Value()` cast operator is implemented below, and the type
    /// alias below informs users of this class of this fact.
    template <typename T> using Cast = Value;

    bool from_python(handle src, uint8_t flags,
                     cleanup_list *cleanup) noexcept {
        return from_python_impl(src, flags, cleanup, Indices{});
    }

    /// Convert each tuple slot with its element caster; succeeds only if
    /// the source has exactly N elements and every element converts.
    template <size_t... Is>
    bool from_python_impl(handle src, uint8_t flags, cleanup_list *cleanup,
                          std::index_sequence<Is...>) noexcept {
        (void) src; (void) flags; (void) cleanup;

        PyObject *temp; // always initialized by the following line
        PyObject **o = seq_get_with_size(src.ptr(), N, &temp);

        // Short-circuits left-to-right; stops at the first failing element
        bool success =
            (o && ... &&
             std::get<Is>(casters).from_python(o[Is], flags, cleanup));

        Py_XDECREF(temp);

        return success;
    }

    template <typename T>
    static handle from_cpp(T&& value, rv_policy policy,
                           cleanup_list *cleanup) noexcept {
        return from_cpp_impl((forward_t<T>) value, policy, cleanup, Indices{});
    }

    /// Pointer overload: a null tuple pointer converts to Python ``None``
    template <typename T>
    static handle from_cpp(T *value, rv_policy policy, cleanup_list *cleanup) {
        if (!value)
            return none().release();
        return from_cpp_impl(*value, policy, cleanup, Indices{});
    }

    /// Convert every element, then assemble the Python tuple; returns a
    /// null handle if any per-element conversion fails.
    template <typename T, size_t... Is>
    static handle from_cpp_impl(T &&value, rv_policy policy,
                                cleanup_list *cleanup,
                                std::index_sequence<Is...>) noexcept {
        (void) value; (void) policy; (void) cleanup;
        object o[N1];

        bool success =
            (... &&
             ((o[Is] = steal(make_caster<Ts>::from_cpp(
                   forward_like_<T>(std::get<Is>(value)), policy, cleanup))),
              o[Is].is_valid()));

        if (!success)
            return handle();

        PyObject *r = PyTuple_New(N);
        (NB_TUPLE_SET_ITEM(r, Is, o[Is].release().ptr()), ...);
        return r;
    }

    template <typename T>
    bool can_cast() const noexcept { return can_cast_impl(Indices{}); }

    explicit operator Value() { return cast_impl(Indices{}); }

    template <size_t... Is>
    bool can_cast_impl(std::index_sequence<Is...>) const noexcept {
        return (std::get<Is>(casters).template can_cast<Ts>() && ...);
    }

    template <size_t... Is> Value cast_impl(std::index_sequence<Is...>) {
        return Value(std::get<Is>(casters).operator cast_t<Ts>()...);
    }

    // One sub-caster per tuple element, retained across the conversion
    std::tuple<make_caster<Ts>...> casters;
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/unique_ptr.h | C/C++ Header | /*
nanobind/stl/unique_ptr.h: Type caster for std::unique_ptr<T>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <memory>
NAMESPACE_BEGIN(NB_NAMESPACE)
// Deleter for std::unique_ptr<T> (handles ownership by both C++ and Python)
template <typename T> struct deleter {
    /// Default-constructed: the object is owned by C++ and is destroyed
    /// with a plain delete expression.
    deleter() = default;

    /// Constructed from a handle: the object is owned by Python, and
    /// "deleting" it merely drops one reference.
    deleter(handle h) : o(h.ptr()) { }

    /// Does Python own storage of the underlying object
    bool owned_by_python() const { return o != nullptr; }

    /// Does C++ own storage of the underlying object
    bool owned_by_cpp() const { return o == nullptr; }

    /// Perform the requested deletion operation
    void operator()(void *p) noexcept {
        if (!o) {
            delete (T *) p;
            return;
        }
        gil_scoped_acquire guard;
        Py_DECREF(o);
    }

    PyObject *o{nullptr};
};
NAMESPACE_BEGIN(detail)
/// Type caster for std::unique_ptr<T, Deleter>. Converting from Python
/// transfers ownership of the instance out of the Python object; converting
/// to Python hands ownership over to a (new or existing) Python object.
template <typename T, typename Deleter>
struct type_caster<std::unique_ptr<T, Deleter>> {
    static constexpr bool IsClass = true;
    using Value = std::unique_ptr<T, Deleter>;
    using Caster = make_caster<T>;
    using Td = std::decay_t<T>;

    // Only two deleter types are supported (see static_assert below)
    static constexpr bool IsDefaultDeleter =
        std::is_same_v<Deleter, std::default_delete<T>>;
    static constexpr bool IsNanobindDeleter =
        std::is_same_v<Deleter, deleter<T>>;

    static_assert(is_base_caster_v<Caster>,
                  "Conversion of ``unique_ptr<T>`` requires that ``T`` is "
                  "handled by nanobind's regular class binding mechanism. "
                  "However, a type caster was registered to intercept this "
                  "particular type, which is not allowed.");

    static_assert(IsDefaultDeleter || IsNanobindDeleter,
                  "Binding std::unique_ptr<T, Deleter> requires that "
                  "'Deleter' is either 'std::default_delete<T>' or "
                  "'nanobind::deleter<T>'");

    static constexpr auto Name = Caster::Name;
    template <typename T_> using Cast = Value;

    Caster caster;
    handle src;

    /* If true, the Python object has relinquished ownership but we have
       not yet yielded a unique_ptr that holds ownership on the C++ side.
       `nb_type_relinquish_ownership()` can fail, so we must check it in
       `can_cast()`. If we do so, but then wind up not executing the cast
       operator, we must remember to undo our relinquishment and push the
       ownership back onto the Python side. For example, this might be
       necessary if the Python object `[(foo, foo)]` is converted to
       `std::vector<std::pair<std::unique_ptr<T>, std::unique_ptr<T>>>`;
       the pair caster won't know that it can't cast the second element
       until after it's verified that it can cast the first one. */
    mutable bool inflight = false;

    // Undo a relinquishment that was never consumed (see comment above)
    ~type_caster() {
        if (inflight)
            nb_type_restore_ownership(src.ptr(), IsDefaultDeleter);
    }

    bool from_python(handle src_, uint8_t, cleanup_list *) noexcept {
        // Stash source python object
        src = src_;

        /* Try casting to a pointer of the underlying type. We pass flags=0 and
           cleanup=nullptr to prevent implicit type conversions (they are
           problematic since the instance then wouldn't be owned by 'src') */
        return caster.from_python(src_, 0, nullptr);
    }

    /// Pointer-to-unique_ptr overload; a null pointer is not convertible
    template <typename T2>
    static handle from_cpp(T2 *value, rv_policy policy,
                           cleanup_list *cleanup) noexcept {
        if (!value)
            return handle();
        return from_cpp(*value, policy, cleanup);
    }

    /// C++ -> Python: move ownership into a Python object. On success the
    /// incoming unique_ptr is emptied (release() or reset(), depending on
    /// which side owned the storage).
    template <typename T2>
    static handle from_cpp(T2 &&value,
                           rv_policy, cleanup_list *cleanup) noexcept {
        // With the nanobind deleter, the object may already be Python-owned
        bool cpp_delete = true;
        if constexpr (IsNanobindDeleter)
            cpp_delete = value.get_deleter().owned_by_cpp();

        Td *ptr = (Td *) value.get();
        const std::type_info *type = &typeid(Td);

        if (!ptr)
            return none().release();

        // Optional user-provided hook that can refine the reported type
        constexpr bool has_type_hook =
            !std::is_base_of_v<std::false_type, type_hook<Td>>;
        if constexpr (has_type_hook)
            type = type_hook<Td>::get(ptr);

        handle result;
        if constexpr (!std::is_polymorphic_v<Td>) {
            result = nb_type_put_unique(type, ptr, cleanup, cpp_delete);
        } else {
            // Polymorphic case: also pass the dynamic type (typeid(*ptr))
            const std::type_info *type_p =
                (!has_type_hook && ptr) ? &typeid(*ptr) : nullptr;
            result = nb_type_put_unique_p(type, type_p, ptr, cleanup, cpp_delete);
        }

        if (result.is_valid()) {
            if (cpp_delete)
                value.release();
            else
                value.reset();
        }

        return result;
    }

    /// Ask the Python object to give up ownership (recorded in 'inflight'
    /// so the destructor can undo it if the cast is never performed)
    template <typename T_>
    bool can_cast() const noexcept {
        if (src.is_none() || inflight)
            return true;
        else if (!nb_type_relinquish_ownership(src.ptr(), IsDefaultDeleter))
            return false;
        inflight = true;
        return true;
    }

    explicit operator Value() {
        // Relinquish now if can_cast() wasn't called beforehand
        if (!inflight && !src.is_none() &&
            !nb_type_relinquish_ownership(src.ptr(), IsDefaultDeleter))
            throw next_overload();
        Td *p = caster.operator Td *();
        Value value;
        if constexpr (IsNanobindDeleter)
            value = Value(p, deleter<T>(src.inc_ref()));
        else
            value = Value(p);
        inflight = false;
        return value;
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/unordered_map.h | C/C++ Header | /*
nanobind/stl/unordered_map.h: type caster for std::unordered_map<...>
Copyright (c) 2022 Matej Ferencevic and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_dict.h"
#include <unordered_map>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Bind std::unordered_map<...> via the generic dict_caster helper.
///
/// Note: std::unordered_map has *five* template parameters
/// (Key, T, Hash, KeyEqual, Allocator). The partial specialization must
/// name all five — a four-parameter pattern (as previously written, with
/// 'Compare' copy-pasted from the std::map caster) can never match any
/// unordered_map instantiation, so the caster was silently unusable.
template <typename Key, typename T, typename Hash, typename Eq, typename Alloc>
struct type_caster<std::unordered_map<Key, T, Hash, Eq, Alloc>>
    : dict_caster<std::unordered_map<Key, T, Hash, Eq, Alloc>, Key, T> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/unordered_set.h | C/C++ Header | /*
nanobind/stl/unordered_set.h: type caster for std::unordered_set<...>
Copyright (c) 2022 Raymond Yun Fei and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_set.h"
#include <unordered_set>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Bind std::unordered_set<...> via the generic set_caster helper.
/// (std::unordered_set's parameters are Key, Hash, KeyEqual, Allocator.)
template <typename Key, typename Hash, typename Eq, typename Alloc>
struct type_caster<std::unordered_set<Key, Hash, Eq, Alloc>>
    : set_caster<std::unordered_set<Key, Hash, Eq, Alloc>, Key> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/variant.h | C/C++ Header | /*
nanobind/stl/variant.h: type caster for std::variant<...>
Copyright (c) 2022 Yoshiki Matsuda and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <variant>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Metafunction that concatenates a list of std::variant<...> type lists
/// into a single std::variant
template <typename T, typename...>
struct concat_variant { using type = T; };
template <typename... Ts1, typename... Ts2, typename... Ts3>
struct concat_variant<std::variant<Ts1...>, std::variant<Ts2...>, Ts3...>
    : concat_variant<std::variant<Ts1..., Ts2...>, Ts3...> {};

/// remove_opt_mono for variants: drop std::monostate alternatives (each
/// maps to an empty std::variant<>) and recurse into the remaining ones
template <typename... Ts> struct remove_opt_mono<std::variant<Ts...>>
    : concat_variant<std::conditional_t<std::is_same_v<std::monostate, Ts>, std::variant<>, std::variant<remove_opt_mono_t<Ts>>>...> {};

/// std::monostate converts to/from Python ``None``
template <> struct type_caster<std::monostate> : none_caster<std::monostate> { };

/// Storage helper for the variant caster below; the 'Defaultable' flag
/// selects between the two specializations
template <bool Defaultable, typename... Ts>
struct variant_caster_storage;

/// Storage when the variant is default-constructible: hold it directly
template <typename... Ts>
struct variant_caster_storage<true, Ts...> {
    using Variant = std::variant<Ts...>;
    Variant value;
    Variant& get() { return value; }
    template <typename T>
    void store(T&& alternative) { value = (forward_t<T>) alternative; }
};

/// Storage when the variant is NOT default-constructible: wrap it in an
/// outer variant whose first alternative (std::monostate) provides the
/// "empty" state; get() assumes store() has been called
template <typename... Ts>
struct variant_caster_storage<false, Ts...> {
    using Variant = std::variant<Ts...>;
    std::variant<std::monostate, Variant> value;
    Variant& get() { return *std::get_if<1>(&value); }
    template <typename T>
    void store(T&& alternative) {
        value.template emplace<1>((forward_t<T>) alternative);
    }
};

/// A std::variant is default-constructible iff its first alternative is
template <typename T1, typename... Ts>
constexpr bool variant_is_defaultable = std::is_default_constructible_v<T1>;
/// Type caster for std::variant<Ts...>: tries each alternative's caster in
/// declaration order
template <typename... Ts>
struct type_caster<std::variant<Ts...>>
    : private variant_caster_storage<variant_is_defaultable<Ts...>, Ts...> {
    // We don't use NB_TYPE_CASTER so that we can customize the cast operators
    // to use `variant_caster_storage`, in order to support variants that are
    // not default-constructible.
    using Value = std::variant<Ts...>;
    static constexpr auto Name = union_name(make_caster<Ts>::Name...);
    template <typename T> using Cast = movable_cast_t<T>;
    template <typename T> static constexpr bool can_cast() { return true; }

    explicit operator Value*() { return &this->get(); }
    explicit operator Value&() { return (Value &) this->get(); }
    explicit operator Value&&() { return (Value &&) this->get(); }

    /// Attempt to convert `src` to alternative T; on success the result is
    /// placed into the storage helper
    template <typename T>
    bool try_variant(const handle &src, uint8_t flags, cleanup_list *cleanup) {
        using CasterT = make_caster<T>;

        CasterT caster;

        if (!caster.from_python(src, flags_for_local_caster<T>(flags), cleanup) ||
            !caster.template can_cast<T>())
            return false;

        this->store(caster.operator cast_t<T>());

        return true;
    }

    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
        // When implicit conversions are permitted, first do a pass with
        // conversions disabled so that an exact match wins over a
        // conversion into an earlier-listed alternative
        if (flags & (uint8_t) cast_flags::convert) {
            if ((try_variant<Ts>(src, flags & ~(uint8_t)cast_flags::convert, cleanup) || ...)){
                return true;
            }
        }
        return (try_variant<Ts>(src, flags, cleanup) || ...);
    }

    /// Pointer overload: a null variant pointer converts to Python ``None``
    template <typename T>
    static handle from_cpp(T *value, rv_policy policy, cleanup_list *cleanup) {
        if (!value)
            return none().release();
        return from_cpp(*value, policy, cleanup);
    }

    /// C++ -> Python: dispatch to the caster of the currently held alternative
    template <typename T>
    static handle from_cpp(T &&value, rv_policy policy, cleanup_list *cleanup) noexcept {
        return std::visit(
            [&](auto &&v) {
                return make_caster<decltype(v)>::from_cpp(
                    std::forward<decltype(v)>(v), policy, cleanup);
            },
            std::forward<T>(value));
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/vector.h | C/C++ Header | /*
nanobind/stl/vector.h: type caster for std::vector<...>
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/nb_list.h"
#include <vector>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Bind std::vector<...> via the generic list_caster helper
template <typename Type, typename Alloc> struct type_caster<std::vector<Type, Alloc>>
 : list_caster<std::vector<Type, Alloc>, Type> { };
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/stl/wstring.h | C/C++ Header | /*
nanobind/stl/wstring.h: type caster for std::wstring
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
#include <string>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Convert between Python ``str`` and ``std::wstring``
template <> struct type_caster<std::wstring> {
    NB_TYPE_CASTER(std::wstring, const_name("str"))

    bool from_python(handle src, uint8_t, cleanup_list *) noexcept {
        Py_ssize_t size;
        // PyUnicode_AsWideCharString() returns a *new* buffer allocated
        // with PyMem_New() that the caller must release via PyMem_Free()
        wchar_t *str = PyUnicode_AsWideCharString(src.ptr(), &size);
        if (!str) {
            PyErr_Clear();
            return false;
        }
        value = std::wstring(str, (size_t) size);
        PyMem_Free(str); // fix: this buffer was previously leaked
        return true;
    }

    static handle from_cpp(const std::wstring &value, rv_policy,
                           cleanup_list *) noexcept {
        return PyUnicode_FromWideChar(value.c_str(), value.size());
    }
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/trampoline.h | C/C++ Header | /*
nanobind/trampoline.h: functionality for overriding C++ virtual
functions from within Python
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
struct ticket;

// Low-level trampoline machinery implemented in libnanobind:
// trampoline_new/release manage the per-instance lookup table, and
// trampoline_enter/leave bracket a dispatch to a Python override.
NB_CORE void trampoline_new(void **data, size_t size, void *ptr) noexcept;
NB_CORE void trampoline_release(void **data, size_t size) noexcept;

NB_CORE void trampoline_enter(void **data, size_t size, const char *name,
                              bool pure, ticket *ticket);
NB_CORE void trampoline_leave(ticket *ticket) noexcept;

/// Per-instance trampoline state with room for 'Size' overridable methods;
/// data[0] stores the Python object implementing the overrides (see base())
template <size_t Size> struct trampoline {
    mutable void *data[2 * Size + 1];

    NB_INLINE constexpr trampoline(void *ptr) { trampoline_new(data, Size, ptr); }
    NB_INLINE ~trampoline() { trampoline_release(data, Size); }

    NB_INLINE handle base() const { return (PyObject *) data[0]; }
};

/// RAII guard around a single override dispatch: the constructor calls
/// trampoline_enter() (which fills in 'self'/'key'), the destructor calls
/// trampoline_leave(). An invalid 'key' means no Python override exists.
struct ticket {
    handle self;
    handle key;
    ticket *prev{};
    PyGILState_STATE state{};

    template <size_t Size>
    NB_INLINE ticket(const trampoline<Size> &t, const char *name, bool pure) {
        trampoline_enter(t.data, Size, name, pure, this);
    }

    NB_INLINE ~ticket() noexcept { trampoline_leave(this); }
};

// Declare the trampoline member inside a subclass; 'size' is the number of
// overridable methods. (No comments inside the macros: they use '\' line
// continuations, which are spliced before comments are removed.)
#define NB_TRAMPOLINE(base, size)                                              \
    using NBBase = base;                                                       \
    using NBBase::NBBase;                                                      \
    nanobind::detail::trampoline<size> nb_trampoline{ this }

// Dispatch to a Python override named 'name' if one exists, otherwise fall
// back to the C++ base implementation.
#define NB_OVERRIDE_NAME(name, func, ...)                                      \
    using nb_ret_type = decltype(NBBase::func(__VA_ARGS__));                   \
    nanobind::detail::ticket nb_ticket(nb_trampoline, name, false);            \
    if (nb_ticket.key.is_valid()) {                                            \
        return nanobind::cast<nb_ret_type>(                                    \
            nb_trampoline.base().attr(nb_ticket.key)(__VA_ARGS__));            \
    } else                                                                     \
        return NBBase::func(__VA_ARGS__)

// Dispatch to a Python override of a pure virtual method (no C++ fallback;
// trampoline_enter is invoked with pure=true).
#define NB_OVERRIDE_PURE_NAME(name, func, ...)                                 \
    using nb_ret_type = decltype(NBBase::func(__VA_ARGS__));                   \
    nanobind::detail::ticket nb_ticket(nb_trampoline, name, true);             \
    return nanobind::cast<nb_ret_type>(                                        \
        nb_trampoline.base().attr(nb_ticket.key)(__VA_ARGS__))

// Convenience forms that derive the Python name from the C++ identifier
#define NB_OVERRIDE(func, ...)                                                 \
    NB_OVERRIDE_NAME(#func, func, __VA_ARGS__)

#define NB_OVERRIDE_PURE(func, ...)                                            \
    NB_OVERRIDE_PURE_NAME(#func, func, __VA_ARGS__)
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
include/nanobind/typing.h | C/C++ Header | /*
nanobind/typing.h: Optional typing-related functionality
Copyright (c) 2024 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <nanobind/nanobind.h>
NAMESPACE_BEGIN(NB_NAMESPACE)
/// Import and return the Python ``typing`` module
inline module_ typing() { return module_::import_("typing"); }

/// Return ``typing.Any`` (the template arguments are ignored)
template <typename... Args>
object any_type() { return typing().attr("Any"); }

/// Construct a ``typing.TypeVar``, forwarding all arguments
template <typename... Args>
object type_var(Args&&... args) {
    return typing().attr("TypeVar")((detail::forward_t<Args>) args...);
}

/// Construct a ``typing.TypeVarTuple``, forwarding all arguments
template <typename... Args>
object type_var_tuple(Args&&... args) {
    return typing().attr("TypeVarTuple")((detail::forward_t<Args>) args...);
}

/// Construct a ``typing.ParamSpec``, forwarding all arguments
template <typename... Args>
object param_spec(Args&&... args) {
    return typing().attr("ParamSpec")((detail::forward_t<Args>) args...);
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/__init__.py | Python | import sys
import os
# Fail fast on interpreters older than the minimum supported version
if sys.version_info < (3, 9):
    raise ImportError("nanobind does not support Python < 3.9.")
def source_dir() -> str:
    """Return the absolute path of the directory holding nanobind's C++ sources."""
    pkg_root = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(pkg_root, "src")
def include_dir() -> str:
    """Return the absolute path of the nanobind header directory."""
    pkg_root = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(pkg_root, "include")
def cmake_dir() -> str:
    """Return the absolute path of the nanobind CMake module directory."""
    pkg_root = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(pkg_root, "cmake")
# Package version string (PEP 440 development release)
__version__ = "2.11.1-dev1"

# Names exported by `from nanobind import *`
__all__ = (
    "__version__",
    "source_dir",
    "include_dir",
    "cmake_dir",
)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/__main__.py | Python | import argparse
import sys
import sysconfig # type: ignore
from . import __version__, include_dir, cmake_dir
def main() -> None:
    """Command-line entry point: print version / path information on request."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--version",
        action="version",
        version=__version__,
        help="Print the version number.",
    )
    ap.add_argument(
        "--include_dir",
        action="store_true",
        help="Print the path to the nanobind C++ header directory."
    )
    ap.add_argument(
        "--cmake_dir",
        action="store_true",
        help="Print the path to the nanobind CMake module directory."
    )
    opts = ap.parse_args()

    # Invoked with no arguments at all: show the usage text
    if len(sys.argv) == 1:
        ap.print_help()

    if opts.include_dir:
        print(include_dir())
    if opts.cmake_dir:
        print(cmake_dir())
# Support direct execution via `python -m nanobind`
if __name__ == "__main__":
    main()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/buffer.h | C/C++ Header | #pragma once
#include <string.h>
#include <stdarg.h>
#include <stdio.h> // vsnprintf
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Growable, heap-allocated character buffer used to assemble strings
/// (signatures, error messages, docstrings). Always NUL-terminated once
/// non-empty; aborts the process on allocation failure.
struct Buffer {
public:
    // Disable copy/move constructor and assignment
    Buffer(const Buffer &) = delete;
    Buffer(Buffer &&) = delete;
    Buffer &operator=(const Buffer &) = delete;
    Buffer &operator=(Buffer &&) = delete;

    /// Allocate an initial buffer of 'size' bytes (may be 0)
    Buffer(size_t size = 0) : m_start((char *) malloc(size)) {
        if (!m_start) {
            fprintf(stderr, "Buffer::Buffer(): out of memory (unrecoverable error)!");
            abort();
        }
        m_end = m_start + size;
        if (size)
            clear();
    }

    ~Buffer() {
        free(m_start);
    }

    /// Append a string with the specified length
    void put(const char *str, size_t size) {
        if (m_cur + size >= m_end)
            expand(size + 1 - remain());

        memcpy(m_cur, str, size);
        m_cur += size;
        *m_cur = '\0';
    }

    /// Append a string
    template <size_t N> void put(const char (&str)[N]) {
        put(str, N - 1);
    }

    /// Append a dynamic string
    void put_dstr(const char *str) { put(str, strlen(str)); }

    /// Append a single character to the buffer
    void put(char c) {
        if (m_cur + 1 >= m_end)
            expand();
        *m_cur++ = c;
        *m_cur = '\0';
    }

    /// Append multiple copies of a single character to the buffer
    void put(char c, size_t count) {
        if (m_cur + count >= m_end)
            expand(count + 1 - remain());

        for (size_t i = 0; i < count; ++i)
            *m_cur++ = c;

        *m_cur = '\0';
    }

    /// Append a formatted (printf-style) string to the buffer
#if defined(__GNUC__)
    __attribute__((__format__ (__printf__, 2, 3)))
#endif
    size_t fmt(const char *format, ...) {
        size_t written;
        // Retry loop: vsnprintf reports the required length, so after a
        // failed (truncated) attempt the buffer is grown and we re-format
        do {
            size_t size = remain();
            va_list args;
            va_start(args, format);
            written = (size_t) vsnprintf(m_cur, size, format, args);
            va_end(args);

            if (written + 1 < size) {
                m_cur += written;
                break;
            }

            expand();
        } while (true);

        return written;
    }

    /// Return a pointer to the NUL-terminated contents (owned by the buffer)
    const char *get() { return m_start; }

    /// Reset to an empty string without releasing storage
    void clear() {
        m_cur = m_start;
        if (m_start != m_end)
            m_start[0] = '\0';
    }

    /// Remove the last 'n' characters
    void rewind(size_t n) {
        if (m_cur < m_start + n)
            m_cur = m_start;
        else
            m_cur -= n;
        *m_cur = '\0';
    }

    /// Append an unsigned 32 bit integer
    void put_uint32(uint32_t value) {
        const int digits = 10;
        const char *num = "0123456789";
        char buf[digits];

        // Fill 'buf' back-to-front with the decimal digits of 'value'
        size_t i = digits;
        do {
            buf[--i] = num[value % 10];
            value /= 10;
        } while (value);

        return put(buf + i, digits - i);
    }

    /// Return a malloc'd NUL-terminated copy of the contents starting at
    /// 'offset'; the caller owns (and must free()) the result
    char *copy(size_t offset = 0) const {
        size_t copy_size = size() + 1 - offset;
        char *tmp = (char *) malloc(copy_size);
        if (!tmp) {
            fprintf(stderr, "Buffer::copy(): out of memory (unrecoverable error)!");
            abort();
        }
        memcpy(tmp, m_start + offset, copy_size);
        return tmp;
    }

    size_t size() const { return (size_t) (m_cur - m_start); }
    size_t remain() const { return (size_t) (m_end - m_cur); }

private:
    /// Grow the allocation to (2 * current + minval) bytes, preserving the
    /// used portion (including the terminating NUL when present)
    NB_NOINLINE void expand(size_t minval = 2) {
        size_t old_alloc_size = m_end - m_start,
               new_alloc_size = 2 * old_alloc_size + minval,
               used_size      = (size_t) (m_cur - m_start),
               copy_size      = used_size + 1;

        // Guard against reading past the old allocation when it was empty
        if (old_alloc_size < copy_size)
            copy_size = old_alloc_size;

        char *tmp = (char *) malloc(new_alloc_size);
        if (!tmp) {
            fprintf(stderr, "Buffer::expand(): out of memory (unrecoverable error)!");
            abort();
        }
        memcpy(tmp, m_start, copy_size);
        free(m_start);

        m_start = tmp;
        m_end = m_start + new_alloc_size;
        m_cur = m_start + used_size;
    }

private:
    // [m_start, m_end) is the allocation; m_cur points at the NUL terminator
    char *m_start{nullptr}, *m_cur{nullptr}, *m_end{nullptr};
};
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/common.cpp | C++ | /*
src/common.cpp: miscellaneous libnanobind functionality
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/nanobind.h>
#include <complex>
#include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Format 'fmt'/'args_' into a message and wrap it in a builtin_exception
/// of the given 'type'. Uses a 512-byte stack buffer and only heap-allocates
/// when the formatted message is longer.
NB_NOINLINE static builtin_exception
create_exception(exception_type type, const char *fmt, va_list args_) {
    char buf[512];
    va_list args;

    // First attempt: format into the stack buffer
    va_copy(args, args_);
    int size = vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);

    if (size < (int) sizeof(buf)) {
        return builtin_exception(type, buf);
    } else {
        // Truncated: allocate exactly size+1 bytes and format again
        scoped_pymalloc<char> temp(size + 1);
        va_copy(args, args_);
        vsnprintf(temp.get(), size + 1, fmt, args);
        va_end(args);
        return builtin_exception(type, temp.get());
    }
}
/// Throw a builtin_exception of kind 'runtime_error' with a printf-style
/// formatted message
#if defined(__GNUC__)
    __attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
    [[noreturn]]
#endif
void raise(const char *fmt, ...) {
    va_list args;
    va_start(args, fmt);
    builtin_exception err =
        create_exception(exception_type::runtime_error, fmt, args);
    va_end(args);
    throw err;
}

/// Throw a builtin_exception of kind 'type_error' with a printf-style
/// formatted message
#if defined(__GNUC__)
    __attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
    [[noreturn]]
#endif
void raise_type_error(const char *fmt, ...) {
    va_list args;
    va_start(args, fmt);
    builtin_exception err =
        create_exception(exception_type::type_error, fmt, args);
    va_end(args);
    throw err;
}

/// Abort the process with a fatal error
#if defined(__GNUC__)
    __attribute__((noreturn, __format__ (__printf__, 1, 2)))
#else
    [[noreturn]]
#endif
void fail(const char *fmt, ...) noexcept {
    va_list args;
    fprintf(stderr, "Critical nanobind error: ");
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
    fprintf(stderr, "\n");
    abort();
}

/// Create a PyCapsule wrapping 'ptr'; the optional 'cleanup' callback is
/// stored as the capsule context and invoked on the pointer when the
/// capsule is destroyed. Aborts on failure.
PyObject *capsule_new(const void *ptr, const char *name,
                      void (*cleanup)(void *) noexcept) noexcept {
    auto capsule_cleanup = [](PyObject *o) {
        // Retrieve the user callback from the capsule context
        auto cleanup_2 = (void (*)(void *))(PyCapsule_GetContext(o));
        if (cleanup_2)
            cleanup_2(PyCapsule_GetPointer(o, PyCapsule_GetName(o)));
    };

    PyObject *c = PyCapsule_New((void *) ptr, name, capsule_cleanup);
    check(c, "nanobind::detail::capsule_new(): allocation failed!");

    int rv = PyCapsule_SetContext(c, (void *) cleanup);
    check(rv == 0, "nanobind::detail::capsule_new(): could not set context!");

    return c;
}

/// Re-raise the currently pending Python error as a C++ python_error;
/// an error must actually be set
void raise_python_error() {
    check(PyErr_Occurred(),
          "nanobind::detail::raise_python_error() called without "
          "an error condition!");
    throw python_error();
}

/// Helper used by generated code: a null pointer means "try the next
/// function overload"
void raise_next_overload_if_null(void *p) {
    if (NB_UNLIKELY(!p))
        throw next_overload();
}

/// Raise python_error when a Python error is pending, cast_error otherwise
void raise_python_or_cast_error() {
    if (PyErr_Occurred())
        throw python_error();
    throw cast_error();
}
// ========================================================================
/// Drop all references held by the list and free its storage
void cleanup_list::release() noexcept {
    /* Don't decrease the reference count of the first
       element, it stores the 'self' element. */
    for (size_t i = 1; i < m_size; ++i)
        Py_DECREF(m_data[i]);
    if (m_capacity != Small)
        free(m_data);
    m_data = nullptr;
}

/// Double the list's capacity, moving existing entries. The initial
/// 'Small' storage is inline and must not be free()d.
void cleanup_list::expand() noexcept {
    uint32_t new_capacity = m_capacity * 2;
    PyObject **new_data = (PyObject **) malloc(new_capacity * sizeof(PyObject *));
    check(new_data, "nanobind::detail::cleanup_list::expand(): out of memory!");
    memcpy(new_data, m_data, m_size * sizeof(PyObject *));
    if (m_capacity != Small)
        free(m_data);
    m_data = new_data;
    m_capacity = new_capacity;
}
// ========================================================================
/// Import a module by name; throws python_error on failure
PyObject *module_import(const char *name) {
    PyObject *res = PyImport_ImportModule(name);
    if (!res)
        throw python_error();
    return res;
}

/// Import a module given its name as a Python object
PyObject *module_import(PyObject *o) {
    PyObject *res = PyImport_Import(o);
    if (!res)
        throw python_error();
    return res;
}

/// Create (or fetch) the submodule "<base's name>.<name>", optionally set
/// its docstring, and register it as attribute 'name' of 'base'.
/// Any failure raises the pending Python error.
PyObject *module_new_submodule(PyObject *base, const char *name,
                               const char *doc) noexcept {

    const char *base_name, *tmp_str;
    Py_ssize_t tmp_size = 0;
    object tmp, res;

    // Build the fully qualified submodule name "base.name"
    base_name = PyModule_GetName(base);
    if (!base_name)
        goto fail;

    tmp = steal(PyUnicode_FromFormat("%s.%s", base_name, name));
    if (!tmp.is_valid())
        goto fail;
    tmp_str = PyUnicode_AsUTF8AndSize(tmp.ptr(), &tmp_size);
    if (!tmp_str)
        goto fail;

    // PyImport_AddModuleRef (returning a new reference) only exists on
    // Python >= 3.13 without the limited API
#if PY_VERSION_HEX < 0x030D00A0 || defined(Py_LIMITED_API)
    res = borrow(PyImport_AddModule(tmp_str));
#else
    res = steal(PyImport_AddModuleRef(tmp_str));
#endif
    if (!res.is_valid())
        goto fail;

    if (doc) {
        tmp = steal(PyUnicode_FromString(doc));
        if (!tmp.is_valid())
            goto fail;

        if (PyObject_SetAttrString(res.ptr(), "__doc__", tmp.ptr()))
            goto fail;
    }

    res.inc_ref(); // For PyModule_AddObject, which steals upon success

    if (PyModule_AddObject(base, name, res.ptr())) {
        res.dec_ref();
        goto fail;
    }

    return res.release().ptr();

fail:
    raise_python_error();
}
// ========================================================================
/// len(o); throws python_error if the object has no length
size_t obj_len(PyObject *o) {
    Py_ssize_t res = PyObject_Size(o);
    if (res < 0)
        raise_python_error();
    return (size_t) res;
}

/// Best-effort length estimate; returns 0 (never raises) when unavailable
size_t obj_len_hint(PyObject *o) noexcept {
#if !defined(Py_LIMITED_API)
    Py_ssize_t res = PyObject_LengthHint(o, 0);
    if (res < 0) {
        PyErr_Clear();
        res = 0;
    }
    return (size_t) res;
#else
    // Limited API lacks PyObject_LengthHint: query the sequence/mapping
    // length slots directly, then fall back to __length_hint__
    PyTypeObject *tp = Py_TYPE(o);
    lenfunc l = (lenfunc) type_get_slot(tp, Py_sq_length);
    if (!l)
        l = (lenfunc) type_get_slot(tp, Py_mp_length);
    if (l) {
        Py_ssize_t res = l(o);
        if (res < 0) {
            PyErr_Clear();
            res = 0;
        }
        return (size_t) res;
    }
    try {
        return cast<size_t>(handle(o).attr("__length_hint__")());
    } catch (...) {
        return 0;
    }
#endif
}

/// repr(o); throws python_error on failure
PyObject *obj_repr(PyObject *o) {
    PyObject *res = PyObject_Repr(o);
    if (!res)
        raise_python_error();
    return res;
}

/// Rich comparison (value = Py_LT/Py_EQ/...); throws python_error on failure
bool obj_comp(PyObject *a, PyObject *b, int value) {
    int rv = PyObject_RichCompareBool(a, b, value);
    if (rv == -1)
        raise_python_error();
    return rv == 1;
}

/// Apply a unary C-API operation, converting failure into python_error
PyObject *obj_op_1(PyObject *a, PyObject* (*op)(PyObject*)) {
    PyObject *res = op(a);
    if (!res)
        raise_python_error();
    return res;
}

/// Apply a binary C-API operation, converting failure into python_error
PyObject *obj_op_2(PyObject *a, PyObject *b,
                   PyObject *(*op)(PyObject *, PyObject *) ) {
    PyObject *res = op(a, b);

    if (!res)
        raise_python_error();

    return res;
}
// Perform a vector call (regular or method call depending on
// 'method_call'). This function *consumes* references to 'base',
// 'kwnames', and every entry of 'args' — even on failure — which is
// why cleanup happens after the 'end' label regardless of outcome.
// A null entry in 'args' signals that an earlier cast failed.
PyObject *obj_vectorcall(PyObject *base, PyObject *const *args, size_t nargsf,
                         PyObject *kwnames, bool method_call) {
    PyObject *res = nullptr;
    bool gil_error = false, cast_error = false;

    // Total argument count: positional (from nargsf) plus keyword names
    size_t nargs_total = (size_t) (PyVectorcall_NARGS(nargsf) +
                                   (kwnames ? NB_TUPLE_GET_SIZE(kwnames) : 0));

#if !defined(Py_LIMITED_API)
    if (!PyGILState_Check()) {
        gil_error = true;
        goto end;
    }
#endif

    // Any null argument means a preceding C++ -> Python cast failed
    for (size_t i = 0; i < nargs_total; ++i) {
        if (!args[i]) {
            cast_error = true;
            goto end;
        }
    }

    res = (method_call ? PyObject_VectorcallMethod
                       : PyObject_Vectorcall)(base, args, nargsf, kwnames);

end:
    // Release the references this function owns (see contract above)
    for (size_t i = 0; i < nargs_total; ++i)
        Py_XDECREF(args[i]);
    Py_XDECREF(kwnames);
    Py_DECREF(base);

    if (!res) {
        if (cast_error)
            raise_python_or_cast_error();
        else if (gil_error)
            raise("nanobind::detail::obj_vectorcall(): PyGILState_Check() failure.");
        else
            raise_python_error();
    }

    return res;
}
/// Obtain an iterator for 'o' (equivalent to iter(o)).
/// Raises python_error on failure; returns a new reference.
PyObject *obj_iter(PyObject *o) {
    PyObject *it = PyObject_GetIter(o);
    if (it == nullptr)
        raise_python_error();
    return it;
}

/// Fetch the next item from an iterator (equivalent to next(o)).
/// Returns nullptr on exhaustion; raises python_error if the
/// underlying iterator signalled an error instead.
PyObject *obj_iter_next(PyObject *o) {
    PyObject *item = PyIter_Next(o);
    if (item == nullptr && PyErr_Occurred())
        raise_python_error();
    return item;
}
// ========================================================================
// getattr(obj, key); raises python_error on failure. New reference.
PyObject *getattr(PyObject *obj, const char *key) {
    PyObject *res = PyObject_GetAttrString(obj, key);
    if (!res)
        raise_python_error();
    return res;
}

// getattr(obj, key) with a Python string key; raises on failure.
PyObject *getattr(PyObject *obj, PyObject *key) {
    PyObject *res = PyObject_GetAttr(obj, key);
    if (!res)
        raise_python_error();
    return res;
}

// getattr(obj, key, def): never raises; returns 'def' (incref'd) when
// the attribute is missing or the lookup fails.
PyObject *getattr(PyObject *obj, const char *key_, PyObject *def) noexcept {
#if (defined(Py_LIMITED_API) && PY_LIMITED_API < 0x030d0000) || defined(PYPY_VERSION)
    // Fallback for targets lacking the single-pass optional lookup APIs
    str key(key_);
    if (PyObject_HasAttr(obj, key.ptr())) {
        PyObject *res = PyObject_GetAttr(obj, key.ptr());
        if (res)
            return res;
        PyErr_Clear();
    }
#else
    PyObject *res;
    int rv;
    // rv: 1 = found, 0 = missing, <0 = error (suppressed)
    #if PY_VERSION_HEX < 0x030d0000
        rv = _PyObject_LookupAttr(obj, str(key_).ptr(), &res);
    #else
        rv = PyObject_GetOptionalAttrString(obj, key_, &res);
    #endif
    if (rv == 1)
        return res;
    else if (rv < 0)
        PyErr_Clear();
#endif
    Py_XINCREF(def);
    return def;
}

// Same as above with a Python object key.
PyObject *getattr(PyObject *obj, PyObject *key, PyObject *def) noexcept {
#if (defined(Py_LIMITED_API) && PY_LIMITED_API < 0x030d0000) || defined(PYPY_VERSION)
    if (PyObject_HasAttr(obj, key)) {
        PyObject *res = PyObject_GetAttr(obj, key);
        if (res)
            return res;
        PyErr_Clear();
    }
#else
    PyObject *res;
    int rv;
    #if PY_VERSION_HEX < 0x030d0000
        rv = _PyObject_LookupAttr(obj, key, &res);
    #else
        rv = PyObject_GetOptionalAttr(obj, key, &res);
    #endif
    if (rv == 1)
        return res;
    else if (rv < 0)
        PyErr_Clear();
#endif
    Py_XINCREF(def);
    return def;
}

// Lazily populate '*out' with getattr(obj, key); no-op if already set.
// Raises python_error on lookup failure.
void getattr_or_raise(PyObject *obj, const char *key, PyObject **out) {
    if (*out)
        return;
    PyObject *res = PyObject_GetAttrString(obj, key);
    if (!res)
        raise_python_error();
    *out = res;
}

// Same as above with a Python object key.
void getattr_or_raise(PyObject *obj, PyObject *key, PyObject **out) {
    if (*out)
        return;
    PyObject *res = PyObject_GetAttr(obj, key);
    if (!res)
        raise_python_error();
    *out = res;
}

// setattr(obj, key, value); raises python_error on failure.
void setattr(PyObject *obj, const char *key, PyObject *value) {
    int rv = PyObject_SetAttrString(obj, key, value);
    if (rv)
        raise_python_error();
}

void setattr(PyObject *obj, PyObject *key, PyObject *value) {
    int rv = PyObject_SetAttr(obj, key, value);
    if (rv)
        raise_python_error();
}

// delattr(obj, key); raises python_error on failure.
// Old limited-API targets lack PyObject_DelAttr*; setting the
// attribute to nullptr is the documented equivalent.
void delattr(PyObject *obj, const char *key) {
#if defined(Py_LIMITED_API) && PY_LIMITED_API < 0x030D0000
    int rv = PyObject_SetAttrString(obj, key, nullptr);
#else
    int rv = PyObject_DelAttrString(obj, key);
#endif
    if (rv)
        raise_python_error();
}

void delattr(PyObject *obj, PyObject *key) {
#if defined(Py_LIMITED_API) && PY_LIMITED_API < 0x030D0000
    int rv = PyObject_SetAttr(obj, key, nullptr);
#else
    int rv = PyObject_DelAttr(obj, key);
#endif
    if (rv)
        raise_python_error();
}
// ========================================================================
// Lazily populate '*out' with obj[key] (integer index); no-op if
// already set. Raises python_error on failure.
void getitem_or_raise(PyObject *obj, Py_ssize_t key, PyObject **out) {
    if (*out)
        return;
    PyObject *res = PySequence_GetItem(obj, key);
    if (!res)
        raise_python_error();
    *out = res;
}

// Same, with a C-string key (converted to a Python str first).
void getitem_or_raise(PyObject *obj, const char *key_, PyObject **out) {
    if (*out)
        return;
    PyObject *key, *res;

    key = PyUnicode_FromString(key_);
    if (!key)
        raise_python_error();

    res = PyObject_GetItem(obj, key);
    Py_DECREF(key);

    if (!res)
        raise_python_error();
    *out = res;
}

// Same, with a Python object key.
void getitem_or_raise(PyObject *obj, PyObject *key, PyObject **out) {
    if (*out)
        return;
    PyObject *res = PyObject_GetItem(obj, key);
    if (!res)
        raise_python_error();
    *out = res;
}

// obj[key] = value for the three key flavors; raise on failure.
void setitem(PyObject *obj, Py_ssize_t key, PyObject *value) {
    int rv = PySequence_SetItem(obj, key, value);
    if (rv)
        raise_python_error();
}

void setitem(PyObject *obj, const char *key_, PyObject *value) {
    PyObject *key = PyUnicode_FromString(key_);
    if (!key)
        raise_python_error();

    int rv = PyObject_SetItem(obj, key, value);
    Py_DECREF(key);

    if (rv)
        raise_python_error();
}

void setitem(PyObject *obj, PyObject *key, PyObject *value) {
    int rv = PyObject_SetItem(obj, key, value);
    if (rv)
        raise_python_error();
}

// del obj[key] for the three key flavors; raise on failure.
// The integer overload goes through PyObject_DelItem with a boxed key
// (mapping semantics) rather than PySequence_DelItem.
void delitem(PyObject *obj, Py_ssize_t key_) {
    PyObject *key = PyLong_FromSsize_t(key_);
    if (!key)
        raise_python_error();

    int rv = PyObject_DelItem(obj, key);
    Py_DECREF(key);

    if (rv)
        raise_python_error();
}

void delitem(PyObject *obj, const char *key_) {
    PyObject *key = PyUnicode_FromString(key_);
    if (!key)
        raise_python_error();

    int rv = PyObject_DelItem(obj, key);
    Py_DECREF(key);

    if (rv)
        raise_python_error();
}

void delitem(PyObject *obj, PyObject *key) {
    int rv = PyObject_DelItem(obj, key);
    if (rv)
        raise_python_error();
}
// ========================================================================
// str(o); raises python_error on failure. New reference.
PyObject *str_from_obj(PyObject *o) {
    PyObject *result = PyObject_Str(o);
    if (!result)
        raise_python_error();
    return result;
}

// Build a Python str from a null-terminated UTF-8 C string.
PyObject *str_from_cstr(const char *str) {
    PyObject *result = PyUnicode_FromString(str);
    if (!result)
        raise("nanobind::detail::str_from_cstr(): conversion error!");
    return result;
}

// Build a Python str from a UTF-8 buffer with explicit length.
PyObject *str_from_cstr_and_size(const char *str, size_t size) {
    PyObject *result = PyUnicode_FromStringAndSize(str, (Py_ssize_t) size);
    if (!result)
        raise("nanobind::detail::str_from_cstr_and_size(): conversion error!");
    return result;
}

// bytes(o); raises python_error on failure.
PyObject *bytes_from_obj(PyObject *o) {
    PyObject *result = PyBytes_FromObject(o);
    if (!result)
        raise_python_error();
    return result;
}

// Build a bytes object from a null-terminated C string.
PyObject *bytes_from_cstr(const char *str) {
    PyObject *result = PyBytes_FromString(str);
    if (!result)
        raise_python_error();
    return result;
}

// Build a bytes object from an arbitrary buffer with explicit length.
PyObject *bytes_from_cstr_and_size(const void *str, size_t size) {
    PyObject *result = PyBytes_FromStringAndSize((const char *) str, (Py_ssize_t) size);
    if (!result)
        raise_python_error();
    return result;
}

// bytearray(o); raises python_error on failure.
PyObject *bytearray_from_obj(PyObject *o) {
    PyObject *result = PyByteArray_FromObject(o);
    if (!result)
        raise_python_error();
    return result;
}

// Build a bytearray from an arbitrary buffer with explicit length.
PyObject *bytearray_from_cstr_and_size(const void *str, size_t size) {
    PyObject *result = PyByteArray_FromStringAndSize((const char *) str, (Py_ssize_t) size);
    if (!result)
        raise_python_error();
    return result;
}
// ========================================================================
// bool(o): convert 'o' to a Python boolean via its truth value.
// Raises python_error if PyObject_IsTrue() fails.
//
// Returns a NEW reference, consistent with the other *_from_obj
// helpers in this file (whose results are wrapped in steal() by
// callers). The previous version returned Py_True/Py_False without
// incrementing their refcount, which underflows the singleton
// refcounts on CPython versions prior to 3.12, where True/False are
// not immortal.
PyObject *bool_from_obj(PyObject *o) {
    int rv = PyObject_IsTrue(o);
    if (rv == -1)
        raise_python_error();
    PyObject *result = rv == 1 ? Py_True : Py_False;
    Py_INCREF(result); // hand a strong reference to the caller
    return result;
}
// int(o); raises python_error on failure. New reference.
PyObject *int_from_obj(PyObject *o) {
    PyObject *result = PyNumber_Long(o);
    if (!result)
        raise_python_error();
    return result;
}

// float(o); raises python_error on failure.
PyObject *float_from_obj(PyObject *o) {
    PyObject *result = PyNumber_Float(o);
    if (!result)
        raise_python_error();
    return result;
}

// tuple(o); raises python_error on failure.
PyObject *tuple_from_obj(PyObject *o) {
    PyObject *result = PySequence_Tuple(o);
    if (!result)
        raise_python_error();
    return result;
}

// list(o); raises python_error on failure.
PyObject *list_from_obj(PyObject *o) {
    PyObject *result = PySequence_List(o);
    if (!result)
        raise_python_error();
    return result;
}

// set(o); raises python_error on failure.
PyObject *set_from_obj(PyObject *o) {
    PyObject *result = PySet_New(o);
    if (!result)
        raise_python_error();
    return result;
}

// frozenset(o); raises python_error on failure.
PyObject *frozenset_from_obj(PyObject *o) {
    PyObject *result = PyFrozenSet_New(o);
    if (!result)
        raise_python_error();
    return result;
}

// memoryview(o); raises python_error on failure.
PyObject *memoryview_from_obj(PyObject *o) {
    PyObject *result = PyMemoryView_FromObject(o);
    if (!result)
        raise_python_error();
    return result;
}
// ========================================================================
// Expose the contents of a Python sequence as a raw PyObject** array
// for fast overload dispatch. On success, returns the element array
// and writes its length to '*size_out'; '*temp_out' receives an owner
// object (or nullptr) that the caller must release when done.
// Returns nullptr (with *size_out == 0) when 'seq' is unsuitable.
PyObject **seq_get(PyObject *seq, size_t *size_out, PyObject **temp_out) noexcept {
    PyObject *temp = nullptr;
    size_t size = 0;
    PyObject **result = nullptr;

    /* This function is used during overload resolution; if anything
       goes wrong, it fails gracefully without reporting errors. Other
       overloads will then be tried. */

    // str/bytes are sequences of characters/ints; reject them so they
    // don't match list/tuple-style overloads.
    if (PyUnicode_CheckExact(seq) || PyBytes_CheckExact(seq)) {
        *size_out = 0;
        *temp_out = nullptr;
        return nullptr;
    }

#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
    // Fast path: peek directly at the tuple/list internals
    if (PyTuple_CheckExact(seq)) {
        size = (size_t) PyTuple_GET_SIZE(seq);
        result = ((PyTupleObject *) seq)->ob_item;

        /* Special case for zero-sized lists/tuples. CPython
           sets ob_item to NULL, which this function incidentally uses to
           signal an error. Return a nonzero pointer that will, however,
           still trigger a segfault if dereferenced. */
        if (size == 0)
            result = (PyObject **) 1;
# if !defined(NB_FREE_THREADED) // Require immutable holder in free-threaded mode
    } else if (PyList_CheckExact(seq)) {
        size = (size_t) PyList_GET_SIZE(seq);
        result = ((PyListObject *) seq)->ob_item;
        if (size == 0) // ditto
            result = (PyObject **) 1;
# endif
    } else if (PySequence_Check(seq)) {
        // Generic sequence: materialize as a tuple, then recurse
        temp = PySequence_Tuple(seq);

        if (temp)
            result = seq_get(temp, &size, temp_out);
        else
            PyErr_Clear();
    }
#else
    /* There isn't a nice way to get a PyObject** in Py_LIMITED_API. This
       is going to be slow, but hopefully also very future-proof.. */
    if (PySequence_Check(seq)) {
        Py_ssize_t size_seq = PySequence_Length(seq);

        if (size_seq >= 0) {
            // Copy every item into a heap array terminated by nullptr
            result = (PyObject **) PyMem_Malloc(sizeof(PyObject *) *
                                                (size_seq + 1));
            if (result) {
                result[size_seq] = nullptr;

                for (Py_ssize_t i = 0; i < size_seq; ++i) {
                    PyObject *o = PySequence_GetItem(seq, i);

                    if (o) {
                        result[i] = o;
                    } else {
                        // Roll back partially filled array on failure
                        for (Py_ssize_t j = 0; j < i; ++j)
                            Py_DECREF(result[j]);

                        PyMem_Free(result);
                        result = nullptr;
                        break;
                    }
                }
            }

            if (result) {
                // Tie the array's lifetime to a capsule whose destructor
                // releases the held references and frees the storage.
                temp = PyCapsule_New(result, nullptr, [](PyObject *o) {
                    PyObject **ptr = (PyObject **) PyCapsule_GetPointer(o, nullptr);
                    for (size_t i = 0; ptr[i] != nullptr; ++i)
                        Py_DECREF(ptr[i]);
                    PyMem_Free(ptr);
                });

                if (temp) {
                    size = (size_t) size_seq;
                } else {
                    PyErr_Clear();

                    for (Py_ssize_t i = 0; i < size_seq; ++i)
                        Py_DECREF(result[i]);

                    PyMem_Free(result);
                    result = nullptr;
                }
            }
        } else if (size_seq < 0) {
            PyErr_Clear();
        }
    }
#endif

    *temp_out = temp;
    *size_out = size;
    return result;
}
// Like seq_get(), but only succeeds if the sequence has exactly 'size'
// elements (used when the target arity is known). Returns nullptr
// otherwise; '*temp_out' receives an owner object to release later.
PyObject **seq_get_with_size(PyObject *seq, size_t size,
                             PyObject **temp_out) noexcept {
    /* This function is used during overload resolution; if anything
       goes wrong, it fails gracefully without reporting errors. Other
       overloads will then be tried. */

    PyObject *temp = nullptr,
             **result = nullptr;

#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
    // Fast path: direct access to tuple/list internals
    if (PyTuple_CheckExact(seq)) {
        if (size == (size_t) PyTuple_GET_SIZE(seq)) {
            result = ((PyTupleObject *) seq)->ob_item;

            /* Special case for zero-sized lists/tuples. CPython
               sets ob_item to NULL, which this function incidentally uses to
               signal an error. Return a nonzero pointer that will, however,
               still trigger a segfault if dereferenced. */
            if (size == 0)
                result = (PyObject **) 1;
        }
# if !defined(NB_FREE_THREADED) // Require immutable holder in free-threaded mode
    } else if (PyList_CheckExact(seq)) {
        if (size == (size_t) PyList_GET_SIZE(seq)) {
            result = ((PyListObject *) seq)->ob_item;
            if (size == 0) // ditto
                result = (PyObject **) 1;
        }
# endif
    } else if (PySequence_Check(seq)) {
        Py_ssize_t size_seq = PySequence_Size(seq);

        if (size_seq != (Py_ssize_t) size) {
            if (size_seq == -1)
                PyErr_Clear();
        } else {
            // Generic sequence of the right length: materialize as a
            // tuple and recurse into the fast path above.
            temp = PySequence_Tuple(seq);

            if (temp)
                result = seq_get_with_size(temp, size, temp_out);
            else
                PyErr_Clear();
        }
    }
#else
    /* There isn't a nice way to get a PyObject** in Py_LIMITED_API. This
       is going to be slow, but hopefully also very future-proof.. */
    if (PySequence_Check(seq)) {
        Py_ssize_t size_seq = PySequence_Length(seq);

        if (size == (size_t) size_seq) {
            // Copy every item into a heap array terminated by nullptr
            result =
                (PyObject **) PyMem_Malloc(sizeof(PyObject *) * (size + 1));

            if (result) {
                result[size] = nullptr;

                for (Py_ssize_t i = 0; i < size_seq; ++i) {
                    PyObject *o = PySequence_GetItem(seq, i);

                    if (o) {
                        result[i] = o;
                    } else {
                        // Roll back partially filled array on failure
                        for (Py_ssize_t j = 0; j < i; ++j)
                            Py_DECREF(result[j]);

                        PyMem_Free(result);
                        result = nullptr;
                        break;
                    }
                }
            }

            if (result) {
                // Capsule destructor releases held refs + storage
                temp = PyCapsule_New(result, nullptr, [](PyObject *o) {
                    PyObject **ptr = (PyObject **) PyCapsule_GetPointer(o, nullptr);

                    for (size_t i = 0; ptr[i] != nullptr; ++i)
                        Py_DECREF(ptr[i]);

                    PyMem_Free(ptr);
                });

                if (!temp) {
                    PyErr_Clear();

                    for (Py_ssize_t i = 0; i < size_seq; ++i)
                        Py_DECREF(result[i]);

                    PyMem_Free(result);
                    result = nullptr;
                }
            }
        } else if (size_seq < 0) {
            PyErr_Clear();
        }
    }
#endif

    *temp_out = temp;
    return result;
}
// ========================================================================
// Create a property of type 'tp' (property or nanobind's static
// property type) wrapping the given getter/setter and install it as
// attribute 'name' on 'scope'. If the accessor is a nanobind function
// carrying a docstring, propagate it to the property's 'doc' slot.
static void property_install_impl(PyTypeObject *tp, PyObject *scope,
                                  const char *name, PyObject *getter,
                                  PyObject *setter) {
    // Prefer the getter's docstring; fall back to the setter's
    PyObject *m = getter ? getter : setter;
    object doc = none();

    if (m && (Py_TYPE(m) == internals->nb_func ||
              Py_TYPE(m) == internals->nb_method)) {
        func_data *f = nb_func_data(m);
        if (f->flags & (uint32_t) func_flags::has_doc)
            doc = str(f->doc);
    }

    handle(scope).attr(name) = handle(tp)(
        getter ? handle(getter) : handle(Py_None),
        setter ? handle(setter) : handle(Py_None),
        handle(Py_None), // deleter
        doc
    );
}

// Install a regular (instance) property.
void property_install(PyObject *scope, const char *name, PyObject *getter,
                      PyObject *setter) noexcept {
    property_install_impl(&PyProperty_Type, scope, name, getter, setter);
}

// Install a static property (works on the class itself).
void property_install_static(PyObject *scope, const char *name,
                             PyObject *getter, PyObject *setter) noexcept {
    property_install_impl(nb_static_property_tp(), scope, name, getter,
                          setter);
}
// ========================================================================
/// Verify that the first 'nargs' slots of 'tuple' are all populated.
/// A null slot indicates a failed C++ -> Python cast; in that case an
/// exception is raised via raise_python_or_cast_error().
void tuple_check(PyObject *tuple, size_t nargs) {
    size_t index = 0;
    while (index < nargs) {
        PyObject *slot = NB_TUPLE_GET_ITEM(tuple, index);
        if (slot == nullptr)
            raise_python_or_cast_error();
        ++index;
    }
}
// ========================================================================
// Implementation of nb::print(): write 'value' (followed by 'end', or
// a newline when 'end' is null) to 'file', defaulting to sys.stdout.
// Raises python_error if any write fails.
void print(PyObject *value, PyObject *end, PyObject *file) {
    if (!file)
        file = PySys_GetObject("stdout"); // borrowed reference

    int rv = PyFile_WriteObject(value, file, Py_PRINT_RAW);
    if (rv)
        raise_python_error();

    if (end)
        rv = PyFile_WriteObject(end, file, Py_PRINT_RAW);
    else
        rv = PyFile_WriteString("\n", file);

    if (rv)
        raise_python_error();
}
// ========================================================================
// Try to extract a C++ std::complex<double> from 'ob'. Non-complex
// inputs are only converted when the 'convert' cast flag is set.
// Returns false (without leaving an error set) when extraction fails.
NB_CORE bool load_cmplx(PyObject *ob, uint8_t flags,
                        std::complex<double> *out) noexcept {
    bool is_complex = PyComplex_CheckExact(ob),
         convert = (flags & (uint8_t) cast_flags::convert);

#if !defined(Py_LIMITED_API)
    if (is_complex || convert) {
        Py_complex result = PyComplex_AsCComplex(ob);
        // -1.0 is the API's error sentinel; disambiguate via PyErr_Occurred()
        if (result.real != -1.0 || !PyErr_Occurred()) {
            *out = std::complex<double>(result.real, result.imag);
            return true;
        } else {
            PyErr_Clear();
        }
    }
#else
    #if Py_LIMITED_API < 0x030D0000
    // Before version 3.13, __complex__() was not called by the Stable ABI
    // functions PyComplex_{Real,Imag}AsDouble(), so we do so ourselves.
    if (!is_complex && convert
            && !PyType_IsSubtype(Py_TYPE(ob), &PyComplex_Type)
            && PyObject_HasAttrString(ob, "__complex__")) {
        PyObject* tmp = PyObject_CallFunctionObjArgs(
            (PyObject*) &PyComplex_Type, ob, NULL);
        if (tmp) {
            double re = PyComplex_RealAsDouble(tmp);
            double im = PyComplex_ImagAsDouble(tmp);
            Py_DECREF(tmp);
            if ((re != -1.0 && im != -1.0) || !PyErr_Occurred()) {
                *out = std::complex<double>(re, im);
                return true;
            }
        }
        PyErr_Clear();
        return false;
    }
    #endif
    if (is_complex || convert) {
        double re = PyComplex_RealAsDouble(ob);
        double im = PyComplex_ImagAsDouble(ob);
        if ((re != -1.0 && im != -1.0) || !PyErr_Occurred()) {
            *out = std::complex<double>(re, im);
            return true;
        } else {
            PyErr_Clear();
        }
    }
#endif
    return false;
}
// Try to extract a C++ double from 'o'. Exact floats always succeed;
// other types are only converted when the 'convert' flag is set.
// Returns false without leaving a Python error set on failure.
bool load_f64(PyObject *o, uint8_t flags, double *out) noexcept {
    bool is_float = PyFloat_CheckExact(o);

#if !defined(Py_LIMITED_API)
    // Fast path: read the C double straight out of the PyFloatObject
    if (NB_LIKELY(is_float)) {
        *out = PyFloat_AS_DOUBLE(o);
        return true;
    }
    is_float = false;
#endif

    if (is_float || (flags & (uint8_t) cast_flags::convert)) {
        double result = PyFloat_AsDouble(o);

        // -1.0 is the API's error sentinel; check PyErr_Occurred()
        if (result != -1.0 || !PyErr_Occurred()) {
            *out = result;
            return true;
        } else {
            PyErr_Clear();
        }
    }

    return false;
}

// Try to extract a C++ float from 'o'. Without the 'convert' flag the
// narrowing double->float cast must round-trip exactly (NaN allowed,
// detected via 'd != d').
bool load_f32(PyObject *o, uint8_t flags, float *out) noexcept {
    bool is_float = PyFloat_CheckExact(o);
    bool convert = flags & (uint8_t) cast_flags::convert;

#if !defined(Py_LIMITED_API)
    if (NB_LIKELY(is_float)) {
        double d = PyFloat_AS_DOUBLE(o);
        float result = (float) d;

        // Accept if converting, lossless, or NaN
        if (convert || (double) result == d || d != d) {
            *out = result;
            return true;
        } else {
            return false;
        }
    }
    is_float = false;
#endif

    if (is_float || convert) {
        double d = PyFloat_AsDouble(o);

        if (d != -1.0 || !PyErr_Occurred()) {
            float result = (float) d;
            if (convert || (double) result == d || d != d) {
                *out = result;
                return true;
            }
        } else {
            PyErr_Clear();
        }
    }

    return false;
}
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION) && PY_VERSION_HEX < 0x030c0000
// Direct access for compact integers. These functions are
// available as part of Python starting with version 3.12b1+
NB_INLINE bool PyUnstable_Long_IsCompact(const PyLongObject *o) {
    return abs(Py_SIZE(o)) <= 1;
}

NB_INLINE Py_ssize_t PyUnstable_Long_CompactValue(const PyLongObject *o) {
    return Py_SIZE(o) * (Py_ssize_t) o->ob_digit[0];
}
#endif

// Try to extract an integer of type T from 'o' with range checking.
// Exact 'int' objects take a fast path; other types are converted via
// PyNumber_Long() when the 'convert' flag is set (floats excluded to
// avoid silently truncating). 'Recurse' guards against re-entering the
// conversion branch after PyNumber_Long() already produced an int.
template <typename T, bool Recurse = true>
NB_INLINE bool load_int(PyObject *o, uint32_t flags, T *out) noexcept {
    if (NB_LIKELY(PyLong_CheckExact(o))) {
#if !defined(Py_LIMITED_API) && !defined(PYPY_VERSION)
        PyLongObject *l = (PyLongObject *) o;

        // Fast path for compact integers
        if (NB_LIKELY(PyUnstable_Long_IsCompact(l))) {
            Py_ssize_t value = PyUnstable_Long_CompactValue(l);
            T value_t = (T) value;
            // Reject negative values for unsigned T and values that do
            // not round-trip through T
            if (NB_UNLIKELY((std::is_unsigned_v<T> && value < 0) ||
                            (sizeof(T) != sizeof(Py_ssize_t) &&
                             value != (Py_ssize_t) value_t)))
                return false;
            *out = value_t;
            return true;
        }
#endif

        // Slow path: go through PyLong_As{Unsigned,}Long{Long,} with a
        // carrier type Tp at least as wide as T and matching signedness
        using T0 = std::conditional_t<sizeof(T) <= sizeof(long), long, long long>;
        using Tp = std::conditional_t<std::is_signed_v<T>, T0, std::make_unsigned_t<T0>>;

        Tp value_p;
        if constexpr (std::is_unsigned_v<Tp>)
            value_p = sizeof(T) <= sizeof(long) ? (Tp) PyLong_AsUnsignedLong(o)
                                                : (Tp) PyLong_AsUnsignedLongLong(o);
        else
            value_p = sizeof(T) <= sizeof(long) ? (Tp) PyLong_AsLong(o)
                                                : (Tp) PyLong_AsLongLong(o);

        // Tp(-1) is the API's error sentinel; disambiguate via PyErr_Occurred()
        if (value_p == Tp(-1) && PyErr_Occurred()) {
            PyErr_Clear();
            return false;
        }

        // Ensure the value survives narrowing from Tp to T
        T value = (T) value_p;
        if constexpr (sizeof(Tp) != sizeof(T)) {
            if (value_p != (Tp) value)
                return false;
        }

        *out = value;
        return true;
    }

    if constexpr (Recurse) {
        if ((flags & (uint8_t) cast_flags::convert) && !PyFloat_Check(o)) {
            PyObject* temp = PyNumber_Long(o);
            if (temp) {
                bool result = load_int<T, false>(temp, 0, out);
                Py_DECREF(temp);
                return result;
            } else {
                PyErr_Clear();
            }
        }
    }

    return false;
}
// Exported fixed-width integer loaders: thin wrappers around the
// load_int<T>() template above, one per width/signedness combination.
bool load_u8(PyObject *o, uint8_t flags, uint8_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_i8(PyObject *o, uint8_t flags, int8_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_u16(PyObject *o, uint8_t flags, uint16_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_i16(PyObject *o, uint8_t flags, int16_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_u32(PyObject *o, uint8_t flags, uint32_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_i32(PyObject *o, uint8_t flags, int32_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_u64(PyObject *o, uint8_t flags, uint64_t *out) noexcept {
    return load_int(o, flags, out);
}

bool load_i64(PyObject *o, uint8_t flags, int64_t *out) noexcept {
    return load_int(o, flags, out);
}
// ========================================================================
// Null-tolerant Py_INCREF that (in non-limited builds) aborts when
// called without holding the GIL, to catch refcounting races early.
void incref_checked(PyObject *o) noexcept {
    if (!o)
        return;

#if !defined(Py_LIMITED_API)
    if (!PyGILState_Check())
        fail("nanobind::detail::incref_check(): attempted to change the "
             "reference count of a Python object while the GIL was not held.");
#endif

    Py_INCREF(o);
}

// Null-tolerant Py_DECREF with the same GIL assertion.
void decref_checked(PyObject *o) noexcept {
    if (!o)
        return;

#if !defined(Py_LIMITED_API)
    if (!PyGILState_Check())
        fail("nanobind::detail::decref_check(): attempted to change the "
             "reference count of a Python object while the GIL was not held.");
#endif

    Py_DECREF(o);
}

// Accessors for the global warning toggles stored in 'internals'.
bool leak_warnings() noexcept {
    return internals->print_leak_warnings;
}

bool implicit_cast_warnings() noexcept {
    return internals->print_implicit_cast_warnings;
}

void set_leak_warnings(bool value) noexcept {
    internals->print_leak_warnings = value;
}

void set_implicit_cast_warnings(bool value) noexcept {
    internals->print_implicit_cast_warnings = value;
}
// ========================================================================
// Resolve a slice object against a container of length 'size',
// producing clamped start/stop/step values and the resulting length.
// Raises python_error if the slice components are invalid.
void slice_compute(PyObject *slice, Py_ssize_t size, Py_ssize_t &start,
                   Py_ssize_t &stop, Py_ssize_t &step,
                   size_t &slice_length) {
    if (PySlice_Unpack(slice, &start, &stop, &step) < 0)
        detail::raise_python_error();
    Py_ssize_t slice_length_ =
        PySlice_AdjustIndices((Py_ssize_t) size, &start, &stop, step);
    slice_length = (size_t) slice_length_;
}

// Heuristically test whether 'o' is iterable. The non-limited build
// inspects type slots without executing Python code; the limited-API
// fallback actually requests an iterator and discards it.
bool iterable_check(PyObject *o) noexcept {
#if !defined(Py_LIMITED_API)
    return Py_TYPE(o)->tp_iter != nullptr || PySequence_Check(o);
#else
    PyObject *it = PyObject_GetIter(o);
    if (it) {
        Py_DECREF(it);
        return true;
    } else {
        PyErr_Clear();
        return false;
    }
#endif
}
// ========================================================================
// Build a repr of the form "TypeName([e0, e1, ...])" for bound
// sequence-like instances. Returns a new reference.
NB_CORE PyObject *repr_list(PyObject *o) {
    object s = steal(nb_inst_name(o));
    s += str("([");
    size_t len = obj_len(o);
    for (size_t i = 0; i < len; ++i) {
        s += repr(handle(o)[i]);
        if (i + 1 < len)
            s += str(", ");
    }
    s += str("])");
    return s.release().ptr();
}

// Build a repr of the form "TypeName({k0: v0, ...})" for bound
// mapping-like instances, iterating via their items() method.
NB_CORE PyObject *repr_map(PyObject *o) {
    object s = steal(nb_inst_name(o));
    s += str("({");
    bool first = true;

    for (handle kv : handle(o).attr("items")()) {
        if (!first)
            s += str(", ");
        s += repr(kv[0]) + str(": ") + repr(kv[1]);
        first = false;
    }

    s += str("})");
    return s.release().ptr();
}
// ========================================================================
/// issubclass(a, b); raises python_error when the check itself fails
/// (PyObject_IsSubclass returns -1 in that case).
bool issubclass(PyObject *a, PyObject *b) {
    int status = PyObject_IsSubclass(a, b);
    if (status < 0)
        raise_python_error();
    return status != 0;
}
// ========================================================================
// Dictionary lookup returning a *new* reference, or nullptr when the
// key is simply absent. Aborts via check() if the lookup itself errors
// (e.g. an unhashable key). Uses PyDict_GetItemRef on 3.13+, with a
// borrow-then-incref fallback for older/limited-API builds.
PyObject *dict_get_item_ref_or_fail(PyObject *d, PyObject *k) {
    PyObject *value;
    bool error = false;
#if PY_VERSION_HEX < 0x030D00A1 || defined(Py_LIMITED_API)
    value = PyDict_GetItemWithError(d, k); // borrowed reference
    if (value)
        Py_INCREF(value);
    else
        error = PyErr_Occurred(); // missing key vs. actual failure
#else
    error = PyDict_GetItemRef(d, k, &value) == -1;
#endif
    check(!error, "nanobind::detail::dict_get_item_ref_or_fail(): dictionary lookup failed!");
    return value;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/error.cpp | C++ | /*
src/error.cpp: libnanobind functionality for exceptions
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/nanobind.h>
#include <cstdarg>
#include "buffer.h"
#include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Protected by internals->mutex in free-threaded builds
Buffer buf(128);
NAMESPACE_END(detail)
#if PY_VERSION_HEX >= 0x030C0000
// Python 3.12+ path: the error indicator is a single exception object.

// Capture the currently raised exception; the error indicator must be set.
python_error::python_error() {
    m_value = PyErr_GetRaisedException();
    check(m_value,
          "nanobind::python_error::python_error(): error indicator unset!");
}

python_error::~python_error() {
    if (m_value) {
        gil_scoped_acquire acq;
        /* With GIL held */ {
            // Clear error status in case the following executes Python code
            error_scope scope;
            Py_DECREF(m_value);
        }
    }
    // m_what is heap-allocated via strdup_check/Buffer::copy
    free(m_what);
}

// Copy: shares the exception object (incref) and duplicates the
// cached what() string if present.
python_error::python_error(const python_error &e)
    : std::exception(e), m_value(e.m_value) {
    if (m_value) {
        gil_scoped_acquire acq;
        Py_INCREF(m_value);
    }
    if (e.m_what)
        m_what = detail::strdup_check(e.m_what);
}

// Move: steals both the exception object and the cached string.
python_error::python_error(python_error &&e) noexcept
    : std::exception(e), m_value(e.m_value), m_what(e.m_what) {
    e.m_value = nullptr;
    e.m_what = nullptr;
}

// Re-raise: hand the exception back to the Python error indicator.
// May only be called once per python_error instance.
void python_error::restore() noexcept {
    check(m_value,
          "nanobind::python_error::restore(): error was already restored!");
    PyErr_SetRaisedException(m_value);
    m_value = nullptr;
}

#else /* Exception handling for Python 3.11 and older versions */
// Pre-3.12 path: the error indicator is a (type, value, traceback) triple.

python_error::python_error() {
    PyErr_Fetch(&m_type, &m_value, &m_traceback);
    check(m_type,
          "nanobind::python_error::python_error(): error indicator unset!");
}

python_error::~python_error() {
    if (m_type) {
        gil_scoped_acquire acq;
        /* With GIL held */ {
            // Clear error status in case the following executes Python code
            error_scope scope;
            Py_XDECREF(m_type);
            Py_XDECREF(m_value);
            Py_XDECREF(m_traceback);
        }
    }
    free(m_what);
}

python_error::python_error(const python_error &e)
    : std::exception(e), m_type(e.m_type), m_value(e.m_value),
      m_traceback(e.m_traceback) {
    if (m_type) {
        gil_scoped_acquire acq;
        Py_INCREF(m_type);
        // value/traceback may be null for unnormalized exceptions
        Py_XINCREF(m_value);
        Py_XINCREF(m_traceback);
    }
    if (e.m_what)
        m_what = detail::strdup_check(e.m_what);
}

python_error::python_error(python_error &&e) noexcept
    : std::exception(e), m_type(e.m_type), m_value(e.m_value),
      m_traceback(e.m_traceback), m_what(e.m_what) {
    e.m_type = e.m_value = e.m_traceback = nullptr;
    e.m_what = nullptr;
}

void python_error::restore() noexcept {
    check(m_type,
          "nanobind::python_error::restore(): error was already restored!");
    PyErr_Restore(m_type, m_value, m_traceback);
    m_type = m_value = m_traceback = nullptr;
}
#endif
// Render the captured exception (traceback + type + message) into a
// C string. The result is computed lazily, cached in m_what, and
// formatted either via the 'traceback' module (limited API / PyPy) or
// by walking the frame chain directly into the shared 'buf'.
const char *python_error::what() const noexcept {
    using namespace nanobind::detail;

    // Return the existing error message if already computed once
    if (m_what)
        return m_what;

    gil_scoped_acquire acq;

    // 'buf' is protected by internals->mutex in free-threaded builds
    lock_internals guard(internals);

    // Try again with GIL held
    if (m_what)
        return m_what;

#if PY_VERSION_HEX < 0x030C0000
    PyErr_NormalizeException(&m_type, &m_value, &m_traceback);
    check(m_type,
          "nanobind::python_error::what(): PyErr_NormalizeException() failed!");
    if (m_traceback) {
        if (PyException_SetTraceback(m_value, m_traceback) < 0)
            PyErr_Clear();
    }
    handle exc_type = m_type, exc_value = m_value;
#else
    handle exc_value = m_value, exc_type = exc_value.type();
#endif
    object exc_traceback = traceback();

#if defined(Py_LIMITED_API) || defined(PYPY_VERSION)
    // Delegate formatting to the pure-Python 'traceback' module
    object mod = module_::import_("traceback"),
           result = mod.attr("format_exception")(exc_type, exc_value, exc_traceback);
    m_what = strdup_check(borrow<str>(str("\n").attr("join")(result)).c_str());
#else
    buf.clear();
    if (exc_traceback.is_valid()) {
        PyTracebackObject *to = (PyTracebackObject *) exc_traceback.ptr();

        // Get the deepest trace possible
        while (to->tb_next)
            to = to->tb_next;

        PyFrameObject *frame = to->tb_frame;
        Py_XINCREF(frame);

        // Collect the frame chain so it can be printed outermost-first
        std::vector<PyFrameObject *, py_allocator<PyFrameObject *>> frames;

        while (frame) {
            frames.push_back(frame);
            frame = PyFrame_GetBack(frame); // returns a new reference
        }

        buf.put("Traceback (most recent call last):\n");

        for (auto it = frames.rbegin(); it != frames.rend(); ++it) {
            frame = *it;
            PyCodeObject *f_code = PyFrame_GetCode(frame);
            buf.put("  File \"");
            buf.put_dstr(borrow<str>(f_code->co_filename).c_str());
            buf.put("\", line ");
            buf.put_uint32(PyFrame_GetLineNumber(frame));
            buf.put(", in ");
            buf.put_dstr(borrow<str>(f_code->co_name).c_str());
            buf.put('\n');
            Py_DECREF(f_code);
            Py_DECREF(frame);
        }
    }

    if (exc_type.is_valid()) {
        object name = exc_type.attr("__name__");
        buf.put_dstr(borrow<str>(name).c_str());
        buf.put(": ");
    }

    if (exc_value.is_valid())
        buf.put_dstr(str(m_value).c_str());

    m_what = buf.copy();
#endif

    return m_what;
}
// Exception type used to raise specific Python builtin exceptions
// (e.g. StopIteration) from C++; 'type' selects which one.
builtin_exception::builtin_exception(exception_type type, const char *what)
    : std::runtime_error(what ? what : ""), m_type(type) { }

builtin_exception::~builtin_exception() { }
NAMESPACE_BEGIN(detail)
// Prepend a translator to the global exception translator chain: the
// previous head is copied into a new node that becomes the tail of the
// current head, whose slots are then overwritten with the new entry.
void register_exception_translator(exception_translator t, void *payload) {
    nb_translator_seq *cur = &internals->translators,
                      *next = new nb_translator_seq(*cur);
    cur->next = next;
    cur->payload = payload;
    cur->translator = t;
}

// Create a new Python exception type "<scope module>.<name>" derived
// from 'base' (or Exception when null) and bind it as attribute 'name'
// of 'scope'. Returns a new reference; raises on any failure.
NB_CORE PyObject *exception_new(PyObject *scope, const char *name,
                                PyObject *base) {
    object modname;
    // Qualify the name with the enclosing module (or the scope's module)
    if (PyModule_Check(scope))
        modname = getattr(scope, "__name__", handle());
    else
        modname = getattr(scope, "__module__", handle());

    if (!modname.is_valid())
        raise("nanobind::detail::exception_new(): could not determine module "
              "name!");

    str combined =
        steal<str>(PyUnicode_FromFormat("%U.%s", modname.ptr(), name));
    object result = steal(PyErr_NewException(combined.c_str(), base, nullptr));
    check(result, "nanobind::detail::exception_new(): creation failed!");

    if (hasattr(scope, name))
        raise("nanobind::detail::exception_new(): an object of the same name "
              "already exists!");

    setattr(scope, name, result);
    return result.release().ptr();
}
NAMESPACE_END(detail)
// Raise a new formatted exception of 'type' while chaining any
// currently-set exception as its __cause__ and __context__
// ("raise X from Y" semantics). Never throws; leaves the combined
// exception in the Python error indicator.
static void chain_error_v(handle type, const char *fmt, va_list args) noexcept {
    // Detach the currently raised exception (if any)
#if PY_VERSION_HEX >= 0x030C0000
    PyObject *value = PyErr_GetRaisedException();
#else
    PyObject *tp = nullptr, *value = nullptr, *traceback = nullptr;
    PyErr_Fetch(&tp, &value, &traceback);
    if (tp) {
        PyErr_NormalizeException(&tp, &value, &traceback);
        if (traceback) {
            PyException_SetTraceback(value, traceback);
            Py_DECREF(traceback);
        }
        Py_DECREF(tp);
        tp = traceback = nullptr;
    }
#endif

    // Raise the new exception from the format string
#if !defined(PYPY_VERSION)
    PyErr_FormatV(type.ptr(), fmt, args);
#else
    // PyPy lacks PyErr_FormatV; format manually and set the object
    PyObject *exc_str = PyUnicode_FromFormatV(fmt, args);
    check(exc_str, "nanobind::detail::raise_from(): PyUnicode_FromFormatV() failed!");
    PyErr_SetObject(type.ptr(), exc_str);
    Py_DECREF(exc_str);
#endif

    // Nothing to chain when there was no prior exception
    if (!value)
        return;

    PyObject *value_2 = nullptr;
#if PY_VERSION_HEX >= 0x030C0000
    value_2 = PyErr_GetRaisedException();
#else
    PyErr_Fetch(&tp, &value_2, &traceback);
    PyErr_NormalizeException(&tp, &value_2, &traceback);
#endif

    // SetCause/SetContext each steal a reference; one extra incref is
    // needed since 'value' is installed twice.
    Py_INCREF(value);
    PyException_SetCause(value_2, value); // steals
    PyException_SetContext(value_2, value); // steals

#if PY_VERSION_HEX >= 0x030C0000
    PyErr_SetRaisedException(value_2);
#else
    PyErr_Restore(tp, value_2, traceback);
#endif
}

// Varargs front end for chain_error_v().
void chain_error(handle type, const char *fmt, ...) noexcept {
    va_list args;
    va_start(args, fmt);
    chain_error_v(type, fmt, args);
    va_end(args);
}

// Restore 'e' into the error indicator, chain a new formatted
// exception of 'type' on top of it, then rethrow as python_error.
void raise_from(python_error &e, handle type, const char *fmt, ...) {
    e.restore();

    va_list args;
    va_start(args, fmt);
    chain_error_v(type, fmt, args);
    va_end(args);

    detail::raise_python_error();
}
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/hash.h | C/C++ Header | //-----------------------------------------------------------------------------
// Slightly adapted version of the MurmurHash3 codebase (originally by Austin
// Appleby, in the public domain)
//
// The changes are as follows:
//
// - fmix32 and fmix64 are exported to other compilation units, since they
// are useful as a hash function for 32/64 bit integers and pointers.
//-----------------------------------------------------------------------------
#pragma once
#include <cstdint>
#include <cstdlib>
/// MurmurHash3 32-bit finalizer: avalanches the bits of `h` so that every
/// input bit influences every output bit. Handy as a hash for 32-bit
/// integers and pointers.
inline uint32_t fmix32(uint32_t h) {
    // Two xor-shift/multiply rounds plus a final xor-shift
    // (constants are the original MurmurHash3 mixing constants)
    h = (h ^ (h >> 16)) * 0x85ebca6b;
    h = (h ^ (h >> 13)) * 0xc2b2ae35;
    return h ^ (h >> 16);
}
/// MurmurHash3 64-bit finalizer: bijective bit-mixing function used as a
/// hash for 64-bit integers and pointers.
inline uint64_t fmix64(uint64_t k) {
    // Two xor-shift/multiply rounds plus a final xor-shift
    k = (k ^ (k >> 33)) * (uint64_t) 0xff51afd7ed558ccdull;
    k = (k ^ (k >> 33)) * (uint64_t) 0xc4ceb9fe1a85ec53ull;
    return k ^ (k >> 33);
}
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/implicit.cpp | C++ | /*
src/implicit.cpp: functions for registering implicit conversions
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/trampoline.h>
#include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Register an implicit conversion from the C++ type `src` to the bound
/// type `dst`. The conversion sources are stored per-type as a
/// null-terminated array that is grown by one entry on each call.
/// Improvement: the PyMem_Malloc() result is now checked so that an
/// out-of-memory condition fails loudly instead of crashing on the write.
void implicitly_convertible(const std::type_info *src,
                            const std::type_info *dst) noexcept {
    nb_internals *internals_ = internals;
    type_data *t = nb_type_c2p(internals_, dst);
    check(t, "nanobind::detail::implicitly_convertible(src=%s, dst=%s): "
             "destination type unknown!", type_name(src), type_name(dst));

    lock_internals guard(internals_);

    // Count entries in the existing null-terminated list (if any)
    size_t size = 0;
    if (t->flags & (uint32_t) type_flags::has_implicit_conversions) {
        while (t->implicit.cpp && t->implicit.cpp[size])
            size++;
    } else {
        t->implicit.cpp = nullptr;
        t->implicit.py = nullptr;
        t->flags |= (uint32_t) type_flags::has_implicit_conversions;
    }

    // Grow the list by one entry (plus the null terminator)
    void **data = (void **) PyMem_Malloc(sizeof(void *) * (size + 2));
    check(data, "nanobind::detail::implicitly_convertible(): PyMem_Malloc() failed!");
    if (size)
        memcpy(data, t->implicit.cpp, size * sizeof(void *));
    data[size] = (void *) src;
    data[size + 1] = nullptr;
    PyMem_Free(t->implicit.cpp);
    t->implicit.cpp = (decltype(t->implicit.cpp)) data;
}
/// Register an implicit conversion to the bound type `dst` guarded by a
/// user-supplied `predicate` that decides at runtime whether a given Python
/// object may be converted. Predicates are stored per-type as a
/// null-terminated array that is grown by one entry per call.
/// Improvement: the PyMem_Malloc() result is now checked so that an
/// out-of-memory condition fails loudly instead of crashing on the write.
void implicitly_convertible(bool (*predicate)(PyTypeObject *, PyObject *,
                                              cleanup_list *),
                            const std::type_info *dst) noexcept {
    nb_internals *internals_ = internals;
    type_data *t = nb_type_c2p(internals_, dst);
    check(t, "nanobind::detail::implicitly_convertible(src=<predicate>, dst=%s): "
             "destination type unknown!", type_name(dst));

    lock_internals guard(internals_);

    // Count entries in the existing null-terminated list (if any)
    size_t size = 0;
    if (t->flags & (uint32_t) type_flags::has_implicit_conversions) {
        while (t->implicit.py && t->implicit.py[size])
            size++;
    } else {
        t->implicit.cpp = nullptr;
        t->implicit.py = nullptr;
        t->flags |= (uint32_t) type_flags::has_implicit_conversions;
    }

    // Grow the list by one entry (plus the null terminator)
    void **data = (void **) PyMem_Malloc(sizeof(void *) * (size + 2));
    check(data, "nanobind::detail::implicitly_convertible(): PyMem_Malloc() failed!");
    if (size)
        memcpy(data, t->implicit.py, size * sizeof(void *));
    data[size] = (void *) predicate;
    data[size + 1] = nullptr;
    PyMem_Free(t->implicit.py);
    t->implicit.py = (decltype(t->implicit.py)) data;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_abi.h | C/C++ Header | /*
src/nb_abi.h: this file computes tags that are used to isolate extensions
from each other in the case of platform or nanobind-related ABI
incompatibilities. The file is included by ``nb_internals.cpp`` and should
not be used directly.
The implementation of this file (specifically, the NB_PLATFORM_ABI_TAG) is
designed to be compatible with @rwgk's
https://github.com/pybind/pybind11/blob/master/include/pybind11/conduit/pybind11_platform_abi_id.h
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
/// Tracks the version of nanobind's internal data structures
#ifndef NB_INTERNALS_VERSION
# define NB_INTERNALS_VERSION 19
#endif
#if defined(__MINGW32__)
# define NB_COMPILER_TYPE "mingw"
#elif defined(__CYGWIN__)
# define NB_COMPILER_TYPE "gcc_cygwin"
#elif defined(_MSC_VER)
# define NB_COMPILER_TYPE "msvc"
#elif defined(__clang__) || defined(__GNUC__)
# define NB_COMPILER_TYPE "system" // Assumed compatible with system compiler.
#else
# error "Unknown compiler type. Please revise this code."
#endif
// Catch other conditions that imply ABI incompatibility
// - MSVC builds with different CRT versions
// - An anticipated MSVC ABI break ("vNext")
// - Builds using libc++ with unstable ABIs
// - Builds using libstdc++ with the legacy (pre-C++11) ABI, etc.
#if defined(_MSC_VER)
# if defined(_MT) && defined(_DLL) // Corresponding to CL command line options /MD or /MDd.
# if (_MSC_VER) / 100 == 19
# define NB_BUILD_ABI "_md_mscver19"
# else
# error "Unknown MSVC major version. Please revise this code."
# endif
# elif defined(_MT) // Corresponding to CL command line options /MT or /MTd.
# define NB_BUILD_ABI "_mt_mscver" NB_TOSTRING(_MSC_VER)
# else
# if (_MSC_VER) / 100 == 19
# define NB_BUILD_ABI "_none_mscver19"
# else
# error "Unknown MSVC major version. Please revise this code."
# endif
# endif
#elif defined(_LIBCPP_ABI_VERSION) // https://libcxx.llvm.org/DesignDocs/ABIVersioning.html
# define NB_BUILD_ABI "_libcpp_abi" NB_TOSTRING(_LIBCPP_ABI_VERSION)
#elif defined(_GLIBCXX_USE_CXX11_ABI)
# if defined(__NVCOMPILER) && !defined(__GXX_ABI_VERSION)
# error "Unknown platform or compiler (_GLIBCXX_USE_CXX11_ABI). Please revise this code."
# endif
# if defined(__GXX_ABI_VERSION) && __GXX_ABI_VERSION < 1002 || __GXX_ABI_VERSION >= 2000
# error "Unknown platform or compiler (__GXX_ABI_VERSION). Please revise this code."
# endif
# define NB_BUILD_ABI "_libstdcpp_gxx_abi_1xxx_use_cxx11_abi_" NB_TOSTRING(_GLIBCXX_USE_CXX11_ABI)
#else
# error "Unknown platform or compiler. Please revise this code."
#endif
// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
# define NB_BUILD_TYPE "_debug"
#else
# define NB_BUILD_TYPE ""
#endif
// Tag to determine if inter-library C++ function can be safely dispatched
#define NB_PLATFORM_ABI_TAG \
NB_COMPILER_TYPE NB_BUILD_ABI NB_BUILD_TYPE
// Can have limited and non-limited-API extensions in the same process.
// Nanobind data structures will differ, so these can't talk to each other
#if defined(Py_LIMITED_API)
# define NB_STABLE_ABI "_stable"
#else
# define NB_STABLE_ABI ""
#endif
// As above, but for free-threaded extensions
#if defined(NB_FREE_THREADED)
# define NB_FREE_THREADED_ABI "_ft"
#else
# define NB_FREE_THREADED_ABI ""
#endif
#if NB_VERSION_DEV > 0
#define NB_VERSION_DEV_STR "_dev" NB_TOSTRING(NB_VERSION_DEV)
#else
#define NB_VERSION_DEV_STR ""
#endif
#define NB_ABI_TAG \
"v" NB_TOSTRING(NB_INTERNALS_VERSION) \
NB_VERSION_DEV_STR "_" NB_PLATFORM_ABI_TAG NB_STABLE_ABI \
NB_FREE_THREADED_ABI
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_combined.cpp | C++ | /* You can use this file to compile bindings using alternative build systems in
case the dependence on CMake is problematic. The following instructions
   illustrate how to do this on Linux. The commands will need to be updated to
target other operating systems, but that is beyond the scope of this writeup.
To investigate platform-dependent subtleties, read the CMake build system
or run it and examine the commands it generates.
Step 1: compile libnanobind
$ NB_DIR=<path to nanobind directory>
$ CXXFLAGS="-std=c++17 -fvisibility=hidden \
-DNDEBUG -DNB_COMPACT_ASSERTIONS \
`python3-config --includes` -fPIC \
-I $NB_DIR/include \
-I $NB_DIR/ext/robin_map/include"
$ g++ src/nb_combined.cpp $CXXFLAGS -O3 -fno-strict-aliasing \
-ffunction-sections -fdata-sections -c -o libnanobind.o
Step 2: compile the extension code
$ g++ my_ext.cpp $CXXFLAGS -Os -c -o my_ext.o
Step 3: link the extension code with libnanobind
$ g++ -shared -Wl,-s -Wl,--gc-sections my_ext.o libnanobind.o \
-o my_ext.cpython-310-x86_64-linux-gnu.so
A few comments about these compilation parameters:
- the '-ffunction-sections/-fdata-sections/--gc-sections' parameter ensure
that unused parts of libnanobind are removed from the compiled extension.
- the '-fno-strict-aliasing' part is needed by the libnanobind part. This
flag generally applies to code that uses significant amounts of 'raw'
CPython API code. You should ensure that libnanobind isn't merged with
other code via link time optimization (LTO), otherwise you may need to
specify '-fno-strict-aliasing' at the project level.
- The '-Wl,-s' parameter strips debug information from the generated shared
library. Similarly, '-DNDEBUG' and '-DNB_COMPACT_ASSERTIONS' are parameters
that improve performance and reduce binary size in release builds, but
which would be omitted in debug builds.
   - here, the libnanobind part uses -O3 (optimization for highest performance),
while the bindings use -Os (optimization for the smallest size). The
assumption here is that the code in 'my_ext.cpp' is glue code that isn't
performance sensitive but which can become very big in a perf.-optimized
build. I find generally this to be a good default, but you may have other
preferences. If in doubt, benchmark to see what works best.
- The suffix of the final shared library depends on the Python version and
platform. You can query the Python binary about the right extension via
"import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))"
- Some of the above details change when creating a limited API /
stable ABI build, which is possible starting with Python 3.12. See the
CMake build system for details.
- On macOS, linking should uses a "linker response file" to benefit from the
latest macOS linker technology ("chained fixups"). For details, see
https://developer.apple.com/videos/play/wwdc2022/110362/ and
https://github.com/python/cpython/issues/97524#issuecomment-1458855301.
The files 'cmake/darwin-ld-cpython.sym' and 'cmake/darwin-ld-pypy.sym'
provide symbol lists that are needed to do so.
- The CMake build system can also create a shared build of the 'libnanobind'
component, which is useful in complex binding projects that ship multiple
     extension libraries. As you can see from the above long list, the current
build system automates quite a few tedious steps. My suggestion would be to
not roll your own unless you are 100% convinced that this is really needed.
*/
#include "nb_internals.cpp"
#include "nb_func.cpp"
#include "nb_type.cpp"
#include "nb_enum.cpp"
#include "nb_ndarray.cpp"
#include "nb_static_property.cpp"
#if defined(Py_GIL_DISABLED)
# include "nb_ft.cpp"
#endif
#include "error.cpp"
#include "common.cpp"
#include "implicit.cpp"
#include "trampoline.cpp"
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_enum.cpp | C++ | #include "nb_internals.h"
#include "nb_ft.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Hash functor for 64-bit keys built on the MurmurHash3 finalizer
struct int64_hash {
    size_t operator()(int64_t value) const {
        return static_cast<size_t>(fmix64(static_cast<uint64_t>(value)));
    }
};
// This data structure is used to map Python instances to integers as well as
// the inverse. We're reusing the type to avoid generating essentially the same
// code for two template instantiations. The key/value types are big enough to
// hold both.
using enum_map = tsl::robin_map<int64_t, int64_t, int64_hash>;
/// Create a new Python enumeration type (enum.Enum / IntEnum / Flag / IntFlag,
/// selected from `ed->flags`) bound to the C++ enum described by `ed`,
/// register it in nanobind's C++ -> Python type maps, and install it in the
/// requested scope. Returns a new reference to the created type object.
PyObject *enum_create(enum_init_data *ed) noexcept {
    // Update hash table that maps from std::type_info to Python type
    nb_internals *internals_ = internals;
    bool success;
    nb_type_map_slow::iterator it;

    {
        lock_internals guard(internals_);
        std::tie(it, success) = internals_->type_c2p_slow.try_emplace(ed->type, nullptr);
        // If the C++ type was already bound, warn and return the existing type
        if (!success) {
            PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
                             "nanobind: type '%s' was already registered!\n",
                             ed->name);
            PyObject *tp = (PyObject *) it->second->type_py;
            Py_INCREF(tp);
            return tp;
        }
    }

    handle scope(ed->scope);

    bool is_arithmetic = ed->flags & (uint32_t) enum_flags::is_arithmetic;
    bool is_flag = ed->flags & (uint32_t) enum_flags::is_flag;

    // Derive the type's module and qualified name from its scope
    str name(ed->name), qualname = name;
    object modname;

    if (PyModule_Check(ed->scope)) {
        modname = getattr(scope, "__name__", handle());
    } else {
        modname = getattr(scope, "__module__", handle());
        object scope_qualname = getattr(scope, "__qualname__", handle());
        if (scope_qualname.is_valid())
            qualname = steal<str>(
                PyUnicode_FromFormat("%U.%U", scope_qualname.ptr(), name.ptr()));
    }

    // Pick the base class from Python's 'enum' module
    const char *factory_name = "Enum";
    if (is_arithmetic && is_flag)
        factory_name = "IntFlag";
    else if (is_flag)
        factory_name = "Flag";
    else if (is_arithmetic)
        factory_name = "IntEnum";

    // Create an *empty* enumeration; entries are added later via enum_append()
    object enum_mod = module_::import_("enum"),
           factory = enum_mod.attr(factory_name),
           result = factory(name, nanobind::tuple(),
                            arg("module") = modname,
                            arg("qualname") = qualname);

    scope.attr(name) = result;
    result.attr("__doc__") = ed->docstr ? str(ed->docstr) : none();
    result.attr("__str__") = enum_mod.attr(is_flag ? factory_name : "Enum").attr("__str__");
    result.attr("__repr__") = result.attr("__str__");

    // Allocate nanobind's internal record (including fwd/rev value tables)
    type_init_data *t = new type_init_data();
    memset(t, 0, sizeof(type_data));
    t->name = strdup_check(ed->name);
    t->type = ed->type;
    t->type_py = (PyTypeObject *) result.ptr();
    t->flags = ed->flags;
    t->enum_tbl.fwd = new enum_map();
    t->enum_tbl.rev = new enum_map();
    t->scope = ed->scope;

    it.value() = t;

    {
        lock_internals guard(internals_);
        internals_->type_c2p_slow[ed->type] = t;

#if !defined(NB_FREE_THREADED)
        internals_->type_c2p_fast[(void *) ed->type] = t;
#endif
    }

    make_immortal(result.ptr());

    // The capsule owns the record; its destructor tears everything down again
    result.attr("__nb_enum__") = capsule(t, [](void *p) noexcept {
        type_init_data *t = (type_init_data *) p;
        delete (enum_map *) t->enum_tbl.fwd;
        delete (enum_map *) t->enum_tbl.rev;
        nb_type_unregister(t);
        free((char*) t->name);
        delete t;
    });

    return result.release().ptr();
}
// Retrieve the nanobind type record stashed in the enum's '__nb_enum__' capsule
static type_init_data *enum_get_type_data(handle tp) {
    capsule record = borrow<capsule>(tp.attr("__nb_enum__"));
    return (type_init_data *) record.data();
}
/// Add the entry `name_` = `value_` (with optional docstring `doc`) to the
/// Python enumeration `tp_` previously created by enum_create(). This mimics
/// what Python's enum metaclass does at class-creation time, and also records
/// the entry in nanobind's forward/reverse lookup tables.
void enum_append(PyObject *tp_, const char *name_, int64_t value_,
                 const char *doc) noexcept {
    handle tp(tp_),
           val_tp(&PyLong_Type),
           obj_tp((PyObject *) &PyBaseObject_Type);
    type_data *t = enum_get_type_data(tp);

    // Box the entry's value with the right signedness
    object val;
    if (t->flags & (uint32_t) enum_flags::is_signed)
        val = steal(PyLong_FromLongLong((long long) value_));
    else
        val = steal(PyLong_FromUnsignedLongLong((unsigned long long) value_));

    // Internal bookkeeping structures of Python's enum implementation
    dict value_map = tp.attr("_value2member_map_"),
         member_map = tp.attr("_member_map_");
    list member_names = tp.attr("_member_names_");
    str name(name_);

    if (member_map.contains(name))
        fail("refusing to add duplicate key \"%s\" to enumeration \"%s\"!",
             name_, type_name(tp).c_str());

# if PY_VERSION_HEX >= 0x030B0000
    // In Python 3.11+, update the flag and bit masks by hand,
    // since enum._proto_member.__set_name__ is not called in this code path.
    if (t->flags & (uint32_t) enum_flags::is_flag) {
        tp.attr("_flag_mask_") |= val;
        bool is_single_bit = (value_ != 0) && (value_ & (value_ - 1)) == 0;
        if (is_single_bit && hasattr(tp, "_singles_mask_"))
            tp.attr("_singles_mask_") |= val;
        int_ bit_length = int_(tp.attr("_flag_mask_").attr("bit_length")());
        setattr(tp, "_all_bits_", (int_(2) << bit_length) - int_(1));
    }
#endif

    // Instantiate the member: int-based enums wrap the value, others don't
    object el;
    if (issubclass(tp, val_tp))
        el = val_tp.attr("__new__")(tp, val);
    else
        el = obj_tp.attr("__new__")(tp);

    el.attr("_name_") = name;
    el.attr("__objclass__") = tp;
    el.attr("__init__")(val);
    el.attr("_sort_order_") = len(member_names);
    el.attr("_value_") = val;
    el.attr("__doc__") = doc ? str(doc) : none();

    // Compatibility with nanobind 1.x
    el.attr("__name__") = name;
    setattr(tp, name, el);

    // A value may carry several names (aliases); _member_names_ and the
    // value->member map only record the first one encountered
    if (!value_map.contains(val)) {
        member_names.append(name);
        value_map[val] = el;
    }
    member_map[name] = el;

    // Update nanobind's C++ <-> Python lookup tables
    enum_map *fwd = (enum_map *) t->enum_tbl.fwd;
    fwd->emplace(value_, (int64_t) (uintptr_t) el.ptr());

    enum_map *rev = (enum_map *) t->enum_tbl.rev;
    rev->emplace((int64_t) (uintptr_t) el.ptr(), value_);
}
/// Try to convert the Python object `o` into the C++ enum identified by `tp`,
/// writing the entry's integral value to `*out` on success. Flag-enum
/// instances are handled by reading their '.value' attribute (combinations
/// are not stored in the reverse table); with cast_flags::convert set, plain
/// Python integers matching a registered entry are accepted as well. Returns
/// false (with any Python error cleared) if the conversion is not possible.
///
/// Fix: the object returned by PyObject_GetAttr() (a new reference) was
/// previously never released, leaking a reference on every flag-enum
/// conversion. It is now Py_DECREF'd on all paths.
bool enum_from_python(const std::type_info *tp, PyObject *o, int64_t *out, uint8_t flags) noexcept {
    type_data *t = nb_type_c2p(internals, tp);
    if (!t)
        return false;

    // Flag enums: arbitrary combinations aren't in the reverse table, so
    // extract the numeric value via the '.value' attribute instead
    if ((t->flags & (uint32_t) enum_flags::is_flag) != 0 && Py_TYPE(o) == t->type_py) {
        PyObject *value_o =
            PyObject_GetAttr(o, static_pyobjects[pyobj_name::value_str]);
        if (value_o == nullptr) {
            PyErr_Clear();
            return false;
        }

        bool ok;
        int64_t result;
        if ((t->flags & (uint32_t) enum_flags::is_signed)) {
            long long value = PyLong_AsLongLong(value_o);
            ok = !(value == -1 && PyErr_Occurred());
            result = (int64_t) value;
        } else {
            unsigned long long value = PyLong_AsUnsignedLongLong(value_o);
            ok = !(value == (unsigned long long) -1 && PyErr_Occurred());
            result = (int64_t) value;
        }
        Py_DECREF(value_o); // PyObject_GetAttr() returned a new reference

        if (!ok) {
            PyErr_Clear();
            return false;
        }
        *out = result;
        return true;
    }

    // Fast path: look up the entry object itself in the reverse table
    enum_map *rev = (enum_map *) t->enum_tbl.rev;
    enum_map::iterator it = rev->find((int64_t) (uintptr_t) o);

    if (it != rev->end()) {
        *out = it->second;
        return true;
    }

    // Implicit conversion from a Python integer, if permitted and the value
    // corresponds to a registered entry
    if (flags & (uint8_t) cast_flags::convert) {
        enum_map *fwd = (enum_map *) t->enum_tbl.fwd;

        if (t->flags & (uint32_t) enum_flags::is_signed) {
            long long value = PyLong_AsLongLong(o);
            if (value == -1 && PyErr_Occurred()) {
                PyErr_Clear();
                return false;
            }
            enum_map::iterator it2 = fwd->find((int64_t) value);
            if (it2 != fwd->end()) {
                *out = (int64_t) value;
                return true;
            }
        } else {
            unsigned long long value = PyLong_AsUnsignedLongLong(o);
            if (value == (unsigned long long) -1 && PyErr_Occurred()) {
                PyErr_Clear();
                return false;
            }
            enum_map::iterator it2 = fwd->find((int64_t) value);
            if (it2 != fwd->end()) {
                *out = (int64_t) value;
                return true;
            }
        }
    }

    return false;
}
/// Convert the C++ enum value `key` (of the enum type identified by `tp`)
/// into the corresponding Python enum entry. For flag enums, combinations
/// that are not a registered entry are synthesized through the type's
/// __new__. Returns a new reference on success, or nullptr on failure
/// (with a ValueError set when the value is not a valid entry).
PyObject *enum_from_cpp(const std::type_info *tp, int64_t key) noexcept {
    type_data *t = nb_type_c2p(internals, tp);
    if (!t)
        return nullptr;

    // Exact match: return the cached entry object
    enum_map *fwd = (enum_map *) t->enum_tbl.fwd;
    enum_map::iterator it = fwd->find(key);

    if (it != fwd->end()) {
        PyObject *value = (PyObject *) it->second;
        Py_INCREF(value);
        return value;
    }

    uint32_t flags = t->flags;
    if ((flags & (uint32_t) enum_flags::is_flag) != 0) {
        // Flag enums allow arbitrary bit combinations of their entries
        handle enum_tp(t->type_py);
        object val;
        if (flags & (uint32_t) enum_flags::is_signed)
            val = steal(PyLong_FromLongLong((long long) key));
        else
            val = steal(PyLong_FromUnsignedLongLong((unsigned long long) key));
        return enum_tp.attr("__new__")(enum_tp, val).release().ptr();
    }

    if (flags & (uint32_t) enum_flags::is_signed)
        PyErr_Format(PyExc_ValueError, "%lli is not a valid %s.",
                     (long long) key, t->name);
    else
        PyErr_Format(PyExc_ValueError, "%llu is not a valid %s.",
                     (unsigned long long) key, t->name);
    return nullptr;
}
// Export every enum entry into the enclosing scope (like C++'s unscoped enums)
void enum_export(PyObject *tp) {
    handle scope = enum_get_type_data(tp)->scope;
    for (handle entry : handle(tp))
        scope.attr(entry.attr("name")) = entry;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_ft.cpp | C++ | #include <nanobind/nanobind.h>
#include "nb_ft.h"
#if defined(Py_GIL_DISABLED)
/// Make an object immortal when targeting free-threaded Python
void make_immortal(PyObject *op) noexcept {
    // See CPython's Objects/object.c
    // Untrack from the cycle collector so the GC never tries to reclaim it
    if (PyObject_IS_GC(op))
        PyObject_GC_UnTrack(op);
    // Mark as unowned and set the immortal local refcount sentinel, which
    // turns subsequent refcount operations on this object into no-ops
    op->ob_tid = _Py_UNOWNED_TID;
    op->ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL;
    op->ob_ref_shared = 0;
}
#if PY_VERSION_HEX < 0x030E00A5
/// Prepare `obj` so that nb_try_inc_ref() may later be used on it. This is
/// the fallback for Python versions predating PyUnstable_EnableTryIncRef().
void nb_enable_try_inc_ref(PyObject *obj) noexcept {
    // Since this is called during object construction, we know that we have
    // the only reference to the object and can use a non-atomic write.
    assert(obj->ob_ref_shared == 0);
    obj->ob_ref_shared = _Py_REF_MAYBE_WEAKREF;
}
/// Attempt to increment the reference count of `obj` in a way that is safe
/// under free-threaded Python; returns false when the object is concurrently
/// being deallocated and therefore must not be revived.
bool nb_try_inc_ref(PyObject *obj) noexcept {
    // See https://github.com/python/cpython/blob/d05140f9f77d7dfc753dd1e5ac3a5962aaa03eff/Include/internal/pycore_object.h#L761
    uint32_t local = _Py_atomic_load_uint32_relaxed(&obj->ob_ref_local);
    local += 1;
    if (local == 0) {
        // immortal
        return true;
    }

    if (_Py_IsOwnedByCurrentThread(obj)) {
        // Owning thread: the local refcount may be updated without atomics
        _Py_atomic_store_uint32_relaxed(&obj->ob_ref_local, local);
#ifdef Py_REF_DEBUG
        _Py_INCREF_IncRefTotal();
#endif
        return true;
    }
    // Foreign thread: CAS-increment the shared refcount
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&obj->ob_ref_shared);
    for (;;) {
        // If the shared refcount is zero and the object is either merged
        // or may not have weak references, then we cannot incref it.
        if (shared == 0 || shared == _Py_REF_MERGED) {
            return false;
        }
        if (_Py_atomic_compare_exchange_ssize(
                &obj->ob_ref_shared, &shared, shared + (1 << _Py_REF_SHARED_SHIFT))) {
#ifdef Py_REF_DEBUG
            _Py_INCREF_IncRefTotal();
#endif
            return true;
        }
    }
}
#endif
#endif
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_ft.h | C/C++ Header | /*
src/nb_ft.h: implementation details related to free-threaded Python
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#if !defined(Py_GIL_DISABLED)
/// Trivial implementations for non-free-threaded Python
inline void make_immortal(PyObject *) noexcept { }
inline void nb_enable_try_inc_ref(PyObject *) noexcept { }
// With the GIL held, a nonzero refcount suffices to incref safely
inline bool nb_try_inc_ref(PyObject *obj) noexcept {
    if (Py_REFCNT(obj) > 0) {
        Py_INCREF(obj);
        return true;
    }
    return false;
}
#else
extern void make_immortal(PyObject *op) noexcept;
#if PY_VERSION_HEX >= 0x030E00A5
/// Sufficiently recent CPython versions provide an API for the following operations
// (thin wrappers around the PyUnstable_* entry points, CPython 3.14+)
inline void nb_enable_try_inc_ref(PyObject *obj) noexcept {
    PyUnstable_EnableTryIncRef(obj);
}

inline bool nb_try_inc_ref(PyObject *obj) noexcept {
    return PyUnstable_TryIncRef(obj);
}
#else
/// Otherwise, nanobind ships with a low-level implementation
extern void nb_enable_try_inc_ref(PyObject *) noexcept;
extern bool nb_try_inc_ref(PyObject *obj) noexcept;
#endif
#endif
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_func.cpp | C++ | /*
src/nb_func.cpp: nanobind function type
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include "nb_internals.h"
#include "buffer.h"
#include "nb_ft.h"
/// Maximum number of arguments supported by 'nb_vectorcall_simple'
#define NB_MAXARGS_SIMPLE 8
#if defined(__GNUG__)
# include <cxxabi.h>
#endif
#if defined(_MSC_VER)
# pragma warning(disable: 4706) // assignment within conditional expression
# pragma warning(disable: 6255) // _alloca indicates failure by raising a stack overflow exception
#endif
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
// Forward/external declarations
extern Buffer buf;
static PyObject *nb_func_vectorcall_simple_0(PyObject *, PyObject *const *,
size_t, PyObject *) noexcept;
static PyObject *nb_func_vectorcall_simple_1(PyObject *, PyObject *const *,
size_t, PyObject *) noexcept;
static PyObject *nb_func_vectorcall_simple(PyObject *, PyObject *const *,
size_t, PyObject *) noexcept;
static PyObject *nb_func_vectorcall_complex(PyObject *, PyObject *const *,
size_t, PyObject *) noexcept;
static uint32_t nb_func_render_signature(const func_data *f,
bool nb_signature_mode = false) noexcept;
/// GC traversal: visit the default-argument objects held by each overload
int nb_func_traverse(PyObject *self, visitproc visit, void *arg) {
    size_t count = (size_t) Py_SIZE(self);
    if (count == 0)
        return 0;

    func_data *rec = nb_func_data(self);
    for (size_t i = 0; i < count; ++i, ++rec) {
        if (rec->flags & (uint32_t) func_flags::has_args) {
            for (size_t j = 0; j < rec->nargs; ++j)
                Py_VISIT(rec->args[j].value);
        }
    }
    return 0;
}
/// GC clear: drop references to the default-argument objects of all overloads
int nb_func_clear(PyObject *self) {
    size_t count = (size_t) Py_SIZE(self);
    if (count == 0)
        return 0;

    func_data *rec = nb_func_data(self);
    for (size_t i = 0; i < count; ++i, ++rec) {
        if (rec->flags & (uint32_t) func_flags::has_args) {
            for (size_t j = 0; j < rec->nargs; ++j)
                Py_CLEAR(rec->args[j].value);
        }
    }
    return 0;
}
/// Free a function overload chain
// Releases everything owned by the nb_func object: per-overload captures,
// default arguments, interned names, and the heap-allocated descriptor and
// signature strings. Py_SIZE(self) is the number of inline overload records.
void nb_func_dealloc(PyObject *self) {
    PyObject_GC_UnTrack(self);

    size_t size = (size_t) Py_SIZE(self);
    if (size) {
        func_data *f = nb_func_data(self);

        // Delete from registered function list
#if !defined(NB_FREE_THREADED)
        size_t n_deleted = internals->funcs.erase(self);
        check(n_deleted == 1,
              "nanobind::detail::nb_func_dealloc(\"%s\"): function not found!",
              ((f->flags & (uint32_t) func_flags::has_name) ? f->name
                                                            : "<anonymous>"));
#endif

        // Release the resources owned by each overload record
        for (size_t i = 0; i < size; ++i) {
            if (f->flags & (uint32_t) func_flags::has_free)
                f->free_capture(f->capture);

            if (f->flags & (uint32_t) func_flags::has_args) {
                for (size_t j = 0; j < f->nargs; ++j) {
                    const arg_data &arg = f->args[j];
                    Py_XDECREF(arg.value);
                    Py_XDECREF(arg.name_py);
                    free((char *) arg.signature);
                }
            }

            if (f->flags & (uint32_t) func_flags::has_doc)
                free((char *) f->doc);

            free((char *) f->name);
            free(f->args);
            free((char *) f->descr);
            free(f->descr_types);
            free(f->signature);
            ++f;
        }
    }

    PyObject_GC_Del(self);
    internals_dec_ref();
}
/// GC traversal for bound methods: visit the function and the bound instance
int nb_bound_method_traverse(PyObject *self, visitproc visit, void *arg) {
    nb_bound_method *mb = (nb_bound_method *) self;
    Py_VISIT((PyObject *) mb->func);
    Py_VISIT(mb->self);
    return 0;
}
/// GC clear for bound methods: drop the function and bound-instance references
int nb_bound_method_clear(PyObject *self) {
    nb_bound_method *mb = (nb_bound_method *) self;
    Py_CLEAR(mb->func);
    Py_CLEAR(mb->self);
    return 0;
}
/// Destructor for bound methods: untrack from the GC, then release the
/// references to the underlying function and the bound instance
void nb_bound_method_dealloc(PyObject *self) {
    nb_bound_method *mb = (nb_bound_method *) self;
    PyObject_GC_UnTrack(self);
    Py_DECREF((PyObject *) mb->func);
    Py_DECREF(mb->self);
    PyObject_GC_Del(self);
}
// Argument records reused by nb_func_new(): entry [0] annotates the implicit
// 'self' parameter of methods; entry [1] is a nameless record with a zeroed
// flag field, used (via 'method_args + 1') as the argument annotation of
// copy constructors so that no implicit conversion takes place.
static arg_data method_args[2] = {
    { "self", nullptr, nullptr, nullptr, 0 },
    { nullptr, nullptr, nullptr, nullptr, 0 }
};
/// Translate a nanobind builtin_exception into the corresponding Python error
/// state. Returns false (without setting an error) for next_overload, which
/// is not an error but a request to continue overload resolution.
static bool set_builtin_exception_status(builtin_exception &e) {
    PyObject *o;

    switch (e.type()) {
        case exception_type::runtime_error: o = PyExc_RuntimeError; break;
        case exception_type::stop_iteration: o = PyExc_StopIteration; break;
        case exception_type::index_error: o = PyExc_IndexError; break;
        case exception_type::key_error: o = PyExc_KeyError; break;
        case exception_type::value_error: o = PyExc_ValueError; break;
        case exception_type::type_error: o = PyExc_TypeError; break;
        case exception_type::buffer_error: o = PyExc_BufferError; break;
        case exception_type::import_error: o = PyExc_ImportError; break;
        case exception_type::attribute_error: o = PyExc_AttributeError; break;
        case exception_type::next_overload: return false;

        default:
            // Unreachable unless a new exception_type is added without a case
            check(false, "nanobind::detail::set_builtin_exception_status(): "
                         "invalid exception type!");
    }

    PyErr_SetString(o, e.what());
    return true;
}
/// malloc() wrapper that terminates with a nanobind error message on failure
void *malloc_check(size_t size) {
    void *p = malloc(size);
    if (p == nullptr)
        fail("nanobind: malloc() failed!");
    return p;
}
/// strdup() wrapper that terminates with a nanobind error message on failure
char *strdup_check(const char *s) {
#if defined(_WIN32)
    char *copy = _strdup(s); // MSVC deprecates the POSIX name
#else
    char *copy = strdup(s);
#endif
    if (copy == nullptr)
        fail("nanobind: strdup() failed!");
    return copy;
}
/**
* \brief Wrap a C++ function into a Python function object
*
* This is an implementation detail of nanobind::cpp_function.
*/
PyObject *nb_func_new(const func_data_prelim_base *f) noexcept {
bool has_scope = f->flags & (uint32_t) func_flags::has_scope,
has_name = f->flags & (uint32_t) func_flags::has_name,
has_args = f->flags & (uint32_t) func_flags::has_args,
has_var_args = f->flags & (uint32_t) func_flags::has_var_args,
has_var_kwargs = f->flags & (uint32_t) func_flags::has_var_kwargs,
can_mutate_args = f->flags & (uint32_t) func_flags::can_mutate_args,
has_doc = f->flags & (uint32_t) func_flags::has_doc,
has_signature = f->flags & (uint32_t) func_flags::has_signature,
is_implicit = f->flags & (uint32_t) func_flags::is_implicit,
is_method = f->flags & (uint32_t) func_flags::is_method,
return_ref = f->flags & (uint32_t) func_flags::return_ref,
is_constructor = false,
is_init = false,
is_new = false,
is_setstate = false;
arg_data *args_in = nullptr;
if (has_args)
args_in = std::launder((arg_data*) ((func_data_prelim<1>*) f)->args);
PyObject *name = nullptr;
PyObject *func_prev = nullptr;
char *name_cstr;
if (has_signature) {
name_cstr = extract_name("nanobind::detail::nb_func_new", "def ", f->name);
has_name = *name_cstr != '\0';
} else {
name_cstr = strdup_check(has_name ? f->name : "");
}
// Check for previous overloads
nb_internals *internals_ = internals;
if (has_scope && has_name) {
name = PyUnicode_InternFromString(name_cstr);
check(name, "nb::detail::nb_func_new(\"%s\"): invalid name.", name_cstr);
func_prev = PyObject_GetAttr(f->scope, name);
if (func_prev) {
if (Py_TYPE(func_prev) == internals_->nb_func ||
Py_TYPE(func_prev) == internals_->nb_method) {
func_data *fp = nb_func_data(func_prev);
check((fp->flags & (uint32_t) func_flags::is_method) ==
(f->flags & (uint32_t) func_flags::is_method),
"nb::detail::nb_func_new(\"%s\"): mismatched static/"
"instance method flags in function overloads!",
name_cstr);
/* Never append a method to an overload chain of a parent class;
instead, hide the parent's overloads in this case */
if (fp->scope != f->scope)
Py_CLEAR(func_prev);
} else if (name_cstr[0] == '_') {
Py_CLEAR(func_prev);
} else {
check(false,
"nb::detail::nb_func_new(\"%s\"): cannot overload "
"existing non-function object of the same name!", name_cstr);
}
} else {
PyErr_Clear();
}
is_init = strcmp(name_cstr, "__init__") == 0;
is_new = strcmp(name_cstr, "__new__") == 0;
is_setstate = strcmp(name_cstr, "__setstate__") == 0;
// Is this method a constructor that takes a class binding as first parameter?
is_constructor = is_method && (is_init || is_setstate) &&
strncmp(f->descr, "({%}", 4) == 0;
// Don't use implicit conversions in copy constructors (causes infinite recursion)
// Notes:
// f->nargs = C++ argument count.
// f->descr_types = zero-terminated array of bound types among them.
// Hence of size >= 2 for constructors, where f->descr_types[1] my be null.
// args_in = array of Python arguments (nb::arg). Non-empty if has_args.
// By contrast, fc->args below has size f->nargs.
if (is_constructor && f->nargs == 2 && f->descr_types[0] &&
f->descr_types[0] == f->descr_types[1]) {
if (has_args) {
args_in[0].flag &= ~(uint8_t) cast_flags::convert;
} else {
args_in = method_args + 1;
has_args = true;
}
}
}
// Create a new function and destroy the old one
Py_ssize_t prev_overloads = func_prev ? Py_SIZE(func_prev) : 0;
nb_func *func = (nb_func *) PyType_GenericAlloc(
is_method ? internals_->nb_method : internals_->nb_func, prev_overloads + 1);
check(func, "nb::detail::nb_func_new(\"%s\"): alloc. failed (1).",
name_cstr);
make_immortal((PyObject *) func);
internals_inc_ref();
// Check if the complex dispatch loop is needed
bool complex_call = can_mutate_args || has_var_kwargs || has_var_args ||
f->nargs > NB_MAXARGS_SIMPLE;
if (has_args) {
for (size_t i = is_method; i < f->nargs; ++i) {
arg_data &a = args_in[i - is_method];
complex_call |= a.name != nullptr || a.value != nullptr ||
a.flag != cast_flags::convert;
}
}
uint32_t max_nargs = f->nargs;
const char *prev_doc = nullptr;
if (func_prev) {
nb_func *nb_func_prev = (nb_func *) func_prev;
complex_call |= nb_func_prev->complex_call;
max_nargs = std::max(max_nargs, nb_func_prev->max_nargs);
func_data *cur = nb_func_data(func),
*prev = nb_func_data(func_prev);
if (nb_func_prev->doc_uniform)
prev_doc = prev->doc;
memcpy(cur, prev, sizeof(func_data) * prev_overloads);
memset(prev, 0, sizeof(func_data) * prev_overloads);
((PyVarObject *) func_prev)->ob_size = 0;
#if !defined(NB_FREE_THREADED)
size_t n_deleted = internals_->funcs.erase(func_prev);
check(n_deleted == 1,
"nanobind::detail::nb_func_new(): internal update failed (1)!");
#endif
Py_CLEAR(func_prev);
}
func->max_nargs = max_nargs;
func->complex_call = complex_call;
PyObject* (*vectorcall)(PyObject *, PyObject * const*, size_t, PyObject *);
if (complex_call) {
vectorcall = nb_func_vectorcall_complex;
} else {
if (f->nargs == 0 && !prev_overloads)
vectorcall = nb_func_vectorcall_simple_0;
else if (f->nargs == 1 && !prev_overloads)
vectorcall = nb_func_vectorcall_simple_1;
else
vectorcall = nb_func_vectorcall_simple;
}
func->vectorcall = vectorcall;
#if !defined(NB_FREE_THREADED)
// Register the function
auto [it, success] = internals_->funcs.try_emplace(func, nullptr);
check(success,
"nanobind::detail::nb_func_new(): internal update failed (2)!");
#endif
func_data *fc = nb_func_data(func) + prev_overloads;
memcpy(fc, f, sizeof(func_data_prelim_base));
if (has_doc) {
if (fc->doc[0] == '\n')
fc->doc++;
if (fc->doc[0] == '\0') {
fc->doc = nullptr;
fc->flags &= ~(uint32_t) func_flags::has_doc;
has_doc = false;
} else {
fc->doc = strdup_check(fc->doc);
}
}
// Detect when an entire overload chain has the same docstring
func->doc_uniform =
(has_doc && ((prev_overloads == 0) ||
(prev_doc && strcmp(fc->doc, prev_doc) == 0)));
if (is_constructor)
fc->flags |= (uint32_t) func_flags::is_constructor;
if (has_args)
fc->flags |= (uint32_t) func_flags::has_args;
fc->name = name_cstr;
fc->signature = has_signature ? strdup_check(f->name) : nullptr;
if (is_implicit) {
check(fc->flags & (uint32_t) func_flags::is_constructor,
"nb::detail::nb_func_new(\"%s\"): nanobind::is_implicit() "
"should only be specified for constructors.",
name_cstr);
check(f->nargs == 2,
"nb::detail::nb_func_new(\"%s\"): implicit constructors "
"should only have one argument.",
name_cstr);
if (f->descr_types[1])
implicitly_convertible(f->descr_types[1], f->descr_types[0]);
}
for (size_t i = 0;; ++i) {
if (!f->descr[i]) {
fc->descr = (char *) malloc_check(sizeof(char) * (i + 1));
memcpy((char *) fc->descr, f->descr, (i + 1) * sizeof(char));
break;
}
}
for (size_t i = 0;; ++i) {
if (!f->descr_types[i]) {
fc->descr_types = (const std::type_info **)
malloc_check(sizeof(const std::type_info *) * (i + 1));
memcpy(fc->descr_types, f->descr_types,
(i + 1) * sizeof(const std::type_info *));
break;
}
}
if (has_args) {
fc->args = (arg_data *) malloc_check(sizeof(arg_data) * f->nargs);
if (is_method) // add implicit 'self' argument annotation
fc->args[0] = method_args[0];
for (size_t i = is_method; i < fc->nargs; ++i)
fc->args[i] = args_in[i - is_method];
for (size_t i = 0; i < fc->nargs; ++i) {
arg_data &a = fc->args[i];
if (a.name) {
a.name_py = PyUnicode_InternFromString(a.name);
a.name = PyUnicode_AsUTF8AndSize(a.name_py, nullptr);
} else {
a.name_py = nullptr;
}
if (a.value == Py_None)
a.flag |= (uint8_t) cast_flags::accepts_none;
a.signature = a.signature ? strdup_check(a.signature) : nullptr;
Py_XINCREF(a.value);
}
}
// Fast path for vector call object construction
if (((is_init && is_method) || (is_new && !is_method)) &&
nb_type_check(f->scope)) {
type_data *td = nb_type_data((PyTypeObject *) f->scope);
bool has_new = td->flags & (uint32_t) type_flags::has_new;
if (is_init) {
if (!has_new) {
td->init = func;
} else {
// Keep track of whether we have a __init__ overload that
// accepts no arguments (except self). If not, then we
// shouldn't allow calling the type object with no arguments,
// even though (for unpickling support) we probably do have
// a __new__ overload that accepts no arguments (except cls).
// This check is necessary because our type vectorcall shortcut
// skips Python's usual logic where __init__ is always called
// if __new__ returns an instance of the type.
bool noargs_ok = true;
for (uint32_t i = 1; i < fc->nargs - (uint32_t) has_var_kwargs; ++i) {
if (has_var_args && i == fc->nargs_pos)
continue; // skip `nb::args` since it can be empty
if (has_args && fc->args[i].value != nullptr)
continue; // arg with default is OK
noargs_ok = false;
break;
}
if (noargs_ok)
td->flags |= (uint32_t) type_flags::has_nullary_new;
}
} else if (is_new) {
td->init = func;
td->flags |= (uint32_t) type_flags::has_new;
}
}
if (has_scope && name) {
int rv = PyObject_SetAttr(f->scope, name, (PyObject *) func);
check(rv == 0, "nb::detail::nb_func_new(\"%s\"): setattr. failed.",
name_cstr);
}
Py_XDECREF(name);
if (return_ref) {
return (PyObject *) func;
} else {
Py_DECREF(func);
return nullptr;
}
}
/// Used by nb_func_vectorcall: generate an error when overload resolution fails.
/// Builds a TypeError whose message lists every supported overload signature
/// followed by the types the caller actually passed. Always returns nullptr
/// (with an exception set), except for operators, which return NotImplemented
/// so that Python can try the reflected operation instead.
static NB_NOINLINE PyObject *
nb_func_error_overload(PyObject *self, PyObject *const *args_in,
                       size_t nargs_in, PyObject *kwargs_in) noexcept {
    uint32_t count = (uint32_t) Py_SIZE(self);
    func_data *f = nb_func_data(self);

    if (f->flags & (uint32_t) func_flags::is_operator)
        return not_implemented().release().ptr();

    // The buffer 'buf' is protected by 'internals.mutex'
    lock_internals guard(internals);

    buf.clear();
    buf.put_dstr(f->name);
    buf.put("(): incompatible function arguments. The following argument types "
            "are supported:\n");

    // Mask default __new__ overload created by nb::new_()
    if (strcmp(f->name, "__new__") == 0 && count > 1 && f->nargs == 1) {
        count -= 1;
        f += 1;
    }

    // Enumerate the signature of every (remaining) overload
    for (uint32_t i = 0; i < count; ++i) {
        buf.put(" ");
        buf.put_uint32(i + 1);
        buf.put(". ");
        nb_func_render_signature(f + i);
        buf.put('\n');
    }

    // Append the types of the positional arguments that were provided
    buf.put("\nInvoked with types: ");
    for (size_t i = 0; i < nargs_in; ++i) {
        str name = steal<str>(nb_inst_name(args_in[i]));
        buf.put_dstr(name.c_str());
        if (i + 1 < nargs_in)
            buf.put(", ");
    }

    if (kwargs_in) {
        if (nargs_in)
            buf.put(", ");
        buf.put("kwargs = { ");

        // Vectorcall convention: keyword values follow the positional
        // arguments in 'args_in'; 'kwargs_in' is the tuple of names
        size_t nkwargs_in = (size_t) NB_TUPLE_GET_SIZE(kwargs_in);
        for (size_t j = 0; j < nkwargs_in; ++j) {
            PyObject *key = NB_TUPLE_GET_ITEM(kwargs_in, j),
                     *value = args_in[nargs_in + j];

            const char *key_cstr = PyUnicode_AsUTF8AndSize(key, nullptr);
            buf.put_dstr(key_cstr);
            buf.put(": ");
            str name = steal<str>(nb_inst_name(value));
            buf.put_dstr(name.c_str());
            buf.put(", ");
        }
        buf.rewind(2); // drop the trailing ", "
        buf.put(" }");
    }

    PyErr_SetString(PyExc_TypeError, buf.get());

    return nullptr;
}
/// Used by nb_func_vectorcall: generate an error when result conversion fails.
/// Raises a TypeError naming the function's signature and returns nullptr,
/// unless a Python error is already pending (in which case it is preserved).
static NB_NOINLINE PyObject *nb_func_error_noconvert(PyObject *self,
                                                     PyObject *const *, size_t,
                                                     PyObject *) noexcept {
    // Don't clobber an exception raised by the failing conversion itself
    if (PyErr_Occurred())
        return nullptr;
    func_data *f = nb_func_data(self);

    // The buffer 'buf' is protected by 'internals.mutex'
    lock_internals guard(internals);

    buf.clear();
    buf.put("Unable to convert function return value to a Python "
            "type! The signature was\n ");
    nb_func_render_signature(f);
    PyErr_SetString(PyExc_TypeError, buf.get());
    return nullptr;
}
/// Used by nb_func_vectorcall: convert a C++ exception into a Python error.
/// Walks the chain of registered exception translators; the first translator
/// that completes without (re)throwing wins. If a translator rethrows, the
/// rethrown exception is handed to the next entry in the chain. When no
/// translator succeeds, a SystemError is raised instead.
static NB_NOINLINE void nb_func_convert_cpp_exception() noexcept {
    std::exception_ptr exc = std::current_exception();

    nb_translator_seq *entry = &internals->translators;
    while (entry) {
        try {
            // Try exception translator & forward payload
            entry->translator(exc, entry->payload);
            return;
        } catch (...) {
            // Translator re-raised; continue with the updated exception
            exc = std::current_exception();
        }
        entry = entry->next;
    }

    PyErr_SetString(PyExc_SystemError,
                    "nanobind::detail::nb_func_error_except(): exception "
                    "could not be translated!");
}
/// Dispatch loop that is used to invoke functions created by nb_func_new.
/// This "complex" variant handles overloads that use keyword arguments,
/// default values, nb::args/nb::kwargs, or more than NB_MAXARGS_SIMPLE
/// parameters; purely positional calls go through the *_simple variants.
static PyObject *nb_func_vectorcall_complex(PyObject *self,
                                            PyObject *const *args_in,
                                            size_t nargsf,
                                            PyObject *kwargs_in) noexcept {
    // 'count' is the number of overloads stored inline after the nb_func
    // header; keyword values follow the positionals in 'args_in'
    const size_t count      = (size_t) Py_SIZE(self),
                 nargs_in   = (size_t) PyVectorcall_NARGS(nargsf),
                 nkwargs_in = kwargs_in ? (size_t) NB_TUPLE_GET_SIZE(kwargs_in) : 0;

    func_data *fr = nb_func_data(self);

    const bool is_method      = fr->flags & (uint32_t) func_flags::is_method,
               is_constructor = fr->flags & (uint32_t) func_flags::is_constructor;

    PyObject *result = nullptr,
             *self_arg = (is_method && nargs_in > 0) ? args_in[0] : nullptr;

    /* The following lines allocate memory on the stack, which is very efficient
       but also potentially dangerous since it can be used to generate stack
       overflows. We refuse unrealistically large number of 'kwargs' (the
       'max_nargs' value is fine since it is specified by the bindings) */
    if (nkwargs_in > 1024) {
        PyErr_SetString(PyExc_TypeError,
                        "nanobind::detail::nb_func_vectorcall(): too many (> "
                        "1024) keyword arguments.");
        return nullptr;
    }

    // Handler routine that will be invoked in case of an error condition
    PyObject *(*error_handler)(PyObject *, PyObject *const *, size_t,
                               PyObject *) noexcept = nullptr;

    // Small array holding temporaries (implicit conversion/*args/**kwargs)
    cleanup_list cleanup(self_arg);

    // Preallocate stack memory for function dispatch
    size_t max_nargs = ((nb_func *) self)->max_nargs;
    PyObject **args = (PyObject **) alloca(max_nargs * sizeof(PyObject *));
    uint8_t *args_flags = (uint8_t *) alloca(max_nargs * sizeof(uint8_t));
    bool *kwarg_used = (bool *) alloca(nkwargs_in * sizeof(bool));

    // Ensure that keyword argument names are interned. That makes it faster
    // to compare them against pre-interned argument names in the overload chain.
    // Normal function calls will have their keyword arguments already interned,
    // but we can't rely on that; it fails for things like fn(**json.loads(...)).
    PyObject **kwnames = nullptr;

#if !defined(PYPY_VERSION) && !defined(Py_LIMITED_API)
    bool kwnames_interned = true;
    for (size_t i = 0; i < nkwargs_in; ++i) {
        PyObject *key = NB_TUPLE_GET_ITEM(kwargs_in, i);
        kwnames_interned &= ((PyASCIIObject *) key)->state.interned != 0;
    }

    // Fast path: all names already interned, use the tuple storage directly
    if (kwargs_in && NB_LIKELY(kwnames_interned)) {
        kwnames = ((PyTupleObject *) kwargs_in)->ob_item;
        goto traverse_overloads;
    }
#endif

    // Slow path: intern each keyword name into a stack-allocated array;
    // newly created interned strings are kept alive via 'cleanup'
    kwnames = (PyObject **) alloca(nkwargs_in * sizeof(PyObject *));
    for (size_t i = 0; i < nkwargs_in; ++i) {
        PyObject *key = NB_TUPLE_GET_ITEM(kwargs_in, i);
        Py_INCREF(key);
        kwnames[i] = key;
        PyUnicode_InternInPlace(&kwnames[i]);
        PyObject *key_interned = kwnames[i];
        if (NB_LIKELY(key == key_interned)) // string was already interned
            Py_DECREF(key);
        else
            cleanup.append(key_interned);
    }

#if !defined(PYPY_VERSION) && !defined(Py_LIMITED_API)
traverse_overloads:
#endif

    /* The logic below tries to find a suitable overload using two passes
       of the overload chain (or 1, if there are no overloads). The first pass
       is strict and permits no implicit conversions, while the second pass
       allows them.

       The following is done per overload during a pass

       1. Copy individual arguments while checking that named positional
          arguments weren't *also* specified as kwarg. Substitute missing
          entries using keyword arguments or default argument values provided
          in the bindings, if available.

       2. Ensure that either all keyword arguments were "consumed", or that
          the function takes a kwargs argument to accept unconsumed kwargs.

       3. Any positional arguments still left get put into a tuple (for args),
          and any leftover kwargs get put into a dict.

       4. Pack everything into a vector; if we have nb::args or nb::kwargs,
          they become a tuple or dict at the end of the positional arguments.

       5. Call the function call dispatcher (func_data::impl)

       If one of these fail, move on to the next overload and keep trying
       until we get a result other than NB_NEXT_OVERLOAD.
    */
    for (size_t pass = (count > 1) ? 0 : 1; pass < 2; ++pass) {
        for (size_t k = 0; k < count; ++k) {
            const func_data *f = fr + k;

            const bool has_args       = f->flags & (uint32_t) func_flags::has_args,
                       has_var_args   = f->flags & (uint32_t) func_flags::has_var_args,
                       has_var_kwargs = f->flags & (uint32_t) func_flags::has_var_kwargs;

            // Number of C++ parameters eligible to be filled from individual
            // Python positional arguments
            size_t nargs_pos = f->nargs_pos;

            // Number of C++ parameters in total, except for a possible trailing
            // nb::kwargs. All of these are eligible to be filled from individual
            // Python arguments (keyword always, positional until index nargs_pos)
            // except for a potential nb::args, which exists at index nargs_pos
            // if has_var_args is true. We'll skip that one in the individual-args
            // loop, and go back and fill it later with the unused positionals.
            size_t nargs_step1 = f->nargs - has_var_kwargs;

            if (nargs_in > nargs_pos && !has_var_args)
                continue; // Too many positional arguments given for this overload

            if (nargs_in < nargs_pos && !has_args)
                continue; // Not enough positional arguments, insufficient
                          // keyword/default arguments to fill in the blanks

            memset(kwarg_used, 0, nkwargs_in * sizeof(bool));

            // 1. Copy individual arguments, potentially substitute kwargs/defaults
            size_t i = 0;
            for (; i < nargs_step1; ++i) {
                if (has_var_args && i == nargs_pos)
                    continue; // skip nb::args parameter, will be handled below

                PyObject *arg = nullptr;
                uint8_t arg_flag = 1;

                // If i >= nargs_pos, then this is a keyword-only parameter.
                // (We skipped any *args parameter using the test above,
                // and we set the bounds of nargs_step1 to not include any
                // **kwargs parameter.) In that case we don't want to take
                // a positional arg (which might validly exist and be
                // destined for the *args) but we do still want to look for
                // a matching keyword arg.
                if (i < nargs_in && i < nargs_pos)
                    arg = args_in[i];

                if (has_args) {
                    const arg_data &ad = f->args[i];

                    if (kwargs_in && ad.name_py) {
                        PyObject *hit = nullptr;
                        // Linear scan over the (typically short) kwname array;
                        // interned-pointer comparison, no string compare needed
                        for (size_t j = 0; j < nkwargs_in; ++j) {
                            if (kwnames[j] == ad.name_py) {
                                hit = args_in[nargs_in + j];
                                kwarg_used[j] = true;
                                break;
                            }
                        }

                        if (hit) {
                            if (arg)
                                break; // conflict between keyword and positional arg.
                            arg = hit;
                        }
                    }

                    if (!arg)
                        arg = ad.value; // fall back to the bound default value
                    arg_flag = ad.flag;
                }

                if (!arg || (arg == Py_None && (arg_flag & cast_flags::accepts_none) == 0))
                    break;

                // Implicit conversion only active in the 2nd pass
                args_flags[i] = arg_flag & ~uint8_t(pass == 0);
                args[i] = arg;
            }

            // Skip this overload if any arguments were unavailable
            if (i != nargs_step1)
                continue;

            // Deal with remaining positional arguments (collected into nb::args)
            if (has_var_args) {
                PyObject *tuple = PyTuple_New(
                    nargs_in > nargs_pos ? (Py_ssize_t) (nargs_in - nargs_pos) : 0);

                for (size_t j = nargs_pos; j < nargs_in; ++j) {
                    PyObject *o = args_in[j];
                    Py_INCREF(o);
                    NB_TUPLE_SET_ITEM(tuple, j - nargs_pos, o);
                }

                args[nargs_pos] = tuple;
                args_flags[nargs_pos] = 0;
                cleanup.append(tuple);
            }

            // Deal with remaining keyword arguments (collected into nb::kwargs)
            if (has_var_kwargs) {
                PyObject *dict = PyDict_New();
                for (size_t j = 0; j < nkwargs_in; ++j) {
                    PyObject *key = kwnames[j];
                    if (!kwarg_used[j])
                        PyDict_SetItem(dict, key, args_in[nargs_in + j]);
                }

                args[nargs_step1] = dict;
                args_flags[nargs_step1] = 0;
                cleanup.append(dict);
            } else if (kwargs_in) {
                // No **kwargs sink: every keyword argument must have matched
                bool success = true;
                for (size_t j = 0; j < nkwargs_in; ++j)
                    success &= kwarg_used[j];
                if (!success)
                    continue;
            }

            if (is_constructor)
                args_flags[0] |= (uint8_t) cast_flags::construct;

            // Return value policy is stored in the low 3 bits of the flags
            rv_policy policy = (rv_policy) (f->flags & 0b111);

            try {
                result = nullptr;

                // Found a suitable overload, let's try calling it
                result = f->impl((void *) f->capture, args, args_flags,
                                 policy, &cleanup);

                if (NB_UNLIKELY(!result))
                    error_handler = nb_func_error_noconvert;
            } catch (builtin_exception &e) {
                if (!set_builtin_exception_status(e))
                    result = NB_NEXT_OVERLOAD;
            } catch (python_error &e) {
                e.restore();
            } catch (...) {
                nb_func_convert_cpp_exception();
            }

            if (result != NB_NEXT_OVERLOAD) {
                if (is_constructor && result != nullptr) {
                    // Mark the 'self' instance as fully constructed
                    nb_inst *self_arg_nb = (nb_inst *) self_arg;
                    self_arg_nb->destruct = true;
                    self_arg_nb->state = nb_inst::state_ready;

                    if (NB_UNLIKELY(self_arg_nb->intrusive))
                        nb_type_data(Py_TYPE(self_arg))
                            ->set_self_py(inst_ptr(self_arg_nb), self_arg);
                }

                goto done;
            }
        }
    }

    error_handler = nb_func_error_overload;

done:
    if (NB_UNLIKELY(cleanup.used()))
        cleanup.release();

    if (NB_UNLIKELY(error_handler))
        result = error_handler(self, args_in, nargs_in, kwargs_in);

    return result;
}
/// Simplified nb_func_vectorcall variant for functions w/o keyword arguments,
/// w/o default arguments, with no more than NB_MAXARGS_SIMPLE arguments, etc.
/// Any call with keyword arguments or a 'None' positional falls straight
/// through to the overload-resolution error path (such calls are only
/// supported by the complex dispatcher, which is selected at binding time).
static PyObject *nb_func_vectorcall_simple(PyObject *self,
                                           PyObject *const *args_in,
                                           size_t nargsf,
                                           PyObject *kwargs_in) noexcept {
    uint8_t args_flags[NB_MAXARGS_SIMPLE];
    func_data *fr = nb_func_data(self);

    const size_t count    = (size_t) Py_SIZE(self),
                 nargs_in = (size_t) PyVectorcall_NARGS(nargsf);

    const bool is_method      = fr->flags & (uint32_t) func_flags::is_method,
               is_constructor = fr->flags & (uint32_t) func_flags::is_constructor;

    PyObject *result = nullptr,
             *self_arg = (is_method && nargs_in > 0) ? args_in[0] : nullptr;

    // Small array holding temporaries (implicit conversion/*args/**kwargs)
    cleanup_list cleanup(self_arg);

    // Handler routine that will be invoked in case of an error condition
    PyObject *(*error_handler)(PyObject *, PyObject *const *, size_t,
                               PyObject *) noexcept = nullptr;

    bool fail = kwargs_in != nullptr;
    PyObject *none_ptr = Py_None;
    for (size_t i = 0; i < nargs_in; ++i)
        fail |= args_in[i] == none_ptr;
    if (fail) { // keyword/None arguments unsupported in simple vectorcall
        error_handler = nb_func_error_overload;
        goto done;
    }

    // Two passes: pass 0 forbids implicit conversions, pass 1 allows them.
    // With a single overload, only the permissive pass runs.
    for (size_t pass = (count > 1) ? 0 : 1; pass < 2; ++pass) {
        for (int i = 0; i < NB_MAXARGS_SIMPLE; ++i)
            args_flags[i] = (uint8_t) pass;
        if (is_constructor)
            args_flags[0] = (uint8_t) cast_flags::construct;

        for (size_t k = 0; k < count; ++k) {
            const func_data *f = fr + k;

            // Exact positional arity match required in the simple path
            if (nargs_in != f->nargs)
                continue;

            try {
                result = nullptr;

                // Found a suitable overload, let's try calling it
                result = f->impl((void *) f->capture, (PyObject **) args_in,
                                 args_flags, (rv_policy) (f->flags & 0b111),
                                 &cleanup);

                if (NB_UNLIKELY(!result))
                    error_handler = nb_func_error_noconvert;
            } catch (builtin_exception &e) {
                if (!set_builtin_exception_status(e))
                    result = NB_NEXT_OVERLOAD;
            } catch (python_error &e) {
                e.restore();
            } catch (...) {
                nb_func_convert_cpp_exception();
            }

            if (result != NB_NEXT_OVERLOAD) {
                if (is_constructor && result != nullptr) {
                    // Mark the 'self' instance as fully constructed
                    nb_inst *self_arg_nb = (nb_inst *) self_arg;
                    self_arg_nb->destruct = true;
                    self_arg_nb->state = nb_inst::state_ready;

                    if (NB_UNLIKELY(self_arg_nb->intrusive))
                        nb_type_data(Py_TYPE(self_arg))
                            ->set_self_py(inst_ptr(self_arg_nb), self_arg);
                }

                goto done;
            }
        }
    }

    error_handler = nb_func_error_overload;

done:
    if (NB_UNLIKELY(cleanup.used()))
        cleanup.release();

    if (NB_UNLIKELY(error_handler))
        result = error_handler(self, args_in, nargs_in, kwargs_in);

    return result;
}
/// Simplified nb_func_vectorcall variant for non-overloaded functions with 0 args.
/// Selected at binding time when the function has exactly one overload taking
/// no arguments; any other call shape raises the overload-resolution error.
static PyObject *nb_func_vectorcall_simple_0(PyObject *self,
                                             PyObject *const *args_in,
                                             size_t nargsf,
                                             PyObject *kwargs_in) noexcept {
    func_data *fr = nb_func_data(self);
    const size_t nargs_in = (size_t) PyVectorcall_NARGS(nargsf);

    // Handler routine that will be invoked in case of an error condition
    PyObject *(*error_handler)(PyObject *, PyObject *const *, size_t,
                               PyObject *) noexcept = nullptr;
    PyObject *result = nullptr;

    if (kwargs_in == nullptr && nargs_in == 0) {
        try {
            // No arguments to convert, hence no args/flags/cleanup list needed
            result = fr->impl((void *) fr->capture, (PyObject **) args_in,
                              nullptr, (rv_policy) (fr->flags & 0b111), nullptr);
            if (result == NB_NEXT_OVERLOAD)
                error_handler = nb_func_error_overload;
            else if (!result)
                error_handler = nb_func_error_noconvert;
        } catch (builtin_exception &e) {
            if (!set_builtin_exception_status(e))
                error_handler = nb_func_error_overload;
        } catch (python_error &e) {
            e.restore();
        } catch (...) {
            nb_func_convert_cpp_exception();
        }
    } else {
        error_handler = nb_func_error_overload;
    }

    if (NB_UNLIKELY(error_handler))
        result = error_handler(self, args_in, nargs_in, kwargs_in);

    return result;
}
/// Simplified nb_func_vectorcall variant for non-overloaded functions with 1 arg.
/// Selected at binding time for single-overload, single-argument functions;
/// keyword arguments and a 'None' argument fall back to the error path.
static PyObject *nb_func_vectorcall_simple_1(PyObject *self,
                                             PyObject *const *args_in,
                                             size_t nargsf,
                                             PyObject *kwargs_in) noexcept {
    func_data *fr = nb_func_data(self);
    const size_t nargs_in = (size_t) PyVectorcall_NARGS(nargsf);
    bool is_constructor = fr->flags & (uint32_t) func_flags::is_constructor;

    // Handler routine that will be invoked in case of an error condition
    PyObject *(*error_handler)(PyObject *, PyObject *const *, size_t,
                               PyObject *) noexcept = nullptr;
    PyObject *result = nullptr;

    if (kwargs_in == nullptr && nargs_in == 1 && args_in[0] != Py_None) {
        PyObject *arg = args_in[0];

        // Temporaries created during implicit conversion
        cleanup_list cleanup(arg);

        // Flag bit 1 permits implicit conversion (single pass here);
        // constructors additionally mark the in-place construct flag
        uint8_t args_flags[1] = {
            (uint8_t) (is_constructor ? (1 | (uint8_t) cast_flags::construct) : 1)
        };

        try {
            result = fr->impl((void *) fr->capture, (PyObject **) args_in,
                              args_flags, (rv_policy) (fr->flags & 0b111), &cleanup);

            if (result == NB_NEXT_OVERLOAD) {
                error_handler = nb_func_error_overload;
            } else if (!result) {
                error_handler = nb_func_error_noconvert;
            } else if (is_constructor) {
                // Mark the 'self' instance as fully constructed
                nb_inst *arg_nb = (nb_inst *) arg;
                arg_nb->destruct = true;
                arg_nb->state = nb_inst::state_ready;

                if (NB_UNLIKELY(arg_nb->intrusive))
                    nb_type_data(Py_TYPE(arg))
                        ->set_self_py(inst_ptr(arg_nb), arg);
            }
        } catch (builtin_exception &e) {
            if (!set_builtin_exception_status(e))
                error_handler = nb_func_error_overload;
        } catch (python_error &e) {
            e.restore();
        } catch (...) {
            nb_func_convert_cpp_exception();
        }

        if (NB_UNLIKELY(cleanup.used()))
            cleanup.release();
    } else {
        error_handler = nb_func_error_overload;
    }

    if (NB_UNLIKELY(error_handler))
        result = error_handler(self, args_in, nargs_in, kwargs_in);

    return result;
}
/// Vectorcall entry point for bound methods: prepends the stored 'self'
/// instance to the argument vector and forwards the call to the underlying
/// nb_func. When the caller grants PY_VECTORCALL_ARGUMENTS_OFFSET, the slot
/// in front of 'args_in' is borrowed in place (and restored afterwards);
/// otherwise a fresh argument vector is built on the stack or heap.
static PyObject *nb_bound_method_vectorcall(PyObject *self,
                                            PyObject *const *args_in,
                                            size_t nargsf,
                                            PyObject *kwargs_in) noexcept {
    nb_bound_method *mb = (nb_bound_method *) self;
    size_t nargs = (size_t) PyVectorcall_NARGS(nargsf);

    const size_t buf_size = 5;
    PyObject **args, *args_buf[buf_size], *temp = nullptr, *result;
    bool alloc = false;

    if (NB_LIKELY(nargsf & PY_VECTORCALL_ARGUMENTS_OFFSET)) {
        // Caller allows us to temporarily reuse the slot before args_in;
        // remember its old contents so it can be restored below
        args = (PyObject **) (args_in - 1);
        temp = args[0];
    } else {
        // Build a new vector: self + positionals + keyword values
        size_t size = nargs + 1;
        if (kwargs_in)
            size += NB_TUPLE_GET_SIZE(kwargs_in);

        if (size < buf_size) {
            args = args_buf; // small call: use the stack buffer
        } else {
            args = (PyObject **) PyMem_Malloc(size * sizeof(PyObject *));
            if (!args)
                return PyErr_NoMemory();
            alloc = true;
        }

        if (size > 1)
            memcpy(args + 1, args_in, sizeof(PyObject *) * (size - 1));
    }

    args[0] = mb->self;
    result = mb->func->vectorcall((PyObject *) mb->func, args, nargs + 1, kwargs_in);
    // Restore the borrowed slot (no-op writeback of nullptr when we own 'args')
    args[0] = temp;

    if (NB_UNLIKELY(alloc))
        PyMem_Free(args);

    return result;
}
/// Descriptor protocol (__get__) for nb_method: accessing the function
/// through a class returns the function itself, accessing it through an
/// instance returns a freshly created bound-method wrapper.
PyObject *nb_method_descr_get(PyObject *self, PyObject *inst, PyObject *) {
    if (!inst) {
        // Accessed via the class: hand back the plain function
        Py_INCREF(self);
        return self;
    }

    /* Return a bound method. This should be avoidable in most cases via the
       'CALL_METHOD' opcode and vector calls. Pytest rewrites the bytecode
       in a way that breaks this optimization :-/ */
    nb_bound_method *bm =
        PyObject_GC_New(nb_bound_method, internals->nb_bound_method);
    bm->func = (nb_func *) self;
    bm->self = inst;
    bm->vectorcall = nb_bound_method_vectorcall;
    Py_INCREF(self);
    Py_INCREF(inst);
    return (PyObject *) bm;
}
/// Render the function signature of a single function. Callers must hold the
/// 'internals' mutex.
///
/// The signature is appended to the global 'buf'. The rendering is driven by
/// the compact descriptor string f->descr, whose control characters are:
///   '@' .. argument-position/return-position type alternative
///   '{' .. start of an argument (name + type)
///   '}' .. end of an argument (default value, if any)
///   '%' .. placeholder for the next entry of f->descr_types
///   '-' .. followed by '>' marks the start of the return type
/// When 'nb_signature_mode' is set, output follows the typing-stub style used
/// by __nb_signature__ (leading "def ", defaults encoded as '\\N' positional
/// placeholders). Returns the number of such encoded default arguments
/// (always 0 when a manually provided signature is used).
static uint32_t nb_func_render_signature(const func_data *f,
                                         bool nb_signature_mode) noexcept {
    const bool is_method      = f->flags & (uint32_t) func_flags::is_method,
               has_args       = f->flags & (uint32_t) func_flags::has_args,
               has_var_args   = f->flags & (uint32_t) func_flags::has_var_args,
               has_var_kwargs = f->flags & (uint32_t) func_flags::has_var_kwargs,
               has_signature  = f->flags & (uint32_t) func_flags::has_signature;

    nb_internals *internals_ = internals;

    // A manually provided signature (nb::sig(..)) overrides the rendering
    if (has_signature) {
        const char *s = f->signature;
        if (!nb_signature_mode) {
            // go to last line of manually provided signature, strip away 'def ' prefix
            const char *p = strrchr(s, '\n');
            s = p ? (p + 1) : s;
            if (strncmp(s, "def ", 4) == 0)
                s += 4;
        }
        buf.put_dstr(s);
        return 0;
    }

    if (nb_signature_mode)
        buf.put("def ");

    const std::type_info **descr_type = f->descr_types;

    // 'rv' flips to true once the '->' of the return type was seen
    bool rv = false;
    uint32_t arg_index = 0, n_default_args = 0;
    buf.put_dstr(f->name);

    for (const char *pc = f->descr; *pc != '\0'; ++pc) {
        char c = *pc;

        switch (c) {
            case '@':
                // Handle types that differ depending on whether they appear
                // in an argument or a return value position
                pc++;

                if (!rv) {
                    // Argument position: emit first alternative, skip second
                    while (*pc && *pc != '@')
                        buf.put(*pc++);
                    if (*pc == '@')
                        pc++;
                    while (*pc && *pc != '@')
                        pc++;
                } else {
                    // Return position: skip first alternative, emit second
                    while (*pc && *pc != '@')
                        pc++;
                    if (*pc == '@')
                        pc++;
                    while (*pc && *pc != '@')
                        buf.put(*pc++);
                }
                break;

            case '{':
                {
                    const char *arg_name = has_args ? f->args[arg_index].name : nullptr;

                    // Argument name
                    if (has_var_kwargs && arg_index + 1 == f->nargs) {
                        buf.put("**");
                        buf.put_dstr(arg_name ? arg_name : "kwargs");
                        pc += 4; // strlen("dict")
                        break;
                    }

                    if (arg_index == f->nargs_pos) {
                        buf.put("*");
                        if (has_var_args) {
                            buf.put_dstr(arg_name ? arg_name : "args");
                            pc += 5; // strlen("tuple")
                            break;
                        } else {
                            buf.put(", ");
                            // fall through to render the first keyword-only arg
                        }
                    }

                    if (is_method && arg_index == 0) {
                        buf.put("self");

                        // Skip over type
                        while (*pc != '}') {
                            if (*pc == '%')
                                descr_type++;
                            pc++;
                        }
                        arg_index++;
                        continue;
                    } else if (arg_name) {
                        buf.put_dstr(arg_name);
                    } else {
                        // Unnamed parameters are rendered as 'arg', 'arg1', ...
                        buf.put("arg");
                        if (f->nargs > 1 + (uint32_t) is_method)
                            buf.put_uint32(arg_index - is_method);
                    }

                    buf.put(": ");
                    if (has_args && f->args[arg_index].flag &
                                        (uint8_t) cast_flags::accepts_none) {
#if PY_VERSION_HEX < 0x030A0000
                        buf.put("typing.Optional[");
#else
                        // See below
#endif
                    }
                }
                break;

            case '}':
                // Default argument
                if (has_args) {
                    // Close the Optional[..] / append '| None' opened at '{'
                    if (f->args[arg_index].flag & (uint8_t) cast_flags::accepts_none) {
#if PY_VERSION_HEX < 0x030A0000
                        buf.put(']');
#else
                        buf.put(" | None");
#endif
                    }

                    if (f->args[arg_index].value) {
                        const arg_data &arg = f->args[arg_index];
                        if (nb_signature_mode) {
                            // Emit '\N' placeholder ('\=N' for custom signature)
                            buf.put(" = \\");
                            if (arg.signature)
                                buf.put('=');
                            buf.put_uint32(n_default_args++);
                        } else if (arg.signature) {
                            buf.put(" = ");
                            buf.put_dstr(arg.signature);
                        } else {
                            PyObject *o = arg.value, *str;
                            {
                                // repr() may call back into Python; release lock
                                unlock_internals guard2(internals_);
                                str = PyObject_Repr(o);
                            }
                            if (str) {
                                Py_ssize_t size = 0;
                                const char *cstr =
                                    PyUnicode_AsUTF8AndSize(str, &size);
                                if (!cstr) {
                                    PyErr_Clear();
                                } else {
                                    buf.put(" = ");
                                    buf.put(cstr, (size_t) size);
                                }
                                Py_DECREF(str);
                            } else {
                                PyErr_Clear();
                            }
                        }
                    }
                }

                arg_index++;

                // No named args: mark the end of positionals with '/'
                if (arg_index == f->nargs_pos && !has_args)
                    buf.put(", /");

                break;

            case '%':
                check(*descr_type,
                      "nb::detail::nb_func_render_signature(): missing type!");

                if (!(is_method && arg_index == 0)) {
                    bool found = false;

                    // Prefer the bound Python type's qualified name, if known
                    auto it = internals_->type_c2p_slow.find(*descr_type);

                    if (it != internals_->type_c2p_slow.end()) {
                        handle th((PyObject *) it->second->type_py);
                        buf.put_dstr((borrow<str>(th.attr("__module__"))).c_str());
                        buf.put('.');
                        buf.put_dstr((borrow<str>(th.attr("__qualname__"))).c_str());
                        found = true;
                    }

                    if (!found) {
                        // Unknown to nanobind: fall back to the demangled
                        // C++ name (quoted in stub mode)
                        if (nb_signature_mode)
                            buf.put('"');
                        char *name = type_name(*descr_type);
                        buf.put_dstr(name);
                        free(name);
                        if (nb_signature_mode)
                            buf.put('"');
                    }
                }

                descr_type++;
                break;

            case '-':
                if (pc[1] == '>')
                    rv = true;
                buf.put(c);
                break;

            default:
                buf.put(c);
                break;
        }
    }

    check(arg_index == f->nargs && !*descr_type,
          "nanobind::detail::nb_func_render_signature(%s): arguments inconsistent.",
          f->name);

    return n_default_args;
}
/// Implements '__name__' access: returns the recorded function name,
/// or an empty string when the binding carries no name.
static PyObject *nb_func_get_name(PyObject *self) {
    func_data *f = nb_func_data(self);
    const bool named = (f->flags & (uint32_t) func_flags::has_name) != 0;
    return PyUnicode_FromString(named ? f->name : "");
}
/// Implements '__qualname__' access: returns "<scope qualname>.<name>" when
/// both a scope and a name were recorded, just the name when the scope lacks
/// a '__qualname__', and None otherwise.
static PyObject *nb_func_get_qualname(PyObject *self) {
    func_data *f = nb_func_data(self);
    if ((f->flags & (uint32_t) func_flags::has_scope) &&
        (f->flags & (uint32_t) func_flags::has_name)) {
        PyObject *scope_name = PyObject_GetAttrString(f->scope, "__qualname__");
        if (scope_name) {
            // Fix: 'scope_name' is a new reference and PyUnicode_FromFormat
            // does not steal it -- release it to avoid a reference leak
            PyObject *result =
                PyUnicode_FromFormat("%U.%s", scope_name, f->name);
            Py_DECREF(scope_name);
            return result;
        } else {
            PyErr_Clear();
            return PyUnicode_FromString(f->name);
        }
    } else {
        Py_INCREF(Py_None);
        return Py_None;
    }
}
/// Implements '__module__' access: queries the enclosing scope, or returns
/// None when the binding has no recorded scope.
static PyObject *nb_func_get_module(PyObject *self) {
    func_data *f = nb_func_data(self);

    if (!(f->flags & (uint32_t) func_flags::has_scope)) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    // Modules expose '__name__'; other scopes (e.g. classes) '__module__'
    const char *attr = PyModule_Check(f->scope) ? "__name__" : "__module__";
    return PyObject_GetAttrString(f->scope, attr);
}
/// Getter for the '__nb_signature__' attribute: returns a tuple with one
/// 3-tuple per overload, containing (signature string in typing-stub form,
/// docstring or None, tuple of encoded default-argument values or None).
/// Returns nullptr with an exception set on allocation failure.
PyObject *nb_func_get_nb_signature(PyObject *self, void *) {
    PyObject *docstr = nullptr, *item = nullptr, *sigstr = nullptr,
             *defaults = nullptr;

    func_data *f = nb_func_data(self);
    uint32_t count = (uint32_t) Py_SIZE(self);
    PyObject *result = PyTuple_New(count);
    if (!result)
        return nullptr;

    for (uint32_t i = 0; i < count; ++i) {
        docstr = item = sigstr = defaults = nullptr;

        const func_data *fi = f + i;

        // With a uniform docstring, only the first overload carries it
        if ((fi->flags & (uint32_t) func_flags::has_doc) &&
            (!((nb_func *) self)->doc_uniform || i == 0)) {
            docstr = PyUnicode_FromString(fi->doc);
        } else {
            docstr = Py_None;
            Py_INCREF(docstr);
        }

        // The buffer 'buf' is protected by 'internals.mutex'
        lock_internals guard(internals);
        buf.clear();

        // Render in stub mode; defaults are encoded as '\N' placeholders
        uint32_t n_default_args = nb_func_render_signature(fi, true);

        item = PyTuple_New(3);
        sigstr = PyUnicode_FromString(buf.get());
        if (n_default_args) {
            defaults = PyTuple_New(n_default_args);
        } else {
            defaults = Py_None;
            Py_INCREF(defaults);
        }

        if (!docstr || !sigstr || !item || !defaults)
            goto fail;

        if (n_default_args) {
            // Collect the default values (or their custom signature strings)
            // in the same order the placeholders were emitted
            size_t pos = 0;
            for (uint32_t j = 0; j < fi->nargs; ++j) {
                const arg_data &arg = fi->args[j];
                PyObject *value = arg.value;
                if (!value)
                    continue;
                if (arg.signature) {
                    value = PyUnicode_FromString(arg.signature);
                    if (!value)
                        goto fail;
                } else {
                    Py_INCREF(value);
                }
                NB_TUPLE_SET_ITEM(defaults, pos, value);
                pos++;
            }
            check(pos == n_default_args,
                  "__nb_signature__: default argument counting inconsistency!");
        }

        NB_TUPLE_SET_ITEM(item, 0, sigstr);
        NB_TUPLE_SET_ITEM(item, 1, docstr);
        NB_TUPLE_SET_ITEM(item, 2, defaults);
        NB_TUPLE_SET_ITEM(result, (Py_ssize_t) i, item);
    }

    return result;

fail:
    // Release whatever was created for the current (failed) iteration;
    // entries already stored in 'result' are freed via Py_DECREF(result)
    Py_XDECREF(docstr);
    Py_XDECREF(sigstr);
    Py_XDECREF(defaults);
    Py_XDECREF(item);
    Py_DECREF(result);
    return nullptr;
}
/// Getter for the '__doc__' attribute: renders the signature of every
/// overload followed by the docstring(s). When all overloads share one
/// docstring ('doc_uniform'), it is emitted once; otherwise each overload
/// is listed with its own numbered signature and docstring.
PyObject *nb_func_get_doc(PyObject *self, void *) {
    func_data *f = nb_func_data(self);
    uint32_t count = (uint32_t) Py_SIZE(self);

    // The buffer 'buf' is protected by 'internals.mutex'
    lock_internals guard(internals);
    buf.clear();

    bool doc_found = false;

    // First, one signature line per overload
    for (uint32_t i = 0; i < count; ++i) {
        const func_data *fi = f + i;
        nb_func_render_signature(fi);
        buf.put('\n');
        doc_found |= (fi->flags & (uint32_t) func_flags::has_doc) != 0;
    }

    if (doc_found) {
        if (((nb_func *) self)->doc_uniform) {
            buf.put('\n');
            buf.put_dstr(f->doc);
            buf.put('\n');
        } else {
            buf.put("\nOverloaded function.\n");
            for (uint32_t i = 0; i < count; ++i) {
                const func_data *fi = f + i;
                buf.put('\n');
                buf.put_uint32(i + 1);
                buf.put(". ``");
                nb_func_render_signature(fi);
                buf.put("``\n\n");

                if (fi->flags & (uint32_t) func_flags::has_doc) {
                    buf.put_dstr(fi->doc);
                    buf.put('\n');
                }
            }
        }
    }

    if (buf.size() > 0) // remove last newline
        buf.rewind(1);

    return PyUnicode_FromString(buf.get());
}
/// Attribute lookup (tp_getattro) for nb_func: a handful of special
/// attributes are synthesized on the fly from the binding metadata;
/// everything else is delegated to the generic lookup.
PyObject *nb_func_getattro(PyObject *self, PyObject *name_) {
    const char *name = PyUnicode_AsUTF8AndSize(name_, nullptr);
    if (!name)
        return nullptr;

    if (strcmp(name, "__module__") == 0)
        return nb_func_get_module(self);
    if (strcmp(name, "__name__") == 0)
        return nb_func_get_name(self);
    if (strcmp(name, "__qualname__") == 0)
        return nb_func_get_qualname(self);
    if (strcmp(name, "__doc__") == 0)
        return nb_func_get_doc(self, nullptr);

    return PyObject_GenericGetAttr(self, name_);
}
/// Attribute lookup (tp_getattro) for bound methods: most attributes are
/// resolved generically and fall back to the wrapped function on failure;
/// '__doc__' and '__module__' skip the generic step entirely.
PyObject *nb_bound_method_getattro(PyObject *self, PyObject *name_) {
    // These attributes do exist on nb_bound_method (because they
    // exist on every type) but we want to take their special handling
    // from nb_func_getattro instead.
    bool forward_directly = false;
    const char *name = PyUnicode_AsUTF8AndSize(name_, nullptr);
    if (name)
        forward_directly = strcmp(name, "__doc__") == 0 ||
                           strcmp(name, "__module__") == 0;

    if (!forward_directly) {
        PyObject *value = PyObject_GenericGetAttr(self, name_);
        if (value)
            return value;
        PyErr_Clear();
    }

    nb_func *fn = ((nb_bound_method *) self)->func;
    return nb_func_getattro((PyObject *) fn, name_);
}
/// Excise every occurrence of the substring 'sub' from 's' (in place).
/// The string shrinks accordingly; the terminating null byte moves with it.
static void strexc(char *s, const char *sub) {
    const size_t sub_len = strlen(sub);
    if (sub_len == 0)
        return; // nothing to remove (also avoids looping forever on "")

    for (char *hit = strstr(s, sub); hit; hit = strstr(hit, sub)) {
        // Shift the tail (including the null terminator) over the match
        memmove(hit, hit + sub_len, strlen(hit + sub_len) + 1);
    }
}
/// Return a readable string representation of a C++ type. The caller owns
/// the returned heap-allocated string and must free() it. On GCC/Clang the
/// mangled name is demangled via the Itanium C++ ABI; on other compilers
/// (MSVC) the raw name is used after stripping the 'class '/'struct '/'enum '
/// prefixes it embeds. The 'nanobind::' namespace prefix is always removed.
NB_NOINLINE char *type_name(const std::type_info *t) {
    const char *name_in = t->name();

#if defined(__GNUG__)
    int status = 0;
    char *name = abi::__cxa_demangle(name_in, nullptr, nullptr, &status);
    if (!name)
        return strdup_check(name_in); // demangling failed; return as-is
#else
    char *name = strdup_check(name_in);
    strexc(name, "class ");
    strexc(name, "struct ");
    strexc(name, "enum ");
#endif
    strexc(name, "nanobind::");
    return name;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
/*
src/internals.cpp: internal libnanobind data structures
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/nanobind.h>
#include <structmember.h>
#include "nb_internals.h"
#include "nb_abi.h"
#include <thread>
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
extern PyObject *nb_func_getattro(PyObject *, PyObject *);
extern PyObject *nb_func_get_doc(PyObject *, void *);
extern PyObject *nb_func_get_nb_signature(PyObject *, void *);
extern PyObject *nb_bound_method_getattro(PyObject *, PyObject *);
extern int nb_func_traverse(PyObject *, visitproc, void *);
extern int nb_func_clear(PyObject *);
extern void nb_func_dealloc(PyObject *);
extern int nb_bound_method_traverse(PyObject *, visitproc, void *);
extern int nb_bound_method_clear(PyObject *);
extern void nb_bound_method_dealloc(PyObject *);
extern PyObject *nb_method_descr_get(PyObject *, PyObject *, PyObject *);
// Slot table for 'nb_meta', the metaclass shared by all nanobind types.
// The Py_tp_base entry is filled in at runtime by init_internals(), which
// points it at PyType_Type before the type object is created.
static PyType_Slot nb_meta_slots[] = {
    { Py_tp_base, nullptr },
    { 0, nullptr }
};

// 'nb_meta' adds no storage of its own and is immutable where supported
static PyType_Spec nb_meta_spec = {
    /* .name = */ "nanobind.nb_meta",
    /* .basicsize = */ 0,
    /* .itemsize = */ 0,
    /* .flags = */ Py_TPFLAGS_DEFAULT |
                   NB_TPFLAGS_IMMUTABLETYPE,
    /* .slots = */ nb_meta_slots
};
// 'nb_func' members: '__vectorcalloffset__' tells CPython where the
// vectorcall function pointer lives within each nb_func instance
static PyMemberDef nb_func_members[] = {
    { "__vectorcalloffset__", T_PYSSIZET,
      (Py_ssize_t) offsetof(nb_func, vectorcall), READONLY, nullptr },
    { nullptr, 0, 0, 0, nullptr }
};

// Computed attributes: '__doc__' and '__nb_signature__' are synthesized
// on demand by the getters declared at the top of this file
static PyGetSetDef nb_func_getset[] = {
    { "__doc__", nb_func_get_doc, nullptr, nullptr, nullptr },
    { "__nb_signature__", nb_func_get_nb_signature, nullptr, nullptr, nullptr },
    { nullptr, nullptr, nullptr, nullptr, nullptr }
};

// Slot table of 'nb_func' (free-standing bound C++ function)
static PyType_Slot nb_func_slots[] = {
    { Py_tp_members, (void *) nb_func_members },
    { Py_tp_getset, (void *) nb_func_getset },
    { Py_tp_getattro, (void *) nb_func_getattro },
    { Py_tp_traverse, (void *) nb_func_traverse },
    { Py_tp_clear, (void *) nb_func_clear },
    { Py_tp_dealloc, (void *) nb_func_dealloc },
    { Py_tp_new, (void *) PyType_GenericNew },
    { Py_tp_call, (void *) PyVectorcall_Call },
    { 0, nullptr }
};

// Variable-sized type: each instance stores one 'func_data' record per
// overload (the 'itemsize'). Participates in GC and supports vectorcall.
static PyType_Spec nb_func_spec = {
    /* .name = */ "nanobind.nb_func",
    /* .basicsize = */ (int) sizeof(nb_func),
    /* .itemsize = */ (int) sizeof(func_data),
    /* .flags = */ Py_TPFLAGS_DEFAULT |
                   Py_TPFLAGS_HAVE_GC |
                   Py_TPFLAGS_HAVE_VECTORCALL |
                   NB_TPFLAGS_IMMUTABLETYPE,
    /* .slots = */ nb_func_slots
};
// Slot table of 'nb_method'. Identical to 'nb_func' except for the added
// Py_tp_descr_get slot (plus the METHOD_DESCRIPTOR flag below), which makes
// attribute access on instances produce bound methods.
static PyType_Slot nb_method_slots[] = {
    { Py_tp_members, (void *) nb_func_members },
    { Py_tp_getset, (void *) nb_func_getset },
    { Py_tp_getattro, (void *) nb_func_getattro },
    { Py_tp_traverse, (void *) nb_func_traverse },
    { Py_tp_clear, (void *) nb_func_clear },
    { Py_tp_dealloc, (void *) nb_func_dealloc },
    { Py_tp_descr_get, (void *) nb_method_descr_get },
    { Py_tp_new, (void *) PyType_GenericNew },
    { Py_tp_call, (void *) PyVectorcall_Call },
    { 0, nullptr }
};

// Same layout as 'nb_func'; one func_data record per overload
static PyType_Spec nb_method_spec = {
    /*.name = */ "nanobind.nb_method",
    /*.basicsize = */ (int) sizeof(nb_func),
    /*.itemsize = */ (int) sizeof(func_data),
    /*.flags = */ Py_TPFLAGS_DEFAULT |
                  Py_TPFLAGS_HAVE_GC |
                  Py_TPFLAGS_METHOD_DESCRIPTOR |
                  Py_TPFLAGS_HAVE_VECTORCALL |
                  NB_TPFLAGS_IMMUTABLETYPE,
    /*.slots = */ nb_method_slots
};
// 'nb_bound_method' members: besides the vectorcall offset, expose the
// standard '__func__' / '__self__' attributes of a bound method
static PyMemberDef nb_bound_method_members[] = {
    { "__vectorcalloffset__", T_PYSSIZET,
      (Py_ssize_t) offsetof(nb_bound_method, vectorcall), READONLY, nullptr },
    { "__func__", T_OBJECT_EX,
      (Py_ssize_t) offsetof(nb_bound_method, func), READONLY, nullptr },
    { "__self__", T_OBJECT_EX,
      (Py_ssize_t) offsetof(nb_bound_method, self), READONLY, nullptr },
    { nullptr, 0, 0, 0, nullptr }
};

// Slot table of 'nb_bound_method' (an nb_method bound to an instance)
static PyType_Slot nb_bound_method_slots[] = {
    { Py_tp_members, (void *) nb_bound_method_members },
    { Py_tp_getattro, (void *) nb_bound_method_getattro },
    { Py_tp_traverse, (void *) nb_bound_method_traverse },
    { Py_tp_clear, (void *) nb_bound_method_clear },
    { Py_tp_dealloc, (void *) nb_bound_method_dealloc },
    { Py_tp_call, (void *) PyVectorcall_Call },
    { 0, nullptr }
};

// Fixed-size (itemsize == 0): the overload records live in the wrapped nb_func
static PyType_Spec nb_bound_method_spec = {
    /* .name = */ "nanobind.nb_bound_method",
    /* .basicsize = */ (int) sizeof(nb_bound_method),
    /* .itemsize = */ 0,
    /* .flags = */ Py_TPFLAGS_DEFAULT |
                   Py_TPFLAGS_HAVE_GC |
                   Py_TPFLAGS_HAVE_VECTORCALL |
                   NB_TPFLAGS_IMMUTABLETYPE,
    /* .slots = */ nb_bound_method_slots
};
/// Default exception translator: maps common C++ standard library
/// exceptions onto their closest Python counterparts. Installed as the
/// tail of the translator chain when the internals are first created.
/// NOTE: the order of the catch clauses is significant -- derived types
/// (e.g. std::overflow_error) must appear before their bases so that the
/// final std::exception handler only catches what nothing else matched.
void default_exception_translator(const std::exception_ptr &p, void *) {
    try {
        std::rethrow_exception(p);
    } catch (const std::bad_alloc &e) {
        PyErr_SetString(PyExc_MemoryError, e.what());
    } catch (const std::domain_error &e) {
        PyErr_SetString(PyExc_ValueError, e.what());
    } catch (const std::invalid_argument &e) {
        PyErr_SetString(PyExc_ValueError, e.what());
    } catch (const std::length_error &e) {
        PyErr_SetString(PyExc_ValueError, e.what());
    } catch (const std::out_of_range &e) {
        PyErr_SetString(PyExc_IndexError, e.what());
    } catch (const std::range_error &e) {
        PyErr_SetString(PyExc_ValueError, e.what());
    } catch (const std::overflow_error &e) {
        PyErr_SetString(PyExc_OverflowError, e.what());
    } catch (const std::exception &e) {
        // Catch-all for remaining std::exception subclasses
        PyErr_SetString(PyExc_RuntimeError, e.what());
    }
}
// Initialized once when the module is loaded, no locking needed
nb_internals *internals = nullptr;

// Cached copy of internals->nb_meta (reset when the shared state is torn down)
PyTypeObject *nb_meta_cache = nullptr;

// Source text of the interned name strings; the order must match the
// pyobj_name::*_str enumerators declared in nb_internals.h
static const char* interned_c_strs[pyobj_name::string_count] {
    "value",
    "copy",
    "clone",
    "array",
    "from_dlpack",
    "__dlpack__",
    "max_version",
    "dl_device",
};

// Per-extension table of frequently used Python constants (interned strings
// and small tuples). Filled by init_pyobjects(); ownership is held by the
// shared lifeline list, hence only cleared (not released) in internals_dec_ref().
PyObject *static_pyobjects[pyobj_name::total_count] = {};
/// Store 'o' in this library's static object table at position 'index' and
/// transfer ownership of the reference to the shared lifeline list
static void new_constant(nb_internals *p, int index, PyObject *o) {
    static_pyobjects[index] = o;
    new_object(p, o);
}
/// Populate this library's static_pyobjects[] (interned strings + tuples)
static void init_pyobjects(nb_internals *p) {
    // Idempotent: entry 0 is non-null once initialization has run
    if (static_pyobjects[0])
        return;

    // Intern the commonly used name strings
    NB_NOUNROLL
    for (int i = 0; i < pyobj_name::string_count; ++i)
        new_constant(p, i, PyUnicode_InternFromString(interned_c_strs[i]));

    // Small tuples built from the interned strings above
    new_constant(p, pyobj_name::copy_tpl,
                 PyTuple_Pack(1, static_pyobjects[pyobj_name::copy_str]));
    new_constant(p, pyobj_name::max_version_tpl,
                 PyTuple_Pack(1, static_pyobjects[pyobj_name::max_version_str]));

    // (1, 0): see dl_cpu_tpl in nb_internals.h (corresponds to nb::device::cpu)
    PyObject *one = PyLong_FromLong(1), *zero = PyLong_FromLong(0);
    new_constant(p, pyobj_name::dl_cpu_tpl, PyTuple_Pack(2, one, zero));
    Py_DECREF(zero);  // PyTuple_Pack took its own references
    Py_DECREF(one);

    // (major, minor) DLPack version tuple
    PyObject *major = PyLong_FromLong(dlpack::major_version),
             *minor = PyLong_FromLong(dlpack::minor_version);
    new_constant(p, pyobj_name::dl_version_tpl, PyTuple_Pack(2, major, minor));
    Py_DECREF(minor);
    Py_DECREF(major);
}
/// Create lifeline + internal types if needed (idempotent)
static void init_internals(nb_internals *p) {
    // The lifeline list doubles as the "already initialized" flag
    if (p->lifeline)
        return;

    // List owning every Python object managed by the shared internals;
    // released in one go when the last user drops its reference
    p->lifeline = PyList_New(0);
    check(p->lifeline, "nanobind::detail::nb_module_exec(): "
                       "could not create lifeline list!");

    str nb_name("nanobind");
    p->nb_module = PyModule_NewObject(nb_name.ptr());
    new_object(p, p->nb_module);

    // Resolve the metaclass base at runtime (slot is nullptr statically)
    nb_meta_slots[0].pfunc = (PyObject *) &PyType_Type;

    p->nb_meta = new_type(p, &nb_meta_spec);
    p->nb_type_dict = PyDict_New();
    new_object(p, p->nb_type_dict);
    p->nb_func = new_type(p, &nb_func_spec);
    p->nb_method = new_type(p, &nb_method_spec);
    p->nb_bound_method = new_type(p, &nb_bound_method_spec);

    check(p->nb_module && p->nb_meta && p->nb_type_dict && p->nb_func &&
              p->nb_method && p->nb_bound_method,
          "nanobind::detail::nb_module_exec(): initialization failed!");

#if defined(Py_LIMITED_API)
    // Stable ABI: cache slots of PyType_Type / PyProperty_Type that cannot
    // be accessed directly through the type object structure
    p->PyType_Type_tp_free = (freefunc) PyType_GetSlot(&PyType_Type, Py_tp_free);
    p->PyType_Type_tp_init = (initproc) PyType_GetSlot(&PyType_Type, Py_tp_init);
    p->PyType_Type_tp_dealloc =
        (destructor) PyType_GetSlot(&PyType_Type, Py_tp_dealloc);
    p->PyType_Type_tp_setattro =
        (setattrofunc) PyType_GetSlot(&PyType_Type, Py_tp_setattro);
    p->PyProperty_Type_tp_descr_get =
        (descrgetfunc) PyType_GetSlot(&PyProperty_Type, Py_tp_descr_get);
    p->PyProperty_Type_tp_descr_set =
        (descrsetfunc) PyType_GetSlot(&PyProperty_Type, Py_tp_descr_set);

    // Create a throwaway subtype only to measure where PyType_FromMetaclass
    // places the extra type data relative to the object's base address
    PyType_Slot dummy_slots[] = {
        { Py_tp_base, &PyType_Type },
        { 0, nullptr }
    };

    PyType_Spec dummy_spec = {
        /* .name = */ "nanobind.dummy",
        /* .basicsize = */ - (int) sizeof(void*),  // negative: request extra space
        /* .itemsize = */ 0,
        /* .flags = */ Py_TPFLAGS_DEFAULT,
        /* .slots = */ dummy_slots
    };

    PyObject *dummy = PyType_FromMetaclass(
        p->nb_meta, p->nb_module, &dummy_spec, nullptr);
    p->type_data_offset =
        (uint8_t *) PyObject_GetTypeData(dummy, p->nb_meta) - (uint8_t *) dummy;
    Py_DECREF(dummy);
#endif
}
/// Acquire one shared reference to the internals (modules/types/functions)
void internals_inc_ref() {
    internals->shared_ref_count.value++;
}
/// Release one shared reference; the last release tears down the shared state
void internals_dec_ref() {
    nb_internals *p = internals;
    auto value = --p->shared_ref_count.value;
    if (value != 0)
        return;

    // Dropping the lifeline releases every Python object owned by the
    // shared internals (module, metaclass, function types, constants, ...)
    Py_CLEAR(p->lifeline);

    // Null out the now-dangling cached pointers
    p->nb_module = nullptr;
    p->nb_meta = nullptr;
    p->nb_type_dict = nullptr;
    p->nb_func = nullptr;
    p->nb_method = nullptr;
    p->nb_bound_method = nullptr;
    p->nb_static_property.store_release(nullptr);
    p->nb_ndarray.store_release(nullptr);
    nb_meta_cache = nullptr;
    for (int i = 0; i < pyobj_name::total_count; ++i)
        static_pyobjects[i] = nullptr;
}
/// Module free hook: each nanobind extension module holds one shared reference
void nb_module_free(void *) {
    internals_dec_ref();
}
// Flag (and a pointer to it, shared via nb_internals::is_alive_ptr) that
// records whether nanobind's internals are usable; cleared by
// internals_cleanup() at interpreter shutdown
static bool is_alive_value = false;
static bool *is_alive_ptr = &is_alive_value;

/// Query whether the internals are still alive (false during/after shutdown)
bool is_alive() noexcept { return *is_alive_ptr; }

/// String identifying the platform/ABI configuration of this build
const char *abi_tag() { return NB_ABI_TAG; }
/// Py_AtExit handler: report leaked instances/keep-alives/types/functions
/// and free the shared internals when no leaks were detected. When leaks
/// are found, the internals are deliberately NOT freed (dangling pointers
/// into them may still exist).
static void internals_cleanup() {
    nb_internals *p = internals;
    if (!p)
        return;

    // From here on, is_alive() reports false
    *is_alive_ptr = false;

#if !defined(PYPY_VERSION) && !defined(NB_FREE_THREADED)
    /* The memory leak checker is unsupported on PyPy,
       see https://foss.heptapod.net/pypy/pypy/-/issues/3855.
       Leak reporting is explicitly disabled on free-threaded builds
       for now because of the decision to immortalize function and
       type objects. This may change in the future. */

    bool print_leak_warnings = p->print_leak_warnings;
    size_t inst_leaks = 0, keep_alive_leaks = 0;

    // Shard locking no longer needed, Py_AtExit is single-threaded
    for (size_t i = 0; i < p->shard_count; ++i) {
        nb_shard &s = p->shards[i];
        inst_leaks += s.inst_c2p.size();
        keep_alive_leaks += s.keep_alive.size();
    }

#ifdef _DEBUG
    // in debug mode, show all leak records
    #define INC_CTR do {} while(0)
#else
    // otherwise show just the first 10 or 20
    #define INC_CTR ctr++
#endif

    bool leak = inst_leaks > 0 || keep_alive_leaks > 0;

    if (print_leak_warnings && inst_leaks > 0) {
        fprintf(stderr, "nanobind: leaked %zu instances!\n", inst_leaks);

#if !defined(Py_LIMITED_API)
        // Enumerate leaked instances (unavailable under the stable ABI,
        // where type_data cannot be reached via a compile-time offset)
        auto print_leak = [](void* k, PyObject* v) {
            type_data *tp = nb_type_data(Py_TYPE(v));
            fprintf(stderr, " - leaked instance %p of type \"%s\"\n", k, tp->name);
        };

        int ctr = 0;
        for (size_t i = 0; i < p->shard_count && ctr < 20; ++i) {
            for (auto [k, v]: p->shards[i].inst_c2p) {
                // An entry may store a single instance or a tagged linked
                // list of instances sharing the same address
                if (NB_UNLIKELY(nb_is_seq(v))) {
                    nb_inst_seq* seq = nb_get_seq(v);
                    for(; seq != nullptr && ctr < 20; seq = seq->next) {
                        print_leak(k, seq->inst);
                        INC_CTR;
                    }
                } else {
                    print_leak(k, (PyObject*)v);
                    INC_CTR;
                }

                if (ctr >= 20)
                    break;
            }
        }
        if (ctr >= 20) {
            fprintf(stderr, " - ... skipped remainder\n");
        }
#endif
    }

    if (print_leak_warnings && keep_alive_leaks > 0)
        fprintf(stderr, "nanobind: leaked %zu keep_alive records!\n",
                keep_alive_leaks);

    // Only report function/type leaks if actual nanobind instances were leaked
#if !defined(NB_ABORT_ON_LEAK)
    if (!leak)
        print_leak_warnings = false;
#endif

    if (!p->type_c2p_slow.empty()) {
        if (print_leak_warnings) {
            fprintf(stderr, "nanobind: leaked %zu types!\n",
                    p->type_c2p_slow.size());
            int ctr = 0;
            for (const auto &kv : p->type_c2p_slow) {
                fprintf(stderr, " - leaked type \"%s\"\n", kv.second->name);
                INC_CTR;
                if (ctr == 10) {
                    fprintf(stderr, " - ... skipped remainder\n");
                    break;
                }
            }
        }
        leak = true;
    }

    if (!p->funcs.empty()) {
        if (print_leak_warnings) {
            fprintf(stderr, "nanobind: leaked %zu functions!\n",
                    p->funcs.size());
            int ctr = 0;
            for (auto [f, p2] : p->funcs) {
                fprintf(stderr, " - leaked function \"%s\"\n",
                        nb_func_data(f)->name);
                INC_CTR;
                if (ctr == 10) {
                    fprintf(stderr, " - ... skipped remainder\n");
                    break;
                }
            }
        }
        leak = true;
    }

    if (!leak) {
        // Clean shutdown: free the translator chain (the head is embedded
        // in nb_internals itself and freed via 'delete p' below)
        nb_translator_seq* t = p->translators.next;
        while (t) {
            nb_translator_seq *next = t->next;
            delete t;
            t = next;
        }

#if defined(NB_FREE_THREADED)
        // This code won't run for now but is kept here for a time when
        // immortalization isn't needed anymore.
        PyThread_tss_delete(p->nb_static_property_disabled);
        PyThread_tss_free(p->nb_static_property_disabled);
        delete[] p->shards;
#endif
        delete p;
        internals = nullptr;
        nb_meta_cache = nullptr;
    } else {
        if (print_leak_warnings) {
            fprintf(stderr, "nanobind: this is likely caused by a reference "
                            "counting issue in the binding code.\n"
                            "See https://nanobind.readthedocs.io/en/latest/refleaks.html");
        }

#if defined(NB_ABORT_ON_LEAK) && !defined(NB_FREE_THREADED)
        abort(); // Extra-strict behavior for the CI server
#endif
    }
#endif
}
/// Module-exec hook run for every nanobind extension. Locates (or creates)
/// the 'nb_internals' instance shared across extensions of the same
/// ABI domain via a capsule stored in the interpreter state dictionary.
NB_NOINLINE void nb_module_exec(const char *name, PyObject *) {
    // Fast path: this shared library already resolved the internals
    if (internals) {
        init_internals(internals);
        init_pyobjects(internals);
        nb_meta_cache = internals->nb_meta;
        internals_inc_ref();
        return;
    }

#if defined(PYPY_VERSION)
    // PyPy lacks PyInterpreterState_GetDict(); fall back to builtins
    PyObject *dict = PyEval_GetBuiltins();
#else
    PyObject *dict = PyInterpreterState_GetDict(PyInterpreterState_Get());
#endif
    check(dict, "nanobind::detail::nb_module_exec(): "
                "could not access internals dictionary!");

    // Key incorporates the ABI tag (and optional domain name) so that
    // incompatible builds never share internals
    PyObject *key = PyUnicode_FromFormat("__nb_internals_%s_%s__",
                                         abi_tag(), name ? name : "");
    check(key, "nanobind::detail::nb_module_exec(): "
               "could not create dictionary key!");

    PyObject *capsule = dict_get_item_ref_or_fail(dict, key);
    if (capsule) {
        // Another extension in this domain already created the internals
        Py_DECREF(key);
        internals = (nb_internals *) PyCapsule_GetPointer(capsule, "nb_internals");
        check(internals, "nanobind::detail::nb_module_exec(): "
                         "capsule pointer is NULL!");
        is_alive_ptr = internals->is_alive_ptr;
        init_internals(internals);
        init_pyobjects(internals);
        nb_meta_cache = internals->nb_meta;
        internals_inc_ref();
        Py_DECREF(capsule);
        return;
    }

    // First extension in this domain: build the shared internals
    nb_internals *p = new nb_internals();

    size_t shard_count = 1;
#if defined(NB_FREE_THREADED)
    // Round up to the next power of two >= hardware concurrency, then
    // double it (shard_mask relies on the power-of-two size)
    size_t hw_concurrency = std::thread::hardware_concurrency();

    while (shard_count < hw_concurrency)
        shard_count *= 2;
    shard_count *= 2;

    p->shards = new nb_shard[shard_count];
    p->shard_mask = shard_count - 1;
#endif
    p->shard_count = shard_count;

    internals = p;
    init_internals(p);
    init_pyobjects(p);
    nb_meta_cache = p->nb_meta;

#if defined(NB_FREE_THREADED)
    p->nb_static_property_disabled = PyThread_tss_alloc();
    PyThread_tss_create(p->nb_static_property_disabled);
#endif

    // Install the built-in translator as the tail of the chain
    p->translators = { default_exception_translator, nullptr, nullptr };

    is_alive_value = true;
    is_alive_ptr = &is_alive_value;
    p->is_alive_ptr = is_alive_ptr;

    internals_inc_ref();

#if PY_VERSION_HEX < 0x030C0000 && !defined(PYPY_VERSION)
    /* The implementation of typing.py on CPython <3.12 tends to introduce
       spurious reference leaks that upset nanobind's leak checker. The
       following band-aid, installs an 'atexit' handler that clears LRU caches
       used in typing.py. To be resilient to potential future changes in
       typing.py, the implementation fails silently if any step goes wrong. For
       context, see https://github.com/python/cpython/issues/98253. */
    const char *str =
        "def cleanup():\n"
        "    try:\n"
        "        import sys\n"
        "        fs = getattr(sys.modules.get('typing'), '_cleanups', None)\n"
        "        if fs is not None:\n"
        "            for f in fs:\n"
        "                f()\n"
        "    except:\n"
        "        pass\n"
        "import atexit\n"
        "atexit.register(cleanup)\n"
        "del atexit, cleanup";

    PyObject *code = Py_CompileString(str, "<internal>", Py_file_input);
    if (code) {
        PyObject *result = PyEval_EvalCode(code, PyEval_GetGlobals(), nullptr);
        if (!result)
            PyErr_Clear();
        Py_XDECREF(result);
        Py_DECREF(code);
    } else {
        PyErr_Clear();
    }
#endif

    if (Py_AtExit(internals_cleanup))
        fprintf(stderr,
                "Warning: could not install the nanobind cleanup handler! This "
                "is needed to check for reference leaks and release remaining "
                "resources at interpreter shutdown (e.g., to avoid leaks being "
                "reported by tools like 'valgrind'). If you are a user of a "
                "python extension library, you can ignore this warning.");

    // Publish the internals to other extensions in this domain
    capsule = PyCapsule_New(p, "nb_internals", nullptr);
    int rv = PyDict_SetItem(dict, key, capsule);
    check(!rv && capsule,
          "nanobind::detail::nb_module_exec(): capsule creation failed!");

    Py_DECREF(capsule);
    Py_DECREF(key);
}
#if defined(NB_COMPACT_ASSERTIONS)
/// Failure handler used by the compact variant of the 'check()' macro:
/// aborts via fail() with a single shared message instead of embedding a
/// per-call-site string, which reduces binary size
NB_NOINLINE void fail_unspecified() noexcept {
#if defined(NB_COMPACT_ASSERTION_MESSAGE)
    fail(NB_COMPACT_ASSERTION_MESSAGE);
#else
    fail("encountered an unrecoverable error condition. Recompile using the"
         " 'Debug' mode to obtain further information about this problem.");
#endif
}
#endif
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_internals.h | C/C++ Header | #pragma once
#if defined(__GNUC__)
// Don't warn about missing fields in PyTypeObject declarations
# pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#elif defined(_MSC_VER)
// Silence warnings that MSVC reports in robin_*.h
# pragma warning(disable: 4127) // conditional expression is constant
# pragma warning(disable: 4324) // structure was padded due to alignment specifier
# pragma warning(disable: 4293) // shift count negative or too big <-- erroneously raised in a constexpr-disabled block
# pragma warning(disable: 4310) // cast truncates constant value <-- erroneously raised in a constexpr-disabled block
#endif
#include <nanobind/nanobind.h>
#include <tsl/robin_map.h>
#if defined(NB_FREE_THREADED)
#include <atomic>
#endif
#include <cstring>
#include <string_view>
#include <functional>
#include "hash.h"
#if TSL_RH_VERSION_MAJOR != 1 || TSL_RH_VERSION_MINOR < 3
# error nanobind depends on tsl::robin_map, in particular version >= 1.3.0, <2.0.0
#endif
#if defined(_MSC_VER)
# define NB_THREAD_LOCAL __declspec(thread)
#else
# define NB_THREAD_LOCAL __thread
#endif
#if PY_VERSION_HEX >= 0x030A0000
# define NB_TPFLAGS_IMMUTABLETYPE Py_TPFLAGS_IMMUTABLETYPE
#else
# define NB_TPFLAGS_IMMUTABLETYPE 0
#endif
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
#if defined(NB_COMPACT_ASSERTIONS)
[[noreturn]] extern void fail_unspecified() noexcept;
# define check(cond, ...) if (NB_UNLIKELY(!(cond))) nanobind::detail::fail_unspecified()
#else
# define check(cond, ...) if (NB_UNLIKELY(!(cond))) nanobind::detail::fail(__VA_ARGS__)
#endif
/// Nanobind function metadata (overloads, etc.)
struct func_data : func_data_prelim_base {
    arg_data *args;   // per-argument records
    char *signature;  // signature string
};

/// Python object representing an instance of a bound C++ type
struct nb_inst { // usually: 24 bytes
    PyObject_HEAD

    /// Offset to the actual instance data (relative to this object,
    /// interpreted according to the 'direct' bit below)
    int32_t offset;

    /// State of the C++ object this instance points to: is it constructed?
    /// can we use it?
    uint8_t state : 2;

    // Values for `state`. Note that the numeric values of these are relied upon
    // for an optimization in `nb_type_get()`.
    static constexpr uint32_t state_uninitialized = 0; // not constructed
    static constexpr uint32_t state_relinquished = 1; // owned by C++, don't touch
    static constexpr uint32_t state_ready = 2; // constructed and usable

    /**
     * The variable 'offset' can either encode an offset relative to the
     * nb_inst address that leads to the instance data, or it can encode a
     * relative offset to a pointer that must be dereferenced to get to the
     * instance data. 'direct' is 'true' in the former case.
     */
    uint8_t direct : 1;

    /// Is the instance data co-located with the Python object?
    uint8_t internal : 1;

    /// Should the destructor be called when this instance is GCed?
    uint8_t destruct : 1;

    /// Should nanobind call 'operator delete' when this instance is GCed?
    uint8_t cpp_delete : 1;

    /// Does this instance use intrusive reference counting?
    uint8_t intrusive : 1;

    /// Does this instance hold references to others? (via internals.keep_alive)
    /// This may be accessed concurrently to 'state', so it must not be in
    /// the same bitfield as 'state'.
    uint8_t clear_keep_alive;

    // That's a lot of unused space. I wonder if there is a good use for it..
    uint16_t unused;
};

// Layout check: the fields above must pack into exactly two 32-bit words
static_assert(sizeof(nb_inst) == sizeof(PyObject) + sizeof(uint32_t) * 2);

/// Python object representing a bound C++ function
struct nb_func {
    PyObject_VAR_HEAD
    PyObject* (*vectorcall)(PyObject *, PyObject * const*, size_t, PyObject *);
    uint32_t max_nargs; // maximum value of func_data::nargs for any overload
    bool complex_call;
    bool doc_uniform;
    // One variable-length func_data record per overload follows this header
    // (see nb_func_data() below and the 'itemsize' in nb_func_spec)
};

/// Python object representing a `nb_ndarray` (which wraps a DLPack ndarray)
struct nb_ndarray {
    PyObject_HEAD
    ndarray_handle *th;
};

/// Python object representing an `nb_method` bound to an instance (analogous to non-public PyMethod_Type)
struct nb_bound_method {
    PyObject_HEAD
    PyObject* (*vectorcall)(PyObject *, PyObject * const*, size_t, PyObject *);
    nb_func *func;   // the underlying unbound nb_method
    PyObject *self;  // the instance the method is bound to
};
/// Pointers require a good hash function to randomize the mapping to buckets
struct ptr_hash {
    size_t operator()(const void *p) const {
        // fmix32/64 from MurmurHash by Austin Appleby (public domain)
        if constexpr (sizeof(void *) == 4)
            return (size_t) fmix32((uint32_t) (uintptr_t) p);
        else
            return (size_t) fmix64((uint64_t) (uintptr_t) p);
    }
};

// Minimal allocator definition, contains only the parts needed by tsl::*
// (backed by Python's PyMem allocator; aborts via fail() on OOM)
template <typename T> class py_allocator {
public:
    using value_type = T;
    using pointer = T *;
    using size_type = std::size_t;

    py_allocator() = default;
    py_allocator(const py_allocator &) = default;

    template <typename U> py_allocator(const py_allocator<U> &) { }

    pointer allocate(size_type n, const void * /*hint*/ = nullptr) noexcept {
        void *p = PyMem_Malloc(n * sizeof(T));
        if (!p)
            fail("PyMem_Malloc(): out of memory!");
        return static_cast<pointer>(p);
    }

    void deallocate(T *p, size_type /*n*/) noexcept { PyMem_Free(p); }
};

// Linked list of instances with the same pointer address. Usually just 1.
struct nb_inst_seq {
    PyObject *inst;
    nb_inst_seq *next;
};

// Linked list of type aliases when there are multiple shared libraries with duplicate RTTI data
struct nb_alias_chain {
    const std::type_info *value;
    nb_alias_chain *next;
};

// Weak reference list. Usually, there is just one entry
struct nb_weakref_seq {
    void (*callback)(void *) noexcept;  // invoked with 'payload' (see callers)
    void *payload;
    nb_weakref_seq *next;
};
/// Hash functor for std::type_info pointers. Hashes the *mangled name*
/// rather than the pointer value so that duplicated RTTI records (e.g.
/// from multiple shared libraries) hash identically.
struct std_typeinfo_hash {
    size_t operator()(const std::type_info *a) const {
        std::string_view mangled(a->name(), strlen(a->name()));
        return std::hash<std::string_view>{}(mangled);
    }
};
/// Equality functor matching std_typeinfo_hash: two type_info records are
/// equal when their mangled names agree (identical-pointer fast path first)
struct std_typeinfo_eq {
    bool operator()(const std::type_info *a, const std::type_info *b) const {
        const char *name_a = a->name(), *name_b = b->name();
        if (name_a == name_b) // same string object -> trivially equal
            return true;
        return strcmp(name_a, name_b) == 0;
    }
};
/// A simple pointer-to-pointer map that is reused a few times below (even if
/// not 100% ideal) to avoid template code generation bloat.
using nb_ptr_map = tsl::robin_map<void *, void*, ptr_hash>;

// C++ type -> Python type maps: the fast variant is keyed on the
// std::type_info address, the slow one on its (hashed) mangled name
using nb_type_map_fast = nb_ptr_map;
using nb_type_map_slow = tsl::robin_map<const std::type_info *, type_data *,
                                        std_typeinfo_hash, std_typeinfo_eq>;

/// Convenience functions to deal with the pointer encoding in 'internals.inst_c2p'
/// (bit 0 of the stored value tags a linked 'nb_inst_seq' list)

/// Does this entry store a linked list of instances?
NB_INLINE bool nb_is_seq(void *p) { return ((uintptr_t) p) & 1; }

/// Tag a nb_inst_seq* pointer as such
NB_INLINE void* nb_mark_seq(void *p) { return (void *) (((uintptr_t) p) | 1); }

/// Retrieve the nb_inst_seq* pointer from an 'inst_c2p' value
NB_INLINE nb_inst_seq* nb_get_seq(void *p) { return (nb_inst_seq *) (((uintptr_t) p) ^ 1); }

// Node of the singly linked exception translator chain (new entries are
// prepended; see the 'translators' notes on nb_internals below)
struct nb_translator_seq {
    exception_translator translator;
    void *payload;  // opaque user pointer forwarded to 'translator'
    nb_translator_seq *next = nullptr;
};

// 64-byte alignment for per-shard data in free-threaded builds
// (presumably to keep shards on separate cache lines -- see nb_shard)
#if defined(NB_FREE_THREADED)
#  define NB_SHARD_ALIGNMENT alignas(64)
#else
#  define NB_SHARD_ALIGNMENT
#endif
/**
 * The following data structure stores information associated with individual
 * instances. In free-threaded builds, it is split into multiple shards to avoid
 * lock contention.
 */
struct NB_SHARD_ALIGNMENT nb_shard {
    /**
     * C++ -> Python instance map
     *
     * This associative data structure maps a C++ instance pointer onto its
     * associated PyObject* (if bit 0 of the map value is zero) or a linked
     * list of type `nb_inst_seq*` (if bit 0 is set---it must be cleared before
     * interpreting the pointer in this case).
     *
     * The latter case occurs when several distinct Python objects reference
     * the same memory address (e.g. a struct and its first member).
     */
    nb_ptr_map inst_c2p;

    /// Dictionary storing keep_alive references
    nb_ptr_map keep_alive;

#if defined(NB_FREE_THREADED)
    /// Guards this shard's maps (acquired via the lock_shard RAII helper)
    PyMutex mutex { };
#endif
};
/**
 * Wraps a std::atomic if free-threading is enabled, otherwise a raw value.
 * Both variants expose the same load/store interface so that call sites
 * need no conditional compilation.
 */
#if defined(NB_FREE_THREADED)
template<typename T>
struct nb_maybe_atomic {
    nb_maybe_atomic(T v) : value(v) {}

    std::atomic<T> value;
    T load_acquire() { return value.load(std::memory_order_acquire); }
    T load_relaxed() { return value.load(std::memory_order_relaxed); }
    void store_release(T w) { value.store(w, std::memory_order_release); }
};
#else
template<typename T>
struct nb_maybe_atomic {
    nb_maybe_atomic(T v) : value(v) {}

    // Single-threaded (GIL) build: plain accesses, same interface
    T value;
    T load_acquire() { return value; }
    T load_relaxed() { return value; }
    void store_release(T w) { value = w; }
};
#endif
/**
* `nb_internals` is the central data structure storing information related to
* function/type bindings and instances. Separate nanobind extensions within the
* same NB_DOMAIN furthermore share `nb_internals` to communicate with each
* other, hence any changes here generally require an ABI version bump.
*
* The GIL protects the elements of this data structure from concurrent
* modification. In free-threaded builds, a combination of locking schemes is
* needed to achieve good performance.
*
* In particular, `inst_c2p` and `type_c2p_fast` are very hot and potentially
* accessed several times while dispatching a single function call. The other
* elements are accessed much less frequently and easier to deal with.
*
* The following list clarifies locking semantics for each member.
*
* - `nb_module`, `nb_meta`, `nb_func`, `nb_method`, `nb_bound_method`,
* `*_Type_tp_*`, `shard_count`, `is_alive_ptr`: these are initialized when
* loading the first nanobind extension within a domain, which happens within
* a critical section. They do not require locking.
*
 * - `nb_type_dict`: created when loading the first nanobind extension
* within a domain. While the dictionary itself is protected by its own
* lock, additional locking is needed to avoid races that create redundant
* entries. The `mutex` member is used for this.
*
 * - `nb_static_property` and `nb_static_property_descr_set`: created only once
* on demand, protected by `mutex`.
*
* - `nb_static_property_disabled`: needed to correctly implement assignments to
* static properties. Free-threaded builds store this flag using TLS to avoid
* concurrent modification.
 *
* - `nb_ndarray`: created only once on demand, protected by `mutex`.
*
* - `inst_c2p`: stores the C++ instance to Python object mapping. This
 *   data structure is *hot* and uses a sharded locking scheme to reduce
* lock contention.
*
* - `keep_alive`: stores lifetime dependencies (e.g., from the
* reference_internal return value policy). This data structure is
* potentially hot and shares the sharding scheme of `inst_c2p`.
*
* - `type_c2p_slow`: This is the ground-truth source of the `std::type_info`
* to `type_info *` mapping. Unrelated to free-threading, lookups into this
 *   data structure lookups are generally costly because they use a string comparison on
* some platforms. Because it is only used as a fallback for 'type_c2p_fast',
* protecting this member via the global `mutex` is sufficient.
*
* - `type_c2p_fast`: this data structure is *hot* and mostly read. It maps
* `std::type_info` to `type_info *` but uses pointer-based comparisons.
* The implementation depends on the Python build.
*
* - `translators`: This is an append-to-front-only singly linked list traversed
* while raising exceptions. The main concern is losing elements during
* concurrent append operations. We assume that this data structure is only
* written during module initialization and don't use locking.
*
* - `funcs`: data structure for function leak tracking. Not used in
 *   free-threaded mode.
*
* - `print_leak_warnings`, `print_implicit_cast_warnings`: simple boolean
* flags. No protection against concurrent conflicting updates.
*/
// (Locking semantics of the individual members are described in the large
// comment block preceding this declaration.)
struct nb_internals {
    /// Internal nanobind module
    PyObject *nb_module;

    /// Meta-metaclass of nanobind instances
    PyTypeObject *nb_meta;

    /// Dictionary with nanobind metaclass(es) for different payload sizes
    PyObject *nb_type_dict;

    /// Types of nanobind functions and methods
    PyTypeObject *nb_func, *nb_method, *nb_bound_method;

    /// Property variant for static attributes (created on demand)
    nb_maybe_atomic<PyTypeObject *> nb_static_property = nullptr;
    descrsetfunc nb_static_property_descr_set = nullptr;
#if defined(NB_FREE_THREADED)
    // Stored per-thread (TLS) to avoid concurrent modification
    Py_tss_t *nb_static_property_disabled = nullptr;
#else
    bool nb_static_property_disabled = false;
#endif

    /// N-dimensional array wrapper (created on demand)
    nb_maybe_atomic<PyTypeObject *> nb_ndarray = nullptr;

#if defined(NB_FREE_THREADED)
    nb_shard *shards = nullptr;
    size_t shard_mask = 0;  // shard count is a power of two; mask = count - 1

    // Heuristic shard selection (from pybind11 PR #5148 by @colesbury), uses
    // high pointer bits to group allocations by individual threads/cores.
    inline nb_shard &shard(void *p) {
        uintptr_t highbits = ((uintptr_t) p) >> 20;
        size_t index = ((size_t) fmix64((uint64_t) highbits)) & shard_mask;
        return shards[index];
    }
#else
    nb_shard shards[1];
    inline nb_shard &shard(void *) { return shards[0]; }
#endif

#if !defined(NB_FREE_THREADED)
    /// C++ -> Python type map -- fast version based on std::type_info pointer equality
    nb_type_map_fast type_c2p_fast;
#endif

    /// C++ -> Python type map -- slow fallback version based on hashed strings
    nb_type_map_slow type_c2p_slow;

#if !defined(NB_FREE_THREADED)
    /// nb_func/meth instance map for leak reporting (used as set, the value is unused)
    /// In free-threaded mode, functions are immortal and don't require this data structure.
    nb_ptr_map funcs;
#endif

    /// Registered C++ -> Python exception translators (head of the chain;
    /// additional entries are heap-allocated and prepended)
    nb_translator_seq translators;

    /// Should nanobind print leak warnings on exit?
    bool print_leak_warnings = true;

    /// Should nanobind print warnings after implicit cast failures?
    bool print_implicit_cast_warnings = true;

    /// Pointer to a boolean that denotes if nanobind is fully initialized.
    bool *is_alive_ptr = nullptr;

#if defined(Py_LIMITED_API)
    // Cache important functions from PyType_Type and PyProperty_Type
    // (resolved once via PyType_GetSlot in init_internals)
    freefunc PyType_Type_tp_free;
    initproc PyType_Type_tp_init;
    destructor PyType_Type_tp_dealloc;
    setattrofunc PyType_Type_tp_setattro;
    descrgetfunc PyProperty_Type_tp_descr_get;
    descrsetfunc PyProperty_Type_tp_descr_set;
    // Byte offset from a type object's base address to its nanobind type_data
    size_t type_data_offset;
#endif

#if defined(NB_FREE_THREADED)
    /// Coarse lock for the rarely-modified shared members (see comment above)
    PyMutex mutex { };
#endif

    // Size of the 'shards' data structure. Only rarely accessed, hence at the end
    size_t shard_count = 1;

    /// Reference count tracking modules + types + functions using shared state
    nb_maybe_atomic<uint32_t> shared_ref_count = 0;

    /// PyList keeping managed PyObjects alive. Cleared when shared_ref_count
    /// reaches 0.
    PyObject *lifeline = nullptr;
};
// Names for the PyObject* entries in the per-module state array
// (static_pyobjects[]). These names are scoped, but will implicitly
// convert to int.
struct pyobj_name {
    enum : int {
        value_str = 0,           // interned string "value"
        copy_str,                // interned string "copy"
        clone_str,               // interned string "clone"
        array_str,               // interned string "array"
        from_dlpack_str,         // interned string "from_dlpack"
        dunder_dlpack_str,       // interned string "__dlpack__"
        max_version_str,         // interned string "max_version"
        dl_device_str,           // interned string "dl_device"
        string_count,            // number of interned-string entries above

        copy_tpl = string_count, // 1-tuple ("copy",)
        max_version_tpl,         // 1-tuple ("max_version",)
        dl_cpu_tpl,              // tuple (1, 0), which corresponds to nb::device::cpu
        dl_version_tpl,          // tuple (dlpack::major_version, dlpack::minor_version)
        total_count              // total size of static_pyobjects[]
    };
};
extern PyObject *static_pyobjects[];
extern void internals_inc_ref();
extern void internals_dec_ref();
/// Append 'o' to the lifeline and transfer ownership to it (the caller's
/// reference is consumed).
/// NOTE(review): a PyList_Append failure is silently ignored here, in which
/// case 'o' is freed immediately -- callers appear to validate the stored
/// pointers separately; confirm before relying on the entry.
inline void new_object(nb_internals *p, PyObject *o) {
    PyList_Append(p->lifeline, o);
    Py_DECREF(o);
}

/// Create a type via PyType_FromSpec and transfer ownership to the lifeline.
/// Returns the type (kept alive by the lifeline) or nullptr on error.
inline PyTypeObject *new_type(nb_internals *p, PyType_Spec *spec) {
    PyTypeObject *tp = (PyTypeObject *) PyType_FromSpec(spec);
    if (tp)
        new_object(p, (PyObject *) tp);
    return tp;
}
/// Convenience macro to potentially access cached functions
#if defined(Py_LIMITED_API)
# define NB_SLOT(type, name) internals->type##_##name
#else
# define NB_SLOT(type, name) type.name
#endif
extern nb_internals *internals;
extern PyTypeObject *nb_meta_cache;
extern char *type_name(const std::type_info *t);
// Forward declarations
extern PyObject *inst_new_ext(PyTypeObject *tp, void *value);
extern PyObject *inst_new_int(PyTypeObject *tp, PyObject *args, PyObject *kwds);
extern PyTypeObject *nb_static_property_tp() noexcept;
extern type_data *nb_type_c2p(nb_internals *internals,
const std::type_info *type);
extern void nb_type_unregister(type_data *t) noexcept;
extern PyObject *call_one_arg(PyObject *fn, PyObject *arg) noexcept;
/// Fetch the nanobind function record from a 'nb_func' instance
NB_INLINE func_data *nb_func_data(void *o) {
return (func_data *) (((char *) o) + sizeof(nb_func));
}
#if defined(Py_LIMITED_API)
extern type_data *nb_type_data_static(PyTypeObject *o) noexcept;
#endif
/// Fetch the nanobind type record from a 'nb_type' instance.
/// Regular builds place the record directly behind the PyHeapTypeObject;
/// limited-API builds cannot rely on that layout and use a lookup instead.
NB_INLINE type_data *nb_type_data(PyTypeObject *o) noexcept{
#if !defined(Py_LIMITED_API)
    return (type_data *) (((char *) o) + sizeof(PyHeapTypeObject));
#else
    return nb_type_data_static(o);
#endif
}
/// Obtain a pointer to the C++ payload wrapped by 'self'.
/// 'offset' locates the storage relative to the nb_inst header; when the
/// instance is not stored directly ('direct' unset), that slot holds a
/// pointer which must be dereferenced once more.
inline void *inst_ptr(nb_inst *self) {
    void *ptr = (void *) ((intptr_t) self + self->offset);
    return self->direct ? ptr : *(void **) ptr;
}
/// RAII wrapper around PyMem_Malloc/PyMem_Free holding storage for 'size'
/// objects of type T plus optional trailing bytes. Allocation failure
/// terminates via fail(); release() detaches the buffer from RAII control.
template <typename T> struct scoped_pymalloc {
    scoped_pymalloc(size_t size = 1, size_t extra_bytes = 0) {
        // Tip: construct objects in the extra bytes using placement new.
        ptr = (T *) PyMem_Malloc(size * sizeof(T) + extra_bytes);
        if (!ptr)
            fail("scoped_pymalloc(): could not allocate %llu bytes of memory!",
                 (unsigned long long) (size * sizeof(T) + extra_bytes));
    }
    ~scoped_pymalloc() { PyMem_Free(ptr); }
    /// Transfer ownership of the buffer to the caller
    T *release() {
        T *temp = ptr;
        ptr = nullptr;
        return temp;
    }
    T *get() const { return ptr; }
    T &operator[](size_t i) { return ptr[i]; }
    T *operator->() { return ptr; }
private:
    T *ptr{ nullptr };
};
/// RAII lock/unlock guards for free-threaded builds
#if defined(NB_FREE_THREADED)
/// Lock a per-shard mutex for the lifetime of the guard
struct lock_shard {
    nb_shard &s;
    lock_shard(nb_shard &s) : s(s) { PyMutex_Lock(&s.mutex); }
    ~lock_shard() { PyMutex_Unlock(&s.mutex); }
};
/// Lock the global internals mutex for the lifetime of the guard
struct lock_internals {
    nb_internals *i;
    lock_internals(nb_internals *i) : i(i) { PyMutex_Lock(&i->mutex); }
    ~lock_internals() { PyMutex_Unlock(&i->mutex); }
};
/// Temporarily release the internals mutex (re-acquired on destruction)
struct unlock_internals {
    nb_internals *i;
    unlock_internals(nb_internals *i) : i(i) { PyMutex_Unlock(&i->mutex); }
    ~unlock_internals() { PyMutex_Lock(&i->mutex); }
};
#else
// Non-free-threaded builds rely on the GIL for mutual exclusion; these
// guards keep the same interface but compile down to no-ops.
struct lock_shard { lock_shard(nb_shard &) { } };
struct lock_internals { lock_internals(nb_internals *) { } };
struct unlock_internals { unlock_internals(nb_internals *) { } };
struct lock_obj { lock_obj(PyObject *) { } };
#endif
extern char *strdup_check(const char *);
extern void *malloc_check(size_t size);
extern char *extract_name(const char *cmd, const char *prefix, const char *s);
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_ndarray.cpp | C++ | #include <nanobind/ndarray.h>
#include <atomic>
#include <memory>
#include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(dlpack)
/// Indicates the managed_dltensor_versioned is read only.
static constexpr uint64_t flag_bitmask_read_only = 1UL << 0;
/// DLPack ABI version pair exchanged through managed_dltensor_versioned
struct version {
    uint32_t major;
    uint32_t minor;
};
NAMESPACE_END(dlpack)
// ========================================================================
NAMESPACE_BEGIN(detail)
// DLPack version 0, deprecated Feb 2024, obsoleted March 2025
struct managed_dltensor {
    dlpack::dltensor dltensor;
    void *manager_ctx;                    // opaque context owned by producer
    void (*deleter)(managed_dltensor *);  // invoked by the consumer when done
};
// DLPack version 1, pre-release Feb 2024, release Sep 2024
struct managed_dltensor_versioned {
    dlpack::version version;  // ABI version advertised by the producer
    void *manager_ctx;        // opaque context owned by the producer
    void (*deleter)(managed_dltensor_versioned *);
    uint64_t flags = 0UL;     // bit 0: read-only (flag_bitmask_read_only)
    dlpack::dltensor dltensor;
};
/// Deleter for tensors created via the Python buffer protocol: releases the
/// Py_buffer view stored in manager_ctx, then frees both the view and the
/// managed tensor (whose single allocation also holds shape/strides).
static void mt_from_buffer_delete(managed_dltensor_versioned* self) {
    gil_scoped_acquire guard;  // PyBuffer_Release touches Python state
    Py_buffer *buf = (Py_buffer *) self->manager_ctx;
    PyBuffer_Release(buf);
    PyMem_Free(buf);
    PyMem_Free(self); // This also frees shape and size arrays.
}
// Forward declaration
struct ndarray_handle;
/// Deleter for managed tensors whose manager_ctx is an ndarray_handle:
/// frees the tensor record and drops the handle reference that was taken
/// when the capsule was created.
template<typename MT>
static void mt_from_handle_delete(MT* self) {
    gil_scoped_acquire guard;
    ndarray_handle* th = (ndarray_handle *) self->manager_ctx;
    PyMem_Free(self);
    ndarray_dec_ref(th);
}
/// Capsule destructor for "dltensor" / "dltensor_versioned" capsules.
/// If the capsule still carries its original name, the tensor was never
/// consumed and its deleter must run; a renamed ("used_*") capsule has
/// already transferred ownership, so the name-lookup error is discarded.
template<bool versioned>
static void capsule_delete(PyObject *capsule) {
    using MT = std::conditional_t<versioned, managed_dltensor_versioned,
                                  managed_dltensor>;
    const char *expected_name = versioned ? "dltensor_versioned"
                                          : "dltensor";
    error_scope scope; // temporarily save any existing errors
    void *payload = PyCapsule_GetPointer(capsule, expected_name);
    if (!payload) {
        // Name mismatch: capsule was consumed; drop the lookup error.
        PyErr_Clear();
        return;
    }
    MT *record = (MT *) payload;
    record->deleter(record);
}
// Reference-counted wrapper for versioned or unversioned managed tensors
struct ndarray_handle {
    // Exactly one union member is active; 'versioned' below is the tag.
    union {
        managed_dltensor *mt_unversioned;
        managed_dltensor_versioned *mt_versioned;
    };
    std::atomic<size_t> refcount;
    PyObject *owner, *self;  // optional owner object / original source object
    bool versioned; // This tags which union member is active.
    bool free_strides; // True if we added strides to an imported tensor.
    bool call_deleter; // True if tensor was imported, else PyMem_Free(mt).
    bool ro; // Whether tensor is read-only.
    /// Wrap the tensor in a "dltensor" (DLPack v0) capsule. The stored
    /// record is reused when it is already unversioned and owned by this
    /// handle; otherwise the dltensor is copied into a fresh record that
    /// keeps a reference to this handle. Increments 'refcount'.
    PyObject* make_capsule_unversioned() {
        PyObject* capsule;
        if (!versioned && mt_unversioned->manager_ctx == this) {
            capsule = PyCapsule_New(mt_unversioned, "dltensor",
                                    capsule_delete</*versioned=*/false>);
        } else {
            scoped_pymalloc<managed_dltensor> mt;
            memcpy(&mt->dltensor,
                   (versioned) ? &mt_versioned->dltensor
                               : &mt_unversioned->dltensor,
                   sizeof(dlpack::dltensor));
            mt->manager_ctx = this;
            mt->deleter = mt_from_handle_delete<managed_dltensor>;
            capsule = PyCapsule_New(mt.release(), "dltensor",
                                    capsule_delete</*versioned=*/false>);
        }
        check(capsule, "Could not make unversioned capsule");
        refcount++;
        return capsule;
    }
    /// Same as above, but produces a "dltensor_versioned" (DLPack v1)
    /// capsule and propagates the read-only flag.
    PyObject* make_capsule_versioned() {
        PyObject* capsule;
        if (versioned && mt_versioned->manager_ctx == this) {
            capsule = PyCapsule_New(mt_versioned, "dltensor_versioned",
                                    capsule_delete</*versioned=*/true>);
        } else {
            scoped_pymalloc<managed_dltensor_versioned> mt;
            mt->version = {dlpack::major_version, dlpack::minor_version};
            mt->manager_ctx = this;
            mt->deleter = mt_from_handle_delete<managed_dltensor_versioned>;
            mt->flags = (ro) ? dlpack::flag_bitmask_read_only : 0;
            memcpy(&mt->dltensor,
                   (versioned) ? &mt_versioned->dltensor
                               : &mt_unversioned->dltensor,
                   sizeof(dlpack::dltensor));
            capsule = PyCapsule_New(mt.release(), "dltensor_versioned",
                                    capsule_delete</*versioned=*/true>);
        }
        check(capsule, "Could not make versioned capsule");
        refcount++;
        return capsule;
    }
};
// ========================================================================
/// tp_dealloc for nanobind.nb_ndarray: drop the tensor-handle reference,
/// free the instance, and release the reference it held on its heap type.
static void nb_ndarray_dealloc(PyObject *self) {
    PyTypeObject *tp = Py_TYPE(self);
    ndarray_dec_ref(((nb_ndarray *) self)->th);
    PyObject_Free(self);
    Py_DECREF(tp);
}
/// bf_getbuffer for nanobind.nb_ndarray: expose the wrapped DLPack tensor
/// via the Python buffer protocol. Only CPU tensors with a single-lane
/// dtype that maps onto a struct-module format code are supported; other
/// requests raise BufferError. Shape/strides for the view are allocated in
/// one PyMem block stashed in view->internal (freed in releasebuffer).
static int nb_ndarray_getbuffer(PyObject *self, Py_buffer *view, int) {
    ndarray_handle *th = ((nb_ndarray *) self)->th;
    dlpack::dltensor &t = (th->versioned) ? th->mt_versioned->dltensor
                                          : th->mt_unversioned->dltensor;
    if (t.device.device_type != device::cpu::value) {
        PyErr_SetString(PyExc_BufferError, "Only CPU-allocated ndarrays can be "
                                           "accessed via the buffer protocol!");
        return -1;
    }
    // Map the DLPack dtype onto a struct-module format character
    const char *format = nullptr;
    switch ((dlpack::dtype_code) t.dtype.code) {
        case dlpack::dtype_code::Int:
            switch (t.dtype.bits) {
                case 8: format = "b"; break;
                case 16: format = "h"; break;
                case 32: format = "i"; break;
                case 64: format = "q"; break;
            }
            break;
        case dlpack::dtype_code::UInt:
            switch (t.dtype.bits) {
                case 8: format = "B"; break;
                case 16: format = "H"; break;
                case 32: format = "I"; break;
                case 64: format = "Q"; break;
            }
            break;
        case dlpack::dtype_code::Float:
            switch (t.dtype.bits) {
                case 16: format = "e"; break;
                case 32: format = "f"; break;
                case 64: format = "d"; break;
            }
            break;
        case dlpack::dtype_code::Complex:
            switch (t.dtype.bits) {
                case 64: format = "Zf"; break;
                case 128: format = "Zd"; break;
            }
            break;
        case dlpack::dtype_code::Bool:
            format = "?";
            break;
        default:
            break;
    }
    if (!format || t.dtype.lanes != 1) {
        PyErr_SetString(PyExc_BufferError,
                        "Cannot convert DLPack dtype into buffer protocol format!");
        return -1;
    }
    view->buf = (void *) ((uintptr_t) t.data + t.byte_offset);
    view->obj = self;
    Py_INCREF(self);
    // Single allocation: shape followed by strides, freed in releasebuffer
    scoped_pymalloc<Py_ssize_t> shape_and_strides(2 * (size_t) t.ndim);
    Py_ssize_t* shape = shape_and_strides.get();
    Py_ssize_t* strides = shape + t.ndim;
    const Py_ssize_t itemsize = t.dtype.bits / 8;
    Py_ssize_t len = itemsize;
    for (size_t i = 0; i < (size_t) t.ndim; ++i) {
        len *= (Py_ssize_t) t.shape[i];
        shape[i] = (Py_ssize_t) t.shape[i];
        // DLPack strides are in elements; the buffer protocol uses bytes
        strides[i] = (Py_ssize_t) t.strides[i] * itemsize;
    }
    view->len = len;
    view->itemsize = itemsize;
    view->readonly = th->ro;
    view->ndim = t.ndim;
    view->format = (char *) format;
    view->shape = shape;
    view->strides = strides;
    view->suboffsets = nullptr;
    view->internal = shape_and_strides.release();
    return 0;
}
/// bf_releasebuffer: free the shape/strides block allocated in getbuffer
static void nb_ndarray_releasebuffer(PyObject *, Py_buffer *view) {
    PyMem_Free(view->internal);
}
// This function implements __dlpack__() for a nanobind.nb_ndarray.
// Keyword-only interface per the array API standard: 'max_version'
// selects between v0 ("dltensor") and v1 ("dltensor_versioned") capsules;
// 'dl_device', 'copy', and 'stream' handling beyond this is not performed
// here ('dl_device'/'copy' are explicitly ignored).
static PyObject *nb_ndarray_dlpack(PyObject *self, PyObject *const *args,
                                   Py_ssize_t nargsf, PyObject *kwnames) {
    if (PyVectorcall_NARGS(nargsf) != 0) {
        PyErr_SetString(PyExc_TypeError,
                        "__dlpack__() does not accept positional arguments");
        return nullptr;
    }
    Py_ssize_t nkwargs = (kwnames) ? NB_TUPLE_GET_SIZE(kwnames) : 0;
    long max_major_version = 0;
    for (Py_ssize_t i = 0; i < nkwargs; ++i) {
        PyObject* key = NB_TUPLE_GET_ITEM(kwnames, i);
        if (key == static_pyobjects[pyobj_name::dl_device_str] ||
            key == static_pyobjects[pyobj_name::copy_str])
            // These keyword arguments are ignored. This branch of the code
            // is here to avoid a Python call to RichCompare if these kwargs
            // are provided by the caller.
            continue;
        // Pointer comparison first (interned strings), then slow path
        if (key == static_pyobjects[pyobj_name::max_version_str] ||
            PyObject_RichCompareBool(key,
                static_pyobjects[pyobj_name::max_version_str], Py_EQ) == 1) {
            PyObject* value = args[i];
            if (value == Py_None)
                break;
            if (!PyTuple_Check(value) || NB_TUPLE_GET_SIZE(value) != 2) {
                PyErr_SetString(PyExc_TypeError,
                                "max_version must be None or tuple[int, int]");
                return nullptr;
            }
            max_major_version = PyLong_AsLong(NB_TUPLE_GET_ITEM(value, 0));
            break;
        }
    }
    ndarray_handle *th = ((nb_ndarray *) self)->th;
    PyObject *capsule;
    // Produce a versioned capsule only if the consumer can handle it
    if (max_major_version >= dlpack::major_version)
        capsule = th->make_capsule_versioned();
    else
        capsule = th->make_capsule_unversioned();
    return capsule;
}
// This function implements __dlpack_device__() for a nanobind.nb_ndarray.
// Returns a (device_type, device_id) tuple; the common CPU case
// (device_type 1, id 0) reuses a cached static tuple.
static PyObject *nb_ndarray_dlpack_device(PyObject *self, PyObject *) {
    ndarray_handle *th = ((nb_ndarray *) self)->th;
    dlpack::dltensor& t = (th->versioned)
        ? th->mt_versioned->dltensor
        : th->mt_unversioned->dltensor;
    PyObject *r;
    if (t.device.device_type == 1 && t.device.device_id == 0) {
        r = static_pyobjects[pyobj_name::dl_cpu_tpl];
        Py_INCREF(r);
    } else {
        r = PyTuple_New(2);
        PyObject *r0 = PyLong_FromLong(t.device.device_type);
        PyObject *r1 = PyLong_FromLong(t.device.device_id);
        if (!r || !r0 || !r1) {
            Py_XDECREF(r);
            Py_XDECREF(r0);
            Py_XDECREF(r1);
            return nullptr;
        }
        NB_TUPLE_SET_ITEM(r, 0, r0);
        NB_TUPLE_SET_ITEM(r, 1, r1);
    }
    return r;
}
/// Method table exposing the DLPack protocol on nanobind.nb_ndarray
static PyMethodDef nb_ndarray_methods[] = {
    { "__dlpack__", (PyCFunction) (void *) nb_ndarray_dlpack,
      METH_FASTCALL | METH_KEYWORDS, nullptr },
    { "__dlpack_device__", nb_ndarray_dlpack_device, METH_NOARGS, nullptr },
    { nullptr, nullptr, 0, nullptr }
};
/// Return the (lazily created, cached) nanobind.nb_ndarray type.
/// Uses an acquire load for the fast path and re-checks under the
/// internals lock before creating the type so it is built only once.
static PyTypeObject *nb_ndarray_tp() noexcept {
    nb_internals *internals_ = internals;
    PyTypeObject *tp = internals_->nb_ndarray.load_acquire();
    if (NB_UNLIKELY(!tp)) {
        lock_internals guard(internals_);
        tp = internals_->nb_ndarray.load_relaxed();
        if (tp)
            return tp; // another thread created it while we waited
        PyType_Slot slots[] = {
            { Py_tp_dealloc, (void *) nb_ndarray_dealloc },
            { Py_tp_methods, (void *) nb_ndarray_methods },
            { Py_bf_getbuffer, (void *) nb_ndarray_getbuffer },
            { Py_bf_releasebuffer, (void *) nb_ndarray_releasebuffer },
            { 0, nullptr }
        };
        PyType_Spec spec = {
            /* .name = */ "nanobind.nb_ndarray",
            /* .basicsize = */ (int) sizeof(nb_ndarray),
            /* .itemsize = */ 0,
            /* .flags = */ Py_TPFLAGS_DEFAULT,
            /* .slots = */ slots
        };
        tp = new_type(internals_, &spec);
        check(tp, "nb_ndarray type creation failed!");
        internals_->nb_ndarray.store_release(tp);
    }
    return tp;
}
// ========================================================================
using mt_unique_ptr_t = std::unique_ptr<managed_dltensor_versioned,
decltype(&mt_from_buffer_delete)>;
/// Try to construct a versioned managed tensor for 'o' via the Python
/// buffer protocol. Returns an owning pointer that is null on failure;
/// any Python error raised along the way is cleared. 'ro' requests a
/// read-only buffer and sets the DLPack read-only flag on the result.
static mt_unique_ptr_t make_mt_from_buffer_protocol(PyObject *o, bool ro) {
    mt_unique_ptr_t mt_unique_ptr(nullptr, &mt_from_buffer_delete);
    scoped_pymalloc<Py_buffer> view;
    if (PyObject_GetBuffer(o, view.get(),
                           ro ? PyBUF_RECORDS_RO : PyBUF_RECORDS)) {
        PyErr_Clear();
        return mt_unique_ptr;
    }
    // Per the buffer protocol, a null 'format' means unsigned bytes ("B")
    char format_c = 'B';
    const char *format_str = view->format;
    if (format_str)
        format_c = *format_str;
    // Skip a byte-order prefix if it matches the host's native order
    bool skip_first = format_c == '@' || format_c == '=';
    int32_t num = 1;
    if (*(uint8_t *) &num == 1) { // little-endian host
        if (format_c == '<')
            skip_first = true;
    } else {                      // big-endian host
        if (format_c == '!' || format_c == '>')
            skip_first = true;
    }
    if (skip_first && format_str)
        format_c = *++format_str;
    // Bug fix: the original dereferenced 'format_str[0]' without a null
    // check even though 'view->format' may legally be NULL (meaning "B").
    // 'format_c' already holds the current character, or 'B' when the
    // format string is absent, so use it instead.
    bool is_complex = format_c == 'Z';
    if (is_complex)
        format_c = *++format_str; // only reachable when format_str != NULL
    dlpack::dtype dt { };
    // Reject multi-character (struct-like) format strings
    bool fail = format_str && format_str[1] != '\0';
    if (!fail) {
        switch (format_c) {
            case 'c':
            case 'b':
            case 'h':
            case 'i':
            case 'l':
            case 'q':
            case 'n': dt.code = (uint8_t) dlpack::dtype_code::Int; break;
            case 'B':
            case 'H':
            case 'I':
            case 'L':
            case 'Q':
            case 'N': dt.code = (uint8_t) dlpack::dtype_code::UInt; break;
            case 'e':
            case 'f':
            case 'd': dt.code = (uint8_t) dlpack::dtype_code::Float; break;
            case '?': dt.code = (uint8_t) dlpack::dtype_code::Bool; break;
            default: fail = true;
        }
        if (is_complex) {
            // 'Z' must be followed by a floating-point code
            fail |= dt.code != (uint8_t) dlpack::dtype_code::Float;
            dt.code = (uint8_t) dlpack::dtype_code::Complex;
        }
        dt.lanes = 1;
        dt.bits = (uint8_t) (view->itemsize * 8);
    }
    if (fail) {
        PyBuffer_Release(view.get());
        return mt_unique_ptr;
    }
    int32_t ndim = view->ndim;
    // Shape and strides live in trailing storage of the same allocation
    static_assert(alignof(managed_dltensor_versioned) >= alignof(int64_t));
    scoped_pymalloc<managed_dltensor_versioned> mt(1, 2 * sizeof(int64_t)*ndim);
    int64_t* shape = nullptr;
    int64_t* strides = nullptr;
    if (ndim > 0) {
        shape = new ((void*) (mt.get() + 1)) int64_t[2 * ndim];
        strides = shape + ndim;
    }
    /* See comments in function ndarray_create(). */
#if 0
    uintptr_t data_uint = (uintptr_t) view->buf;
    void* data_ptr = (void *) (data_uint & ~uintptr_t{255});
    uint64_t data_offset = data_uint & uintptr_t{255};
#else
    void* data_ptr = view->buf;
    constexpr uint64_t data_offset = 0UL;
#endif
    mt->dltensor.data = data_ptr;
    mt->dltensor.device = { device::cpu::value, 0 };
    mt->dltensor.ndim = ndim;
    mt->dltensor.dtype = dt;
    mt->dltensor.shape = shape;
    mt->dltensor.strides = strides;
    mt->dltensor.byte_offset = data_offset;
    // Convert byte strides (buffer protocol) to element strides (DLPack);
    // reject buffers whose strides are not a multiple of the item size.
    const int64_t itemsize = (int64_t) view->itemsize;
    for (int32_t i = 0; i < ndim; ++i) {
        int64_t stride = view->strides[i] / itemsize;
        if (stride * itemsize != view->strides[i]) {
            PyBuffer_Release(view.get());
            return mt_unique_ptr;
        }
        strides[i] = stride;
        shape[i] = (int64_t) view->shape[i];
    }
    mt->version = {dlpack::major_version, dlpack::minor_version};
    mt->manager_ctx = view.release(); // deleter releases the Py_buffer
    mt->deleter = mt_from_buffer_delete;
    mt->flags = (ro) ? dlpack::flag_bitmask_read_only : 0;
    mt_unique_ptr.reset(mt.release());
    return mt_unique_ptr;
}
/// Heuristically decide whether 'o' can be imported as an ndarray:
/// anything exposing __dlpack__ or the buffer protocol, a raw capsule,
/// or an instance of a known framework tensor type (matched by name).
bool ndarray_check(PyObject *o) noexcept {
    if (PyObject_HasAttr(o, static_pyobjects[pyobj_name::dunder_dlpack_str]) ||
        PyObject_CheckBuffer(o))
        return true;
    PyTypeObject *tp = Py_TYPE(o);
    if (tp == &PyCapsule_Type)
        return true;
    PyObject *name = nb_type_name((PyObject *) tp);
    check(name, "Could not obtain type name! (1)");
    const char *tp_name = PyUnicode_AsUTF8AndSize(name, nullptr);
    check(tp_name, "Could not obtain type name! (2)");
    bool result =
        // PyTorch
        strcmp(tp_name, "torch.Tensor") == 0 ||
        // XLA
        strcmp(tp_name, "jaxlib.xla_extension.ArrayImpl") == 0 ||
        // Tensorflow
        strcmp(tp_name, "tensorflow.python.framework.ops.EagerTensor") == 0 ||
        // Cupy
        strcmp(tp_name, "cupy.ndarray") == 0;
    Py_DECREF(name);
    return result;
}
/// Import a Python object as a DLPack tensor, validating it against the
/// configuration 'c' (dtype/shape/order/device/read-only).
///
/// Acquisition order: an existing capsule, src.__dlpack__() with and then
/// without 'max_version', the buffer protocol, and finally legacy
/// framework-specific to_dlpack() helpers. When 'convert' is set and only
/// dtype/order mismatch, the source framework is asked to cast/reorder the
/// array and the import is retried once (the temporary is parked on
/// 'cleanup' to keep it alive). Returns a new reference-counted handle or
/// nullptr on failure; never throws.
ndarray_handle *ndarray_import(PyObject *src, const ndarray_config *c,
                               bool convert, cleanup_list *cleanup) noexcept {
    object capsule;
    const bool src_is_pycapsule = PyCapsule_CheckExact(src);
    mt_unique_ptr_t mt_unique_ptr(nullptr, &mt_from_buffer_delete);
    if (src_is_pycapsule) {
        capsule = borrow(src);
    } else {
        // Try calling src.__dlpack__()
        PyObject* args[] = {src, static_pyobjects[pyobj_name::dl_version_tpl]};
        Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
        capsule = steal(PyObject_VectorcallMethod(
            static_pyobjects[pyobj_name::dunder_dlpack_str],
            args, nargsf,
            static_pyobjects[pyobj_name::max_version_tpl]));
        // Python array API standard v2023 introduced max_version.
        // Try calling src.__dlpack__() without any kwargs.
        if (!capsule.is_valid() && PyErr_ExceptionMatches(PyExc_TypeError)) {
            PyErr_Clear();
            capsule = steal(PyObject_VectorcallMethod(
                static_pyobjects[pyobj_name::dunder_dlpack_str],
                args, nargsf, nullptr));
        }
        // Try creating an ndarray via the buffer protocol
        if (!capsule.is_valid()) {
            PyErr_Clear();
            mt_unique_ptr = make_mt_from_buffer_protocol(src, c->ro);
        }
        // Try the function to_dlpack(), already obsolete in array API v2021
        if (!mt_unique_ptr && !capsule.is_valid()) {
            PyTypeObject *tp = Py_TYPE(src);
            try {
                const char *module_name =
                    borrow<str>(handle(tp).attr("__module__")).c_str();
                object package;
                if (strncmp(module_name, "tensorflow.", 11) == 0)
                    package = module_::import_("tensorflow.experimental.dlpack");
                else if (strncmp(module_name, "torch", 5) == 0)
                    package = module_::import_("torch.utils.dlpack");
                else if (strncmp(module_name, "jaxlib", 6) == 0)
                    package = module_::import_("jax.dlpack");
                if (package.is_valid())
                    capsule = package.attr("to_dlpack")(handle(src));
            } catch (...) {
                capsule.reset();
            }
            if (!capsule.is_valid())
                return nullptr;
        }
    }
    void* mt; // can be versioned or unversioned
    bool versioned = true;
    if (mt_unique_ptr) {
        mt = mt_unique_ptr.get();
    } else {
        // Extract the managed_dltensor{_versioned} pointer from the capsule.
        mt = PyCapsule_GetPointer(capsule.ptr(), "dltensor_versioned");
        if (!mt) {
            PyErr_Clear();
            versioned = false;
            mt = PyCapsule_GetPointer(capsule.ptr(), "dltensor");
            if (!mt) {
                PyErr_Clear();
                return nullptr;
            }
        }
    }
    dlpack::dltensor& t = (versioned)
        ? ((managed_dltensor_versioned *) mt)->dltensor
        : ((managed_dltensor *) mt)->dltensor;
    uint64_t flags = (versioned) ? ((managed_dltensor_versioned *) mt)->flags
                                 : 0UL;
    // Reject a read-only ndarray if a writable one is required, and
    // reject an ndarray not on the required device.
    if ((!c->ro && (flags & dlpack::flag_bitmask_read_only))
            || (c->device_type != 0 && t.device.device_type != c->device_type)) {
        return nullptr;
    }
    // Check if the ndarray satisfies the remaining requirements.
    bool has_dtype = c->dtype != dlpack::dtype(),
         has_shape = c->ndim != -1,
         has_order = c->order != '\0';
    bool pass_dtype = true, pass_shape = true, pass_order = true;
    if (has_dtype)
        pass_dtype = t.dtype == c->dtype;
    if (has_shape) {
        pass_shape = t.ndim == c->ndim;
        if (pass_shape) {
            // A -1 entry in c->shape acts as a wildcard
            for (int32_t i = 0; i < c->ndim; ++i) {
                if (c->shape[i] != -1 && t.shape[i] != c->shape[i]) {
                    pass_shape = false;
                    break;
                }
            }
        }
    }
    int64_t size = 1;
    for (int32_t i = 0; i < t.ndim; ++i)
        size *= t.shape[i];
    // Tolerate any strides if the array has 1 or fewer elements
    if (pass_shape && has_order && size > 1) {
        char order = c->order;
        bool c_order = order == 'C' || order == 'A',
             f_order = order == 'F' || order == 'A';
        if (!t.strides) {
            /* When the provided tensor does not have a valid
               strides field, it uses the C ordering convention */
            if (c_order) {
                pass_order = true;
            } else {
                // F order requested: a C-contiguous array with at most one
                // dimension of extent > 1 is also F-contiguous
                int nontrivial_dims = 0;
                for (int i = 0; i < t.ndim; ++i)
                    nontrivial_dims += (int) (t.shape[i] > 1);
                pass_order = nontrivial_dims <= 1;
            }
        } else {
            if (c_order) {
                for (int64_t i = t.ndim - 1, accum = 1; i >= 0; --i) {
                    c_order &= t.shape[i] == 1 || t.strides[i] == accum;
                    accum *= t.shape[i];
                }
            }
            if (f_order) {
                for (int64_t i = 0, accum = 1; i < t.ndim; ++i) {
                    f_order &= t.shape[i] == 1 || t.strides[i] == accum;
                    accum *= t.shape[i];
                }
            }
            pass_order = c_order || f_order;
        }
    }
    // Do not convert shape and do not convert complex numbers to non-complex.
    convert &= pass_shape &
        !(t.dtype.code == (uint8_t) dlpack::dtype_code::Complex
              && has_dtype
              && c->dtype.code != (uint8_t) dlpack::dtype_code::Complex);
    // Support implicit conversion of dtype and order.
    if (convert && (!pass_dtype || !pass_order) && !src_is_pycapsule) {
        PyTypeObject *tp = Py_TYPE(src);
        str module_name_o = borrow<str>(handle(tp).attr("__module__"));
        const char *module_name = module_name_o.c_str();
        char order = 'K'; // for NumPy. 'K' means 'keep'
        if (c->order)
            order = c->order;
        dlpack::dtype dt = has_dtype ? c->dtype : t.dtype;
        if (dt.lanes != 1)
            return nullptr;
        // Render the requested dtype as a framework dtype name, e.g. "int32"
        char dtype[11];
        if (dt.code == (uint8_t) dlpack::dtype_code::Bool) {
            std::strcpy(dtype, "bool");
        } else {
            const char *prefix = nullptr;
            switch (dt.code) {
                case (uint8_t) dlpack::dtype_code::Int:
                    prefix = "int";
                    break;
                case (uint8_t) dlpack::dtype_code::UInt:
                    prefix = "uint";
                    break;
                case (uint8_t) dlpack::dtype_code::Float:
                    prefix = "float";
                    break;
                case (uint8_t) dlpack::dtype_code::Bfloat:
                    prefix = "bfloat";
                    break;
                case (uint8_t) dlpack::dtype_code::Complex:
                    prefix = "complex";
                    break;
                default:
                    return nullptr;
            }
            snprintf(dtype, sizeof(dtype), "%s%u", prefix, dt.bits);
        }
        object converted;
        try {
            if (strncmp(module_name, "numpy", 5) == 0
                    || strncmp(module_name, "cupy", 4) == 0) {
                converted = handle(src).attr("astype")(dtype, order);
            } else if (strncmp(module_name, "torch", 5) == 0) {
                module_ torch = module_::import_("torch");
                converted = handle(src).attr("to")(torch.attr(dtype));
                if (c->order == 'C')
                    converted = converted.attr("contiguous")();
            } else if (strncmp(module_name, "tensorflow.", 11) == 0) {
                module_ tensorflow = module_::import_("tensorflow");
                converted = tensorflow.attr("cast")(handle(src), dtype);
            } else if (strncmp(module_name, "jaxlib", 6) == 0) {
                converted = handle(src).attr("astype")(dtype);
            }
        } catch (...) { converted.reset(); }
        // Potentially try once again, recursively (convert=false prevents
        // unbounded recursion)
        if (converted.is_valid()) {
            ndarray_handle *h =
                ndarray_import(converted.ptr(), c, false, nullptr);
            if (h && cleanup)
                cleanup->append(converted.release().ptr());
            return h;
        }
    }
    if (!pass_dtype || !pass_shape || !pass_order)
        return nullptr;
    // Create a reference-counted wrapper
    scoped_pymalloc<ndarray_handle> result;
    if (versioned)
        result->mt_versioned = (managed_dltensor_versioned *) mt;
    else
        result->mt_unversioned = (managed_dltensor *) mt;
    result->refcount = 0;
    result->owner = nullptr;
    result->versioned = versioned;
    result->call_deleter = true;
    result->ro = c->ro;
    if (src_is_pycapsule) {
        result->self = nullptr;
    } else {
        result->self = src;
        Py_INCREF(src);
    }
    // If ndim > 0, ensure that the strides member is initialized.
    if (t.strides || t.ndim == 0) {
        result->free_strides = false;
    } else {
        // Synthesize C-contiguous strides (DLPack allows a null field)
        result->free_strides = true;
        scoped_pymalloc<int64_t> strides((size_t) t.ndim);
        for (int64_t i = t.ndim - 1, accum = 1; i >= 0; --i) {
            strides[i] = accum;
            accum *= t.shape[i];
        }
        t.strides = strides.release();
    }
    if (capsule.is_valid()) {
        // Mark the dltensor capsule as used, i.e., "consumed".
        const char* used_name = (versioned) ? "used_dltensor_versioned"
                                            : "used_dltensor";
        if (PyCapsule_SetName(capsule.ptr(), used_name) ||
            PyCapsule_SetDestructor(capsule.ptr(), nullptr))
            check(false, "ndarray_import(): could not mark capsule as used");
    }
    mt_unique_ptr.release();
    return result.release();
}
/// Increase the reference count of 'th' (tolerates nullptr) and return a
/// pointer to the underlying dltensor, or nullptr when no handle is given.
dlpack::dltensor *ndarray_inc_ref(ndarray_handle *th) noexcept {
    if (th == nullptr)
        return nullptr;
    th->refcount.fetch_add(1);
    dlpack::dltensor *tensor = th->versioned
        ? &th->mt_versioned->dltensor
        : &th->mt_unversioned->dltensor;
    return tensor;
}
/// Decrease the reference count of 'th' (tolerates nullptr). When the
/// count drops to zero, the owner/self references are released and the
/// managed tensor is destroyed — via its deleter if the tensor was
/// imported, otherwise by freeing the PyMem block directly.
void ndarray_dec_ref(ndarray_handle *th) noexcept {
    if (!th)
        return;
    // Post-decrement: 'rc_value' is the count *before* the decrement
    size_t rc_value = th->refcount--;
    if (rc_value == 0) {
        check(false, "ndarray_dec_ref(): reference count became negative!");
    } else if (rc_value == 1) {
        gil_scoped_acquire guard; // Py_XDECREF requires the GIL
        Py_XDECREF(th->owner);
        Py_XDECREF(th->self);
        if (th->versioned) {
            managed_dltensor_versioned *mt = th->mt_versioned;
            if (th->free_strides) {
                // We synthesized the strides array during import
                PyMem_Free(mt->dltensor.strides);
                mt->dltensor.strides = nullptr;
            }
            if (th->call_deleter) {
                if (mt->deleter)
                    mt->deleter(mt);
            } else {
                PyMem_Free(mt); // This also frees shape and size arrays.
            }
        } else {
            managed_dltensor *mt = th->mt_unversioned;
            if (th->free_strides) {
                PyMem_Free(mt->dltensor.strides);
                mt->dltensor.strides = nullptr;
            }
            // Unversioned tensors only arise from import, so a deleter
            // callback is always the right disposal path
            assert(th->call_deleter);
            if (mt->deleter)
                mt->deleter(mt);
        }
        PyMem_Free(th);
    }
}
/// Create a reference-counted handle wrapping caller-provided memory as a
/// versioned DLPack tensor. 'owner' (may be null) is incref'd and kept
/// alive for the lifetime of the handle. When 'strides_in' is null,
/// contiguous strides are synthesized according to 'order' ('C'/'A'/'\0'
/// for row-major, 'F' for column-major). Shape/strides live in trailing
/// storage of the managed-tensor allocation.
ndarray_handle *ndarray_create(void *data, size_t ndim, const size_t *shape_in,
                               PyObject *owner, const int64_t *strides_in,
                               dlpack::dtype dtype, bool ro, int device_type,
                               int device_id, char order) {
    /* DLPack mandates 256-byte alignment of the 'DLTensor::data' field,
       but this requirement is generally ignored. Also, PyTorch has/had
       a bug in ignoring byte_offset and assuming it's zero.
       It would be wrong to split the 64-bit raw pointer into two pieces,
       as disabled below, since the pointer dltensor.data must point to
       allocated memory (i.e., memory that can be accessed).
       A byte_offset can be used to support array slicing when data is an
       opaque device pointer or handle, on which arithmetic is impossible.
       However, this function is not slicing the data.
       See also: https://github.com/data-apis/array-api/discussions/779 */
#if 0
    uintptr_t data_uint = (uintptr_t) data;
    data = (void *) (data_uint & ~uintptr_t{255}); // upper bits
    uint64_t data_offset = data_uint & uintptr_t{255}; // lowest 8 bits
#else
    constexpr uint64_t data_offset = 0UL;
#endif
    if (device_type == 0)
        device_type = device::cpu::value; // default to CPU
    static_assert(alignof(managed_dltensor_versioned) >= alignof(int64_t));
    scoped_pymalloc<managed_dltensor_versioned> mt(1, 2 * sizeof(int64_t)*ndim);
    int64_t* shape = nullptr;
    int64_t* strides = nullptr;
    if (ndim > 0) {
        shape = new ((void*) (mt.get() + 1)) int64_t[2 * ndim];
        strides = shape + ndim;
    }
    for (size_t i = 0; i < ndim; ++i)
        shape[i] = (int64_t) shape_in[i];
    if (ndim > 0) {
        int64_t prod = 1;
        if (strides_in) {
            for (size_t i = 0; i < ndim; ++i)
                strides[i] = strides_in[i];
        } else if (order == 'F') {
            // Column-major: fastest-varying dimension first
            for (size_t i = 0; i < ndim; ++i) {
                strides[i] = prod;
                prod *= (int64_t) shape_in[i];
            }
        } else if (order == '\0' || order == 'A' || order == 'C') {
            // Row-major: fastest-varying dimension last
            for (ssize_t i = (ssize_t) ndim - 1; i >= 0; --i) {
                strides[i] = prod;
                prod *= (int64_t) shape_in[i];
            }
        } else {
            check(false, "ndarray_create(): unknown memory order requested!");
        }
    }
    scoped_pymalloc<ndarray_handle> result;
    mt->version = {dlpack::major_version, dlpack::minor_version};
    mt->manager_ctx = result.get();
    mt->deleter = [](managed_dltensor_versioned *self) {
        ndarray_dec_ref((ndarray_handle *) self->manager_ctx);
    };
    mt->flags = (ro) ? dlpack::flag_bitmask_read_only : 0;
    mt->dltensor.data = data;
    mt->dltensor.device.device_type = (int32_t) device_type;
    mt->dltensor.device.device_id = (int32_t) device_id;
    mt->dltensor.ndim = (int32_t) ndim;
    mt->dltensor.dtype = dtype;
    mt->dltensor.shape = shape;
    mt->dltensor.strides = strides;
    mt->dltensor.byte_offset = data_offset;
    result->mt_versioned = mt.release();
    result->refcount = 0;
    result->owner = owner;
    result->self = nullptr;
    result->versioned = true;
    result->free_strides = false;
    result->call_deleter = false; // dispose via PyMem_Free, not the deleter
    result->ro = ro;
    Py_XINCREF(owner);
    return result.release();
}
/// Convert the tensor handle 'th' into a Python object for the requested
/// 'framework' (numpy/pytorch/tensorflow/jax/cupy/memoryview/none),
/// honoring the return-value policy: 'copy'/'move' force a copy,
/// 'reference_internal' attaches ownership to the caller's self object,
/// and the automatic policies copy only when nothing keeps the data alive.
/// Returns a new reference, or nullptr with a Python error set.
PyObject *ndarray_export(ndarray_handle *th, int framework,
                         rv_policy policy, cleanup_list *cleanup) noexcept {
    if (!th)
        return none().release().ptr();
    // Decide whether the exported object must own a copy of the data
    bool copy;
    switch (policy) {
        case rv_policy::reference_internal:
            if (cleanup && cleanup->self() != th->owner && !th->self) {
                if (th->owner) {
                    PyErr_SetString(PyExc_RuntimeError,
                                    "nanobind::detail::ndarray_export(): "
                                    "reference_internal policy cannot be "
                                    "applied (ndarray already has an owner)");
                    return nullptr;
                } else {
                    th->owner = cleanup->self();
                    Py_INCREF(th->owner);
                }
            }
            [[fallthrough]];
        case rv_policy::automatic:
        case rv_policy::automatic_reference:
            // Copy only if nothing keeps the underlying storage alive
            copy = th->owner == nullptr && th->self == nullptr;
            break;
        case rv_policy::copy:
        case rv_policy::move:
            copy = true;
            break;
        default:
            copy = false;
            break;
    }
    if (!copy) {
        if (th->self) {
            // Return the original source object unchanged
            Py_INCREF(th->self);
            return th->self;
        } else if (policy == rv_policy::none) {
            return nullptr;
        }
    }
    object o;
    if (copy && framework == no_framework::value && th->self) {
        o = borrow(th->self);
    } else if (framework == no_framework::value ||
               framework == tensorflow::value) {
        // Make a new capsule wrapping an unversioned managed_dltensor.
        o = steal(th->make_capsule_unversioned());
    } else {
        // Make a Python object providing the buffer interface and having
        // the two DLPack methods __dlpack__() and __dlpack_device__().
        nb_ndarray *h = PyObject_New(nb_ndarray, nb_ndarray_tp());
        if (!h)
            return nullptr;
        h->th = th;
        ndarray_inc_ref(th);
        o = steal((PyObject *) h);
    }
    if (framework == numpy::value) {
        try {
            // numpy.array(o, copy=<copy>) consumes the wrapper directly
            PyObject* pkg_mod = module_import("numpy");
            PyObject* args[] = {pkg_mod, o.ptr(),
                                (copy) ? Py_True : Py_False};
            Py_ssize_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
            return PyObject_VectorcallMethod(
                static_pyobjects[pyobj_name::array_str], args, nargsf,
                static_pyobjects[pyobj_name::copy_tpl]);
        } catch (const std::exception &e) {
            PyErr_Format(PyExc_TypeError,
                         "could not export nanobind::ndarray: %s",
                         e.what());
            return nullptr;
        }
    }
    try {
        // Route the wrapper through the framework's from_dlpack() helper
        const char* pkg_name;
        switch (framework) {
            case pytorch::value:
                pkg_name = "torch.utils.dlpack";
                break;
            case tensorflow::value:
                pkg_name = "tensorflow.experimental.dlpack";
                break;
            case jax::value:
                pkg_name = "jax.dlpack";
                break;
            case cupy::value:
                pkg_name = "cupy";
                break;
            case memview::value:
                return PyMemoryView_FromObject(o.ptr());
            default:
                pkg_name = nullptr;
        }
        if (pkg_name) {
            PyObject* pkg_mod = module_import(pkg_name);
            PyObject* args[] = {pkg_mod, o.ptr()};
            Py_ssize_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
            o = steal(PyObject_VectorcallMethod(
                static_pyobjects[pyobj_name::from_dlpack_str],
                args, nargsf, nullptr));
        }
    } catch (const std::exception &e) {
        PyErr_Format(PyExc_TypeError,
                     "could not export nanobind::ndarray: %s",
                     e.what());
        return nullptr;
    }
    if (copy) {
        // PyTorch spells the deep-copy operation 'clone', others 'copy'
        PyObject* copy_function_name = static_pyobjects[pyobj_name::copy_str];
        if (framework == pytorch::value)
            copy_function_name = static_pyobjects[pyobj_name::clone_str];
        try {
            o = o.attr(copy_function_name)();
        } catch (std::exception &e) {
            PyErr_Format(PyExc_RuntimeError,
                         "copying nanobind::ndarray failed: %s",
                         e.what());
            return nullptr;
        }
    }
    return o.release().ptr();
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_static_property.cpp | C++ | #include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// `nb_static_property.__get__()`: Always pass the class instead of the
/// instance, so the property behaves like a static/class-level property.
static PyObject *nb_static_property_descr_get(PyObject *self, PyObject *, PyObject *cls) {
    // Flag to avoid infinite recursion during static attribute assignment
    bool static_property_disabled;
#if defined(NB_FREE_THREADED)
    // Free-threaded builds track the flag per-thread via TSS
    static_property_disabled = (bool) PyThread_tss_get(internals->nb_static_property_disabled);
#else
    static_property_disabled = internals->nb_static_property_disabled;
#endif
    if (!static_property_disabled) {
        return NB_SLOT(PyProperty_Type, tp_descr_get)(self, cls, cls);
    } else {
        // Recursion guard active: return the raw descriptor itself
        Py_INCREF(self);
        return self;
    }
}
/// `nb_static_property.__set__()`: like `__get__()`, redirect the call so
/// that the property setter always receives the class, even when invoked
/// through an instance.
static int nb_static_property_descr_set(PyObject *self, PyObject *obj, PyObject *value) {
    PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
    return NB_SLOT(PyProperty_Type, tp_descr_set)(self, cls, value);
}
/// Return the (lazily created, cached) nanobind.nb_static_property type,
/// a subclass of the builtin 'property' with class-redirecting descriptor
/// slots. Creation is serialized via the internals lock and double-checked
/// loads so it happens at most once.
PyTypeObject *nb_static_property_tp() noexcept {
    nb_internals *internals_ = internals;
    PyTypeObject *tp = internals_->nb_static_property.load_acquire();
    if (NB_UNLIKELY(!tp)) {
        lock_internals guard(internals_);
        tp = internals_->nb_static_property.load_relaxed();
        if (tp)
            return tp; // another thread created it while we waited
        // Inherit the member table of the builtin property type
        PyMemberDef *members;
#if defined(Py_LIMITED_API)
        members = (PyMemberDef *) PyType_GetSlot(&PyProperty_Type, Py_tp_members);
#else
        members = PyProperty_Type.tp_members;
#endif
        PyType_Slot slots[] = {
            { Py_tp_base, &PyProperty_Type },
            { Py_tp_descr_get, (void *) nb_static_property_descr_get },
            { Py_tp_members, members },
            { 0, nullptr }
        };
        PyType_Spec spec = {
            /* .name = */ "nanobind.nb_static_property",
            /* .basicsize = */ 0,
            /* .itemsize = */ 0,
            /* .flags = */ Py_TPFLAGS_DEFAULT,
            /* .slots = */ slots
        };
        tp = new_type(internals_, &spec);
        check(tp, "nb_static_property type creation failed!");
        internals_->nb_static_property_descr_set = nb_static_property_descr_set;
        internals_->nb_static_property.store_release(tp);
    }
    return tp;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/nb_type.cpp | C++ | /*
src/nb_type.cpp: libnanobind functionality for binding classes
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include "nb_internals.h"
#include "nb_ft.h"
#if defined(_MSC_VER)
# pragma warning(disable: 4706) // assignment within conditional expression
#endif
#if !defined(Py_tp_vectorcall)
# define Py_tp_vectorcall 82
#endif
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
/// Return the address of the instance's `__dict__` slot, or nullptr when the
/// type reserves no such slot (i.e., its `dictoffset` is zero).
static PyObject **nb_dict_ptr(PyObject *self, PyTypeObject *tp) {
    Py_ssize_t offset = nb_type_data(tp)->dictoffset;
    if (offset == 0)
        return nullptr;
    return (PyObject **) ((uint8_t *) self + offset);
}
/// Return the address of the instance's weak-reference list slot, or nullptr
/// when the type reserves no such slot (i.e., its `weaklistoffset` is zero).
static PyObject **nb_weaklist_ptr(PyObject *self, PyTypeObject *tp) {
    Py_ssize_t offset = nb_type_data(tp)->weaklistoffset;
    if (offset == 0)
        return nullptr;
    return (PyObject **) ((uint8_t *) self + offset);
}
/// Getset table exposing `__dict__` through the generic CPython accessors;
/// presumably installed on instance types that enable dynamic attributes
/// (usage site is outside this translation-unit excerpt).
static PyGetSetDef inst_getset[] = {
    { "__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict, nullptr, nullptr },
    { nullptr, nullptr, nullptr, nullptr, nullptr }
};
/// `tp_clear` implementation: drop the reference held by the instance
/// dictionary slot (if the type has one).
static int inst_clear(PyObject *self) {
    PyObject **dict_slot = nb_dict_ptr(self, Py_TYPE(self));
    if (dict_slot)
        Py_CLEAR(*dict_slot);
    return 0;
}
/// `tp_traverse` implementation: report the instance dictionary (if any) and
/// the heap type itself to the cyclic garbage collector.
static int inst_traverse(PyObject *self, visitproc visit, void *arg) {
    PyTypeObject *tp = Py_TYPE(self);
    if (PyObject **dict_slot = nb_dict_ptr(self, tp))
        Py_VISIT(*dict_slot);
    Py_VISIT(tp);
    return 0;
}
/// Fallback `tp_init` used when a bound type declares no constructor: always
/// raises TypeError.
static int inst_init(PyObject *self, PyObject *, PyObject *) {
    PyErr_Format(PyExc_TypeError, "%s: no constructor defined!",
                 nb_type_data(Py_TYPE(self))->name);
    return -1;
}
/// Allocate memory for a nb_type instance with internal storage
///
/// The C++ payload is embedded directly after the `nb_inst` header (padded to
/// the type's alignment if needed), and the fresh instance is immediately
/// entered into the C++-pointer -> Python-object hash table. Serves as the
/// default `tp_new`; the args/kwd parameters are ignored.
PyObject *inst_new_int(PyTypeObject *tp, PyObject * /* args */,
                       PyObject * /*kwd */) {
    bool gc = PyType_HasFeature(tp, Py_TPFLAGS_HAVE_GC);

    // GC-enabled types must go through the tracked allocator; otherwise use
    // the cheaper PyObject_New()
    nb_inst *self;
    if (NB_LIKELY(!gc))
        self = PyObject_New(nb_inst, tp);
    else
        self = (nb_inst *) PyType_GenericAlloc(tp, 0);

    if (NB_LIKELY(self)) {
        const type_data *t = nb_type_data(tp);

        uint32_t align = (uint32_t) t->align;
        bool intrusive = t->flags & (uint32_t) type_flags::intrusive_ptr;

        // Payload begins right after the nb_inst header; round up when the
        // C++ type requires over-aligned storage
        uintptr_t payload = (uintptr_t) (self + 1);

        if (NB_UNLIKELY(align > sizeof(void *)))
            payload = (payload + align - 1) & ~(uintptr_t(align) - 1);

        // Remember where the payload lives relative to the header
        self->offset = (int32_t) ((intptr_t) payload - (intptr_t) self);
        self->direct = 1;   // 'offset' points straight at the value
        self->internal = 1; // storage embedded within this allocation
        self->state = nb_inst::state_uninitialized;
        self->destruct = 0;
        self->cpp_delete = 0;
        self->clear_keep_alive = 0;
        self->intrusive = intrusive;
        self->unused = 0;

        // Make the object compatible with nb_try_inc_ref (free-threaded builds only)
        nb_enable_try_inc_ref((PyObject *) self);

        // Update hash table that maps from C++ to Python instance
        nb_shard &shard = internals->shard((void *) payload);
        lock_shard guard(shard);

        auto [it, success] = shard.inst_c2p.try_emplace((void *) payload, self);
        check(success, "nanobind::detail::inst_new_int(): unexpected collision!");
    }

    return (PyObject *) self;
}
/// Allocate memory for a nb_type instance with external storage. In contrast to
/// 'inst_new_int()', this does not yet register the instance in the internal
/// data structures. The function 'inst_register()' must be used to do so.
PyObject *inst_new_ext(PyTypeObject *tp, void *value) {
    bool gc = PyType_HasFeature(tp, Py_TPFLAGS_HAVE_GC);

    // Non-GC instances only need the bare nb_inst header (the payload lives
    // elsewhere); GC instances must use the type's full tracked allocation
    nb_inst *self;
    if (NB_LIKELY(!gc)) {
        self = (nb_inst *) PyObject_Malloc(sizeof(nb_inst));
        if (!self)
            return PyErr_NoMemory();
        PyObject_Init((PyObject *) self, tp);
    } else {
        self = (nb_inst *) PyType_GenericAlloc(tp, 0);
        if (!self)
            return nullptr;
    }

    // Compute offset to instance value
    // Use uintptr_t because subtracting tagged pointers (e.g., with
    // HardwareAddressSanitizer) may overflow, which is undefined behavior for
    // signed integers.
    int32_t offset = (int32_t) ((uintptr_t) value - (uintptr_t) self);
    bool direct = (intptr_t) self + offset == (intptr_t) value;

    if (NB_UNLIKELY(!direct)) {
        // Location is not representable as signed 32 bit offset
        if (!gc) {
            /// Allocate memory for an extra pointer
            nb_inst *self_2 =
                (nb_inst *) PyObject_Realloc(self, sizeof(nb_inst) + sizeof(void *));
            if (NB_UNLIKELY(!self_2)) {
                PyObject_Free(self);
                return PyErr_NoMemory();
            }
            self = self_2;
        }
        // For GC types, tp_basicsize already reserves room past the header
        // (it includes the payload area), so the pointer fits without realloc.
        // Stash the far pointer right after the header and point 'offset' at it.
        *(void **) (self + 1) = value;
        offset = (int32_t) sizeof(nb_inst);
    }

    const type_data *t = nb_type_data(tp);
    bool intrusive = t->flags & (uint32_t) type_flags::intrusive_ptr;

    self->offset = offset;
    self->direct = direct;  // 0 => extra indirection through stored pointer
    self->internal = 0;     // storage is external to this allocation
    self->state = nb_inst::state_uninitialized;
    self->destruct = 0;
    self->cpp_delete = 0;
    self->clear_keep_alive = 0;
    self->intrusive = intrusive;
    self->unused = 0;

    // Make the object compatible with nb_try_inc_ref (free-threaded builds only)
    nb_enable_try_inc_ref((PyObject *) self);

    return (PyObject *) self;
}
/// Register the object constructed by 'inst_new_ext()' in the internal data structures
///
/// Several Python objects may wrap the same C++ address; the per-address map
/// entry then degrades from a single pointer into a tagged singly linked list
/// (`nb_inst_seq`), and this function appends to its tail.
static void inst_register(PyObject *inst, void *value) noexcept {
    nb_shard &shard = internals->shard(value);
    lock_shard guard(shard);

    // Update hash table that maps from C++ to Python instance
    auto [it, success] = shard.inst_c2p.try_emplace(value, inst);

    if (NB_UNLIKELY(!success)) {
        void *entry = it->second;

        // Potentially convert the map value into linked list format
        if (!nb_is_seq(entry)) {
            nb_inst_seq *first = (nb_inst_seq *) PyMem_Malloc(sizeof(nb_inst_seq));
            check(first, "nanobind::detail::inst_new_ext(): list element "
                         "allocation failed!");
            first->inst = (PyObject *) entry;
            first->next = nullptr;
            entry = it.value() = nb_mark_seq(first);
        }

        // Walk to the end of the list, checking for duplicates on the way
        nb_inst_seq *seq = nb_get_seq(entry);
        while (true) {
            // The following should never happen
            check(inst != seq->inst, "nanobind::detail::inst_new_ext(): duplicate instance!");

            if (!seq->next)
                break;

            seq = seq->next;
        }

        // Append the new instance at the tail
        nb_inst_seq *next = (nb_inst_seq *) PyMem_Malloc(sizeof(nb_inst_seq));
        check(next,
              "nanobind::detail::inst_new_ext(): list element allocation failed!");
        next->inst = (PyObject *) inst;
        next->next = nullptr;
        seq->next = next;
    }
}
/// `tp_dealloc` for nanobind instances. Tears down, in order: GC tracking and
/// the instance dict, weak references, the C++ payload (destructor and/or
/// `operator delete` when the instance owns it), the C++ -> Python map entry
/// and keep-alive records (under the shard lock), any deferred keep-alive
/// callbacks/references, and finally the Python object memory itself.
static void inst_dealloc(PyObject *self) {
    PyTypeObject *tp = Py_TYPE(self);
    const type_data *t = nb_type_data(tp);

    bool gc = PyType_HasFeature(tp, Py_TPFLAGS_HAVE_GC);
    if (NB_UNLIKELY(gc)) {
        PyObject_GC_UnTrack(self);

        if (t->flags & (uint32_t) type_flags::has_dynamic_attr) {
            PyObject **dict = nb_dict_ptr(self, tp);
            if (dict)
                Py_CLEAR(*dict);
        }
    }

    if (t->flags & (uint32_t) type_flags::is_weak_referenceable &&
        nb_weaklist_ptr(self, tp) != nullptr) {
#if defined(PYPY_VERSION)
        PyObject **weaklist = nb_weaklist_ptr(self, tp);
        if (weaklist)
            Py_CLEAR(*weaklist);
#else
        PyObject_ClearWeakRefs(self);
#endif
    }

    nb_inst *inst = (nb_inst *) self;
    void *p = inst_ptr(inst);

    // Run the C++ destructor if this instance is responsible for it
    if (inst->destruct) {
        check(t->flags & (uint32_t) type_flags::is_destructible,
              "nanobind::detail::inst_dealloc(\"%s\"): attempted to call "
              "the destructor of a non-destructible type!", t->name);
        if (t->flags & (uint32_t) type_flags::has_destruct)
            t->destruct(p);
    }

    // Release the C++ allocation when owned; mind over-aligned types
    if (inst->cpp_delete) {
        if (NB_LIKELY(t->align <= (uint32_t) __STDCPP_DEFAULT_NEW_ALIGNMENT__))
            operator delete(p);
        else
            operator delete(p, std::align_val_t(t->align));
    }

    nb_weakref_seq *wr_seq = nullptr;

    {
        // Enter critical section of shard
        nb_shard &shard = internals->shard(p);
        lock_shard guard(shard);

        // Detach the keep-alive chain; its entries are processed after the
        // lock is released (callbacks may run arbitrary code)
        if (NB_UNLIKELY(inst->clear_keep_alive)) {
            size_t self_hash = ptr_hash()(self);
            nb_ptr_map &keep_alive = shard.keep_alive;
            nb_ptr_map::iterator it = keep_alive.find(self, self_hash);
            check(it != keep_alive.end(),
                  "nanobind::detail::inst_dealloc(\"%s\"): inconsistent "
                  "keep_alive information", t->name);

            wr_seq = (nb_weakref_seq *) it->second;
            keep_alive.erase_fast(it);
        }

        size_t p_hash = ptr_hash()(p);

        // Update hash table that maps from C++ to Python instance
        nb_ptr_map &inst_c2p = shard.inst_c2p;
        nb_ptr_map::iterator it = inst_c2p.find(p, p_hash);
        bool found = false;

        if (NB_LIKELY(it != inst_c2p.end())) {
            void *entry = it->second;

            if (NB_LIKELY(entry == inst)) {
                found = true;
                inst_c2p.erase_fast(it);
            } else if (nb_is_seq(entry)) {
                // Multiple objects are associated with this address. Find the right one!
                nb_inst_seq *seq = nb_get_seq(entry),
                            *pred = nullptr;

                do {
                    if ((nb_inst *) seq->inst == inst) {
                        found = true;

                        // Unlink 'seq'; if it was the head, either promote its
                        // successor or drop the map entry entirely
                        if (pred) {
                            pred->next = seq->next;
                        } else {
                            if (seq->next)
                                it.value() = nb_mark_seq(seq->next);
                            else
                                inst_c2p.erase_fast(it);
                        }

                        PyMem_Free(seq);
                        break;
                    }

                    pred = seq;
                    seq = seq->next;
                } while (seq);
            }
        }

        check(found,
              "nanobind::detail::inst_dealloc(\"%s\"): attempted to delete an "
              "unknown instance (%p)!", t->name, p);
    }

    // Process deferred keep-alive entries outside of the critical section:
    // either invoke the registered callback or drop the held reference
    while (wr_seq) {
        nb_weakref_seq *c = wr_seq;
        wr_seq = c->next;

        if (c->callback)
            c->callback(c->payload);
        else
            Py_DECREF((PyObject *) c->payload);

        PyMem_Free(c);
    }

    if (NB_UNLIKELY(gc))
        PyObject_GC_Del(self);
    else
        PyObject_Free(self);

    // Heap types hold a reference from each instance
    Py_DECREF(tp);
}
/// Map a C++ `std::type_info` to the nanobind `type_data` record of the
/// corresponding bound type, or nullptr if the type is not registered.
///
/// A pointer-keyed cache (thread-local in free-threaded builds, otherwise
/// shared) answers repeat queries without locking; misses consult the slow
/// map under the internals lock and then populate the cache.
type_data *nb_type_c2p(nb_internals *internals_,
                       const std::type_info *type) {
#if defined(NB_FREE_THREADED)
    thread_local nb_type_map_fast type_c2p_fast;
#else
    nb_type_map_fast &type_c2p_fast = internals_->type_c2p_fast;
#endif

    // Fast path: cache hit keyed on the type_info pointer
    nb_type_map_fast::iterator it_fast = type_c2p_fast.find((void *) type);
    if (it_fast != type_c2p_fast.end())
        return (type_data *) it_fast->second;

    lock_internals guard(internals_);

    nb_type_map_slow &type_c2p_slow = internals_->type_c2p_slow;
    nb_type_map_slow::iterator it_slow = type_c2p_slow.find(type);
    if (it_slow != type_c2p_slow.end()) {
        type_data *d = it_slow->second;

#if !defined(NB_FREE_THREADED)
        // Maintain a linked list to clean up 'type_c2p_fast' when the type
        // expires (see nb_type_unregister). In free-threaded mode, we leak
        // these entries until the thread destructs.
        nb_alias_chain *chain =
            (nb_alias_chain *) PyMem_Malloc(sizeof(nb_alias_chain));
        check(chain, "Could not allocate nb_alias_chain entry!");
        chain->next = d->alias_chain;
        chain->value = type;
        d->alias_chain = chain;
#endif

        // Remember the answer for future lookups
        type_c2p_fast[(void *) type] = d;
        return d;
    }

    return nullptr;
}
/// Remove a bound type from the C++ -> Python lookup tables when its Python
/// type object is deallocated (called from nb_type_dealloc). Also purges any
/// aliased `type_info` entries recorded in the type's alias chain.
void nb_type_unregister(type_data *t) noexcept {
    nb_internals *internals_ = internals;
    nb_type_map_slow &type_c2p_slow = internals_->type_c2p_slow;

    lock_internals guard(internals_);
    size_t n_del_slow = type_c2p_slow.erase(t->type);

#if defined(NB_FREE_THREADED)
    // In free-threaded mode, stale type information remains in the
    // 'type_c2p_fast' TLS. This data structure is eventually deallocated
    // when the thread terminates.
    //
    // In principle, this is dangerous because the user could delete a type
    // binding from a module at runtime, causing the associated
    // Python type object to be freed. If a function then attempts to return
    // a value with such a de-registered type, nanobind should raise an
    // exception, which requires knowing that the entry in 'type_c2p_fast'
    // has become invalid in the meantime.
    //
    // Right now, this problem is avoided because we immortalize type objects in
    // ``nb_type_new()`` and ``enum_create()``. However, we may not always
    // want to stick with immortalization, which is just a workaround.
    //
    // In the future, a global version counter modified with acquire/release
    // semantics (see https://github.com/wjakob/nanobind/pull/695#discussion_r1761600010)
    // might prove to be a similarly efficient but more general solution.
    bool fail = n_del_slow != 1;
#else
    nb_type_map_fast &type_c2p_fast = internals_->type_c2p_fast;
    size_t n_del_fast = type_c2p_fast.erase((void *) t->type);

    bool fail = n_del_fast != 1 || n_del_slow != 1;
    if (!fail) {
        // Also evict every alias entry that nb_type_c2p cached for this type
        nb_alias_chain *cur = t->alias_chain;
        while (cur) {
            nb_alias_chain *next = cur->next;
            n_del_fast = type_c2p_fast.erase((void *) cur->value);
            if (n_del_fast != 1) {
                fail = true;
                break;
            }
            PyMem_Free(cur);
            cur = next;
        }
    }
#endif

    check(!fail,
          "nanobind::detail::nb_type_unregister(\"%s\"): could not "
          "find type!", t->name);
}
/// `tp_dealloc` for nanobind type objects: unregister C++-backed bindings
/// from the lookup tables, free nanobind-specific resources, then defer to
/// the regular `type` deallocator.
static void nb_type_dealloc(PyObject *o) {
    type_data *t = nb_type_data((PyTypeObject *) o);

    // Python-level subclasses (is_python_type) were never registered
    if (t->type && (t->flags & (uint32_t) type_flags::is_python_type) == 0)
        nb_type_unregister(t);

    if (t->flags & (uint32_t) type_flags::has_implicit_conversions) {
        PyMem_Free(t->implicit.cpp);
        PyMem_Free(t->implicit.py);
    }

    // 't->name' doubles as the "initialization completed" marker; capture it
    // before freeing so we know whether to drop the internals reference
    bool initialized = t->name != nullptr;
    free((char *) t->name);

    NB_SLOT(PyType_Type, tp_dealloc)(o);
    if (initialized)
        internals_dec_ref();
}
/// Called when a C++ type is extended from within Python
///
/// Runs as `tp_init` of the nanobind metatype: validates that exactly one
/// nanobind base is given (and that it permits subclassing), lets `type`
/// perform the regular initialization, then clones the base's `type_data`
/// into the new type and marks it as a Python-defined subclass.
static int nb_type_init(PyObject *self, PyObject *args, PyObject *kwds) {
    // Expect the classic (name, bases, dict) triple
    if (NB_TUPLE_GET_SIZE(args) != 3) {
        PyErr_SetString(PyExc_RuntimeError,
                        "nb_type_init(): invalid number of arguments!");
        return -1;
    }

    // Multiple inheritance from nanobind types is not supported
    PyObject *bases = NB_TUPLE_GET_ITEM(args, 1);
    if (!PyTuple_CheckExact(bases) || NB_TUPLE_GET_SIZE(bases) != 1) {
        PyErr_SetString(PyExc_RuntimeError,
                        "nb_type_init(): invalid number of bases!");
        return -1;
    }

    PyObject *base = NB_TUPLE_GET_ITEM(bases, 0);
    if (!PyType_Check(base)) {
        PyErr_SetString(PyExc_RuntimeError, "nb_type_init(): expected a base type object!");
        return -1;
    }

    type_data *t_b = nb_type_data((PyTypeObject *) base);
    if (t_b->flags & (uint32_t) type_flags::is_final) {
        PyErr_Format(PyExc_TypeError, "The type '%s' prohibits subclassing!",
                     t_b->name);
        return -1;
    }

    // Let the regular 'type' machinery do the heavy lifting
    int rv = NB_SLOT(PyType_Type, tp_init)(self, args, kwds);
    if (rv)
        return rv;

    // Start from a copy of the base's type_data, then specialize it
    type_data *t = nb_type_data((PyTypeObject *) self);
    *t = *t_b;

    t->flags |= (uint32_t) type_flags::is_python_type;
    t->flags &= ~((uint32_t) type_flags::has_implicit_conversions);

    PyObject *name = nb_type_name(self);
    t->name = strdup_check(PyUnicode_AsUTF8AndSize(name, nullptr));
    Py_DECREF(name);
    t->type_py = (PyTypeObject *) self;
    t->implicit.cpp = nullptr;
    t->implicit.py = nullptr;
    t->alias_chain = nullptr;

    // The copied vectorcall shortcut belongs to the base type; clear it
#if defined(Py_LIMITED_API)
    t->vectorcall = nullptr;
#else
    ((PyTypeObject *) self)->tp_vectorcall = nullptr;
#endif

    internals_inc_ref();

    return 0;
}
/// Special case to handle 'Class.property = value' assignments
///
/// `tp_setattro` of the nanobind metatype: when the attribute being assigned
/// is an `nb_static_property`, route the assignment through its setter rather
/// than rebinding the class attribute. Also protects internal attributes
/// whose name begins with '@' from being rebound or deleted.
int nb_type_setattro(PyObject* obj, PyObject* name, PyObject* value) {
    nb_internals *int_p = internals;

    // Set a flag to avoid infinite recursion during static attribute assignment
    // (the lookup below would otherwise re-enter the static property's getter)
#if defined(NB_FREE_THREADED)
    PyThread_tss_set(int_p->nb_static_property_disabled, (void *) 1);
#else
    int_p->nb_static_property_disabled = true;
#endif
    PyObject *cur = PyObject_GetAttr(obj, name);
#if defined(NB_FREE_THREADED)
    PyThread_tss_set(int_p->nb_static_property_disabled, (void *) 0);
#else
    int_p->nb_static_property_disabled = false;
#endif

    if (cur) {
        PyTypeObject *tp = int_p->nb_static_property.load_acquire();

        // For type.static_prop = value, call the setter.
        // For type.static_prop = another_static_prop, replace the descriptor.
        if (Py_TYPE(cur) == tp && Py_TYPE(value) != tp) {
            int rv = int_p->nb_static_property_descr_set(cur, obj, value);
            Py_DECREF(cur);
            return rv;
        }
        Py_DECREF(cur);

        const char *cname = PyUnicode_AsUTF8AndSize(name, nullptr);
        if (!cname) {
            PyErr_Clear(); // probably a non-string attribute name
        } else if (cname[0] == '@') {
            /* Prevent type attributes starting with an `@` sign from being
               rebound or deleted. This is useful to safely stash owning
               references. The ``nb::enum_<>`` class, e.g., uses this to ensure
               indirect ownership of a borrowed reference in the supplemental
               type data. */
            PyErr_Format(PyExc_AttributeError,
                         "internal nanobind attribute '%s' cannot be "
                         "reassigned or deleted.", cname);
            return -1;
        }
    } else {
        PyErr_Clear();
    }

    // Fall back to the regular 'type' attribute assignment
    return NB_SLOT(PyType_Type, tp_setattro)(obj, name, value);
}
#if NB_TYPE_FROM_METACLASS_IMPL || NB_TYPE_GET_SLOT_IMPL
struct nb_slot {
#if NB_TYPE_GET_SLOT_IMPL
uint8_t indirect_1;
uint8_t indirect_2;
#endif
uint8_t direct;
};
template <size_t I1, size_t I2, size_t Offset1, size_t Offset2> nb_slot constexpr Ei() {
// Compile-time check to ensure that indices and alignment match our expectation
static_assert(I1 == I2 && (Offset1 % sizeof(void *)) == 0 && (Offset2 % sizeof(void *)) == 0,
"nb_slot construction: internal error");
#if NB_TYPE_GET_SLOT_IMPL
size_t o = 0;
switch (Offset1) {
case offsetof(PyHeapTypeObject, as_async): o = offsetof(PyTypeObject, tp_as_async); break;
case offsetof(PyHeapTypeObject, as_number): o = offsetof(PyTypeObject, tp_as_number); break;
case offsetof(PyHeapTypeObject, as_mapping): o = offsetof(PyTypeObject, tp_as_mapping); break;
case offsetof(PyHeapTypeObject, as_sequence): o = offsetof(PyTypeObject, tp_as_sequence); break;
case offsetof(PyHeapTypeObject, as_buffer): o = offsetof(PyTypeObject, tp_as_buffer); break;
default: break;
}
return {
(uint8_t) (o / sizeof(void *)),
(uint8_t) ((Offset2 - Offset1) / sizeof(void *)),
(uint8_t) (Offset2 / sizeof(void *)),
};
#else
return { (uint8_t) (Offset2 / sizeof(void *)) };
#endif
}
// Precomputed mapping from type slot ID to an entry in the data structure
#define E(i1, p1, p2, name) \
Ei<i1, Py_##p2##_##name, \
offsetof(PyHeapTypeObject, p1), \
offsetof(PyHeapTypeObject, p1.p2##_##name)>()
static constexpr nb_slot type_slots[] {
E(1, as_buffer, bf, getbuffer),
E(2, as_buffer, bf, releasebuffer),
E(3, as_mapping, mp, ass_subscript),
E(4, as_mapping, mp, length),
E(5, as_mapping, mp, subscript),
E(6, as_number, nb, absolute),
E(7, as_number, nb, add),
E(8, as_number, nb, and),
E(9, as_number, nb, bool),
E(10, as_number, nb, divmod),
E(11, as_number, nb, float),
E(12, as_number, nb, floor_divide),
E(13, as_number, nb, index),
E(14, as_number, nb, inplace_add),
E(15, as_number, nb, inplace_and),
E(16, as_number, nb, inplace_floor_divide),
E(17, as_number, nb, inplace_lshift),
E(18, as_number, nb, inplace_multiply),
E(19, as_number, nb, inplace_or),
E(20, as_number, nb, inplace_power),
E(21, as_number, nb, inplace_remainder),
E(22, as_number, nb, inplace_rshift),
E(23, as_number, nb, inplace_subtract),
E(24, as_number, nb, inplace_true_divide),
E(25, as_number, nb, inplace_xor),
E(26, as_number, nb, int),
E(27, as_number, nb, invert),
E(28, as_number, nb, lshift),
E(29, as_number, nb, multiply),
E(30, as_number, nb, negative),
E(31, as_number, nb, or),
E(32, as_number, nb, positive),
E(33, as_number, nb, power),
E(34, as_number, nb, remainder),
E(35, as_number, nb, rshift),
E(36, as_number, nb, subtract),
E(37, as_number, nb, true_divide),
E(38, as_number, nb, xor),
E(39, as_sequence, sq, ass_item),
E(40, as_sequence, sq, concat),
E(41, as_sequence, sq, contains),
E(42, as_sequence, sq, inplace_concat),
E(43, as_sequence, sq, inplace_repeat),
E(44, as_sequence, sq, item),
E(45, as_sequence, sq, length),
E(46, as_sequence, sq, repeat),
E(47, ht_type, tp, alloc),
E(48, ht_type, tp, base),
E(49, ht_type, tp, bases),
E(50, ht_type, tp, call),
E(51, ht_type, tp, clear),
E(52, ht_type, tp, dealloc),
E(53, ht_type, tp, del),
E(54, ht_type, tp, descr_get),
E(55, ht_type, tp, descr_set),
E(56, ht_type, tp, doc),
E(57, ht_type, tp, getattr),
E(58, ht_type, tp, getattro),
E(59, ht_type, tp, hash),
E(60, ht_type, tp, init),
E(61, ht_type, tp, is_gc),
E(62, ht_type, tp, iter),
E(63, ht_type, tp, iternext),
E(64, ht_type, tp, methods),
E(65, ht_type, tp, new),
E(66, ht_type, tp, repr),
E(67, ht_type, tp, richcompare),
E(68, ht_type, tp, setattr),
E(69, ht_type, tp, setattro),
E(70, ht_type, tp, str),
E(71, ht_type, tp, traverse),
E(72, ht_type, tp, members),
E(73, ht_type, tp, getset),
E(74, ht_type, tp, free),
E(75, as_number, nb, matrix_multiply),
E(76, as_number, nb, inplace_matrix_multiply),
E(77, as_async, am, await),
E(78, as_async, am, aiter),
E(79, as_async, am, anext),
E(80, ht_type, tp, finalize),
#if PY_VERSION_HEX >= 0x030A0000 && !defined(PYPY_VERSION)
E(81, as_async, am, send),
#endif
};
#if NB_TYPE_GET_SLOT_IMPL
void *type_get_slot(PyTypeObject *t, int slot_id) {
nb_slot slot = type_slots[slot_id - 1];
if (PyType_HasFeature(t, Py_TPFLAGS_HEAPTYPE)) {
return ((void **) t)[slot.direct];
} else {
if (slot.indirect_1)
return ((void ***) t)[slot.indirect_1][slot.indirect_2];
else
return ((void **) t)[slot.indirect_2];
}
}
#endif
#endif
/// Create a heap type from 'spec' whose metatype is 'meta'. On Python 3.12+
/// this is simply PyType_FromMetaclass(); older versions are handled by an
/// emulation that builds the PyHeapTypeObject by hand.
static PyObject *nb_type_from_metaclass(PyTypeObject *meta, PyObject *mod,
                                        PyType_Spec *spec) {
#if NB_TYPE_FROM_METACLASS_IMPL == 0
    // Life is good, PyType_FromMetaclass() is available
    return PyType_FromMetaclass(meta, mod, spec, nullptr);
#else
    /* The fallback code below emulates PyType_FromMetaclass() on Python prior
       to version 3.12. It requires access to CPython-internal structures, which
       is why nanobind can only target the stable ABI on version 3.12+. */

    // Split a dotted spec name into module part and unqualified type name
    const char *name = strrchr(spec->name, '.');
    PyObject *modname_o = nullptr;
    if (name) {
        modname_o = PyUnicode_FromStringAndSize(spec->name, name - spec->name);
        if (!modname_o)
            return nullptr;
        name++;
    } else {
        name = spec->name;
    }

    PyObject *name_o = PyUnicode_InternFromString(name);
    if (!name_o) {
        Py_XDECREF(modname_o);
        return nullptr;
    }

    const char *name_cstr = PyUnicode_AsUTF8AndSize(name_o, nullptr);
    if (!name_cstr) {
        Py_XDECREF(modname_o);
        Py_DECREF(name_o);
        return nullptr;
    }

    // Allocate the heap type through the requested metatype
    PyHeapTypeObject *ht = (PyHeapTypeObject *) PyType_GenericAlloc(meta, 0);
    if (!ht) {
        Py_XDECREF(modname_o);
        Py_DECREF(name_o);
        return nullptr;
    }

    // 'ht_name' consumes one reference; 'ht_qualname' needs its own
    ht->ht_name = name_o;
    ht->ht_qualname = name_o;
    Py_INCREF(name_o);

    if (mod) {
        Py_INCREF(mod);
        ht->ht_module = mod;
    }

    // Wire up the embedded protocol sub-structures as CPython expects
    PyTypeObject *tp = &ht->ht_type;
    tp->tp_name = name_cstr;
    tp->tp_basicsize = spec->basicsize;
    tp->tp_itemsize = spec->itemsize;
    tp->tp_flags = spec->flags | Py_TPFLAGS_HEAPTYPE;
    tp->tp_as_async = &ht->as_async;
    tp->tp_as_number = &ht->as_number;
    tp->tp_as_sequence = &ht->as_sequence;
    tp->tp_as_mapping = &ht->as_mapping;
    tp->tp_as_buffer = &ht->as_buffer;

    // Transfer the slot table into the heap type using the precomputed
    // slot-ID -> offset mapping (see 'type_slots' above)
    PyType_Slot *ts = spec->slots;
    bool fail = false;

    while (true) {
        int slot = ts->slot;
        if (slot == 0) {
            break;
        } else if (slot * sizeof(nb_slot) < (int) sizeof(type_slots)) {
            *(((void **) ht) + type_slots[slot - 1].direct) = ts->pfunc;
        } else {
            PyErr_Format(PyExc_RuntimeError,
                         "nb_type_from_metaclass(): unhandled slot %i", slot);
            fail = true;
            break;
        }
        ts++;
    }

    // Bring type object into a safe state (before error handling)
    const PyMemberDef *members = tp->tp_members;
    const char *doc = tp->tp_doc;
    tp->tp_members = nullptr;
    tp->tp_doc = nullptr;
    Py_XINCREF(tp->tp_base);

    if (doc && !fail) {
        size_t size = strlen(doc) + 1;
        /// This code path is only used for Python 3.12, where
        /// PyObject_Malloc is the right allocation routine for tp_doc
        char *target = (char *) PyObject_Malloc(size);
        if (!target) {
            PyErr_NoMemory();
            fail = true;
        } else {
            memcpy(target, doc, size);
            tp->tp_doc = target;
        }
    }

    // The only supported member entries are the special read-only offsets
    // that PyType_FromSpec uses to configure dict/weaklist/vectorcall
    if (members && !fail) {
        while (members->name) {
            if (members->type == T_PYSSIZET && members->flags == READONLY) {
                if (strcmp(members->name, "__dictoffset__") == 0)
                    tp->tp_dictoffset = members->offset;
                else if (strcmp(members->name, "__weaklistoffset__") == 0)
                    tp->tp_weaklistoffset = members->offset;
                else if (strcmp(members->name, "__vectorcalloffset__") == 0)
                    tp->tp_vectorcall_offset = members->offset;
                else
                    fail = true;
            } else {
                fail = true;
            }

            if (fail) {
                PyErr_Format(
                    PyExc_RuntimeError,
                    "nb_type_from_metaclass(): unhandled tp_members entry!");
                break;
            }

            members++;
        }
    }

    if (modname_o && !fail) {
        tp->tp_dict = PyDict_New();
        if (!tp->tp_dict ||
            PyDict_SetItemString(tp->tp_dict, "__module__", modname_o) < 0)
            fail = true;
    }

    Py_XDECREF(modname_o);

    if (fail || PyType_Ready(tp) != 0) {
        Py_DECREF(tp);
        return nullptr;
    }

    return (PyObject *) tp;
#endif
}
extern int nb_type_setattro(PyObject* obj, PyObject* name, PyObject* value);
// Implements the vector call protocol directly on type objects to construct
// instances more efficiently.
//
// For __init__-style constructors this pre-allocates the instance and
// prepends it as 'self' to the argument vector before dispatching to the
// bound overload chain; for __new__-style constructors the overload chain
// produces the instance itself.
static PyObject *nb_type_vectorcall(PyObject *self, PyObject *const *args_in,
                                    size_t nargsf,
                                    PyObject *kwargs_in) noexcept {
    PyTypeObject *tp = (PyTypeObject *) self;
    type_data *td = nb_type_data(tp);
    nb_func *func = (nb_func *) td->init;
    bool is_init = (td->flags & (uint32_t) type_flags::has_new) == 0;
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);

    if (NB_UNLIKELY(!func)) {
        PyErr_Format(PyExc_TypeError, "%s: no constructor defined!", td->name);
        return nullptr;
    }

    if (NB_LIKELY(is_init)) {
        // Allocate the uninitialized instance that __init__ will fill in
        self = inst_new_int(tp, nullptr, nullptr);
        if (!self)
            return nullptr;
    } else if (nargs == 0 && !kwargs_in &&
               !(td->flags & (uint32_t) type_flags::has_nullary_new)) {
        // When the bindings define a custom __new__ operator, nanobind always
        // provides a no-argument dummy __new__ constructor to handle unpickling
        // via __setstate__. This is an implementation detail that should not be
        // exposed. Therefore, only allow argument-less calls if there is an
        // actual __new__ overload with a compatible signature. This is
        // detected in nb_func.cpp based on whether any __init__ overload can
        // accept no arguments.
        return func->vectorcall((PyObject *) func, nullptr, 0, nullptr);
    }

    // Build an argument vector with 'self' prepended. If the caller passed
    // PY_VECTORCALL_ARGUMENTS_OFFSET, the protocol lets us temporarily borrow
    // the slot before args_in; otherwise copy into a stack or heap buffer.
    const size_t buf_size = 5;
    PyObject **args, *buf[buf_size], *temp = nullptr;
    bool alloc = false;

    if (NB_LIKELY(nargsf & PY_VECTORCALL_ARGUMENTS_OFFSET)) {
        args = (PyObject **) (args_in - 1);
        temp = args[0];
    } else {
        size_t size = nargs + 1;
        if (kwargs_in)
            size += NB_TUPLE_GET_SIZE(kwargs_in); // kwargs_in is the kw-names tuple

        if (size < buf_size) {
            args = buf;
        } else {
            args = (PyObject **) PyMem_Malloc(size * sizeof(PyObject *));
            if (!args) {
                if (is_init)
                    Py_DECREF(self);
                return PyErr_NoMemory();
            }
            alloc = true;
        }

        memcpy(args + 1, args_in, sizeof(PyObject *) * (size - 1));
    }

    args[0] = self;

    PyObject *rv =
        func->vectorcall((PyObject *) func, args, nargs + 1, kwargs_in);

    // Restore the borrowed slot before returning control to the caller
    args[0] = temp;

    if (NB_UNLIKELY(alloc))
        PyMem_Free(args);

    if (NB_LIKELY(is_init)) {
        if (!rv) {
            Py_DECREF(self);
            return nullptr;
        }

        // __init__ constructor: 'rv' is None
        Py_DECREF(rv);
        return self;
    } else {
        // __new__ constructor
        return rv;
    }
}
/// Return (lazily creating and caching) the nanobind metatype that carries
/// 'supplement' extra bytes of per-type storage after its type_data record.
/// One metatype exists per distinct supplement size, keyed in
/// internals->nb_type_dict.
static PyTypeObject *nb_type_tp(size_t supplement) noexcept {
    object key = steal(PyLong_FromSize_t(supplement));

    nb_internals *internals_ = internals;
    PyTypeObject *tp =
        (PyTypeObject *) dict_get_item_ref_or_fail(internals_->nb_type_dict, key.ptr());

    if (NB_UNLIKELY(!tp)) {
        // Retry in critical section to avoid races that create the same nb_type
        lock_internals guard(internals_);
        tp = (PyTypeObject *) dict_get_item_ref_or_fail(internals_->nb_type_dict, key.ptr());
        if (tp)
            return tp;

        // 3.12+: negative basicsize means "relative to the base type's size"
#if PY_VERSION_HEX >= 0x030C0000
        int basicsize = -(int) (sizeof(type_data) + supplement),
            itemsize = 0;
#else
        int basicsize = (int) (PyType_Type.tp_basicsize + (sizeof(type_data) + supplement)),
            itemsize = (int) PyType_Type.tp_itemsize;
#endif

        char name[17 + 20 + 1];
        snprintf(name, sizeof(name), "nanobind.nb_type_%zu", supplement);

        PyType_Slot slots[] = {
            { Py_tp_base, &PyType_Type },
            { Py_tp_dealloc, (void *) nb_type_dealloc },
            { Py_tp_setattro, (void *) nb_type_setattro },
            { Py_tp_init, (void *) nb_type_init },
            { 0, nullptr },
            { 0, nullptr }
        };

        PyType_Spec spec = {
            /* .name = */ name,
            /* .basicsize = */ basicsize,
            /* .itemsize = */ itemsize,
            /* .flags = */ Py_TPFLAGS_DEFAULT | NB_TPFLAGS_IMMUTABLETYPE,
            /* .slots = */ slots
        };

#if defined(Py_LIMITED_API)
        PyMemberDef members[] = {
            { "__vectorcalloffset__", Py_T_PYSSIZET, 0, Py_READONLY, nullptr },
            { nullptr, 0, 0, 0, nullptr }
        };

        // Workaround because __vectorcalloffset__ does not support Py_RELATIVE_OFFSET
        members[0].offset = internals_->type_data_offset + offsetof(type_data, vectorcall);

        if (NB_DYNAMIC_VERSION < 0x030E0000) {
            slots[4] = { Py_tp_members, (void *) members };
            spec.flags |= Py_TPFLAGS_HAVE_VECTORCALL;
        }
#endif

        tp = (PyTypeObject *) nb_type_from_metaclass(
            internals_->nb_meta, internals_->nb_module, &spec);
        make_immortal((PyObject *) tp);

        // Cache the new metatype under its supplement size
        int rv = 1;
        if (tp)
            rv = PyDict_SetItem(internals_->nb_type_dict, key.ptr(), (PyObject *) tp);
        check(rv == 0, "nb_type type creation failed!");
    }

    return tp;
}
/// Extract the function/class name from a custom signature string ``s``. The
/// last line of ``s`` must begin with ``prefix`` (e.g. "class "); the returned
/// name is the text between that prefix and the first "(" or "[" character.
/// The caller owns the returned heap-allocated copy; ``cmd`` is used only for
/// diagnostics.
NB_NOINLINE char *extract_name(const char *cmd, const char *prefix, const char *s) {
    (void) cmd;

    // Position 'line' at the beginning of the signature's last line
    const char *newline = strrchr(s, '\n');
    const char *line = newline ? newline + 1 : s;

    // That line must begin with the expected prefix
    size_t prefix_len = strlen(prefix);
    check(strncmp(line, prefix, prefix_len) == 0,
          "%s(): last line of custom signature \"%s\" must start with \"%s\"!",
          cmd, s, prefix);
    line += prefix_len;

    // The name ends at the first parenthesis or bracket, whichever comes first
    const char *paren = strchr(line, '('),
               *bracket = strchr(line, '[');
    const char *end = paren;
    if (!end)
        end = bracket;
    else if (bracket && bracket < end)
        end = bracket;

    check(end != nullptr,
          "%s(): last line of custom signature \"%s\" must contain an opening "
          "parenthesis (\"(\") or bracket (\"[\")!", cmd, s);

    // A few sanity checks
    size_t remainder = strlen(line);
    char final_char = remainder ? line[remainder - 1] : line[0];
    check(final_char != ':' && final_char != ' ',
          "%s(): custom signature \"%s\" should not end with \":\" or \" \"!", cmd, s);
    check(end == line || (line[0] != ' ' && end[-1] != ' '),
          "%s(): custom signature \"%s\" contains leading/trailing space around name!", cmd, s);

    // Copy the name into a fresh NUL-terminated buffer
    size_t name_len = (size_t) (end - line);
    char *result = (char *) malloc_check(name_len + 1);
    memcpy(result, line, name_len);
    result[name_len] = '\0';
    return result;
}
/// Method table providing `__class_getitem__` via CPython's Py_GenericAlias
/// helper, which enables subscription syntax (`BoundType[T]`) on bound types.
static PyMethodDef class_getitem_method[] = {
    { "__class_getitem__", Py_GenericAlias, METH_O | METH_CLASS, nullptr },
    { nullptr }
};
/// Called when a C++ type is bound via nb::class_<>
PyObject *nb_type_new(const type_init_data *t) noexcept {
bool has_doc = t->flags & (uint32_t) type_init_flags::has_doc,
has_base = t->flags & (uint32_t) type_init_flags::has_base,
has_base_py = t->flags & (uint32_t) type_init_flags::has_base_py,
has_type_slots = t->flags & (uint32_t) type_init_flags::has_type_slots,
has_supplement = t->flags & (uint32_t) type_init_flags::has_supplement,
has_dynamic_attr = t->flags & (uint32_t) type_flags::has_dynamic_attr,
is_weak_referenceable = t->flags & (uint32_t) type_flags::is_weak_referenceable,
is_generic = t->flags & (uint32_t) type_flags::is_generic,
intrusive_ptr = t->flags & (uint32_t) type_flags::intrusive_ptr,
has_shared_from_this = t->flags & (uint32_t) type_flags::has_shared_from_this,
has_signature = t->flags & (uint32_t) type_flags::has_signature;
const char *t_name = t->name;
if (has_signature)
t_name =
extract_name("nanobind::detail::nb_type_new", "class ", t->name);
str name = steal<str>(PyUnicode_InternFromString(t_name)),
qualname = name;
object modname;
PyObject *mod = nullptr;
// Update hash table that maps from std::type_info to Python type
nb_type_map_slow::iterator it;
bool success;
nb_internals *internals_ = internals;
{
lock_internals guard(internals_);
std::tie(it, success) = internals_->type_c2p_slow.try_emplace(t->type, nullptr);
if (!success) {
PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
"nanobind: type '%s' was already registered!\n",
t_name);
PyObject *tp = (PyObject *) it->second->type_py;
Py_INCREF(tp);
if (has_signature)
free((char *) t_name);
return tp;
}
}
if (t->scope != nullptr) {
if (PyModule_Check(t->scope)) {
mod = t->scope;
modname = getattr(t->scope, "__name__", handle());
} else {
modname = getattr(t->scope, "__module__", handle());
object scope_qualname = getattr(t->scope, "__qualname__", handle());
if (scope_qualname.is_valid())
qualname = steal<str>(
PyUnicode_FromFormat("%U.%U", scope_qualname.ptr(), name.ptr()));
}
}
if (modname.is_valid())
name = steal<str>(
PyUnicode_FromFormat("%U.%U", modname.ptr(), name.ptr()));
constexpr size_t ptr_size = sizeof(void *);
size_t basicsize = sizeof(nb_inst) + t->size;
if (t->align > ptr_size)
basicsize += t->align - ptr_size;
PyObject *base = nullptr;
#if !defined(PYPY_VERSION) // see https://github.com/pypy/pypy/issues/4914
bool generic_base = false;
#endif
if (has_base_py) {
check(!has_base,
"nanobind::detail::nb_type_new(\"%s\"): multiple base types "
"specified!", t_name);
base = (PyObject *) t->base_py;
#if !defined(PYPY_VERSION) // see https://github.com/pypy/pypy/issues/4914
if (Py_TYPE(base) == &Py_GenericAliasType) {
base = PyObject_GetAttrString(base, "__origin__");
check(base != nullptr,
"nanobind::detail::nb_type_new(\"%s\"): could not access base of type alias!", t_name);
Py_DECREF(base);
generic_base = true;
}
#endif
check(nb_type_check(base),
"nanobind::detail::nb_type_new(\"%s\"): base type is not a "
"nanobind type!", t_name);
} else if (has_base) {
lock_internals guard(internals_);
nb_type_map_slow::iterator it2 = internals_->type_c2p_slow.find(t->base);
check(it2 != internals_->type_c2p_slow.end(),
"nanobind::detail::nb_type_new(\"%s\"): base type \"%s\" not "
"known to nanobind!", t_name, type_name(t->base));
base = (PyObject *) it2->second->type_py;
}
type_data *tb = nullptr;
if (base) {
// Check if the base type already has dynamic attributes
tb = nb_type_data((PyTypeObject *) base);
if (tb->flags & (uint32_t) type_flags::has_dynamic_attr)
has_dynamic_attr = true;
if (tb->flags & (uint32_t) type_flags::is_weak_referenceable)
is_weak_referenceable = true;
/* Handle a corner case (base class larger than derived class)
which can arise when extending trampoline base classes */
PyTypeObject *base_2 = (PyTypeObject *) base;
type_data *tb_2 = tb;
do {
size_t base_basicsize = sizeof(nb_inst) + tb_2->size;
if (tb_2->align > ptr_size)
base_basicsize += tb_2->align - ptr_size;
if (base_basicsize > basicsize)
basicsize = base_basicsize;
#if !defined(Py_LIMITED_API)
base_2 = base_2->tp_base;
#else
base_2 = (PyTypeObject *) PyType_GetSlot(base_2, Py_tp_base);
#endif
if (!base_2 || !nb_type_check((PyObject *) base_2))
break;
tb_2 = nb_type_data(base_2);
} while (true);
}
bool base_intrusive_ptr =
tb && (tb->flags & (uint32_t) type_flags::intrusive_ptr);
// tp_basicsize must satisfy pointer alignment.
basicsize = (basicsize + ptr_size - 1) / ptr_size * ptr_size;
char *name_copy = strdup_check(name.c_str());
constexpr size_t nb_type_max_slots = 12,
nb_extra_slots = 80,
nb_total_slots = nb_type_max_slots +
nb_extra_slots + 1;
PyMemberDef members[3] { };
PyType_Slot slots[nb_total_slots], *s = slots;
PyType_Spec spec = {
/* .name = */ name_copy,
/* .basicsize = */ (int) basicsize,
/* .itemsize = */ 0,
/* .flags = */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
/* .slots = */ slots
};
if (base)
*s++ = { Py_tp_base, (void *) base };
*s++ = { Py_tp_init, (void *) inst_init };
*s++ = { Py_tp_new, (void *) inst_new_int };
*s++ = { Py_tp_dealloc, (void *) inst_dealloc };
if (has_doc)
*s++ = { Py_tp_doc, (void *) t->doc };
vectorcallfunc type_vectorcall = nb_type_vectorcall;
bool has_traverse = false,
has_getset = false,
has_custom_init_or_new = false,
has_custom_type_vectorcall = false;
if (has_type_slots) {
size_t num_avail = nb_extra_slots;
size_t i = 0;
while (t->type_slots[i].slot) {
check(i != num_avail,
"nanobind::detail::nb_type_new(\"%s\"): ran out of "
"type slots!", t_name);
const PyType_Slot &ts = t->type_slots[i++];
int slot_id = ts.slot;
has_traverse |= slot_id == Py_tp_traverse;
has_getset |= slot_id == Py_tp_getset;
has_custom_init_or_new |=
slot_id == Py_tp_init || slot_id == Py_tp_new;
if (slot_id == Py_tp_vectorcall) {
type_vectorcall = (vectorcallfunc) ts.pfunc;
has_custom_type_vectorcall = true;
continue;
}
*s++ = ts;
}
}
if (has_custom_init_or_new && !has_custom_type_vectorcall)
type_vectorcall = nullptr;
Py_ssize_t dictoffset = 0, weaklistoffset = 0;
int num_members = 0;
// realign to sizeof(void*) if needed
if (has_dynamic_attr || is_weak_referenceable)
basicsize = (basicsize + ptr_size - 1) / ptr_size * ptr_size;
if (has_dynamic_attr) {
dictoffset = (Py_ssize_t) basicsize;
basicsize += ptr_size;
members[num_members] = PyMemberDef{ "__dictoffset__", T_PYSSIZET,
dictoffset, READONLY, nullptr };
++num_members;
// Install GC traverse and clear routines if not inherited/overridden
if (!has_traverse) {
*s++ = { Py_tp_traverse, (void *) inst_traverse };
*s++ = { Py_tp_clear, (void *) inst_clear };
has_traverse = true;
}
spec.basicsize = (int) basicsize;
if (!has_getset)
*s++ = { Py_tp_getset, (void *) inst_getset };
}
if (is_weak_referenceable) {
weaklistoffset = (Py_ssize_t) basicsize;
basicsize += ptr_size;
members[num_members] = PyMemberDef{ "__weaklistoffset__", T_PYSSIZET,
weaklistoffset, READONLY, nullptr };
++num_members;
// Install GC traverse and clear routines if not inherited/overridden
if (!has_traverse) {
*s++ = { Py_tp_traverse, (void *) inst_traverse };
*s++ = { Py_tp_clear, (void *) inst_clear };
has_traverse = true;
}
spec.basicsize = (int) basicsize;
}
if (num_members > 0)
*s++ = { Py_tp_members, (void*) members };
if (is_generic)
*s++ = { Py_tp_methods, (void*) class_getitem_method };
if (NB_DYNAMIC_VERSION >= 0x030E0000 && type_vectorcall)
*s++ = { Py_tp_vectorcall, (void *) type_vectorcall };
if (has_traverse)
spec.flags |= Py_TPFLAGS_HAVE_GC;
*s++ = { 0, nullptr };
PyTypeObject *metaclass = nb_type_tp(has_supplement ? t->supplement : 0);
PyObject *result = nb_type_from_metaclass(metaclass, mod, &spec);
if (!result) {
python_error err;
check(false,
"nanobind::detail::nb_type_new(\"%s\"): type construction "
"failed: %s!", t_name, err.what());
}
Py_DECREF(metaclass);
make_immortal(result);
internals_inc_ref();
type_data *to = nb_type_data((PyTypeObject *) result);
*to = *t; // note: slices off _init parts
to->flags &= ~(uint32_t) type_init_flags::all_init_flags;
if (!intrusive_ptr && base_intrusive_ptr) {
to->flags |= (uint32_t) type_flags::intrusive_ptr;
to->set_self_py = tb->set_self_py;
}
if (!has_shared_from_this && tb &&
(tb->flags & (uint32_t) type_flags::has_shared_from_this)) {
to->flags |= (uint32_t) type_flags::has_shared_from_this;
to->keep_shared_from_this_alive = tb->keep_shared_from_this_alive;
}
if (NB_DYNAMIC_VERSION < 0x030E0000) {
// On Python 3.14+, use Py_tp_vectorcall to set the type vectorcall
// slot. Otherwise, assign tp_vectorcall or use a workaround (via
// tp_vectorcall_offset) for stable ABI builds.
#if defined(Py_LIMITED_API)
to->vectorcall = type_vectorcall;
#else
((PyTypeObject *) result)->tp_vectorcall = type_vectorcall;
#endif
}
to->name = name_copy;
to->type_py = (PyTypeObject *) result;
to->alias_chain = nullptr;
to->init = nullptr;
if (has_dynamic_attr)
to->flags |= (uint32_t) type_flags::has_dynamic_attr;
if (is_weak_referenceable)
to->flags |= (uint32_t) type_flags::is_weak_referenceable;
// Always cache dictoffset/weaklistoffset so nb_dict_ptr()/nb_weaklist_ptr()
// only access dicts/weaklists created by nanobind, not those added by Python
to->dictoffset = (uint32_t) dictoffset;
to->weaklistoffset = (uint32_t) weaklistoffset;
if (t->scope != nullptr)
setattr(t->scope, t_name, result);
setattr(result, "__qualname__", qualname.ptr());
if (modname.is_valid())
setattr(result, "__module__", modname.ptr());
{
lock_internals guard(internals_);
internals_->type_c2p_slow[t->type] = to;
#if !defined(NB_FREE_THREADED)
internals_->type_c2p_fast[(void *) t->type] = to;
#endif
}
if (has_signature) {
setattr(result, "__nb_signature__", str(t->name));
free((char *) t_name);
}
#if !defined(PYPY_VERSION)
if (generic_base)
setattr(result, "__orig_bases__", make_tuple(handle(t->base_py)));
#endif
return result;
}
/// Invoke the callable 'fn' with the single positional argument 'arg' via the
/// vectorcall protocol. One scratch slot is reserved in front of the argument
/// array (PY_VECTORCALL_ARGUMENTS_OFFSET) so that the callee may temporarily
/// reuse it without allocating.
PyObject *call_one_arg(PyObject *fn, PyObject *arg) noexcept {
    PyObject *stack[2];
    stack[0] = nullptr; // scratch slot required by PY_VECTORCALL_ARGUMENTS_OFFSET
    stack[1] = arg;
    size_t nargsf = PY_VECTORCALL_ARGUMENTS_OFFSET + 1;
    return PyObject_Vectorcall(fn, stack + 1, nargsf, nullptr);
}
/// Encapsulates the implicit conversion part of nb_type_get(): try to turn
/// 'src' into an instance of 'dst_type' using the registered C++-level and
/// Python-level implicit conversions. On success, the temporary result is
/// appended to 'cleanup' (it must outlive the function call) and '*out'
/// receives the C++ pointer.
static NB_NOINLINE bool nb_type_get_implicit(PyObject *src,
                                             const std::type_info *cpp_type_src,
                                             const type_data *dst_type,
                                             nb_internals *internals_,
                                             cleanup_list *cleanup, void **out) noexcept {
    // C++-level conversions: the destination type records the C++ source
    // types it can be implicitly constructed from.
    if (dst_type->implicit.cpp && cpp_type_src) {
        const std::type_info **it = dst_type->implicit.cpp;
        const std::type_info *v;

        // Pass 1: exact typeid match (by pointer, or deep equality)
        while ((v = *it++)) {
            if (v == cpp_type_src || *v == *cpp_type_src)
                goto found;
        }

        // Pass 2: 'src' may be an instance of a Python *subtype* of one of
        // the registered source types; check the Python inheritance chain.
        it = dst_type->implicit.cpp;
        while ((v = *it++)) {
            const type_data *d = nb_type_c2p(internals_, v);
            if (d && PyType_IsSubtype(Py_TYPE(src), d->type_py))
                goto found;
        }
    }

    // Python-level conversions: user-provided predicates that decide whether
    // 'src' is convertible to the destination type.
    if (dst_type->implicit.py) {
        bool (**it)(PyTypeObject *, PyObject *, cleanup_list *) noexcept =
            dst_type->implicit.py;
        bool (*v2)(PyTypeObject *, PyObject *, cleanup_list *) noexcept;

        while ((v2 = *it++)) {
            if (v2(dst_type->type_py, src, cleanup))
                goto found;
        }
    }

    return false;

found:
    // Perform the conversion by calling the destination type with 'src'
    PyObject *result = call_one_arg((PyObject *) dst_type->type_py, src);

    if (result) {
        // Tie the lifetime of the temporary to the cleanup list
        cleanup->append(result);
        *out = inst_ptr((nb_inst *) result);
        return true;
    } else {
        PyErr_Clear();

        if (internals->print_implicit_cast_warnings) {
#if !defined(Py_LIMITED_API)
            const char *name = Py_TYPE(src)->tp_name;
#else
            PyObject *name_py = nb_inst_name(src);
            const char *name = PyUnicode_AsUTF8AndSize(name_py, nullptr);
#endif
            // Can't use PyErr_Warn*() if conversion failed due to a stack overflow
            fprintf(stderr,
                    "nanobind: implicit conversion from type '%s' to type '%s' "
                    "failed!\n", name, dst_type->name);

#if defined(Py_LIMITED_API)
            Py_DECREF(name_py);
#endif
        }

        return false;
    }
}
// Attempt to retrieve a pointer to a C++ instance underlying the Python
// object 'src'. Returns true and writes the pointer to '*out' on success;
// implicit conversions are attempted as a last resort when requested via
// 'flags' and a cleanup list is available.
bool nb_type_get(const std::type_info *cpp_type, PyObject *src, uint8_t flags,
                 cleanup_list *cleanup, void **out) noexcept {
    // Convert None -> nullptr
    if (src == Py_None) {
        *out = nullptr;
        return true;
    }

    PyTypeObject *src_type = Py_TYPE(src);
    const std::type_info *cpp_type_src = nullptr;
    const bool src_is_nb_type = nb_type_check((PyObject *) src_type);
    type_data *dst_type = nullptr;
    nb_internals *internals_ = internals;

    // If 'src' is a nanobind-bound type
    if (NB_LIKELY(src_is_nb_type)) {
        type_data *t = nb_type_data(src_type);
        cpp_type_src = t->type;

        // Check if the source / destination typeid are an exact match
        bool valid = cpp_type == cpp_type_src || *cpp_type == *cpp_type_src;

        // If not, look up the Python type and check the inheritance chain
        if (NB_UNLIKELY(!valid)) {
            dst_type = nb_type_c2p(internals_, cpp_type);
            if (dst_type)
                valid = PyType_IsSubtype(src_type, dst_type->type_py);
        }

        // Success, return the pointer if the instance is correctly initialized
        if (NB_LIKELY(valid)) {
            nb_inst *inst = (nb_inst *) src;

            static_assert(cast_flags::construct == nb_inst::state_ready,
                          "this function is optimized assuming that "
                          "cast_flags::construct == nb_inst::state_ready");

            // Single-branch validation of the instance state against the
            // requested access mode, exploiting the equality asserted above:
            // (flags & construct)     state     xor-result   should accept?
            //   [normal]    0       [uninit] 0      0        no
            //   [normal]    0       [relinq] 1      1        no
            //   [normal]    0       [ready]  2      2        yes
            //   [construct] 2       [uninit] 0      2        yes
            //   [construct] 2       [relinq] 1      3        no
            //   [construct] 2       [ready]  2      0        no
            if (NB_UNLIKELY(((flags & (uint8_t) cast_flags::construct) ^
                             inst->state) != nb_inst::state_ready)) {
                constexpr const char* errors[4] = {
                    /* 0 = uninit  */ "attempted to access an uninitialized instance",
                    /* 1 = relinq  */ "attempted to access a relinquished instance",
                    /* 2 = ready   */ "attempted to initialize an already-initialized instance",
                    /* 3 = invalid */ "instance state has become corrupted",
                };
                PyErr_WarnFormat(
                    PyExc_RuntimeWarning, 1, "nanobind: %s of type '%s'!\n",
                    errors[inst->state], t->name);
                return false;
            }

            *out = inst_ptr(inst);
            return true;
        }
    }

    // Try an implicit conversion as last resort (if possible & requested)
    if ((flags & (uint16_t) cast_flags::convert) && cleanup) {
        if (!src_is_nb_type)
            dst_type = nb_type_c2p(internals_, cpp_type);

        if (dst_type &&
            (dst_type->flags & (uint32_t) type_flags::has_implicit_conversions))
            return nb_type_get_implicit(src, cpp_type_src, dst_type, internals_,
                                        cleanup, out);
    }

    return false;
}
/// Weakref callback used by keep_alive() for non-nanobind nurse objects.
/// It is registered via PyWeakref_NewRef(nurse, callback) with the patient
/// stored as the PyCFunction's 'self'; when the nurse expires, both the
/// (intentionally leaked) weak reference and the patient are released.
static PyObject *keep_alive_callback(PyObject *self, PyObject *const *args,
                                     Py_ssize_t nargs) {
    check(nargs == 1 && PyWeakref_CheckRefExact(args[0]),
          "nanobind::detail::keep_alive_callback(): invalid input!");
    Py_DECREF(args[0]); // args[0] is the weak reference object itself
    Py_DECREF(self);    // 'self' holds the patient (see keep_alive())
    Py_INCREF(Py_None);
    return Py_None;
}
/// Method descriptor that wraps keep_alive_callback above so that it can be
/// instantiated as a PyCFunction object carrying the patient as 'self'
static PyMethodDef keep_alive_callback_def = {
    "keep_alive_callback", (PyCFunction) (void *) keep_alive_callback,
    METH_FASTCALL, nullptr
};
/// Keep 'patient' alive (hold a strong reference to it) for at least as long
/// as 'nurse' exists. No-op when either argument is null or None.
void keep_alive(PyObject *nurse, PyObject *patient) {
    if (!patient || !nurse || nurse == Py_None || patient == Py_None)
        return;

    if (nb_type_check((PyObject *) Py_TYPE(nurse))) {
        // Fast path: 'nurse' is a nanobind instance. Record the patient in
        // the keep_alive map of the shard associated with the nurse.
#if defined(NB_FREE_THREADED)
        nb_shard &shard = internals->shard(inst_ptr((nb_inst *) nurse));
        lock_shard guard(shard);
#else
        nb_shard &shard = internals->shards[0];
#endif

        nb_weakref_seq **pp = (nb_weakref_seq **) &shard.keep_alive[nurse];

        // Walk the existing list; if this exact patient (without a custom
        // callback) is already registered, there is nothing to do.
        do {
            nb_weakref_seq *p = *pp;
            if (!p)
                break;
            else if (p->payload == patient && !p->callback)
                return;
            pp = &p->next;
        } while (true);

        // Append a new entry holding a strong reference to the patient
        nb_weakref_seq *s =
            (nb_weakref_seq *) PyMem_Malloc(sizeof(nb_weakref_seq));
        check(s, "nanobind::detail::keep_alive(): out of memory!");
        s->payload = patient;
        s->callback = nullptr;
        s->next = nullptr;
        *pp = s;
        Py_INCREF(patient);
        ((nb_inst *) nurse)->clear_keep_alive = true;
    } else {
        // Slow path: attach a weak reference to the nurse whose callback
        // (keep_alive_callback) releases the patient when the nurse expires
        PyObject *callback =
            PyCFunction_New(&keep_alive_callback_def, patient);

        PyObject *weakref = PyWeakref_NewRef(nurse, callback);
        if (!weakref) {
            Py_DECREF(callback);
            PyErr_Clear();
            raise("nanobind::detail::keep_alive(): could not create a weak "
                  "reference! Likely, the 'nurse' argument you specified is not "
                  "a weak-referenceable type!");
        }
        check(callback,
              "nanobind::detail::keep_alive(): callback creation failed!");

        // Increase patient reference count, leak weak reference
        Py_INCREF(patient);
        Py_DECREF(callback);
    }
}
/// Variant of keep_alive(): register a (payload, callback) pair that is
/// invoked when the nanobind instance 'nurse' is garbage collected
void keep_alive(PyObject *nurse, void *payload,
                void (*callback)(void *) noexcept) noexcept {
    check(nurse, "nanobind::detail::keep_alive(): 'nurse' is undefined!");

    if (nb_type_check((PyObject *) Py_TYPE(nurse))) {
        // 'nurse' is a nanobind instance: prepend an entry to its
        // keep_alive list storing the payload + callback pair
#if defined(NB_FREE_THREADED)
        nb_shard &shard = internals->shard(inst_ptr((nb_inst *) nurse));
        lock_shard guard(shard);
#else
        nb_shard &shard = internals->shards[0];
#endif

        nb_weakref_seq
            **pp = (nb_weakref_seq **) &shard.keep_alive[nurse],
             *s = (nb_weakref_seq *) PyMem_Malloc(sizeof(nb_weakref_seq));
        check(s, "nanobind::detail::keep_alive(): out of memory!");

        s->payload = payload;
        s->callback = callback;
        s->next = *pp;
        *pp = s;

        ((nb_inst *) nurse)->clear_keep_alive = true;
    } else {
        // Otherwise, wrap the payload in a capsule constructed with
        // 'callback' (presumably invoked on capsule destruction -- see
        // capsule_new) and keep that capsule alive via the other overload
        PyObject *patient = capsule_new(payload, nullptr, callback);
        keep_alive(nurse, patient);
        Py_DECREF(patient);
    }
}
/// Shared tail of nb_type_put() / nb_type_put_p(): create a Python instance
/// of type 't' wrapping the C++ object 'value' according to the return value
/// policy 'rvp'. '*is_new' is set when a new wrapper taking (some form of)
/// ownership was created. Returns nullptr on failure.
static PyObject *nb_type_put_common(void *value, type_data *t, rv_policy rvp,
                                    cleanup_list *cleanup,
                                    bool *is_new) noexcept {
    // The reference_internals RVP needs a self pointer, give up if unavailable
    if (rvp == rv_policy::reference_internal && (!cleanup || !cleanup->self()))
        return nullptr;

    const bool intrusive = t->flags & (uint32_t) type_flags::intrusive_ptr;
    if (intrusive)
        rvp = rv_policy::take_ownership;

    // 'copy' and 'move' construct a fresh C++ object inside the Python
    // instance; the other policies wrap the existing pointer directly
    const bool create_new = rvp == rv_policy::copy || rvp == rv_policy::move;

    nb_inst *inst;
    if (create_new)
        inst = (nb_inst *) inst_new_int(t->type_py, nullptr, nullptr);
    else
        inst = (nb_inst *) inst_new_ext(t->type_py, value);

    if (!inst)
        return nullptr;

    void *new_value = inst_ptr(inst);
    if (rvp == rv_policy::move) {
        if (t->flags & (uint32_t) type_flags::is_move_constructible) {
            if (t->flags & (uint32_t) type_flags::has_move) {
                try {
                    t->move(new_value, value);
                } catch (...) {
                    Py_DECREF(inst);
                    return nullptr;
                }
            } else {
                // No bound move constructor: byte-copy, then zero the source
                memcpy(new_value, value, t->size);
                memset(value, 0, t->size);
            }
        } else {
            // Fall back to a copy when the type cannot be moved
            check(t->flags & (uint32_t) type_flags::is_copy_constructible,
                  "nanobind::detail::nb_type_put(\"%s\"): attempted to move "
                  "an instance that is neither copy- nor move-constructible!",
                  t->name);
            rvp = rv_policy::copy;
        }
    }

    if (rvp == rv_policy::copy) {
        check(t->flags & (uint32_t) type_flags::is_copy_constructible,
              "nanobind::detail::nb_type_put(\"%s\"): attempted to copy "
              "an instance that is not copy-constructible!", t->name);

        if (t->flags & (uint32_t) type_flags::has_copy) {
            try {
                t->copy(new_value, value);
            } catch (...) {
                Py_DECREF(inst);
                return nullptr;
            }
        } else {
            // No bound copy constructor: fall back to a byte copy
            memcpy(new_value, value, t->size);
        }
    }

    // If we can find an existing C++ shared_ptr for this object, and
    // the instance we're creating just holds a pointer, then take out
    // another C++ shared_ptr that shares ownership with the existing
    // one, and tie its lifetime to the Python object. This is the
    // same thing done by the <nanobind/stl/shared_ptr.h> caster when
    // returning shared_ptr<T> to Python explicitly.
    if ((t->flags & (uint32_t) type_flags::has_shared_from_this) &&
        !create_new && t->keep_shared_from_this_alive((PyObject *) inst))
        rvp = rv_policy::reference;
    else if (is_new)
        *is_new = true;

    // Translate the final policy into per-instance ownership flags
    inst->destruct = rvp != rv_policy::reference && rvp != rv_policy::reference_internal;
    inst->cpp_delete = rvp == rv_policy::take_ownership;
    inst->state = nb_inst::state_ready;

    if (rvp == rv_policy::reference_internal)
        keep_alive((PyObject *) inst, cleanup->self());

    if (intrusive)
        t->set_self_py(new_value, (PyObject *) inst);

    // Externally-stored instances are registered so that later
    // pointer -> Python lookups can find them
    if (!create_new)
        inst_register((PyObject *) inst, value);

    return (PyObject *) inst;
}
/// Convert the C++ object 'value' of type 'cpp_type' into a Python object
/// according to the return value policy 'rvp'. Reuses a previously
/// registered wrapper when one exists; otherwise creates a new one via
/// nb_type_put_common().
PyObject *nb_type_put(const std::type_info *cpp_type,
                      void *value, rv_policy rvp,
                      cleanup_list *cleanup,
                      bool *is_new) noexcept {
    // Convert nullptr -> None
    if (!value) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    nb_internals *internals_ = internals;
    type_data *td = nullptr;

    // Lazily resolve (and cache in 'td') the type record for 'cpp_type'
    auto lookup_type = [cpp_type, internals_, &td]() -> bool {
        if (!td) {
            type_data *d = nb_type_c2p(internals_, cpp_type);
            if (!d)
                return false;
            td = d;
        }
        return true;
    };

    if (rvp != rv_policy::copy) {
        nb_shard &shard = internals_->shard(value);
        lock_shard guard(shard);

        // Check if the instance is already registered with nanobind
        nb_ptr_map &inst_c2p = shard.inst_c2p;
        nb_ptr_map::iterator it = inst_c2p.find(value);

        if (it != inst_c2p.end()) {
            void *entry = it->second;

            // The map entry is either a single instance or a linked
            // sequence of instances sharing the same address
            nb_inst_seq seq;
            if (NB_UNLIKELY(nb_is_seq(entry))) {
                seq = *nb_get_seq(entry);
            } else {
                seq.inst = (PyObject *) entry;
                seq.next = nullptr;
            }

            // Reuse an existing wrapper whose type matches exactly, or is a
            // Python-level subtype of the requested type
            while (true) {
                PyTypeObject *tp = Py_TYPE(seq.inst);

                if (nb_type_data(tp)->type == cpp_type) {
                    // nb_try_inc_ref may fail if the object is being torn down
                    if (nb_try_inc_ref(seq.inst))
                        return seq.inst;
                }

                if (!lookup_type())
                    return nullptr;

                if (PyType_IsSubtype(tp, td->type_py)) {
                    if (nb_try_inc_ref(seq.inst))
                        return seq.inst;
                }

                if (seq.next == nullptr)
                    break;

                seq = *seq.next;
            }
        } else if (rvp == rv_policy::none) {
            // rv_policy::none requires a preexisting wrapper
            return nullptr;
        }
    }

    // Look up the corresponding Python type if not already done
    if (!lookup_type())
        return nullptr;

    return nb_type_put_common(value, td, rvp, cleanup, is_new);
}
/// Variant of nb_type_put() that additionally considers a second type
/// 'cpp_type_p' (presumably the dynamic/polymorphic type of 'value' -- see
/// callers). When both are known, the wrapper is created for the more
/// derived 'cpp_type_p' if it is registered with nanobind.
PyObject *nb_type_put_p(const std::type_info *cpp_type,
                        const std::type_info *cpp_type_p,
                        void *value, rv_policy rvp,
                        cleanup_list *cleanup,
                        bool *is_new) noexcept {
    // Convert nullptr -> None
    if (!value) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    // Check if the instance is already registered with nanobind
    nb_internals *internals_ = internals;

    // Look up the corresponding Python type
    type_data *td = nullptr,
              *td_p = nullptr;

    // Lazily resolve the type records for both 'cpp_type' and (when distinct
    // and provided) 'cpp_type_p'; only 'td' is required for success
    auto lookup_type = [cpp_type, cpp_type_p, internals_, &td, &td_p]() -> bool {
        if (!td) {
            type_data *d = nb_type_c2p(internals_, cpp_type);
            if (!d)
                return false;
            td = d;

            if (cpp_type_p && cpp_type_p != cpp_type)
                td_p = nb_type_c2p(internals_, cpp_type_p);
        }
        return true;
    };

    if (rvp != rv_policy::copy) {
        nb_shard &shard = internals_->shard(value);
        lock_shard guard(shard);

        // Check if the instance is already registered with nanobind
        nb_ptr_map &inst_c2p = shard.inst_c2p;
        nb_ptr_map::iterator it = inst_c2p.find(value);

        if (it != inst_c2p.end()) {
            void *entry = it->second;

            // The map entry is either a single instance or a linked
            // sequence of instances sharing the same address
            nb_inst_seq seq;
            if (NB_UNLIKELY(nb_is_seq(entry))) {
                seq = *nb_get_seq(entry);
            } else {
                seq.inst = (PyObject *) entry;
                seq.next = nullptr;
            }

            // Reuse an existing wrapper matching either type exactly, or
            // being a Python-level subtype of either registered type
            while (true) {
                PyTypeObject *tp = Py_TYPE(seq.inst);
                const std::type_info *p = nb_type_data(tp)->type;

                if (p == cpp_type || p == cpp_type_p) {
                    if (nb_try_inc_ref(seq.inst))
                        return seq.inst;
                }

                if (!lookup_type())
                    return nullptr;

                if (PyType_IsSubtype(tp, td->type_py) ||
                    (td_p && PyType_IsSubtype(tp, td_p->type_py))) {
                    if (nb_try_inc_ref(seq.inst))
                        return seq.inst;
                }

                if (seq.next == nullptr)
                    break;

                seq = *seq.next;
            }
        } else if (rvp == rv_policy::none) {
            // rv_policy::none requires a preexisting wrapper
            return nullptr;
        }
    }

    // Look up the corresponding Python type if not already done
    if (!lookup_type())
        return nullptr;

    // Prefer the more specific type record when available
    return nb_type_put_common(value, td_p ? td_p : td, rvp, cleanup, is_new);
}
/// Validate and adjust the ownership flags of an instance produced by
/// nb_type_put() when it is bound to a std::unique_ptr on the C++ side.
/// 'cpp_delete' indicates whether C++-side deletion applies; 'is_new'
/// whether the wrapper was freshly created (vs. reused).
static void nb_type_put_unique_finalize(PyObject *o,
                                        const std::type_info *cpp_type,
                                        bool cpp_delete, bool is_new) {
    (void) cpp_type; // only used in diagnostics below
    check(cpp_delete || !is_new,
          "nanobind::detail::nb_type_put_unique(type='%s', cpp_delete=%i): "
          "ownership status has become corrupted.",
          type_name(cpp_type), cpp_delete);

    nb_inst *inst = (nb_inst *) o;

    if (cpp_delete) {
        // A fresh wrapper must be ready and own its contents; a reused one
        // must be in the 'relinquished' state with ownership flags cleared
        check(inst->state == (is_new ? nb_inst::state_ready
                                     : nb_inst::state_relinquished) &&
              (bool) inst->destruct == is_new &&
              (bool) inst->cpp_delete == is_new,
              "nanobind::detail::nb_type_put_unique(type='%s', cpp_delete=%i): "
              "unexpected status flags! (state=%i, destruct=%i, cpp_delete=%i)",
              type_name(cpp_type), cpp_delete, inst->state, inst->destruct,
              inst->cpp_delete);

        // Python takes (back) full ownership
        inst->state = nb_inst::state_ready;
        inst->destruct = inst->cpp_delete = true;
    } else {
        check(inst->state == nb_inst::state_relinquished,
              "nanobind::detail::nb_type_put_unique('%s'): ownership "
              "status has become corrupted.", type_name(cpp_type));
        inst->state = nb_inst::state_ready;
    }
}
/// Convert a C++ object held by a std::unique_ptr into a Python object.
/// 'cpp_delete' selects whether Python takes ownership of the pointer.
PyObject *nb_type_put_unique(const std::type_info *cpp_type,
                             void *value,
                             cleanup_list *cleanup, bool cpp_delete) noexcept {
    bool created = false;

    rv_policy rvp;
    if (cpp_delete)
        rvp = rv_policy::take_ownership;
    else
        rvp = rv_policy::none;

    PyObject *result = nb_type_put(cpp_type, value, rvp, cleanup, &created);
    if (!result)
        return nullptr;

    // Fix up the ownership flags for unique_ptr semantics
    nb_type_put_unique_finalize(result, cpp_type, cpp_delete, created);
    return result;
}
/// Polymorphic variant of nb_type_put_unique(): also considers the dynamic
/// type 'cpp_type_p' when selecting the Python wrapper type
PyObject *nb_type_put_unique_p(const std::type_info *cpp_type,
                               const std::type_info *cpp_type_p,
                               void *value,
                               cleanup_list *cleanup, bool cpp_delete) noexcept {
    bool created = false;

    rv_policy rvp;
    if (cpp_delete)
        rvp = rv_policy::take_ownership;
    else
        rvp = rv_policy::none;

    PyObject *result =
        nb_type_put_p(cpp_type, cpp_type_p, value, rvp, cleanup, &created);
    if (!result)
        return nullptr;

    // Fix up the ownership flags for unique_ptr semantics
    nb_type_put_unique_finalize(result, cpp_type, cpp_delete, created);
    return result;
}
/// Emit a RuntimeWarning explaining why ownership transfer of 'o' from
/// Python to C++ failed ('why' supplies the detailed reason)
static void warn_relinquish_failed(const char *why, PyObject *o) noexcept {
    PyObject *name = nb_inst_name(o);
    int rc = PyErr_WarnFormat(
        PyExc_RuntimeWarning, 1,
        "nanobind::detail::nb_relinquish_ownership(): could not "
        "transfer ownership of a Python instance of type '%U' to C++. %s",
        name, why);
    if (rc != 0) // user has configured warnings-as-errors
        PyErr_WriteUnraisable(o);
    Py_DECREF(name);
}
/// Transfer ownership of a Python instance to C++ (e.g., when moving it
/// into a std::unique_ptr). Returns false (after warning) on failure.
bool nb_type_relinquish_ownership(PyObject *o, bool cpp_delete) noexcept {
    nb_inst *inst = (nb_inst *) o;

    /* This function is called after nb_type_get() succeeds, so the instance
       should be ready; but the !ready case is possible if an attempt is made to
       transfer ownership of the same object to C++ multiple times as part of
       the same data structure. For example, converting Python (foo, foo) to C++
       std::pair<std::unique_ptr<T>, std::unique_ptr<T>>. */
    if (inst->state != nb_inst::state_ready) {
        warn_relinquish_failed(
            "The resulting data structure would have multiple "
            "std::unique_ptrs, each thinking that they own the same instance, "
            "which is not allowed.", o);
        return false;
    }

    if (cpp_delete) {
        // C++-side deletion is only legal when Python currently owns the
        // object and its storage is not embedded in the Python object
        if (!inst->cpp_delete || !inst->destruct || inst->internal) {
            warn_relinquish_failed(
                "This is only possible when the instance was previously "
                "constructed on the C++ side and is now owned by Python, which "
                "was not the case here. You could change the unique pointer "
                "signature to std::unique_ptr<T, nb::deleter<T>> to work "
                "around this issue.", o);
            return false;
        }

        inst->cpp_delete = false;
        inst->destruct = false;
    }

    inst->state = nb_inst::state_relinquished;
    return true;
}
/// Undo a previous nb_type_relinquish_ownership(): ownership of the
/// instance returns to Python
void nb_type_restore_ownership(PyObject *o, bool cpp_delete) noexcept {
    nb_inst *inst = (nb_inst *) o;

    check(inst->state == nb_inst::state_relinquished,
          "nanobind::detail::nb_type_restore_ownership('%s'): ownership "
          "status has become corrupted.",
          // NOTE(review): nb_inst_name() returns a new reference that is not
          // released here; harmless since 'check' failure is fatal -- confirm
          PyUnicode_AsUTF8AndSize(nb_inst_name(o), nullptr));

    inst->state = nb_inst::state_ready;
    if (cpp_delete) {
        inst->cpp_delete = true;
        inst->destruct = true;
    }
}
/// Check whether 'o' is an instance of the nanobind type bound to the C++
/// type 't'; false when 't' is not registered with nanobind
bool nb_type_isinstance(PyObject *o, const std::type_info *t) noexcept {
    type_data *td = nb_type_c2p(internals, t);
    return td != nullptr && PyType_IsSubtype(Py_TYPE(o), td->type_py) != 0;
}
/// Return the Python type object bound to the C++ type 't', or nullptr
/// when 't' is not registered with nanobind
PyObject *nb_type_lookup(const std::type_info *t) noexcept {
    type_data *td = nb_type_c2p(internals, t);
    return td ? (PyObject *) td->type_py : nullptr;
}
/// Check whether 't' is a nanobind type: its metaclass's metaclass must be
/// the cached nanobind metaclass ('nb_meta_cache')
bool nb_type_check(PyObject *t) noexcept {
    PyTypeObject *meta  = Py_TYPE(t),
                 *meta2 = Py_TYPE((PyObject *) meta);
    return meta2 == nb_meta_cache;
}
/// Size (in bytes) of the C++ object bound by the nanobind type 't'
size_t nb_type_size(PyObject *t) noexcept {
    return nb_type_data((PyTypeObject *) t)->size;
}
/// Alignment requirement of the C++ object bound by the nanobind type 't'
size_t nb_type_align(PyObject *t) noexcept {
    return nb_type_data((PyTypeObject *) t)->align;
}
/// C++ RTTI record (std::type_info) of the nanobind type 't'
const std::type_info *nb_type_info(PyObject *t) noexcept {
    return nb_type_data((PyTypeObject *) t)->type;
}
/// Pointer to the user-provided 'supplement' region, which is stored
/// immediately after the type_data record
void *nb_type_supplement(PyObject *t) noexcept {
    return nb_type_data((PyTypeObject *) t) + 1;
}
/// Allocate an uninitialized instance of the nanobind type 't'; raises a
/// C++ exception reflecting the Python error on failure
PyObject *nb_inst_alloc(PyTypeObject *t) {
    PyObject *inst = inst_new_int(t, nullptr, nullptr);
    if (inst == nullptr)
        raise_python_error();
    return inst;
}
/// Create a Python wrapper that references (but does not own) the C++
/// object at 'ptr'. If 'parent' is given, it is kept alive alongside.
PyObject *nb_inst_reference(PyTypeObject *t, void *ptr, PyObject *parent) {
    PyObject *result = inst_new_ext(t, ptr);
    if (!result)
        raise_python_error();
    nb_inst *nbi = (nb_inst *) result;
    // Non-owning: neither the destructor nor C++ delete will run
    nbi->destruct = nbi->cpp_delete = false;
    nbi->state = nb_inst::state_ready;
    if (parent)
        keep_alive(result, parent);
    inst_register(result, ptr);
    return result;
}
/// Create a Python wrapper that takes full ownership of the C++ object at
/// 'ptr': both the destructor and C++-side deletion will run on teardown
PyObject *nb_inst_take_ownership(PyTypeObject *t, void *ptr) {
    PyObject *obj = inst_new_ext(t, ptr);
    if (obj == nullptr)
        raise_python_error();

    nb_inst *inst = (nb_inst *) obj;
    inst->state = nb_inst::state_ready;
    inst->destruct = true;
    inst->cpp_delete = true;

    inst_register(obj, ptr);
    return obj;
}
/// Return a pointer to the C++ storage underlying the nanobind instance 'o'
void *nb_inst_ptr(PyObject *o) noexcept {
    return inst_ptr((nb_inst *) o);
}
/// Zero-initialize the C++ storage of instance 'o' and mark it ready
/// (with destruction enabled)
void nb_inst_zero(PyObject *o) noexcept {
    nb_inst *nbi = (nb_inst *) o;
    type_data *td = nb_type_data(Py_TYPE(o));
    memset(inst_ptr(nbi), 0, td->size);
    nbi->state = nb_inst::state_ready;
    nbi->destruct = true;
}
/// Allocate a zero-initialized instance of the nanobind type 't'; raises a
/// C++ exception reflecting the Python error on failure.
///
/// This is exactly nb_inst_alloc() followed by nb_inst_zero(); delegating to
/// those helpers removes the previously duplicated allocation/zeroing logic
/// and keeps the three entry points consistent.
PyObject *nb_inst_alloc_zero(PyTypeObject *t) {
    PyObject *result = nb_inst_alloc(t); // raises on allocation failure
    nb_inst_zero(result);                // memset storage, mark ready/destruct
    return result;
}
/// Set the ready/destruct flags of instance 'o'
void nb_inst_set_state(PyObject *o, bool ready, bool destruct) noexcept {
    nb_inst *nbi = (nb_inst *) o;
    nbi->state = ready ? nb_inst::state_ready : nb_inst::state_uninitialized;
    nbi->destruct = destruct;
    // 'internal' instances embed their storage in the Python object, so
    // C++-side deletion never applies to them
    nbi->cpp_delete = destruct && !nbi->internal;
}
/// Query the (ready, destruct) flag pair of instance 'o'
std::pair<bool, bool> nb_inst_state(PyObject *o) noexcept {
    nb_inst *nbi = (nb_inst *) o;
    return { nbi->state == nb_inst::state_ready, (bool) nbi->destruct };
}
/// Run the C++ destructor of instance 'o' (if it owns its contents) and
/// mark the instance uninitialized. Destroying a relinquished instance
/// is a fatal error.
void nb_inst_destruct(PyObject *o) noexcept {
    nb_inst *nbi = (nb_inst *) o;
    type_data *t = nb_type_data(Py_TYPE(o));

    check(nbi->state != nb_inst::state_relinquished,
          "nanobind::detail::nb_inst_destruct(\"%s\"): attempted to destroy "
          "an object whose ownership had been transferred away!",
          t->name);

    if (nbi->destruct) {
        check(t->flags & (uint32_t) type_flags::is_destructible,
              "nanobind::detail::nb_inst_destruct(\"%s\"): attempted to call "
              "the destructor of a non-destructible type!",
              t->name);
        // 'has_destruct' marks types with a bound (nontrivial) destructor
        if (t->flags & (uint32_t) type_flags::has_destruct)
            t->destruct(inst_ptr(nbi));
        nbi->destruct = false;
    }

    nbi->state = nb_inst::state_uninitialized;
}
/// Copy-construct the contents of 'src' into 'dst'. Both must be instances
/// of the same copy-constructible nanobind type; 'dst' becomes ready and
/// owns its contents afterwards.
void nb_inst_copy(PyObject *dst, const PyObject *src) noexcept {
    if (src == dst)
        return;
    PyTypeObject *tp = Py_TYPE((PyObject *) src);
    type_data *t = nb_type_data(tp);

    check(tp == Py_TYPE(dst) &&
          (t->flags & (uint32_t) type_flags::is_copy_constructible),
          "nanobind::detail::nb_inst_copy(): invalid arguments!");

    nb_inst *nbi = (nb_inst *) dst;
    const void *src_data = inst_ptr((nb_inst *) src);
    void *dst_data = inst_ptr(nbi);

    // Use the bound copy constructor when present, else a plain byte copy
    if (t->flags & (uint32_t) type_flags::has_copy)
        t->copy(dst_data, src_data);
    else
        memcpy(dst_data, src_data, t->size);

    nbi->state = nb_inst::state_ready;
    nbi->destruct = true;
}
/// Move-construct the contents of 'src' into 'dst'. Both must be instances
/// of the same move-constructible nanobind type; 'dst' becomes ready and
/// owns its contents afterwards.
void nb_inst_move(PyObject *dst, const PyObject *src) noexcept {
    if (src == dst)
        return;
    PyTypeObject *tp = Py_TYPE((PyObject *) src);
    type_data *t = nb_type_data(tp);

    check(tp == Py_TYPE(dst) &&
          (t->flags & (uint32_t) type_flags::is_move_constructible),
          "nanobind::detail::nb_inst_move(): invalid arguments!");

    nb_inst *nbi = (nb_inst *) dst;
    void *src_data = inst_ptr((nb_inst *) src);
    void *dst_data = inst_ptr(nbi);

    if (t->flags & (uint32_t) type_flags::has_move) {
        // Use the bound move constructor
        t->move(dst_data, src_data);
    } else {
        // No bound move constructor: byte-copy, then zero the source
        memcpy(dst_data, src_data, t->size);
        memset(src_data, 0, t->size);
    }

    nbi->state = nb_inst::state_ready;
    nbi->destruct = true;
}
/// Destroy the current contents of 'dst', then move-construct from 'src'.
/// The prior 'destruct' flag of 'dst' is restored afterwards.
void nb_inst_replace_move(PyObject *dst, const PyObject *src) noexcept {
    if (src == dst)
        return;
    nb_inst *nbi = (nb_inst *) dst;
    bool destruct = nbi->destruct;
    nbi->destruct = true; // force destruction of the old contents
    nb_inst_destruct(dst);
    nb_inst_move(dst, src);
    nbi->destruct = destruct;
}
/// Destroy the current contents of 'dst', then copy-construct from 'src'.
/// The prior 'destruct' flag of 'dst' is restored afterwards.
void nb_inst_replace_copy(PyObject *dst, const PyObject *src) noexcept {
    if (dst == src)
        return;

    nb_inst *inst = (nb_inst *) dst;
    const bool saved_destruct = inst->destruct;

    inst->destruct = true; // force destruction of the old contents
    nb_inst_destruct(dst);
    nb_inst_copy(dst, src);

    inst->destruct = saved_destruct;
}
#if defined(Py_LIMITED_API)
/// Stable-ABI fallback: locate the type_data record via PyObject_GetTypeData()
type_data *nb_type_data_static(PyTypeObject *o) noexcept {
    return (type_data *) PyObject_GetTypeData((PyObject *) o, Py_TYPE((PyObject *) o));
}
#endif
/// Return the name of type 't' as a Python string; heap types are qualified
/// with their module name ("module.name")
PyObject *nb_type_name(PyObject *t) noexcept {
    error_scope s; // save/restore any Python error that is currently set

#if PY_VERSION_HEX >= 0x030B0000
    PyObject *result = PyType_GetName((PyTypeObject *) t);
#else
    PyObject *result = PyObject_GetAttrString(t, "__name__");
#endif

    // Heap types (which includes nanobind types) carry a '__module__'
    // attribute used for qualification
    if (PyType_HasFeature((PyTypeObject *) t, Py_TPFLAGS_HEAPTYPE)) {
        PyObject *mod = PyObject_GetAttrString(t, "__module__");
        PyObject *combined = PyUnicode_FromFormat("%U.%U", mod, result);
        Py_DECREF(mod);
        Py_DECREF(result);
        result = combined;
    }

    return result;
}
/// Return the (qualified) type name of instance 'o' as a Python string
PyObject *nb_inst_name(PyObject *o) noexcept {
    return nb_type_name((PyObject *) Py_TYPE(o));
}
/// Check whether the type of 'o' carries the 'is_python_type' flag
/// (i.e., presumably subclassed on the Python side -- see type_flags)
bool nb_inst_python_derived(PyObject *o) noexcept {
    return nb_type_data(Py_TYPE(o))->flags &
           (uint32_t) type_flags::is_python_type;
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/stubgen.py | Python | #!/usr/bin/env python3
# pyright: strict
"""
stubgen.py: nanobind stub generation tool
This file provides both an API (``nanobind.stubgen.StubGen``) and a command
line interface to generate stubs for nanobind extensions.
To generate stubs on the command line, invoke the stub generator with a module
name, which will place the newly generated ``.pyi`` file directly into the
module folder.
```
python -m nanobind.stubgen <module name>
```
Specify ``-o <filename>`` or ``-O <path>`` to redirect the output somewhere
else in case this is not desired.
To programmatically generate stubs, construct an instance of the ``StubGen``
class and repeatedly call ``.put()`` to register modules or contents within the
modules (specific methods, classes, etc.). Afterwards, the ``.get()`` method
returns a string containing the stub declarations.
```
from nanobind.stubgen import StubGen
import my_module
sg = StubGen()
sg.put(my_module)
print(sg.get())
```
Internals:
----------
The implementation of stubgen performs a DFS traversal of the loaded module and
directly generates the stub in lockstep. There are no ASTs or other
intermediate data structures to keep things as simple as possible. Every kind
of object that could be encountered (functions, properties, values, types) has
a corresponding ``put_..`` method, along with the main ``put()`` entry point
that dispatches to the others as appropriate.
Internally, stub generation involves two potentially complex steps: converting
nanobind overload chains into '@overload' declarations that can be understood
by the 'typing' module, and turning default values back into Python
expressions. To make both steps well-defined, the implementation relies on an
internal ``__nb_signature__`` property that nanobind functions expose
specifically to simplify stub generation.
(Note that for now, the StubGen API is considered experimental and not subject
to the semantic versioning policy used by the nanobind project.)
"""
import argparse
import builtins
import enum
from inspect import Signature, Parameter, signature, ismodule
import io
import textwrap
import importlib
import importlib.machinery
import importlib.util
import types
import typing
from dataclasses import dataclass
from typing import Dict, Sequence, List, Optional, Tuple, cast, Generator, Any, Callable, Union, Protocol, Literal
from pathlib import Path
import re
import sys
from re import Match, Pattern
# On Python < 3.11, several typing features used below are only available via
# the 'typing_extensions' backport, which is therefore a hard requirement
# there. On newer interpreters, the sentinel value None indicates that the
# builtin 'typing' module suffices.
if sys.version_info < (3, 11):
    try:
        import typing_extensions
    except ImportError:
        raise RuntimeError(
            "stubgen.py requires the 'typing_extensions' package on Python <3.11"
        )
else:
    typing_extensions = None
# Attribute names that are always excluded from generated stubs
# fmt: off
SKIP_LIST = [
    # Various standard attributes found in modules, classes, etc.
    "__doc__", "__module__", "__name__", "__new__", "__builtins__",
    "__cached__", "__path__", "__version__", "__spec__", "__loader__",
    "__package__", "__nb_signature__", "__class_getitem__", "__orig_bases__",
    "__file__", "__dict__", "__weakref__", "__format__", "__nb_enum__",
    "__firstlineno__", "__static_attributes__", "__annotations__", "__annotate__",
    "__annotate_func__",

    # Auto-generated enum attributes. Type checkers synthesize these, so they
    # shouldn't appear in the stubs.
    "_new_member_", "_use_args_", "_member_names_", "_member_map_",
    "_value2member_map_", "_hashable_values_", "_unhashable_values_",
    "_unhashable_values_map_", "_value_repr_",
]

# Interpreter-internal types.
TYPES_TYPES = {
    getattr(types, name): name for name in [
        "MethodDescriptorType",
        "MemberDescriptorType",
        "ModuleType",
    ]
}
# fmt: on
# This type is used to track per-module imports (``import name as desired_name``)
# during stub generation. The actual name in the stub is given by the value element.
# (name, desired_as_name) -> actual_as_name
ImportDict = Dict[Tuple[Optional[str], Optional[str]], Optional[str]]

# This type maps a module name to an `ImportDict` tuple that tracks the
# import declarations from that module.
# package_name -> ((name, desired_as_name) -> actual_as_name)
PackagesDict = Dict[str, ImportDict]

# Type of an entry of the ``__nb_signature__`` tuple of nanobind objects.
# It stores a signature string and a docstring.
# (signature_str, doc_str)
NbSignature = Tuple[Optional[str], Optional[str]]

# Type of an entry of the ``__nb_signature__`` tuple of nanobind functions.
# It stores a function signature string, a docstring, and a tuple of default
# argument values.
# (signature_str, doc_str, (default_arg_1, default_arg_2, ...))
NbFunctionSignature = Tuple[Optional[str], Optional[str], Optional[Tuple[Any, ...]]]

# Type of an entry of the ``__nb_signature__`` tuple of nanobind getters and setters.
NbGetterSetterSignature = Tuple[str, str]
class NamedObject(Protocol):
    """
    Typing protocol representing an object with __name__ and __module__ members
    """
    __module__: str
    __name__: str
class NbFunction(Protocol):
    """
    Typing protocol representing a nanobind function with its __nb_signature__ property
    """
    __module__: Literal["nanobind"]
    __name__: Literal["nb_func", "nb_method"]
    __nb_signature__: Tuple[NbFunctionSignature, ...]
    __call__: Callable[..., Any]
@typing.runtime_checkable
class NbGetterSetter(Protocol):
    """
    Typing protocol representing a nanobind property getter/setter.
    ``@runtime_checkable`` permits ``isinstance()`` checks against it.
    """
    __nb_signature__: Tuple[NbGetterSetterSignature, ...]
class NbStaticProperty(Protocol):
    """Typing protocol representing a nanobind static property"""
    __module__: Literal["nanobind"]
    __name__: Literal["nb_static_property"]
    fget: NbGetterSetter
    fset: NbGetterSetter
class NbType(Protocol):
    """typing protocol representing a nanobind type object"""
    __module__: Literal["nanobind"]
    __name__: Literal["nb_type"]
    __nb_signature__: str
    __bases__: Tuple[Any, ...]
@dataclass
class ReplacePattern:
    """
    A compiled query (regular expression) and replacement pattern. Patterns can
    be loaded using the ``load_pattern_file()`` function defined below
    """
    # A replacement pattern as produced by ``load_pattern_file()`` below:
    # compiled regular expression matched against fully qualified names
    query: Pattern[str]
    # Replacement lines emitted when ``query`` matches
    lines: List[str]
    # Number of times this pattern matched (presumably maintained by code
    # outside this chunk -- TODO confirm against load_pattern_file/main)
    matches: int
def create_subdirectory_for_module(module: types.ModuleType) -> bool:
    """
    Decide whether a recursively generated stub should live in its own
    directory. Stubs normally go into a flat ``submodule.pyi`` file, except
    when the submodule has sub-submodules of its own or is implemented as a
    package (``submodule/__init__.py``) -- in those two cases the stub is
    written to ``submodule/__init__.pyi`` instead.
    """
    # A direct child module whose dotted name extends ours means this is a package
    has_child_module = any(
        ismodule(member)
        and member.__name__.rpartition(".")[0] == module.__name__
        for member in module.__dict__.values()
    )
    if has_child_module:
        return True
    # Otherwise, check whether the module is defined by an __init__.py file
    source = getattr(module, "__file__", None)
    return source is not None and source.endswith("__init__.py")
class StubGen:
def __init__(
self,
module: types.ModuleType,
recursive: bool = False,
include_docstrings: bool = True,
include_private: bool = False,
include_internal_imports: bool = True,
include_external_imports: bool = False,
max_expr_length: int = 50,
patterns: List[ReplacePattern] = [],
quiet: bool = True,
output_file: Optional[Path] = None
) -> None:
# Module to check for name conflicts when adding helper imports
self.module = module
# Include docstrings in the generated stub?
self.include_docstrings = include_docstrings
# Recurse into submodules?
self.recursive = recursive
# Include private members that start or end with a single underscore?
self.include_private = include_private
# Include types and functions imported from the same package (but a different module)
self.include_internal_imports = include_internal_imports
# Include types and functions imported from external packages?
self.include_external_imports = include_external_imports
# Maximal length (in characters) before an expression gets abbreviated as '...'
self.max_expr_length = max_expr_length
# Replacement patterns as produced by ``load_pattern_file()`` below
self.patterns = patterns
# Set this to ``True`` if output to stdout is unacceptable
self.quiet = quiet
# Target filename, only needed for recursive stub generation
self.output_file = output_file
# ---------- Internal fields ----------
# Current depth / indentation level
self.depth = 0
# Output buffer
self._output = io.StringIO()
# A stack to avoid infinite recursion
self.stack: List[object] = []
# An identifier associated with the top element of the stack
self.prefix = module.__name__
# Dictionary to keep track of import directives added by the stub generator
# Maps package_name -> ((name, desired_as_name) -> actual_as_name)
self.imports: PackagesDict = {}
# ---------- Regular expressions ----------
# Negative lookbehind matching word boundaries except '.'
sep_before = r"(?<![\\B\.])"
# Negative lookforward matching word boundaries except '.'
sep_after = r"(?![\\B\.])"
# Regexp matching a Python identifier
identifier = r"[^\d\W]\w*"
# Precompile RE a sequence of identifiers separated by periods
self.id_seq = re.compile(
sep_before
+ "((?:"
+ identifier
+ r"\.)+)("
+ identifier
+ r")\b"
+ sep_after
)
# Precompile RE to extract nanobind nd-arrays
self.ndarray_re = re.compile(
sep_before + r"(numpy\.ndarray|ndarray|torch\.Tensor)\[([^\]]*)\]"
)
# Types which moved from typing.* to collections.abc in Python 3.9
self.abc_re = re.compile(
'typing.(AsyncGenerator|AsyncIterable|AsyncIterator|Awaitable|Callable|'
'Collection|Container|Coroutine|Generator|Hashable|ItemsView|'
'Iterable|Iterator|KeysView|Mapping|MappingView|MutableMapping|'
'MutableSequence|MutableSet|Sequence|ValuesView)'
)
    @property
    def output(self) -> str:
        """Get the current output as a string."""
        return self._output.getvalue()
    def write(self, s: str) -> None:
        """Append raw characters to the output"""
        self._output.write(s)
    def write_ln(self, line: str) -> None:
        """Append an indented line"""
        # Whitespace-only input produces an empty line (no trailing blanks)
        if len(line) != 0 and not line.isspace():
            self._output.write(" " * self.depth + line)
        self._output.write("\n")
    def _replace_tail(self, num_chars: int, replacement: str) -> None:
        """Remove the last num_chars from output and append replacement."""
        self._output.seek(self._output.tell() - num_chars)
        self._output.truncate()
        self._output.write(replacement)
    def format_docstr(self, docstr: str, depth: int) -> str:
        """Format a single or multi-line docstring with given indentation"""
        docstr = textwrap.dedent(docstr).strip()
        raw_str = ""
        if "''" in docstr or "\\" in docstr:
            # Escape single-quote pairs and switch to a raw string literal so
            # that backslashes survive and the content cannot prematurely
            # terminate the emitted triple-quoted string
            docstr = docstr.replace("''", "\\'\\'")
            raw_str = "r"
        if len(docstr) > 70 or "\n" in docstr:
            # Long or multi-line docstrings get their own lines between the quotes
            docstr = "\n" + docstr + "\n"
        docstr = f'{raw_str}"""{docstr}"""\n'
        return textwrap.indent(docstr, " " * depth)
    def put_docstr(self, docstr: str) -> None:
        """Append an indented single or multi-line docstring"""
        self.write(self.format_docstr(docstr, self.depth))
    def put_nb_overload(self, fn: NbFunction, sig: NbFunctionSignature, name: Optional[str] = None) -> None:
        """
        The ``put_nb_func()`` repeatedly calls this method to render the
        individual method overloads.
        """
        sig_str, docstr, start = cast(str, sig[0]), cast(str, sig[1]), 0
        # Label anonymous functions
        if sig_str.startswith("def (") and name is not None:
            sig_str = "def " + name + sig_str[4:]
        # Simplify type names present in the signature
        paren = sig_str.find("(")
        sig_str = sig_str[:paren] + self.simplify_types(sig_str[paren:])
        # Substitute in string versions of the default arguments; nanobind
        # emits placeholders '\0', '\1', ... (or '\=0' for user overrides)
        default_args = sig[2]
        if default_args:
            for index, arg in enumerate(default_args):
                pos = -1
                pattern = None
                arg_str = None
                # First, handle the case where the user overrode the default value signature
                if isinstance(arg, str):
                    pattern = f"\\={index}"
                    pos = sig_str.find(pattern, start)
                    if pos >= 0:
                        arg_str = arg
                # General case
                if pos < 0:
                    pattern = f"\\{index}"
                    pos = sig_str.find(pattern, start)
                    if pos < 0:
                        raise Exception(
                            "Could not locate default argument in function signature"
                        )
                if not arg_str:
                    # Call expr_str to convert the default value to a string.
                    # Abbreviate with '...' if it is too long.
                    expr = self.expr_str(arg, abbrev=True)
                    arg_str = expr if expr else "..."
                assert (
                    "\n" not in arg_str
                ), "Default argument string may not contain newlines."
                assert pattern is not None
                sig_str = sig_str[:pos] + arg_str + sig_str[pos + len(pattern) :]
                start = pos + len(arg_str)
        # A plain 'nb_func' placed inside a class body is a static method
        if type(fn).__name__ == "nb_func" and self.depth > 0:
            self.write_ln("@staticmethod")
        if not docstr or not self.include_docstrings:
            # No docstring: emit the signature with an inline '...' body
            for s in sig_str.split("\n"):
                self.write_ln(s)
            self._replace_tail(1, ": ...\n")
        else:
            docstr = textwrap.dedent(docstr)
            for s in sig_str.split("\n"):
                self.write_ln(s)
            self._replace_tail(1, ":\n")
            self.depth += 1
            self.put_docstr(docstr)
            self.depth -= 1
            self.write("\n")
def put_nb_func(self, fn: NbFunction, name: Optional[str] = None) -> None:
"""Append a nanobind function binding to the stub"""
sigs = fn.__nb_signature__
count = len(sigs)
assert count > 0
if count == 1:
# No overloads write directly
self.put_nb_overload(fn, sigs[0], name)
else:
# Render an @overload-decorated chain
overload = self.import_object("typing", "overload")
for s in sigs:
self.write_ln(f"@{overload}")
self.put_nb_overload(fn, s, name)
    def put_function(self, fn: Callable[..., Any], name: Optional[str] = None, parent: Optional[object] = None):
        """Append a function of an arbitrary type to the stub"""
        # Don't generate a constructor for nanobind classes that aren't constructible
        if name == "__init__" and type(parent).__name__.startswith("nb_type"):
            return
        fn_module = getattr(fn, "__module__", None)
        fn_name = getattr(fn, "__name__", None)
        # Check if this function is an alias from *another* module
        if name and fn_module and fn_module != self.module.__name__:
            self.put_value(fn, name)
            return
        # Check if this function is an alias from the *same* module
        if name and fn_name and name != fn_name:
            self.write_ln(f"{name} = {fn_name}\n")
            return
        # Special handling for nanobind functions with overloads
        if type(fn).__module__ == "nanobind":
            fn = cast(NbFunction, fn)
            self.put_nb_func(fn, name)
            return
        if isinstance(fn, staticmethod):
            self.write_ln("@staticmethod")
            fn = fn.__func__
        elif isinstance(fn, classmethod):
            self.write_ln("@classmethod")
            fn = fn.__func__
        if name is None:
            name = fn.__name__
        assert name
        # Collect any @typing.overload declarations registered for this function
        overloads: Sequence[Callable[..., Any]] = []
        if hasattr(fn, "__module__"):
            if typing_extensions:
                overloads = typing_extensions.get_overloads(fn)
            else:
                overloads = typing.get_overloads(fn)
        if not overloads:
            overloads = [fn]
        for i, fno in enumerate(overloads):
            if len(overloads) > 1:
                overload = self.import_object("typing", "overload")
                self.write_ln(f"@{overload}")
            try:
                sig = signature(fno)
            except ValueError:
                sig = None
            if sig is not None:
                sig_str = f"{name}{self.signature_str(sig)}"
            else:
                # If inspect.signature fails, use a maximally permissive type.
                any_type = self.import_object("typing", "Any")
                sig_str = f"{name}(*args, **kwargs) -> {any_type}"
            # Potentially copy docstring from the implementation function
            docstr = fno.__doc__
            if i == 0 and not docstr and fn.__doc__:
                docstr = fn.__doc__
            if not docstr or not self.include_docstrings:
                self.write_ln("def " + sig_str + ": ...")
            else:
                self.write_ln("def " + sig_str + ":")
                self.depth += 1
                self.put_docstr(docstr)
                self.depth -= 1
                self.write("\n")
    def put_property(self, prop: property, name: Optional[str]):
        """Append a Python 'property' object"""
        # NOTE(review): assumes prop.fget is always present -- TODO confirm
        fget, fset = prop.fget, prop.fset
        self.write_ln("@property")
        self.put(fget, name=name)
        if fset:
            self.write_ln(f"@{name}.setter")
            docstrings_backup = self.include_docstrings
            # When the getter and setter carry the same docstring, only emit
            # it once (on the getter)
            if isinstance(fget, NbGetterSetter) and isinstance(fset, NbGetterSetter):
                doc1 = fget.__nb_signature__[0][1]
                doc2 = fset.__nb_signature__[0][1]
                if doc1 and doc2 and doc1 == doc2:
                    self.include_docstrings = False
            self.put(prop.fset, name=name)
            self.include_docstrings = docstrings_backup
def put_nb_static_property(self, name: Optional[str], prop: NbStaticProperty):
"""Append a 'nb_static_property' object"""
getter_sig = prop.fget.__nb_signature__[0][0]
pos = getter_sig.find("/) -> ")
if pos == -1:
raise RuntimeError(f"Static property '{name}' ({getter_sig}) has an invalid signature!")
getter_sig = getter_sig[pos + 6 :]
self.write_ln(f"{name}: {getter_sig} = ...")
if prop.__doc__ and self.include_docstrings:
self.put_docstr(prop.__doc__)
self.write("\n")
    def put_type(self, tp: NbType, name: Optional[str]):
        """Append a 'nb_type' type object"""
        tp_name, tp_mod_name = tp.__name__, tp.__module__
        mod_name = self.module.__name__
        # Aliases: the binding name differs from the type's canonical name/module
        if name and (name != tp_name or mod_name != tp_mod_name):
            same_module = tp_mod_name == mod_name
            same_toplevel_module = tp_mod_name.split(".")[0] == mod_name.split(".")[0]
            if same_module:
                # This is an alias of a type in the same module or same top-level module
                if sys.version_info >= (3, 10, 0):
                    alias_tp = self.import_object("typing", "TypeAlias")
                else:
                    alias_tp = self.import_object("typing_extensions", "TypeAlias")
                self.write_ln(f"{name}: {alias_tp} = {tp.__qualname__}\n")
            elif self.include_external_imports or (same_toplevel_module and self.include_internal_imports):
                # Import from a different module
                self.put_value(tp, name)
        else:
            docstr = tp.__doc__
            tp_dict = dict(tp.__dict__)
            tp_bases: Union[List[str], Tuple[Any, ...], None] = None
            if "__nb_signature__" in tp.__dict__:
                # Types with a custom signature override
                for s in tp.__nb_signature__.split("\n"):
                    self.write_ln(self.simplify_types(s))
                self._replace_tail(1, ":\n")
            else:
                self.write_ln(f"class {tp_name}:")
                if tp_bases is None:
                    tp_bases = getattr(tp, "__orig_bases__", None)
                if tp_bases is None:
                    tp_bases = tp.__bases__
                tp_bases = [self.type_str(base) for base in tp_bases]
                # Spell out base classes unless the type only derives from 'object'
                if tp_bases != ["object"]:
                    self._replace_tail(2, "(")
                    for i, base in enumerate(tp_bases):
                        if i:
                            self.write(", ")
                        self.write(base)
                    self.write("):\n")
            self.depth += 1
            output_pos = self._output.tell()
            if docstr and self.include_docstrings:
                self.put_docstr(docstr)
                if len(tp_dict):
                    self.write("\n")
            self.apply_pattern(self.prefix + ".__prefix__", None)
            for k, v in tp_dict.items():
                self.put(v, k, tp)
            self.apply_pattern(self.prefix + ".__suffix__", None)
            # Nothing was emitted for the class body -> it needs a 'pass'
            if output_pos == self._output.tell():
                self.write_ln("pass\n")
            self.depth -= 1
def is_function(self, tp: type) -> bool:
"""
Test if this is one of the many types of built-in functions supported
by Python, or if it is a nanobind ``nb_func``.
"""
return (
issubclass(tp, types.FunctionType)
or issubclass(tp, types.BuiltinFunctionType)
or issubclass(tp, types.BuiltinMethodType)
or issubclass(tp, types.WrapperDescriptorType)
or issubclass(tp, staticmethod)
or issubclass(tp, classmethod)
or (tp.__module__ == "nanobind" and tp.__name__ == "nb_func")
)
    def put_value(self, value: object, name: str, parent: Optional[object] = None, abbrev: bool = True) -> None:
        """
        Render a ``name: type = value`` assignment at the module, class, or
        enum scope.
        The parameter ``abbrev`` indicates if it is acceptable to reduce very
        long expressions to ``...``.
        """
        tp = type(value)
        # Ignore module imports of non-type values like 'from typing import Optional'
        if (
            not self.include_external_imports
            and tp.__module__ == "typing"
            and str(value) == f"typing.{name}"
        ):
            return
        if tp.__module__ == '__future__':
            return
        if isinstance(parent, type) and issubclass(tp, parent):
            # This is an entry of an enumeration
            self.write_ln(f"{name} = {typing.cast(enum.Enum, value)._value_}")
            if value.__doc__ and self.include_docstrings:
                self.put_docstr(value.__doc__)
            self.write("\n")
        elif self.is_function(tp) or isinstance(value, type):
            named_value = cast(NamedObject, value)
            same_toplevel_module = named_value.__module__.split(".")[0] == self.module.__name__.split(".")[0]
            if self.include_external_imports or (same_toplevel_module and self.include_internal_imports):
                # This is a function or a type, import it from its actual source
                self.import_object(named_value.__module__, named_value.__name__, name)
        else:
            value_str = self.expr_str(value, abbrev)
            if value_str is None:
                value_str = "..."
            # Catch a few different typing.* constructs
            # (note: the local 'types' below shadows the 'types' module,
            # which is not used further in this function)
            if self.is_type_var(tp):
                types = ""
            elif typing.get_origin(value):
                if sys.version_info >= (3, 10, 0):
                    types = ": " + self.import_object("typing", "TypeAlias")
                else:
                    types = ": " + self.import_object("typing_extensions", "TypeAlias")
            else:
                types = f": {self.type_str(tp)}"
            self.write_ln(f"{name}{types} = {value_str}\n")
def is_type_var(self, tp: type) -> bool:
if issubclass(tp, typing.TypeVar):
return True
if sys.version_info >= (3, 10) and issubclass(tp, typing.ParamSpec):
return True
if sys.version_info >= (3, 11) and issubclass(tp, typing.TypeVarTuple):
return True
if typing_extensions is not None:
if issubclass(
tp,
(
typing_extensions.TypeVar,
typing_extensions.ParamSpec,
typing_extensions.TypeVarTuple
)
):
return True
return False
    def simplify_types(self, s: str) -> str:
        """
        Process types that occur within a signature string ``s`` and simplify
        them. This function applies the following rules:
        - "local_module.X" -> "X"
        - "other_module.X" -> "other_module.X"
          (with "import other_module" added at top)
        - "builtins.X" -> "X"
        - "NoneType" -> "None"
        - "ndarray[...]" -> "Annotated[NDArray[dtype], dict(..extras..)]"
        - "collections.abc.X" -> "X"
          (with "from collections.abc import X" added at top)
        - "typing.X" -> "X"
          (with "from typing import X" added at top, potentially
          changed to 'collections.abc' on newer Python versions)
        """
        # Process nd-array type annotations so that MyPy accepts them
        s = self.ndarray_re.sub(lambda m: self._format_ndarray(m.group(2)), s)
        s = self.abc_re.sub(r'collections.abc.\1', s)
        # Process other type names and add suitable import statements
        def process_general(m: Match[str]) -> str:
            def is_valid_module(module_name: str) -> bool:
                try:
                    importlib.util.find_spec(module_name)
                    # If we get here, the module exists and has a valid spec.
                    return True
                except ValueError:
                    # The module exists but has no spec, `find_spec` raises a
                    # `ValueError`, so if we get here, the module does exist.
                    return True
                except ModuleNotFoundError:
                    return False
            full_name, mod_name, cls_name = m.group(0), m.group(1)[:-1], m.group(2)
            if mod_name == "builtins":
                # Simplify builtins
                return cls_name if cls_name != "NoneType" else "None"
            if full_name.startswith(self.module.__name__):
                # Strip away the module prefix for local classes
                return full_name[len(self.module.__name__) + 1 :]
            elif mod_name == "typing" or mod_name == "collections.abc":
                # Import frequently-occurring typing classes and ABCs directly
                return self.import_object(mod_name, cls_name)
            else:
                # Handle nested names. While mod_name isn't a valid module, then
                # move the last segment of the name from mod_name to cls_name
                # and try again until we have the right partition.
                search_mod_name = mod_name
                search_cls_name = cls_name
                while search_mod_name:
                    if is_valid_module(search_mod_name):
                        mod_name = search_mod_name
                        cls_name = search_cls_name
                        break
                    search_mod_name, _, symbol = search_mod_name.rpartition(".")
                    search_cls_name = f"{symbol}.{search_cls_name}"
                # Import the module and reference the contained class by name
                self.import_object(mod_name, None)
                return full_name
        s = self.id_seq.sub(process_general, s)
        return s
def _format_ndarray(self, annotation: str) -> str:
"""Improve NumPy type annotations for static type checking"""
dtype = None
m = re.search(r"dtype=(\w+)", annotation)
if m:
dtype = "numpy."+ m.group(1)
dtype = dtype.replace('bool', 'bool_')
annotation = re.sub(r"dtype=\w+,?\s*", "", annotation).rstrip(", ")
# Turn shape notation into a valid Python type expression
annotation = annotation.replace("*", "None").replace("(None)", "(None,)")
# Build type while potentially preserving extra information as an annotation
ndarray = self.import_object("numpy.typing", "NDArray")
result = f"{ndarray}[{dtype}]" if dtype else ndarray
if annotation:
annotated = self.import_object("typing", "Annotated")
result = f"{annotated}[{result}, dict({annotation})]"
return result
def apply_pattern(self, query: str, value: object) -> bool:
"""
Check if ``value`` matches an entry of a pattern file. Applies the
pattern and returns ``True`` in that case, otherwise returns ``False``.
"""
match: Optional[Match[str]] = None
pattern: Optional[ReplacePattern] = None
for pattern in self.patterns:
match = pattern.query.search(query)
if match:
break
if not match or not pattern:
return False
for line in pattern.lines:
ls = line.strip()
if ls == "\\doc":
# Docstring reference
tp = type(value)
doc: Optional[str] = None
if tp.__module__ == "nanobind" and tp.__name__ in (
"nb_func",
"nb_method",
):
value = cast(NbFunction, value)
for tp_i in value.__nb_signature__:
doc = tp_i[1]
if doc:
break
else:
doc = getattr(value, "__doc__", None)
self.depth += 1
if doc and self.include_docstrings:
self.put_docstr(doc)
else:
self.write_ln("pass")
self.depth -= 1
continue
elif ls.startswith("\\from "):
items = ls[5:].split(" import ")
if len(items) != 2:
raise RuntimeError(f"Could not parse import declaration {ls}")
for item in items[1].strip("()").split(","):
item_list = item.split(" as ")
import_module, import_name = (
items[0].strip(),
item_list[0].strip(),
)
import_as = item_list[1].strip() if len(item_list) > 1 else None
self.import_object(import_module, import_name, import_as)
continue
groups = match.groups()
for i in reversed(range(len(groups))):
line = line.replace(f"\\{i+1}", groups[i])
for k, v in match.groupdict().items():
line = line.replace(f"\\{k}", v)
self.write_ln(line)
# Success, pattern was applied
return True
    def put(self, value: object, name: Optional[str] = None, parent: Optional[object] = None) -> None:
        """
        Main dispatch: append ``value`` (module, type, function, property,
        or plain value) to the stub under ``name``, recursing into members.
        """
        old_prefix = self.prefix
        if value in self.stack:
            # Avoid infinite recursion due to cycles
            return
        try:
            self.stack.append(value)
            self.prefix = self.prefix + (("." + name) if name else "")
            # Check if an entry in a provided pattern file matches
            if self.apply_pattern(self.prefix, value):
                return
            # Exclude various standard elements found in modules, classes, etc.
            if name in SKIP_LIST:
                return
            is_type_alias = typing.get_origin(value) or (
                isinstance(value, type)
                and (value.__name__ != name or value.__module__ != self.module.__name__)
            )
            # Ignore private members unless the user requests their inclusion
            if (
                not self.include_private
                and name
                and not is_type_alias
                and len(name) > 2
                and (
                    (name[0] == "_" and name[1] != "_")
                    or (name[-1] == "_" and name[-2] != "_")
                )
            ):
                return
            tp = type(value)
            tp_mod, tp_name = tp.__module__, tp.__name__
            if ismodule(value):
                # len(stack) != 1 -> this module is a member of the one being processed
                if len(self.stack) != 1:
                    value_name_s = value.__name__.split(".")
                    module_name_s = self.module.__name__.split(".")
                    is_external = value_name_s[0] != module_name_s[0]
                    if not self.include_external_imports and is_external:
                        return
                    # Do not include submodules in the same stub, but include a directive to import them
                    self.import_object(value.__name__, name=None, as_name=name)
                    # If the user requested this, generate recursive stub files as well
                    if self.recursive and value_name_s[:-1] == module_name_s and self.output_file:
                        if create_subdirectory_for_module(value):
                            # Create a new subdirectory and start with an __init__.pyi file there
                            dir_name = self.output_file.parents[0] / value_name_s[-1]
                            dir_name.mkdir(parents=False, exist_ok=True)
                            output_file = dir_name / '__init__.pyi'
                        else:
                            output_file = self.output_file.parents[0] / (value_name_s[-1] + '.pyi')
                        sg = StubGen(
                            module=value,
                            recursive=self.recursive,
                            include_docstrings=self.include_docstrings,
                            include_private=self.include_private,
                            include_external_imports=self.include_external_imports,
                            include_internal_imports=self.include_internal_imports,
                            max_expr_length=self.max_expr_length,
                            patterns=self.patterns,
                            output_file=output_file,
                            quiet=self.quiet
                        )
                        sg.put(value)
                        output_file = output_file.resolve()
                        if not self.quiet:
                            print(f' - writing stub "{output_file}" ..')
                        with open(output_file, "w", encoding='utf-8') as f:
                            f.write(sg.get())
                    return
                else:
                    # Top-level module: emit all of its members in order
                    self.apply_pattern(self.prefix + ".__prefix__", None)
                    # using value.__dict__ rather than inspect.getmembers
                    # to preserve insertion order
                    for name, child in value.__dict__.items():
                        self.put(child, name=name, parent=value)
                    self.apply_pattern(self.prefix + ".__suffix__", None)
            elif self.is_function(tp):
                value = cast(NbFunction, value)
                self.put_function(value, name, parent)
            elif issubclass(tp, type):
                value = cast(NbType, value)
                self.put_type(value, name)
            elif tp_mod == "nanobind":
                if tp_name == "nb_method":
                    value = cast(NbFunction, value)
                    self.put_function(value, name)
                elif tp_name == "nb_static_property":
                    value = cast(NbStaticProperty, value)
                    self.put_nb_static_property(name, value)
            elif tp_mod == "builtins":
                if tp is property:
                    value = cast(property, value)
                    self.put_property(value, name)
                else:
                    assert name is not None
                    # '__all__' is emitted in full, never abbreviated with '...'
                    abbrev = name != "__all__"
                    self.put_value(value, name, parent, abbrev=abbrev)
            else:
                assert name is not None
                self.put_value(value, name, parent)
        finally:
            self.stack.pop()
            self.prefix = old_prefix
    def import_object(
        self, module: str, name: Optional[str], as_name: Optional[str] = None
    ) -> str:
        """
        Import a type (e.g. typing.Optional) used within the stub, ensuring
        that this does not cause conflicts. Specify ``as_name`` to ensure that
        the import is bound to a specified name.
        When ``name`` is None, the entire module is imported.
        Returns the name under which the object is available in the stub.
        """
        if module == "builtins" and name and (not as_name or name == as_name):
            return name
        # Rewrite module name if this is relative import from a submodule
        if module.startswith(self.module.__name__) and module != self.module.__name__:
            module_short = module[len(self.module.__name__) :]
            if not name and as_name and module_short[0] == ".":
                name = as_name = module_short[1:]
                module_short = "."
        else:
            module_short = module
        # Query a cache of previously imported objects
        imports_module: Optional[ImportDict] = self.imports.get(module_short, None)
        if not imports_module:
            imports_module = {}
            self.imports[module_short] = imports_module
        key = (name, as_name)
        final_name = imports_module.get(key, None)
        if final_name:
            return final_name
        # Cache miss, import the object
        final_name = as_name if as_name else name
        # If no as_name constraint was set, potentially adjust the name to
        # avoid conflicts with an existing object of the same name
        if name and not as_name:
            test_name = name
            while True:
                # Accept the name if there are no conflicts
                if not hasattr(self.module, test_name):
                    break
                value = getattr(self.module, test_name)
                try:
                    if module == ".":
                        mod_o = self.module
                    else:
                        mod_o = importlib.import_module(module)
                    # If there is a conflict, accept it if it refers to the same object
                    if getattr(mod_o, name) is value:
                        break
                except ImportError:
                    pass
                # Prefix with an underscore
                test_name = "_" + test_name
            final_name = test_name
        imports_module[key] = final_name
        return final_name if final_name else ""
    def expr_str(self, e: Any, abbrev: bool = True) -> Optional[str]:
        """
        Attempt to convert a value into valid Python syntax that regenerates
        that value. When ``abbrev`` is True, the implementation gives up and
        returns ``None`` when the expression is considered to be too
        complicated.
        """
        tp = type(e)
        if issubclass(tp, (bool, int, type(None), type(builtins.Ellipsis))):
            s = repr(e)
            if len(s) < self.max_expr_length or not abbrev:
                return s
        elif issubclass(tp, float):
            s = repr(e)
            if "inf" in s or "nan" in s:
                # repr() of non-finite floats is not valid Python syntax on its own
                s = f"float('{s}')"
            if len(s) < self.max_expr_length or not abbrev:
                return s
        elif issubclass(tp, type) or typing.get_origin(e):
            return self.type_str(e)
        elif issubclass(tp, typing.ForwardRef):
            return f'"{e.__forward_arg__}"'
        elif issubclass(tp, enum.Enum):
            return self.type_str(tp) + '.' + e._name_
        elif (sys.version_info >= (3, 10) and issubclass(tp, typing.ParamSpec)) \
            or (typing_extensions is not None and issubclass(tp, typing_extensions.ParamSpec)):
            tv = self.import_object(tp.__module__, "ParamSpec")
            return f'{tv}("{e.__name__}")'
        elif (sys.version_info >= (3, 11) and issubclass(tp, typing.TypeVarTuple)) \
            or (typing_extensions is not None and issubclass(tp, typing_extensions.TypeVarTuple)):
            tv = self.import_object(tp.__module__, "TypeVarTuple")
            return f'{tv}("{e.__name__}")'
        elif issubclass(tp, typing.TypeVar):
            # Reconstruct a TypeVar(...) declaration with constraints/bound/variance
            tv = self.import_object("typing", "TypeVar")
            s = f'{tv}("{e.__name__}"'
            for v in getattr(e, "__constraints__", ()):
                v = self.type_str(v)
                assert v
                s += ", " + v
            if v := getattr(e, "__bound__", None):
                v = self.type_str(v)
                assert v
                s += ", bound=" + v
            for k in ["contravariant", "covariant", "infer_variance"]:
                v = getattr(e, f"__{k}__", None)
                if v:
                    v = self.expr_str(v, abbrev=False)
                    if v is None:
                        return None
                    s += f", {k}=" + v
            s += ")"
            return s
        elif issubclass(tp, str):
            s = repr(e)
            if len(s) < self.max_expr_length or not abbrev:
                return s
        elif issubclass(tp, list) or issubclass(tp, tuple):
            # Convert elements recursively; give up if any element does
            e = [self.expr_str(v, abbrev) for v in e]
            if None in e:
                return None
            if issubclass(tp, list):
                s = "[" + ", ".join(e) + "]"
            else:
                s = "(" + ", ".join(e) + ")"
            if len(s) < self.max_expr_length or not abbrev:
                return s
        elif issubclass(tp, dict):
            e = [
                (self.expr_str(k, abbrev), self.expr_str(v, abbrev))
                for k, v in e.items()
            ]
            s = "{"
            for i, (k, v) in enumerate(e):
                if k is None or v is None:
                    return None
                s += k + " : " + v
                if i + 1 < len(e):
                    s += ", "
            s += "}"
            if len(s) < self.max_expr_length or not abbrev:
                return s
            pass
        # Abbreviated (too long), or an unsupported value type
        return None
    def signature_str(self, s: Signature) -> str:
        """Convert an inspect.Signature to into valid Python syntax"""
        posonly_sep, kwonly_sep = False, True
        params: List[str] = []
        # Logic for placing '*' and '/' based on the
        # signature.Signature implementation
        for param in s.parameters.values():
            kind = param.kind
            if kind == Parameter.POSITIONAL_ONLY:
                posonly_sep = True
            elif posonly_sep:
                # First non-positional-only parameter: close the group with '/'
                params.append("/")
                posonly_sep = False
            if kind == Parameter.VAR_POSITIONAL:
                kwonly_sep = False
            elif kind == Parameter.KEYWORD_ONLY and kwonly_sep:
                # First keyword-only parameter without a preceding '*args': emit '*'
                params.append("*")
                kwonly_sep = False
            params.append(self.param_str(param))
        if posonly_sep:
            # Every parameter was positional-only
            params.append("/")
        result = f"({', '.join(params)})"
        if s.return_annotation != Signature.empty:
            result += " -> " + self.type_str(s.return_annotation)
        return result
def param_str(self, p: Parameter) -> str:
result = ""
if p.kind == Parameter.VAR_POSITIONAL:
result += "*"
elif p.kind == Parameter.VAR_KEYWORD:
result += "**"
result += p.name
has_type = p.annotation != Parameter.empty
has_def = p.default != Parameter.empty
if has_type:
result += ": " + self.type_str(p.annotation)
if has_def:
result += " = " if has_type else "="
p_default_str = self.expr_str(p.default)
if p_default_str is None:
# self.expr_str(p.default) could return None in some rare cases,
# e.g. p.default is a nanobind object. If so, use ellipsis as a placeholder.
p_default_str = "..."
assert p_default_str
result += p_default_str
return result
    def type_str(self, tp: Union[List[Any], Tuple[Any, ...], Dict[Any, Any], Any]) -> str:
        """Attempt to convert a type into a Python expression which reproduces it"""
        origin, args = typing.get_origin(tp), typing.get_args(tp)
        if isinstance(tp, str):
            result = tp
        elif isinstance(tp, typing.TypeVar):
            return tp.__name__
        elif isinstance(tp, typing.ForwardRef):
            return repr(tp.__forward_arg__)
        elif isinstance(tp, list):
            list_gen: Generator[str, None, None] = (self.type_str(a) for a in tp)
            return "[" + ", ".join(list_gen) + "]"
        elif isinstance(tp, tuple):
            tuple_gen: Generator[str, None, None] = (self.type_str(a) for a in tp)
            return "(" + ", ".join(tuple_gen) + ")"
        elif isinstance(tp, dict):
            dict_gen: Generator[str, None, None] = (repr(k) + ": " + self.type_str(v) for k, v in tp.items())
            return (
                "{"
                + ", ".join(dict_gen)
                + "}"
            )
        elif origin and args:
            # Parameterized generic, e.g. List[int]
            args_gen: Generator[str, None, None] = (self.type_str(a) for a in args)
            result = (
                self.type_str(origin)
                + "["
                + ", ".join(args_gen)
                + "]"
            )
        elif tp in TYPES_TYPES:
            result = f"types.{TYPES_TYPES[tp]}"
        elif tp is Ellipsis:
            result = "..."
        elif isinstance(tp, type):
            result = tp.__module__ + "." + tp.__qualname__
        else:
            result = repr(tp)
        return self.simplify_types(result)
def check_party(self, module: str) -> Literal[0, 1, 2]:
"""
Check source of module
0 = From stdlib
1 = From 3rd party package
2 = From the package being built
"""
if module.startswith(".") or module.split('.')[0] == self.module.__name__.split('.')[0]:
return 2
try:
spec = importlib.util.find_spec(module)
except (ModuleNotFoundError, ValueError):
return 1
if spec:
if spec.origin and ("site-packages" in spec.origin or "dist-packages" in spec.origin):
return 1
else:
return 0
else:
return 1
    def get(self) -> str:
        """Generate the final stub output"""
        s = ""
        # Potentially add a module docstring
        doc = getattr(self.module, '__doc__', None)
        if self.include_docstrings and doc:
            s += self.format_docstr(doc, 0) + "\n"
        last_party = None
        # Emit imports grouped by origin (stdlib < third party < first party),
        # sorted by module name within each group
        for module in sorted(self.imports, key=lambda i: str(self.check_party(i)) + i):
            imports = self.imports[module]
            items: List[str] = []
            party = self.check_party(module)
            if party != last_party:
                # Separate groups of different origin with a blank line
                if last_party is not None:
                    s += "\n"
                last_party = party
            for (k, v1), v2 in imports.items():
                if k is None:
                    # Whole-module imports ('import module [as name]')
                    if v1 and v1 != module:
                        s += f"import {module} as {v1}\n"
                    elif v1 is None or (k, None) not in imports:
                        s += f"import {module}\n"
                else:
                    # 'from module import ...' entries
                    if k != v2 or v1:
                        items.append(f"{k} as {v2}")
                    else:
                        items.append(k)
            items = sorted(items)
            if items:
                # Use the parenthesized multi-line form when the line gets too long
                items_v0 = ", ".join(items)
                items_v0 = f"from {module} import {items_v0}\n"
                items_v1 = "(\n " + ",\n ".join(items) + "\n)"
                items_v1 = f"from {module} import {items_v1}\n"
                s += items_v0 if len(items_v0) <= 70 else items_v1
        s += "\n\n"
        # Append the main generated stub
        s += self.output
        return s.rstrip() + "\n"
def parse_options(args: List[str]) -> argparse.Namespace:
    """
    Parse the stubgen command line and validate inter-option constraints.

    Raises SystemExit (via ``parser.error``) when no module is given, or
    when ``-o`` is combined with multiple modules or with ``-r``.
    """
    parser = argparse.ArgumentParser(
        prog="python -m nanobind.stubgen",
        description="Generate stubs for nanobind-based extensions.",
    )

    # (flags, keyword arguments) for every supported option, in help order.
    option_table = [
        (("-o", "--output-file"),
         dict(metavar="FILE", dest="output_file", default=None,
              help="write generated stubs to the specified file")),
        (("-O", "--output-dir"),
         dict(metavar="PATH", dest="output_dir", default=None,
              help="write generated stubs to the specified directory")),
        (("-i", "--import"),
         dict(action="append", metavar="PATH", dest="imports", default=[],
              help="add the directory to the Python import path (can specify multiple times)")),
        (("-m", "--module"),
         dict(action="append", metavar="MODULE", dest="modules", default=[],
              help="generate a stub for the specified module (can specify multiple times)")),
        (("-r", "--recursive"),
         dict(default=False, action="store_true", dest="recursive",
              help="recursively process submodules")),
        (("-M", "--marker-file"),
         dict(action="append", metavar="FILE", dest="marker_file", default=[],
              help="generate a marker file (usually named 'py.typed', can specify multiple times)")),
        (("-p", "--pattern-file"),
         dict(metavar="FILE", dest="pattern_file", default=None,
              help="apply the given patterns to the generated stub (see the docs for syntax)")),
        (("-P", "--include-private"),
         dict(dest="include_private", default=False, action="store_true",
              help="include private members (with single leading or trailing underscore)")),
        (("-D", "--exclude-docstrings"),
         dict(dest="include_docstrings", default=True, action="store_false",
              help="exclude docstrings from the generated stub")),
        (("--exclude-values",),
         dict(dest="exclude_values", default=False, action="store_true",
              help="force the use of ... for values")),
        (("-q", "--quiet"),
         dict(default=False, action="store_true",
              help="do not generate any output in the absence of failures")),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)

    opt = parser.parse_args(args)

    # Cross-option validation that argparse cannot express on its own.
    if len(opt.modules) == 0:
        parser.error("At least one module must be specified.")
    if len(opt.modules) > 1 and opt.output_file:
        parser.error(
            "The -o option can only be specified when a single module is being processed."
        )
    if opt.recursive and opt.output_file:
        parser.error(
            "The -o option is not compatible with recursive stub generation (-r)."
        )

    return opt
def load_pattern_file(fname: str) -> List[ReplacePattern]:
    """
    Load a pattern file from disk and return a list of pattern instances that
    includes precompiled versions of all of the contained regular expressions.

    File format (as implemented by this parser -- see the docs for the
    authoritative syntax): a non-indented line ending in ':' starts a new
    pattern, whose text (minus the colon) is compiled as a regular
    expression; the indented/empty lines that follow form its replacement.
    Lines beginning with '#' are comments.
    """
    with open(fname, "r", encoding='utf-8') as f:
        f_lines = f.readlines()

    patterns: List[ReplacePattern] = []

    def add_pattern(query: str, lines: List[str]):
        # Exactly 1 empty line at the end
        while lines and (lines[-1].isspace() or len(lines[-1]) == 0):
            lines.pop()
        lines.append("")

        # Identify deletions (replacement by only whitespace)
        if all((p.isspace() or len(p) == 0 for p in lines)):
            lines = []

        # Strip the trailing ':' from the query line before compiling.
        patterns.append(ReplacePattern(re.compile(query[:-1]), lines, 0))

    lines: List[str]
    lines, query, dedent = [], None, 0

    for i, line in enumerate(f_lines):
        line = line.rstrip()

        if line.startswith("#"):
            continue

        if len(line) == 0 or line[0].isspace():
            # Replacement line: the indentation of the first such line fixes
            # the dedent amount applied to the rest of the block.
            if not lines:
                stripped = line.lstrip()
                dedent = len(line) - len(stripped)
                lines.append(stripped)
            else:
                # Remove `dedent` columns, but never cut into non-whitespace:
                # when the line is less indented than the first one, fall
                # back to a full lstrip (the longer of the two candidates).
                s1, s2 = line.lstrip(), line[dedent:]
                lines.append(s2 if len(s2) > len(s1) else s1)
        else:
            # Query line: must end with ':'
            if not line.endswith(":"):
                raise Exception(f'Cannot parse line {i+1} of pattern file "{fname}"')
            # Finish the previous pattern (if any) before starting a new one.
            if query:
                add_pattern(query, lines)
            query = line
            lines = []

    # Flush the final pattern.
    if query:
        add_pattern(query, lines)

    return patterns
def main(args: Optional[List[str]] = None) -> None:
    """
    Command-line entry point: parse options, then generate and write a stub
    for every requested module, followed by any requested marker files.
    """
    import sys

    # Ensure that the current directory is on the path
    if "" not in sys.path and "." not in sys.path:
        sys.path.insert(0, "")

    # Fall back to the process arguments when none were passed explicitly.
    opt = parse_options(sys.argv[1:] if args is None else args)

    patterns: List[ReplacePattern]
    if opt.pattern_file:
        if not opt.quiet:
            print('Using pattern file "%s" ..' % opt.pattern_file)
        patterns = load_pattern_file(opt.pattern_file)
        if not opt.quiet:
            print(" - loaded %i patterns.\n" % len(patterns))
    else:
        patterns = []

    # Extra import-path entries requested via -i.
    for i in opt.imports:
        sys.path.insert(0, i)

    for i, mod in enumerate(opt.modules):
        if not opt.quiet:
            if i > 0:
                print("\n")
            print('Module "%s" ..' % mod)
            print(" - importing ..")

        mod_imported = importlib.import_module(mod)

        # Determine the output path: either taken verbatim from -o, or
        # derived from the module's own location on disk.
        if opt.output_file:
            file = Path(opt.output_file)
        else:
            file_s = getattr(mod_imported, "__file__", None)
            if file_s is None:
                raise Exception(
                    'the module lacks a "__file__" attribute, hence '
                    "stubgen cannot infer where to place the generated "
                    "stub. You must specify the -o parameter to provide "
                    "the name of an output file."
                )
            file = Path(str(file_s))

            ext_loader = importlib.machinery.ExtensionFileLoader
            if isinstance(mod_imported.__loader__, ext_loader):
                # Splitting on "." (module nesting qualifier) handles the case
                # of invoking stubgen on a module that's not in the current
                # working directory - in that case, we still only want the Python
                # module name as the stub file name, not the whole source tree
                # hierarchy.
                modname = mod_imported.__name__.split(".")[-1]
                file = file.with_name(modname)
            file = file.with_suffix(".pyi")

            if opt.output_dir:
                file = Path(opt.output_dir, file.name)

            # Packages processed recursively become `<pkg>/__init__.pyi`.
            if opt.recursive and create_subdirectory_for_module(mod_imported) \
                    and file.name != '__init__.pyi':
                file = file.with_suffix('') / "__init__.pyi"
            file.parents[0].mkdir(parents=True, exist_ok=True)

        sg = StubGen(
            module=mod_imported,
            quiet=opt.quiet,
            recursive=opt.recursive,
            include_docstrings=opt.include_docstrings,
            include_private=opt.include_private,
            max_expr_length=0 if opt.exclude_values else 50,
            patterns=patterns,
            output_file=file
        )

        if not opt.quiet:
            print(" - analyzing ..")

        sg.put(mod_imported)

        # Report pattern usage and warn about rules that never fired.
        if patterns:
            total_matches = 0
            for p in patterns:
                if p.matches != 0:
                    total_matches += p.matches
                    continue
                # Pretty-print the unmatched rule without the re.compile(...)
                # wrapper that repr() adds.
                rule_str = str(p.query)
                if "re.compile" in rule_str:
                    rule_str = rule_str.replace("re.compile(", "")[:-1]
                if not opt.quiet:
                    print(f" - warning: rule {rule_str} did not match any elements.")
            if not opt.quiet:
                print(f" - applied {total_matches} patterns.")

        file = file.resolve()
        if not opt.quiet:
            print(f' - writing stub "{file}" ..')
        with open(file, "w", encoding='utf-8') as f:
            f.write(sg.get())

    # Finally, create any requested marker files (usually named 'py.typed').
    for marker_file in opt.marker_file:
        marker_file = Path(marker_file).resolve()
        if not opt.quiet:
            print(f' - writing marker file "{marker_file}" ..')
        marker_file.touch()
# Support direct script invocation in addition to ``python -m nanobind.stubgen``.
if __name__ == "__main__":
    main()
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
src/trampoline.cpp | C++ | /*
src/trampoline.cpp: support for overriding virtual functions in Python
Copyright (c) 2022 Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <nanobind/trampoline.h>
#include "nb_internals.h"
NAMESPACE_BEGIN(NB_NAMESPACE)
NAMESPACE_BEGIN(detail)
void trampoline_new(void **data, size_t size, void *ptr) noexcept {
// GIL is held when the trampoline constructor runs. Lock the
// associated instance shard in GIL-less Python.
nb_shard &shard = internals->shard(ptr);
lock_shard lock(shard);
nb_ptr_map &inst_c2p = shard.inst_c2p;
nb_ptr_map::iterator it = inst_c2p.find(ptr);
check(it != inst_c2p.end() && (((uintptr_t) it->second) & 1) == 0,
"nanobind::detail::trampoline_new(): unique instance not found!");
data[0] = it->second;
memset(data + 1, 0, sizeof(void *) * 2 * size);
}
/// Drop the Python object references cached in a trampoline's slot table.
/// The GIL is held when the trampoline destructor runs.
void trampoline_release(void **data, size_t size) noexcept {
    // Cached values live at data[2], data[4], ...; names (data[1], ...) are
    // borrowed and must not be released.
    void **slot = data + 2;
    for (size_t i = 0; i < size; ++i, slot += 2)
        Py_XDECREF((PyObject *) *slot);
}
/*
 * Resolve the Python override of virtual function `name` for the instance
 * stored in data[0]. On success, t->key receives the interned method name
 * and t->state the GIL state (the caller must later release both via
 * trampoline_leave()). If no override exists and `pure` is false, the
 * function returns with t->key unset so the caller runs the C++ default;
 * if `pure` is true, an exception is raised instead.
 *
 * The slot table holds (name pointer, cached value) pairs; a cached value
 * of Py_None records "no Python override exists for this function".
 */
static void trampoline_enter_internal(void **data, size_t size,
                                      const char *name, bool pure, ticket *t) {
    const PyObject *None = Py_None;
    PyGILState_STATE state{ };
    const char *error = nullptr;
    PyObject *key = nullptr, *value = nullptr;
    PyObject *self = (PyObject *) data[0];
    PyTypeObject *value_tp = nullptr;
    size_t offset = 0;

    // First, perform a quick sweep without lock
    for (size_t i = 0; i < size; i++) {
        void *d_name = data[2*i + 1],
             *d_value = data[2*i + 2];

        if (name == d_name && d_value) {
            if (d_value != None) {
                // Cached override found: acquire the GIL and hand the
                // cached (interned) name back through the ticket.
                t->state = PyGILState_Ensure();
                t->key = (PyObject *) d_value;
                return;
            } else {
                if (pure) {
                    // Take the error path below, once the lock is held.
                    error = "tried to call a pure virtual function";
                    break;
                } else {
                    // Not overridden and not pure: caller should invoke the
                    // C++ default implementation (t->key remains unset).
                    return;
                }
            }
        }
    }

    // Nothing found -- retry, now with lock held
    state = PyGILState_Ensure();
    ft_object_guard guard(self);

    if (error)
        goto fail;

    for (size_t i = 0; i < size; i++) {
        void *d_name = data[2*i + 1],
             *d_value = data[2*i + 2];

        if (name == d_name && d_value) {
            if (d_value != None) {
                t->state = state;
                t->key = (PyObject *) d_value;
                return;
            } else {
                if (pure) {
                    error = "tried to call a pure virtual function";
                    goto fail;
                } else {
                    PyGILState_Release(state);
                    return;
                }
            }
        }
    }

    // Still no luck -- perform a lookup and populate the trampoline.
    // Locate the first free (all-null) slot pair.
    for (; offset < size; offset++) {
        if (data[2 * offset + 1] == nullptr &&
            data[2 * offset + 2] == nullptr)
            break;
    }

    if (offset == size) {
        error = "the trampoline ran out of slots (you will need to increase "
                "the value provided to the NB_TRAMPOLINE() macro)";
        goto fail;
    }

    key = PyUnicode_InternFromString(name);
    if (!key) {
        error = "could not intern string";
        goto fail;
    }

    value = PyObject_GetAttr(self, key);
    if (!value) {
        error = "lookup failed";
        goto fail;
    }

    // Only the attribute's type matters below; drop the strong reference.
    value_tp = Py_TYPE(value);
    Py_CLEAR(value);

    // If the attribute still resolves to nanobind's own function/method
    // binding type, the Python subclass did not override it: cache Py_None
    // as the "no override" marker (reusing `key` as the value to store).
    if (value_tp == internals->nb_func || value_tp == internals->nb_method ||
        value_tp == internals->nb_bound_method) {
        Py_DECREF(key);

        if (pure) {
            error = "tried to call a pure virtual function";
            goto fail;
        }

        Py_INCREF(Py_None);
        key = Py_None;
    }

    data[2 * offset + 1] = (void *) name;
    data[2 * offset + 2] = key;

    if (key != None) {
        t->state = state;
        t->key = key;
        return;
    } else {
        PyGILState_Release(state);
        return;
    }

fail:
    // Release the GIL before raising so the error path leaves no state
    // acquired; `raise` does not return.
    type_data *td = nb_type_data(Py_TYPE(self));
    PyGILState_Release(state);
    raise("nanobind::detail::get_trampoline('%s::%s()'): %s!",
          td->name, name, error);
}
// Most recently entered ticket on this thread; used below to detect the case
// where a Python override calls back into the same virtual function on the
// same instance (which would otherwise re-dispatch to Python endlessly).
static NB_THREAD_LOCAL ticket *current_ticket = nullptr;

void trampoline_enter(void **data, size_t size, const char *name, bool pure, ticket *t) {
    trampoline_enter_internal(data, size, name, pure, t);

    // t->key is only set when a Python-level override should be invoked.
    if (t->key) {
        t->self = (PyObject *) data[0];
        t->prev = current_ticket;

        // Re-entrant dispatch of the same (instance, method) pair: clear the
        // ticket (releasing the GIL acquired above) so the caller falls back
        // to the C++ implementation -- unless the function is pure virtual,
        // in which case there is nothing to fall back to.
        if (t->prev && t->prev->self.is(t->self) && t->prev->key.is(t->key)) {
            t->self = handle();
            t->key = handle();
            t->prev = nullptr;
            PyGILState_Release(t->state);
            if (pure)
                raise("nanobind::detail::get_trampoline('%s()'): tried to call "
                      "a pure virtual function!", name);
            return;
        }

        // Push this ticket; trampoline_leave() pops it.
        current_ticket = t;
    }
}
/// Pop a trampoline dispatch ticket: restore the previously active ticket on
/// this thread and release the GIL state acquired in trampoline_enter().
/// A ticket without a key was never pushed, so there is nothing to undo.
void trampoline_leave(ticket *t) noexcept {
    if (t->key) {
        current_ticket = t->prev;
        PyGILState_Release(t->state);
    }
}
NAMESPACE_END(detail)
NAMESPACE_END(NB_NAMESPACE)
| wjakob/nanobind | 3,353 | nanobind: tiny and efficient C++/Python bindings | C++ | wjakob | Wenzel Jakob | EPFL |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.