diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 28eb35c..2e2f9a2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,7 +22,7 @@ jobs: name: Linux-aarch64 other_linker_flags: '-L/usr/aarch64-linux-gnu/local/lib' arch: aarch64 - output: libRemoteInput.so.1.0.0 + output: libRemoteInput.so release: libRemoteInput-aarch64.so #Linux-64 @@ -30,7 +30,7 @@ jobs: name: Linux-64 other_linker_flags: '-m64' arch: x86_64 - output: libRemoteInput.so.1.0.0 + output: libRemoteInput.so release: libRemoteInput-x86_64.so #MacOS-64 @@ -38,7 +38,7 @@ jobs: name: MacOS-64 other_linker_flags: '-m64' arch: x86_64 - output: libRemoteInput.1.0.0.dylib + output: libRemoteInput.dylib release: libRemoteInput-x86_64.dylib #Windows-32 @@ -63,6 +63,7 @@ jobs: submodules: true - name: Set up Python + if: matrix.config.name != 'Linux-aarch64' uses: actions/setup-python@v4 with: python-version: '3.x' diff --git a/CMakeLists.txt b/CMakeLists.txt index f967244..ddcdc0a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,23 @@ IF(NOT CMAKE_BUILD_TYPE) ENDIF() set(PYTHON_BINDINGS ON) +set(USE_PYBIND11 OFF) +set(USE_PYTHON3 OFF) +set(USE_SYSTEM_PYBIND11 OFF) +set(PYTHON_LIMITED_VERSION 0x03080000) + +IF (USE_PYBIND11) + unset(Py_LIMITED_API) + MESSAGE(STATUS, "PyBind11 being used -- Ignoring Py_LIMITED_API") +ENDIF() + +IF (NOT USE_PYBIND11) + set(Py_LIMITED_API ${PYTHON_LIMITED_VERSION}) +ENDIF() +IF(PYTHON_BINDINGS AND USE_PYBIND11 AND Py_LIMITED_API) + MESSAGE(FATAL_ERROR, "PyBind11 cannot be used with Py_LIMITED_API") +ENDIF() # ----------------------------- PACKAGES ----------------------------- set(JAVA_AWT_LIBRARY NotNeeded) @@ -20,14 +36,48 @@ find_package(Java 1.8 REQUIRED) find_package(JNI 1.8 REQUIRED) IF(PYTHON_BINDINGS) - find_package(Python 3.8 REQUIRED COMPONENTS Interpreter Development) - MESSAGE(STATUS, "${Python_LIBRARIES}") - MESSAGE(STATUS, "${Python_LIBRARY_DIRS}") - MESSAGE(STATUS, "${PYTHON_DYNAMIC_LINKER_FLAGS}") 
-ENDIF() - -set(Py_LIMITED_API 0x03080000) + IF(USE_PYBIND11) + IF(USE_PYTHON3) + find_package(Python3 REQUIRED COMPONENTS Interpreter Development) + set(PY_INCLUDE_DIRS ${Python3_INCLUDE_DIRS}) + set(PY_LIBRARIES ${Python3_LIBRARIES}) + set(PY_LIBRARIES ${Python3_LIBRARY_DIRS}) + set(PY_LINK_OPTIONS ${Python3_LINK_OPTIONS}) + set(PY_DYNAMIC_LINKER_FLAGS ${Python3_DYNAMIC_LINKER_FLAGS}) + ELSE() + find_package(Python REQUIRED COMPONENTS Interpreter Development) + set(PY_INCLUDE_DIRS ${Python_INCLUDE_DIRS}) + set(PY_LIBRARIES ${Python_LIBRARIES}) + set(PY_LIBRARY_DIRS ${Python_LIBRARY_DIRS}) + set(PY_LINK_OPTIONS ${Python_LINK_OPTIONS}) + set(PY_DYNAMIC_LINKER_FLAGS ${Python_DYNAMIC_LINKER_FLAGS}) + ENDIF() + IF(USE_SYSTEM_PYBIND11) + find_package(nanobind REQUIRED) + ENDIF() + ELSE() + IF(USE_PYTHON3) + find_package(Python3 3.8 REQUIRED COMPONENTS Interpreter Development) + set(PY_INCLUDE_DIRS ${Python3_INCLUDE_DIRS}) + set(PY_LIBRARIES ${Python3_LIBRARIES}) + set(PY_LIBRARY_DIRS ${Python3_LIBRARY_DIRS}) + set(PY_LINK_OPTIONS ${Python3_LINK_OPTIONS}) + set(PY_DYNAMIC_LINKER_FLAGS ${Python3_DYNAMIC_LINKER_FLAGS}) + ELSE() + find_package(Python 3.8 REQUIRED COMPONENTS Interpreter Development) + set(PY_INCLUDE_DIRS ${Python_INCLUDE_DIRS}) + set(PY_LIBRARIES ${Python_LIBRARIES}) + set(PY_LIBRARY_DIRS ${Python_LIBRARY_DIRS}) + set(PY_LINK_OPTIONS ${Python_LINK_OPTIONS}) + set(PY_DYNAMIC_LINKER_FLAGS ${Python_DYNAMIC_LINKER_FLAGS}) + ENDIF() + ENDIF() + MESSAGE(STATUS, "${PY_INCLUDE_DIRS}") + MESSAGE(STATUS, "${PY_LIBRARIES}") + MESSAGE(STATUS, "${PY_LIBRARY_DIRS}") + MESSAGE(STATUS, "${PY_DYNAMIC_LINKER_FLAGS}") +ENDIF() # ----------------------- INCLUDE_DIRECTORIES ----------------------- set(INCLUDE_DIRECTORIES @@ -39,7 +89,7 @@ set(INCLUDE_DIRECTORIES RemoteInput/Plugin RemoteInput/Plugin/JVM ${JNI_INCLUDE_DIRS} - ${Python_INCLUDE_DIRS}) + ${PY_INCLUDE_DIRS}) # ----------------------------- PLATFORM ----------------------------- @@ -90,52 +140,9 @@ ELSE() ENDIF() - 
-# ------------------------- JAVA LINKER ------------------------- -IF(WIN32) -# set(LIBRARIES_LIST -# ${JAVA_AWT_LIBRARY}) -ELSEIF(APPLE) -# set(JAVA_JNI_LIBRARY_PATH -# /Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/lib) -# -# set(JAVA_JNI_LIBRARY_RPATH -# "/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/lib") - -# find_library(Java_JAWT_LIBRARY -# NAMES libawt.dylib -# HIMES ${_JAVA_HINTS} -# PATHS ${JAVA_JNI_LIBRARY_PATH}) -# -# find_library(Java_JAWT_EXTENDED_LIBRARY -# NAMES libawt_lwawt.dylib -# HIMES ${_JAVA_HINTS} -# PATHS ${JAVA_JNI_LIBRARY_PATH}) -# -# set(LIBRARIES_LIST -# ${Java_JAWT_LIBRARY} -# ${Java_JAWT_EXTENDED_LIBRARY} -# "-rpath \"${JAVA_JNI_LIBRARY_RPATH}\"" -# "-rpath \"${JAVA_JNI_LIBRARY_RPATH}/server\"") -ELSE() -# set(Java_JAWT_LIBRARY -# ${JAVA_AWT_LIBRARY}) -# -# find_library(Java_JAWT_EXTENDED_LIBRARY -# NAMES libawt_xawt.so -# HIMES ${_JAVA_HINTS} -# PATHS ${JAVA_AWT_LIBRARY}/..) -# -# set(LIBRARIES_LIST -# ${Java_JAWT_LIBRARY} -# ${Java_JAWT_EXTENDED_LIBRARY}) -ENDIF() - - - # ----------------------------- SOURCES ----------------------------- + set(SRC_LIST - ${SRC_LIST} ${EXTRA_INCLUDES} RemoteInput/RemoteInput.h RemoteInput/Echo/Atomics.cxx @@ -159,8 +166,6 @@ set(SRC_LIST RemoteInput/Echo/TypeTraits.hxx RemoteInput/Echo/TypeTraits_Functional.hxx RemoteInput/Echo/TypeTraits_Functional_Attributes.hxx - #RemoteInput/Hooks/ModelRendering.cpp - #RemoteInput/Hooks/ModelRendering.hpp RemoteInput/Java/JNI_Common.hxx RemoteInput/Java/Applet.cxx RemoteInput/Java/Applet.hxx @@ -208,7 +213,6 @@ set(SRC_LIST RemoteInput/Platform/NativeHooks_Linux.cxx RemoteInput/Platform/NativeHooks_Windows.cxx RemoteInput/Platform/Platform.hxx - #RemoteInput/Platform/Platform_Darwin.mm RemoteInput/Platform/Platform_Linux.cxx RemoteInput/Platform/Platform_Windows.cxx RemoteInput/Plugin/ControlCenter.cxx @@ -252,17 +256,14 @@ set(SRC_LIST RemoteInput/Injection/Injector_Linux.cpp RemoteInput/Injection/Injector_Arm.cpp) 
-IF(WIN32) - -ELSEIF(APPLE) - set(SRC_LIST - ${SRC_LIST} +IF(APPLE) + list(APPEND SRC_LIST RemoteInput/Platform/Platform_Darwin.mm) ENDIF() IF(PYTHON_BINDINGS) - set(SRC_LIST - ${SRC_LIST} + list(APPEND SRC_LIST + RemoteInput/Plugin/Python/PythonMacros.hxx RemoteInput/Plugin/Python/PythonCommon.cxx RemoteInput/Plugin/Python/PythonCommon.hxx RemoteInput/Plugin/Python/PythonPlugin.cxx @@ -281,22 +282,43 @@ IF(PYTHON_BINDINGS) ENDIF() IF(PYTHON_BINDINGS AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")) -# set(EXTRA_LIBRARIES -# ${EXTRA_LIBRARIES} -# ${Python_LIBRARIES}) -# set(PYTHON_DYNAMIC_LINKER_FLAGS -# -undefined dynamic_lookup) + # set(EXTRA_LIBRARIES + # ${EXTRA_LIBRARIES} + # ${Python_LIBRARIES}) + # set(PYTHON_DYNAMIC_LINKER_FLAGS + # -undefined dynamic_lookup) ENDIF() # ---------------------------- COMPILE ---------------------------- -add_subdirectory(RemoteInput/Thirdparty) -add_library(${PROJECT_NAME} SHARED ${SRC_LIST} $) -set_target_properties(${PROJECT_NAME} PROPERTIES VERSION ${PROJECT_VERSION}) +IF(PYTHON_BINDINGS AND USE_PYBIND11) + add_subdirectory(RemoteInput/Thirdparty) + + IF(NOT USE_SYSTEM_PYBIND11) + add_subdirectory(RemoteInput/Thirdparty/nanobind) + ENDIF() + + nanobind_add_module(${PROJECT_NAME} SHARED ${SRC_LIST} $) + + IF(NOT USE_SYSTEM_PYBIND11) + target_include_directories(RemoteInput PRIVATE ${CMAKE_SOURCE_DIR}/RemoteInput/Thirdparty/nanobind/include) + ENDIF() +ELSE() + add_subdirectory(RemoteInput/Thirdparty) + add_library(${PROJECT_NAME} SHARED ${SRC_LIST} $) +ENDIF() + +#set_target_properties(${PROJECT_NAME} PROPERTIES VERSION ${PROJECT_VERSION}) target_include_directories(${PROJECT_NAME} PRIVATE ${INCLUDE_DIRECTORIES}) -IF(Py_LIMITED_API) - target_compile_definitions(${PROJECT_NAME} PRIVATE USE_DETOURS=1 HOOK_OPENGL_BLIT=1 Py_LIMITED_API=${Py_LIMITED_API} Py_BUILD_CORE=1 Py_NO_ENABLE_SHARED=1) +IF(PYTHON_BINDINGS) + IF (USE_PYBIND11) + target_compile_definitions(${PROJECT_NAME} PRIVATE USE_DETOURS=1 HOOK_OPENGL_BLIT=1 
USE_PYBIND11=1 Py_BUILD_CORE=1 Py_NO_ENABLE_SHARED=1) + ELSEIF(Py_LIMITED_API) + target_compile_definitions(${PROJECT_NAME} PRIVATE USE_DETOURS=1 HOOK_OPENGL_BLIT=1 Py_LIMITED_API=${Py_LIMITED_API} Py_BUILD_CORE=1 Py_NO_ENABLE_SHARED=1) + ELSE() + target_compile_definitions(${PROJECT_NAME} PRIVATE USE_DETOURS=1 HOOK_OPENGL_BLIT=1 Py_BUILD_CORE=1 Py_NO_ENABLE_SHARED=1) + ENDIF() ELSE() target_compile_definitions(${PROJECT_NAME} PRIVATE USE_DETOURS=1 HOOK_OPENGL_BLIT=1) ENDIF() @@ -307,15 +329,15 @@ IF(WIN32) $<$:-DDEBUG> $<$:-O3 -fvisibility=hidden>) target_link_options(${PROJECT_NAME} PRIVATE - $<$:-static -stdlib=libc++ -fuse-ld=lld -Wl,--enable-stdcall-fixup -Wl,--kill-at -Wl"/DEF:RemoteInput/RemoteInput.def" ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}> - $<$:-s -static -stdlib=libc++ -fuse-ld=lld -Wl,--enable-stdcall-fixup -Wl,--kill-at -Wl"/DEF:RemoteInput/RemoteInput.def" ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}>) + $<$:-static -stdlib=libc++ -fuse-ld=lld -Wl,--enable-stdcall-fixup -Wl,--kill-at -Wl"/DEF:RemoteInput/RemoteInput.def" ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}> + $<$:-s -static -stdlib=libc++ -fuse-ld=lld -Wl,--enable-stdcall-fixup -Wl,--kill-at -Wl"/DEF:RemoteInput/RemoteInput.def" ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}>) ELSEIF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") target_compile_options(${PROJECT_NAME} PRIVATE $<$:-DDEBUG -Wl,--input-def=RemoteInput/RemoteInput.def> $<$:-O3 -fvisibility=hidden -Wl,--input-def=RemoteInput/RemoteInput.def>) target_link_options(${PROJECT_NAME} PRIVATE - $<$:-static -static-libgcc -static-libstdc++ -Wl,--enable-stdcall-fixup -Wl,--kill-at ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}> - $<$:-s -static -static-libgcc -static-libstdc++ -Wl,--enable-stdcall-fixup -Wl,--kill-at ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}>) + $<$:-static -static-libgcc -static-libstdc++ -Wl,--enable-stdcall-fixup -Wl,--kill-at ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}> + $<$:-s 
-static -static-libgcc -static-libstdc++ -Wl,--enable-stdcall-fixup -Wl,--kill-at ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}>) ELSEIF(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") set_property(TARGET ${PROJECT_NAME} PROPERTY @@ -330,7 +352,7 @@ IF(WIN32) IF(PYTHON_BINDINGS) target_link_options(${PROJECT_NAME} PRIVATE $<$:> - $<$:/LIBPATH:${Python_LIBRARY_DIRS} ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}>) + $<$:/LIBPATH:${Python_LIBRARY_DIRS} ${Py_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}>) ELSE() target_link_options(${PROJECT_NAME} PRIVATE $<$:> @@ -342,15 +364,55 @@ ELSEIF(APPLE) $<$:-DDEBUG -g -fvisibility=hidden> $<$:-O3 -fvisibility=hidden>) target_link_options(${PROJECT_NAME} PRIVATE - $<$:-static -stdlib=libc++ -Wl"/DEF:RemoteInput/RemoteInput.def" ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}> - $<$:-s -static -stdlib=libc++ -Wl"/DEF:RemoteInput/RemoteInput.def" ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}>) + $<$:-static -stdlib=libc++ -Wl"/DEF:RemoteInput/RemoteInput.def" ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}> + $<$:-s -static -stdlib=libc++ -Wl"/DEF:RemoteInput/RemoteInput.def" ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}>) ELSE() target_compile_options(${PROJECT_NAME} PRIVATE $<$:-DDEBUG -g -fvisibility=hidden> $<$:-O3 -fvisibility=hidden>) target_link_options(${PROJECT_NAME} PRIVATE - $<$:-g ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}> #-static-libgcc -static-libstdc++ - $<$:-s ${Python_LINK_OPTIONS} ${PYTHON_DYNAMIC_LINKER_FLAGS}>) #-static-libgcc -static-libstdc++ + $<$:-g ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}> #-static-libgcc -static-libstdc++ + $<$:-s ${PY_LINK_OPTIONS} ${PY_DYNAMIC_LINKER_FLAGS}>) #-static-libgcc -static-libstdc++ ENDIF() -target_link_libraries(${PROJECT_NAME} ${LIBRARIES_LIST} ${EXTRA_LIBRARIES}) +IF(PYTHON_BINDINGS) + IF(USE_PYTHON3) + set(PY_MODULE Python3::Python) + ELSE() + set(PY_MODULE Python::Python) + ENDIF() + + 
IF(USE_PYBIND11) + target_link_libraries(${PROJECT_NAME} PRIVATE ${LIBRARIES_LIST} ${EXTRA_LIBRARIES} ${PY_LIBRARIES} ${PY_MODULE}) #nanobind::module + ELSE() + target_link_libraries(${PROJECT_NAME} PRIVATE ${LIBRARIES_LIST} ${EXTRA_LIBRARIES} ${PY_LIBRARIES}) + ENDIF() +ELSE() + target_link_libraries(${PROJECT_NAME} PRIVATE ${LIBRARIES_LIST} ${EXTRA_LIBRARIES}) +ENDIF() + + +# ---------------------------- RENAME ---------------------------- + +IF(WIN32) + add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E rename + $ + ${CMAKE_BINARY_DIR}/libRemoteInput.dll + COMMENT "Renaming module to RemoteInput.dll" + ) +ELSEIF(APPLE) + add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E rename + $ + ${CMAKE_BINARY_DIR}/libRemoteInput.dylib + COMMENT "Renaming module to RemoteInput.dylib" + ) +ELSE() + add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E rename + $ + ${CMAKE_BINARY_DIR}/libRemoteInput.so + COMMENT "Renaming module to RemoteInput.so" + ) +ENDIF() \ No newline at end of file diff --git a/RemoteInput/Platform/Platform_Windows.cxx b/RemoteInput/Platform/Platform_Windows.cxx index 426ade0..f3fd17d 100644 --- a/RemoteInput/Platform/Platform_Windows.cxx +++ b/RemoteInput/Platform/Platform_Windows.cxx @@ -3,6 +3,7 @@ #if defined(_WIN32) || defined(_WIN64) #include #include +#include #include "Thirdparty/Hook.hxx" #if defined(CUSTOM_INJECTOR) #include "Injection/Injector.hxx" @@ -271,6 +272,59 @@ void PrintProcessInfo(std::int32_t pid) noexcept } } +char* realpath(const char* path, char* resolved_path) +{ + auto MapWindowsErrorToErrno = [](DWORD error) -> std::int32_t { + switch (error) + { + case ERROR_FILE_NOT_FOUND: + case ERROR_PATH_NOT_FOUND: + return ENOENT; + case ERROR_ACCESS_DENIED: + return EACCES; + case ERROR_INVALID_PARAMETER: + return EINVAL; + case ERROR_ALREADY_EXISTS: + return EEXIST; + default: + return EIO; + } + }; + + if (!path || !resolved_path) + { + errno 
= EINVAL; + return nullptr; + } + + HANDLE hFile = CreateFile( + path, + GENERIC_READ, + FILE_SHARE_READ, + nullptr, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, + nullptr + ); + + if (hFile == INVALID_HANDLE_VALUE) + { + errno = MapWindowsErrorToErrno(GetLastError()); + return nullptr; + } + + DWORD result = GetFinalPathNameByHandle(hFile, resolved_path, MAX_PATH, FILE_NAME_NORMALIZED); + CloseHandle(hFile); + + if (result == 0) + { + errno = MapWindowsErrorToErrno(GetLastError()); + return nullptr; + } + + return resolved_path; +} + bool InjectSelf(std::int32_t pid) noexcept { if (IsProcessAlive(pid)) diff --git a/RemoteInput/Plugin/JVM/RemoteVM.cxx b/RemoteInput/Plugin/JVM/RemoteVM.cxx index 145926b..27f9727 100644 --- a/RemoteInput/Plugin/JVM/RemoteVM.cxx +++ b/RemoteInput/Plugin/JVM/RemoteVM.cxx @@ -1619,7 +1619,7 @@ std::vector RemoteVM::GetBooleanArrayRegion(jbooleanArray array, jsize env->GetBooleanArrayRegion(array, start, len, &elements[0]); return elements; } - return {}; + return elements; } std::vector RemoteVM::GetByteArrayRegion(jbyteArray array, jsize start, jsize len) const noexcept @@ -1636,7 +1636,7 @@ std::vector RemoteVM::GetByteArrayRegion(jbyteArray array, jsize start, j env->GetByteArrayRegion(array, start, len, &elements[0]); return elements; } - return {}; + return elements; } std::vector RemoteVM::GetCharArrayRegion(jcharArray array, jsize start, jsize len) const noexcept diff --git a/RemoteInput/Plugin/Python/Python.cxx b/RemoteInput/Plugin/Python/Python.cxx index 304e638..7b9a12c 100644 --- a/RemoteInput/Plugin/Python/Python.cxx +++ b/RemoteInput/Plugin/Python/Python.cxx @@ -3,6 +3,8 @@ // #include "Python.hxx" + +#if !defined(USE_PYBIND11) #include "Platform.hxx" void Python::steal(Python& a, Python& b) @@ -5378,3 +5380,5 @@ PyObject* (Python::Py_XNewRef)(PyObject* obj) return (*Py_XNewRef_Ptr)(obj); } #endif + +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/Python.hxx b/RemoteInput/Plugin/Python/Python.hxx 
index f7afed3..dd05cfa 100644 --- a/RemoteInput/Plugin/Python/Python.hxx +++ b/RemoteInput/Plugin/Python/Python.hxx @@ -5,44 +5,8 @@ #ifndef REMOTEINPUT_PYTHON_HXX #define REMOTEINPUT_PYTHON_HXX -// On Windows, Python is really badly implemented, -// so we must include `math.h` and not `cmath` to get the _hpyot symbol -#include -#include -#include -#include "object.h" - -#if defined(_WIN32) || defined(_WIN64) -#include "structmember.h" -#elif defined(__linux__) && (defined(__x86_64__) || defined(__i386__)) -#include "structmember.h" -#elif defined(__APPLE__) -#if __has_include() -#include /* Python.framework */ -#else -#include "structmember.h" -#endif -#elif defined(__aarch64__) || defined(__arm__) -#include "structmember.h" -#endif - -#if defined(_WIN32) || defined(_WIN64) -#include -#else -#include -#endif // defined - -#if defined(Py_LIMITED_API) -#define HAS_PYTHON_VERSION(MIN_VERSION) ((Py_LIMITED_API >= MIN_VERSION) && (PY_VERSION_HEX >= MIN_VERSION)) -#else -#define HAS_PYTHON_VERSION(MIN_VERSION) (PY_VERSION_HEX >= MIN_VERSION) -#endif - -#if HAS_PYTHON_VERSION(0x03070000) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x0080 // Python 3.7 incorrectly labels this as Py_LIMITED_API - #endif -#endif +#if !defined(USE_PYBIND11) +#include "PythonMacros.hxx" class Python { @@ -1662,5 +1626,6 @@ public: return static_cast(Py_ListType_Ptr); } }; +#endif #endif //REMOTEINPUT_PYTHON_HXX diff --git a/RemoteInput/Plugin/Python/PythonCommon.cxx b/RemoteInput/Plugin/Python/PythonCommon.cxx index 4cce2fa..32a680e 100644 --- a/RemoteInput/Plugin/Python/PythonCommon.cxx +++ b/RemoteInput/Plugin/Python/PythonCommon.cxx @@ -4,7 +4,185 @@ #include "PythonCommon.hxx" -PyRemoteInputType GetObjectType(PyObject* object) noexcept +#if defined(USE_PYBIND11) +nanobind::object python_create_eios(EIOS* eios) noexcept +{ + if (eios) + { + auto py_eios_object = new PyEIOS(); + py_eios_object->pid = eios->pid; + py_eios_object->native_eios = eios; + + return 
nanobind::cast(py_eios_object); + } + return nanobind::none(); +} + +nanobind::object python_create_object(PyEIOS* self, jobject object) noexcept +{ + if (object) + { + auto py_java_object = new PyJavaObject(); + py_java_object->eios = self; + py_java_object->object = object; + + return nanobind::cast(py_java_object); + } + return nanobind::none(); +} + +nanobind::object python_create_object(PyJavaObject* self, jobject object) noexcept +{ + if (object) + { + auto py_java_object = new PyJavaObject(); + py_java_object->eios = self->eios; + py_java_object->object = object; + + return nanobind::cast(py_java_object); + } + return nanobind::none(); +} + +nanobind::object python_create_object(PyJavaArray* self, jobject object) noexcept +{ + if (object) + { + auto py_java_object = new PyJavaObject(); + py_java_object->eios = self->eios; + py_java_object->object = object; + + return nanobind::cast(py_java_object); + } + return nanobind::none(); +} + +nanobind::object python_create_array(PyEIOS* self, jarray array, std::size_t array_size) noexcept +{ + if (array) + { + auto py_java_array = new PyJavaArray(); + py_java_array->eios = self; + py_java_array->array = array; + py_java_array->size = array_size; + + return nanobind::cast(py_java_array); + } + return nanobind::none(); +} + +nanobind::object python_create_array(PyJavaObject* self, jarray array, std::size_t array_size) noexcept +{ + if (array) + { + auto py_java_array = new PyJavaArray(); + py_java_array->eios = self->eios; + py_java_array->array = array; + py_java_array->size = array_size; + + return nanobind::cast(py_java_array); + } + return nanobind::none(); +} + +nanobind::object python_create_array(PyJavaArray* self, jarray array, std::size_t array_size) noexcept +{ + if (array) + { + auto py_java_array = new PyJavaArray(); + py_java_array->eios = self->eios; + py_java_array->array = array; + py_java_array->size = array_size; + + return nanobind::cast(py_java_array); + } + return nanobind::none(); +} + 
+PyEIOS::~PyEIOS() +{ + if (native_eios) + { + EIOS_ReleaseTarget(this->native_eios); + this->native_eios = nullptr; + } +} + +PyJavaObject::~PyJavaObject() +{ + if (this->eios && this->object) + { + this->eios->native_eios->control_center->reflect_release_object(this->object); + this->object = nullptr; + this->eios = nullptr; + this->object = nullptr; + } +} + +PyJavaArray::~PyJavaArray() +{ + if (this->eios && this->array) + { + this->eios->native_eios->control_center->reflect_release_object(this->array); + this->eios = nullptr; + this->array = nullptr; + this->size = 0; + } +} + +template +PyTypeObject* PyTypeFromType() noexcept +{ + static PyTypeObject* type = nullptr; + if (!type) + { + type = Py_TYPE(nanobind::cast(T()).ptr()); + return type; + } + return nullptr; +} + +PyRemoteInputType GetPythonObjectType(PyObject* object) noexcept +{ + if (Py_IS_TYPE(object, PyTypeFromType())) + { + return PyRemoteInputType::EIOS; + } + + if (Py_IS_TYPE(object, PyTypeFromType())) + { + return PyRemoteInputType::JAVA_OBJECT; + } + + if (Py_IS_TYPE(object, PyTypeFromType())) + { + return PyRemoteInputType::JAVA_ARRAY; + } + + return PyRemoteInputType::OTHER; +} + +//PyRemoteInputType GetPythonObjectType(PyObject* object) noexcept +//{ +// if (nanobind::isinstance(object)) +// { +// return PyRemoteInputType::EIOS; +// } +// +// if (nanobind::isinstance(object)) +// { +// return PyRemoteInputType::JAVA_OBJECT; +// } +// +// if (nanobind::isinstance(object)) +// { +// return PyRemoteInputType::JAVA_ARRAY; +// } +// +// return PyRemoteInputType::OTHER; +//} +#else +PyRemoteInputType GetPythonObjectType(PyObject* object) noexcept { if ((python->Py_IS_TYPE)(object, PyEIOS_Type())) { @@ -49,7 +227,7 @@ EIOS* PythonUnwrapEIOS(PyEIOS* eios) noexcept PyEIOS* PythonGetEIOS(PyObject* object) { - PyRemoteInputType type = GetObjectType(object); + PyRemoteInputType type = GetPythonObjectType(object); switch (type) { case PyRemoteInputType::EIOS: @@ -89,7 +267,7 @@ jobject 
PythonUnwrapJavaObject(PyJavaObject* object) noexcept return nullptr; } - if (GetObjectType(reinterpret_cast(object)) != PyRemoteInputType::JAVA_OBJECT) + if (GetPythonObjectType(reinterpret_cast(object)) != PyRemoteInputType::JAVA_OBJECT) { return nullptr; } @@ -121,10 +299,11 @@ jarray PythonUnwrapJavaArray(PyJavaArray* array) noexcept return nullptr; } - if (GetObjectType(reinterpret_cast(array)) != PyRemoteInputType::JAVA_ARRAY) + if (GetPythonObjectType(reinterpret_cast(array)) != PyRemoteInputType::JAVA_ARRAY) { return nullptr; } return array->array; -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonCommon.hxx b/RemoteInput/Plugin/Python/PythonCommon.hxx index 2d9833a..5285d00 100644 --- a/RemoteInput/Plugin/Python/PythonCommon.hxx +++ b/RemoteInput/Plugin/Python/PythonCommon.hxx @@ -5,33 +5,21 @@ #ifndef REMOTEINPUT_PYTHONCOMMON_HXX #define REMOTEINPUT_PYTHONCOMMON_HXX -// On Windows, Python is really badly implemented, -// so we must include `math.h` and not `cmath` to get the _hpyot symbol -#include -#include -#include "object.h" - -#if defined(_WIN32) || defined(_WIN64) -#include "structmember.h" -#elif defined(__linux__) && (defined(__x86_64__) || defined(__i386__)) -#include "structmember.h" -#elif defined(__APPLE__) -#if __has_include() -#include /* Python.framework */ +#if defined(USE_PYBIND11) + #include + #include + #include + #include + #include #else -#include "structmember.h" -#endif -#elif defined(__aarch64__) || defined(__arm__) -#include "structmember.h" -#endif + #include + #include -#include -#include -#include + #include "PythonMacros.hxx" + #include "TypeTraits.hxx" +#endif #include "JNI_Common.hxx" -#include "TypeTraits.hxx" -#include "TypeTraits_Functional.hxx" #include "EIOS.hxx" // STRUCTURES @@ -45,31 +33,67 @@ enum class PyRemoteInputType struct PyEIOS { + #if !defined(USE_PYBIND11) PyObject_HEAD + #endif + std::int32_t pid; EIOS* native_eios; + + #if 
defined(USE_PYBIND11) + ~PyEIOS(); + #endif }; struct PyJavaObject { + #if defined(USE_PYBIND11) + PyEIOS* eios; + jobject object; + #else PyObject_HEAD PyEIOS* eios; jobject object; + #endif + + #if defined(USE_PYBIND11) + ~PyJavaObject(); + #endif }; struct PyJavaArray { + #if defined(USE_PYBIND11) + PyEIOS* eios; + jarray array; + std::size_t size; + #else PyObject_HEAD PyEIOS* eios; jarray array; std::size_t size; + #endif + + #if defined(USE_PYBIND11) + ~PyJavaArray(); + #endif }; +#if defined(USE_PYBIND11) +nanobind::object python_create_eios(EIOS* eios) noexcept; +nanobind::object python_create_object(PyEIOS* self, jobject object) noexcept; +nanobind::object python_create_object(PyJavaObject* self, jobject object) noexcept; +nanobind::object python_create_object(PyJavaArray* self, jobject object) noexcept; +nanobind::object python_create_array(PyEIOS* self, jarray array, std::size_t array_size) noexcept; +nanobind::object python_create_array(PyJavaObject* self, jarray array, std::size_t array_size) noexcept; +nanobind::object python_create_array(PyJavaArray* self, jarray array, std::size_t array_size) noexcept; +PyRemoteInputType GetPythonObjectType(PyObject* object) noexcept; +#else extern PyTypeObject* PyEIOS_Type() noexcept; extern PyTypeObject* PyJavaObject_Type() noexcept; extern PyTypeObject* PyJavaArray_Type() noexcept; -PyRemoteInputType GetObjectType(PyObject* object) noexcept; +PyRemoteInputType GetPythonObjectType(PyObject* object) noexcept; PyObject* PythonWrapEIOS(EIOS* eios) noexcept; EIOS* PythonUnwrapEIOS(PyEIOS* eios) noexcept; @@ -94,5 +118,6 @@ template PyObject* to_python_array(const std::vector& value); #include "PythonCommon_Templates.hxx" +#endif #endif //REMOTEINPUT_PYTHONCOMMON_HXX diff --git a/RemoteInput/Plugin/Python/PythonCommon_Templates.hxx b/RemoteInput/Plugin/Python/PythonCommon_Templates.hxx index 1709f8b..740fb39 100644 --- a/RemoteInput/Plugin/Python/PythonCommon_Templates.hxx +++ 
b/RemoteInput/Plugin/Python/PythonCommon_Templates.hxx @@ -2,6 +2,9 @@ // Created by Brandon on 2022-09-21. // +#ifndef REMOTEINPUT_PYTHONCOMMON_TEMPLATES_HXX +#define REMOTEINPUT_PYTHONCOMMON_TEMPLATES_HXX + #include "Python.hxx" extern std::unique_ptr python; @@ -507,4 +510,6 @@ PyObject* to_python_array(const std::vector& values) } } return result; -} \ No newline at end of file +} + +#endif //REMOTEINPUT_PYTHONCOMMON_TEMPLATES_HXX \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonEIOS.cxx b/RemoteInput/Plugin/Python/PythonEIOS.cxx index 6060f2e..51ed6e2 100644 --- a/RemoteInput/Plugin/Python/PythonEIOS.cxx +++ b/RemoteInput/Plugin/Python/PythonEIOS.cxx @@ -11,6 +11,436 @@ #include #include +#if defined(USE_PYBIND11) +void Python_EIOS_Inject(const std::string& process_name) noexcept +{ + EIOS_Inject(process_name.c_str()); +} + +void Python_EIOS_Inject_PID(std::int32_t pid) noexcept +{ + EIOS_Inject_PID(pid); +} + +nanobind::object Python_EIOS_From_PID(std::int32_t pid) noexcept +{ + return python_create_eios(EIOS_From_PID(pid)); +} + +nanobind::object Python_EIOS_RequestTarget(const std::string& pid) noexcept +{ + return python_create_eios(EIOS_RequestTarget(pid.c_str())); +} + +nanobind::object Python_EIOS_GetTargetDimensions(PyEIOS* self) noexcept +{ + std::int32_t width = 0; + std::int32_t height = 0; + EIOS_GetTargetDimensions(self->native_eios, &width, &height); + return nanobind::make_tuple(width, height); +} + +nanobind::object Python_EIOS_GetImageFormat(PyEIOS* self) noexcept +{ + ImageFormat format = EIOS_GetImageFormat(self->native_eios); + return nanobind::cast(format); +} + +void Python_EIOS_SetImageFormat(PyEIOS* self, ImageFormat format) noexcept +{ + EIOS_SetImageFormat(self->native_eios, format); +} + +nanobind::object Python_EIOS_GetImageBuffer(PyEIOS* self) noexcept +{ + std::int32_t width = 0; + std::int32_t height = 0; + EIOS_GetTargetDimensions(self->native_eios, &width, &height); + std::uint8_t* buffer = 
EIOS_GetImageBuffer(self->native_eios); + return nanobind::steal(PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4 * sizeof(std::uint8_t), PyBUF_READ)); +} + +nanobind::object Python_EIOS_GetDebugImageBuffer(PyEIOS* self) noexcept +{ + std::int32_t width = 0; + std::int32_t height = 0; + EIOS_GetTargetDimensions(self->native_eios, &width, &height); + std::uint8_t* buffer = EIOS_GetDebugImageBuffer(self->native_eios); + return nanobind::steal(PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4 * sizeof(std::uint8_t), PyBUF_WRITE)); +} + +void Python_EIOS_SetGraphicsDebugging(PyEIOS* self, bool enabled) noexcept +{ + EIOS_SetGraphicsDebugging(self->native_eios, enabled); +} + +nanobind::object Python_EIOS_HasFocus(PyEIOS* self) noexcept +{ + return nanobind::bool_(EIOS_HasFocus(self->native_eios)); +} + +void Python_EIOS_GainFocus(PyEIOS* self) noexcept +{ + EIOS_GainFocus(self->native_eios); +} + +void Python_EIOS_LoseFocus(PyEIOS* self) noexcept +{ + EIOS_LoseFocus(self->native_eios); +} + +nanobind::object Python_EIOS_IsKeyboardInputEnabled(PyEIOS* self) noexcept +{ + return nanobind::bool_(EIOS_IsKeyboardInputEnabled(self->native_eios)); +} + +void Python_EIOS_SetKeyboardInputEnabled(PyEIOS* self, bool enabled) noexcept +{ + EIOS_SetKeyboardInputEnabled(self->native_eios, enabled); +} + +nanobind::object Python_EIOS_IsMouseInputEnabled(PyEIOS* self) noexcept +{ + return nanobind::bool_(EIOS_IsMouseInputEnabled(self->native_eios)); +} + +void Python_EIOS_SetMouseInputEnabled(PyEIOS* self, bool enabled) noexcept +{ + EIOS_SetMouseInputEnabled(self->native_eios, enabled); +} + +nanobind::object Python_EIOS_GetMousePosition(PyEIOS* self) noexcept +{ + std::int32_t width = 0; + std::int32_t height = 0; + EIOS_GetMousePosition(self->native_eios, &width, &height); + return nanobind::make_tuple(width, height); +} + +nanobind::object Python_EIOS_GetRealMousePosition(PyEIOS* self) noexcept +{ + std::int32_t width = 0; + std::int32_t 
height = 0; + EIOS_GetRealMousePosition(self->native_eios, &width, &height); + return nanobind::make_tuple(width, height); +} + +void Python_EIOS_MoveMouse(PyEIOS* self, std::int32_t x, std::int32_t y) noexcept +{ + EIOS_MoveMouse(self->native_eios, x, y); +} + +void Python_EIOS_HoldMouse(PyEIOS* self, std::int32_t button) noexcept +{ + EIOS_HoldMouse(self->native_eios, 0, 0, button); +} + +void Python_EIOS_ReleaseMouse(PyEIOS* self, std::int32_t button) noexcept +{ + EIOS_ReleaseMouse(self->native_eios, 0, 0, button); +} + +void Python_EIOS_ScrollMouse(PyEIOS* self, std::int32_t lines) noexcept +{ + EIOS_ScrollMouse(self->native_eios, 0, 0, lines); +} + +nanobind::object Python_EIOS_IsMouseButtonHeld(PyEIOS* self, std::int32_t button) noexcept +{ + return nanobind::bool_(EIOS_IsMouseButtonHeld(self->native_eios, button)); +} + +void Python_EIOS_SendString(PyEIOS* self, const std::string& text, std::int32_t key_wait, std::int32_t key_mod_wait) noexcept +{ + EIOS_SendString(self->native_eios, text.c_str(), key_wait, key_mod_wait); +} + +void Python_EIOS_HoldKey(PyEIOS* self, std::int32_t key) noexcept +{ + EIOS_HoldKey(self->native_eios, key); +} + +void Python_EIOS_ReleaseKey(PyEIOS* self, std::int32_t key) noexcept +{ + EIOS_ReleaseKey(self->native_eios, key); +} + +nanobind::object Python_EIOS_IsKeyHeld(PyEIOS* self, std::int32_t key) noexcept +{ + return nanobind::bool_(EIOS_IsKeyHeld(self->native_eios, key)); +} + +nanobind::object Python_EIOS_GetKeyboardSpeed(PyEIOS* self) noexcept +{ + return nanobind::int_(EIOS_GetKeyboardSpeed(self->native_eios)); +} + +void Python_EIOS_SetKeyboardSpeed(PyEIOS* self, std::int32_t speed) noexcept +{ + EIOS_SetKeyboardSpeed(self->native_eios, speed); +} + +nanobind::object Python_EIOS_GetKeyboardRepeatDelay(PyEIOS* self) noexcept +{ + return nanobind::int_(EIOS_GetKeyboardRepeatDelay(self->native_eios)); +} + +void Python_EIOS_SetKeyboardRepeatDelay(PyEIOS* self, std::int32_t delay) noexcept +{ + 
EIOS_SetKeyboardRepeatDelay(self->native_eios, delay); +} + +nanobind::object Python_EIOS_GetClientsPIDs(bool unpaired_only) noexcept +{ + std::size_t client_count = EIOS_GetClients(unpaired_only); + std::vector client_pids(client_count); + + for (std::size_t i = 0; i < client_count; ++i) + { + client_pids[i] = EIOS_GetClientPID(i); + } + return nanobind::cast(client_pids); +} + +nanobind::object Python_EIOS_PairClient_PID(std::int32_t pid) noexcept +{ + return python_create_eios(EIOS_PairClient(pid)); +} + +void Python_EIOS_KillClientPID(std::int32_t pid) noexcept +{ + EIOS_KillClientPID(pid); +} + +void Python_EIOS_KillClient(PyEIOS* self) noexcept +{ + EIOS_KillClient(self->native_eios); +} + +nanobind::object Python_EIOS_Reflect_Object(PyEIOS* self, const std::string& cls, const std::string& field, const std::string& desc) noexcept +{ + EIOS* eios = self->native_eios; + jobject result = eios->control_center->reflect_object({nullptr, cls, field, desc}); + return python_create_object(self, result); +} + +nanobind::object Python_EIOS_Reflect_Bool(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + jboolean result = eios->control_center->reflect_boolean({nullptr, cls, field, "Z"}); + return nanobind::bool_(result); +} + +nanobind::object Python_EIOS_Reflect_Char(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + char result = eios->control_center->reflect_char({nullptr, cls, field, "C"}); + return nanobind::cast(std::string(1, result)); +} + +nanobind::object Python_EIOS_Reflect_Byte(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + std::uint8_t result = eios->control_center->reflect_byte({nullptr, cls, field, "B"}); + return nanobind::int_(result); +} + +nanobind::object Python_EIOS_Reflect_Short(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios 
= self->native_eios; + std::int16_t result = eios->control_center->reflect_short({nullptr, cls, field, "S"}); + return nanobind::int_(result); +} + +nanobind::object Python_EIOS_Reflect_Int(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + std::int32_t result = eios->control_center->reflect_int({nullptr, cls, field, "I"}); + return nanobind::int_(result); +} + +nanobind::object Python_EIOS_Reflect_Long(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + std::int64_t result = eios->control_center->reflect_long({nullptr, cls, field, "J"}); + return nanobind::int_(result); +} + +nanobind::object Python_EIOS_Reflect_Float(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + float result = eios->control_center->reflect_float({nullptr, cls, field, "F"}); + return nanobind::float_(result); +} + +nanobind::object Python_EIOS_Reflect_Double(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + double result = eios->control_center->reflect_double({nullptr, cls, field, "D"}); + return nanobind::float_(result); +} + +nanobind::object Python_EIOS_Reflect_String(PyEIOS* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->native_eios; + std::string result = eios->control_center->reflect_string({nullptr, cls, field, "Ljava/lang/String;"}); + return nanobind::cast(result); +} + +nanobind::object Python_EIOS_Reflect_Array(PyEIOS* self, const std::string& cls, const std::string& field, const std::string& desc) noexcept +{ + EIOS* eios = self->native_eios; + std::size_t array_size = 0; + jarray array = eios->control_center->reflect_array({nullptr, cls, field, desc}, &array_size); + return python_create_array(self, array, array_size); +} + +void Python_Reflect_Release_Objects(PyEIOS* self, const nanobind::object& 
object) noexcept +{ + // Flatten the List + std::stack stack; + std::vector objects; + stack.push(object); + + while (!stack.empty()) + { + nanobind::handle current = stack.top(); + stack.pop(); + + if (nanobind::isinstance(current)) + { + for (auto item : nanobind::cast(current)) + { + stack.push(item); + } + } + else + { + if (Py_REFCNT(current.ptr()) == 1) + { + objects.push_back(current); + } + } + } + + // Early exit if no objects to free + if (objects.empty()) + { + return; + } + + // Unwrap each object and clear as we go along to prevent double-free + std::vector result; + result.reserve(objects.size()); + EIOS* eios = self ? self->native_eios : nullptr; + + for (auto& handle : objects) + { + if (nanobind::isinstance(handle)) + { + auto* java_object = nanobind::cast(handle); + if (!eios) + { + eios = java_object->eios->native_eios; + } + + result.push_back(java_object->object); + handle.dec_ref(); + } + else if (nanobind::isinstance(handle)) + { + auto* java_array = nanobind::cast(handle); + if (!eios) + { + eios = java_array->eios->native_eios; + } + + result.push_back(java_array->array); + handle.dec_ref(); + } + } + + // Release all objects at once + Reflect_Release_Objects(eios, &result[0], result.size()); +} + +nanobind::object PyEIOS_Str(PyEIOS* self) +{ + std::ostringstream stream; + + std::ios state(nullptr); + state.copyfmt(stream); + + stream << std::setfill('0') << std::uppercase << std::hex; + stream << "EIOS("; + stream << "0x" << reinterpret_cast(self); + stream << "): "; + stream.copyfmt(state); + + stream << "{"<<"\n"; + stream<< " pid: " << self->pid << "\n"; + stream<< "}"; + + return nanobind::cast(stream.str()); +} + +void declare_python_eios(nanobind::module_ &module) +{ + nanobind::class_(module, "EIOS") + // .def(nanobind::init<>()) // Cannot instantiate from Python + .def_ro("pid", &PyEIOS::pid) + .def_ro("native_eios", &PyEIOS::native_eios) + .def_static("inject", &Python_EIOS_Inject) + .def_static("inject_pid", 
&Python_EIOS_Inject_PID) + .def_static("from_pid", &Python_EIOS_From_PID) + .def_static("request_target", &Python_EIOS_RequestTarget) + .def("get_target_dimensions", &Python_EIOS_GetTargetDimensions) + .def("get_image_format", &Python_EIOS_GetImageFormat) + .def("set_image_format", &Python_EIOS_SetImageFormat) + .def("get_image_buffer", &Python_EIOS_GetImageBuffer) + .def("get_debug_image_buffer", &Python_EIOS_GetDebugImageBuffer) + .def("set_graphics_debugging", &Python_EIOS_SetGraphicsDebugging) + .def("has_focus", &Python_EIOS_HasFocus) + .def("gain_focus", &Python_EIOS_GainFocus) + .def("lose_focus", &Python_EIOS_LoseFocus) + .def("is_keyboard_input_enabled", &Python_EIOS_IsKeyboardInputEnabled) + .def("set_keyboard_input_enabled", &Python_EIOS_SetKeyboardInputEnabled) + .def("is_mouse_input_enabled", &Python_EIOS_IsMouseInputEnabled) + .def("set_mouse_input_enabled", &Python_EIOS_SetMouseInputEnabled) + .def("get_mouse_position", &Python_EIOS_GetMousePosition) + .def("get_real_mouse_position", &Python_EIOS_GetRealMousePosition) + .def("move_mouse", &Python_EIOS_MoveMouse) + .def("hold_mouse", &Python_EIOS_HoldMouse) + .def("release_mouse", &Python_EIOS_ReleaseMouse) + .def("scroll_mouse", &Python_EIOS_ScrollMouse) + .def("is_mouse_button_held", &Python_EIOS_IsMouseButtonHeld) + .def("send_string", &Python_EIOS_SendString) + .def("hold_key", &Python_EIOS_HoldKey) + .def("release_key", &Python_EIOS_ReleaseKey) + .def("is_key_held", &Python_EIOS_IsKeyHeld) + .def("get_keyboard_speed", &Python_EIOS_GetKeyboardSpeed) + .def("set_keyboard_speed", &Python_EIOS_SetKeyboardSpeed) + .def("get_keyboard_repeat_delay", &Python_EIOS_GetKeyboardRepeatDelay) + .def("set_keyboard_repeat_delay", &Python_EIOS_SetKeyboardRepeatDelay) + .def_static("get_clients_pids", &Python_EIOS_GetClientsPIDs) + .def_static("pair_client_pid", &Python_EIOS_PairClient_PID) + .def_static("kill_client_pid", &Python_EIOS_KillClientPID) + .def("kill_client", &Python_EIOS_KillClient) + 
.def("reflect_object", &Python_EIOS_Reflect_Object, nanobind::arg("cls"), nanobind::arg("field"), nanobind::arg("desc")) + .def("reflect_bool", &Python_EIOS_Reflect_Bool, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_char", &Python_EIOS_Reflect_Char, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_byte", &Python_EIOS_Reflect_Byte, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_short", &Python_EIOS_Reflect_Short, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_int", &Python_EIOS_Reflect_Int, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_long", &Python_EIOS_Reflect_Long, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_float", &Python_EIOS_Reflect_Float, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_double", &Python_EIOS_Reflect_Double, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_string", &Python_EIOS_Reflect_String, nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_array", &Python_EIOS_Reflect_Array, nanobind::arg("cls"), nanobind::arg("field"), nanobind::arg("desc")) + .def("release_objects", &Python_Reflect_Release_Objects) + .def("__str__", &PyEIOS_Str); +} +#else int PyEIOS_Clear(PyObject* object) { PyEIOS* py_eios = reinterpret_cast(object); @@ -24,9 +454,7 @@ void PyEIOS_Dealloc(PyObject* object) PyEIOS* py_eios = reinterpret_cast(object); EIOS_ReleaseTarget(py_eios->native_eios); - // PyObject_GC_UnTrack(object); PyEIOS_Clear(object); - //PyObject_Del(object); // NO GC! 
python->PyObject_Free(object); } @@ -298,7 +726,7 @@ PyObject* Python_EIOS_GetImageBuffer(PyEIOS* self, PyObject* args[], Py_ssize_t std::int32_t height = 0; EIOS_GetTargetDimensions(python_get_eios(self), &width, &height); std::uint8_t* buffer = EIOS_GetImageBuffer(python_get_eios(self)); - return python->PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4, 0x200); + return python->PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4 * sizeof(std::uint8_t), 0x100 /*PyBUF_READ*/); } PyObject* Python_EIOS_GetDebugImageBuffer(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept @@ -315,7 +743,7 @@ PyObject* Python_EIOS_GetDebugImageBuffer(PyEIOS* self, PyObject* args[], Py_ssi std::int32_t height = 0; EIOS_GetTargetDimensions(python_get_eios(self), &width, &height); std::uint8_t* buffer = EIOS_GetDebugImageBuffer(python_get_eios(self)); - return python->PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4, 0x200); + return python->PyMemoryView_FromMemory(reinterpret_cast(buffer), width * height * 4 * sizeof(std::uint8_t), 0x200 /*PyBUF_WRITE*/); } PyObject* Python_EIOS_SetGraphicsDebugging(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept @@ -892,7 +1320,6 @@ PyObject* Python_Reflect_Release_Objects(PyEIOS* self, PyObject* args[], Py_ssiz { for (std::size_t i = 0; i < python->PyList_Size(object); ++i) { - PyObject* object = python->PyList_GetItem(object, i); stack.push(python->PyList_GetItem(object, i)); } } @@ -918,7 +1345,7 @@ PyObject* Python_Reflect_Release_Objects(PyEIOS* self, PyObject* args[], Py_ssiz for (PyObject* object : objects) { - PyRemoteInputType type = GetObjectType(object); + PyRemoteInputType type = GetPythonObjectType(object); if (type == PyRemoteInputType::JAVA_OBJECT) { if (!self) @@ -946,4 +1373,5 @@ PyObject* Python_Reflect_Release_Objects(PyEIOS* self, PyObject* args[], Py_ssiz (python->Py_INCREF)(python->Py_GetNone_Object()); return python->Py_GetNone_Object(); -} \ 
No newline at end of file +} +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonEIOS.hxx b/RemoteInput/Plugin/Python/PythonEIOS.hxx index f47288c..65065ec 100644 --- a/RemoteInput/Plugin/Python/PythonEIOS.hxx +++ b/RemoteInput/Plugin/Python/PythonEIOS.hxx @@ -6,13 +6,10 @@ #define REMOTEINPUT_PYTHONEIOS_HXX #include "PythonCommon.hxx" -#include "EIOS.hxx" - -#ifdef __cplusplus -extern "C" -{ -#endif +#if defined(USE_PYBIND11) +void declare_python_eios(nanobind::module_ &module); +#else PyObject* Python_EIOS_Inject(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_EIOS_Inject_PID(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_EIOS_From_PID(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; @@ -51,9 +48,6 @@ PyObject* Python_EIOS_PairClient_PID(PyEIOS* self, PyObject* args[], Py_ssize_t PyObject* Python_EIOS_KillClientPID(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_EIOS_KillClient(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_Reflect_Release_Objects(PyEIOS* self, PyObject* args[], Py_ssize_t args_length) noexcept; - -#ifdef __cplusplus -} #endif #endif //REMOTEINPUT_PYTHONEIOS_HXX diff --git a/RemoteInput/Plugin/Python/PythonJavaArray.cxx b/RemoteInput/Plugin/Python/PythonJavaArray.cxx index 01db17a..665b4b3 100644 --- a/RemoteInput/Plugin/Python/PythonJavaArray.cxx +++ b/RemoteInput/Plugin/Python/PythonJavaArray.cxx @@ -3,11 +3,319 @@ // #include "PythonJavaArray.hxx" -#include "NativePlugin.hxx" +#include "PythonJavaList.hxx" + #include #include #include +#if defined(USE_PYBIND11) +#include +#endif + +#if defined(USE_PYBIND11) +nanobind::object read_array_type(Stream &stream, PyJavaArray* object, ReflectionType type, std::size_t dimensions); + +nanobind::object Python_JavaArray_GetLength(PyJavaArray* self) noexcept +{ + EIOS* eios = self->eios->native_eios; + jarray array = 
self->array; + std::size_t length = eios->control_center->reflect_array_size(array); + return nanobind::int_(length); +} + +nanobind::object Python_JavaArray_Get1D(PyJavaArray* self, const nanobind::object& type_object, const nanobind::object& indices_object, const nanobind::object& index_object, const nanobind::object& length_object) noexcept +{ + ReflectionType type = ReflectionType::OBJECT; + if (!type_object.is_none()) + { + type = nanobind::cast(type_object); + } + + EIOS* eios = self->eios->native_eios; + jarray array = self->array; + std::size_t index = 0; + std::size_t length = 0; + + if (indices_object.is_none() && index_object.is_none() && length_object.is_none()) + { + // Read entire array + Stream &stream = eios->control_center->reflect_array_all(array, type, 1)->data_stream(); + return read_array_type(stream, self, type, 1); + } + + if (!indices_object.is_none()) + { + // Read array indexed by indices + auto indices = nanobind::cast>(indices_object); + Stream &stream = eios->control_center->reflect_array_indices(array, type, &indices[0], indices.size())->data_stream(); + return read_array_type(stream, self, type, 1); + } + + if (!index_object.is_none()) + { + index = nanobind::cast(index_object); + } + + if (length_object) + { + length = nanobind::cast(length_object); + } + + if (!index_object.is_none() && length_object.is_none()) + { + // Read array[index] + Stream &stream = eios->control_center->reflect_array(array, type, 1, index)->data_stream(); + return read_array_type(stream, self, type, 0); + } + + length = std::min(std::max(length, 1), self->size); + + // Read array of [index..control_center->reflect_array(array, type, length, index)->data_stream(); + return read_array_type(stream, self, type, 1); +} + +nanobind::object Python_JavaArray_Get2D(PyJavaArray* self, const nanobind::object& type_object, const nanobind::object& x_object, const nanobind::object& y_object) noexcept +{ + ReflectionType type = ReflectionType::OBJECT; + if 
(!type_object.is_none()) + { + type = nanobind::cast(type_object); + } + + EIOS* eios = self->eios->native_eios; + jarray array = self->array; + + // Array[x][y] + if (!x_object.is_none() && !y_object.is_none()) + { + std::size_t x = nanobind::cast(x_object); + std::size_t y = nanobind::cast(y_object); + + Stream &stream = eios->control_center->reflect_array(array, type, 1, x, y)->data_stream(); + return read_array_type(stream, self, type, 0); + } + + // Array[][] + Stream &stream = eios->control_center->reflect_array_all(array, type, 2)->data_stream(); + return read_array_type(stream, self, type, 2); +} + +nanobind::object Python_JavaArray_Get3D(PyJavaArray* self, const nanobind::object& type_object, const nanobind::object& x_object, const nanobind::object& y_object, const nanobind::object& z_object) noexcept +{ + ReflectionType type = ReflectionType::OBJECT; + if (!type_object.is_none()) + { + type = nanobind::cast(type_object); + } + + EIOS* eios = self->eios->native_eios; + jarray array = self->array; + + // Array[x][y][z] + if (!x_object.is_none() && !y_object.is_none() && !z_object.is_none()) + { + std::size_t x = nanobind::cast(x_object); + std::size_t y = nanobind::cast(y_object); + std::size_t z = nanobind::cast(z_object); + + Stream &stream = eios->control_center->reflect_array(array, type, 1, x, y, z)->data_stream(); + return read_array_type(stream, self, type, 0); + } + + // Array[][][] + Stream &stream = eios->control_center->reflect_array_all(array, type, 3)->data_stream(); + return read_array_type(stream, self, type, 3); +} + +nanobind::object Python_JavaArray_Get4D(PyJavaArray* self, const nanobind::object& type_object, const nanobind::object& x_object, const nanobind::object& y_object, const nanobind::object& z_object, const nanobind::object& w_object) noexcept +{ + ReflectionType type = ReflectionType::OBJECT; + if (!type_object.is_none()) + { + type = nanobind::cast(type_object); + } + + EIOS* eios = self->eios->native_eios; + jarray array = 
self->array; + + // Array[x][y][z][w] + if (!x_object.is_none() && !y_object.is_none() && !z_object.is_none() && !w_object.is_none()) + { + std::size_t x = nanobind::cast(x_object); + std::size_t y = nanobind::cast(y_object); + std::size_t z = nanobind::cast(z_object); + std::size_t w = nanobind::cast(z_object); + + Stream &stream = eios->control_center->reflect_array(array, type, 1, x, y, z, w)->data_stream(); + return read_array_type(stream, self, type, 0); + } + + // Array[][][][] + Stream &stream = eios->control_center->reflect_array_all(array, type, 4)->data_stream(); + return read_array_type(stream, self, type, 4); +} + +void Python_JavaArray_Release_Object(PyJavaArray* self) noexcept +{ + if (self->eios && self->array) + { + EIOS* eios = self->eios->native_eios; + jarray object = self->array; + + eios->control_center->reflect_release_object(object); + + self->eios = nullptr; + self->array = nullptr; + self->size = 0; + } +} + +nanobind::object PyJavaArray_Str(PyJavaArray* self) +{ + std::ostringstream stream; + + std::ios state(nullptr); + state.copyfmt(stream); + + stream << std::setfill('0') << std::uppercase << std::hex; + stream << "JavaArray("; + stream << "0x" << reinterpret_cast(self); + stream << "): "; + stream.copyfmt(state); + + stream << "{"<<"\n"; + stream<< " eios: " << self->eios << "\n"; + stream<< " array: " << self->array << "\n"; + stream<< " size: " << self->size << "\n"; + stream<< "}"; + + return nanobind::cast(stream.str()); +} + +void declare_python_java_array(nanobind::module_ &module) +{ + nanobind::class_(module, "JavaArray") + // .def(nanobind::init<>()) // Cannot instantiate from Python + .def_ro("eios", &PyJavaArray::eios) + .def_ro("array", &PyJavaArray::array) + .def_ro("size", &PyJavaArray::size) + .def("get_length", &Python_JavaArray_GetLength) + .def("get_1d", &Python_JavaArray_Get1D, + nanobind::arg("type") = nanobind::none(), + nanobind::arg("indices") = nanobind::none(), + nanobind::arg("index") = nanobind::none(), + 
nanobind::arg("length") = nanobind::none()) + .def("get_2d", &Python_JavaArray_Get2D, + nanobind::arg("type") = nanobind::none(), + nanobind::arg("x") = nanobind::none(), + nanobind::arg("y") = nanobind::none()) + .def("get_3d", &Python_JavaArray_Get3D, + nanobind::arg("type") = nanobind::none(), + nanobind::arg("x") = nanobind::none(), + nanobind::arg("y") = nanobind::none(), + nanobind::arg("z") = nanobind::none()) + .def("get_4d", &Python_JavaArray_Get4D, + nanobind::arg("type") = nanobind::none(), + nanobind::arg("x") = nanobind::none(), + nanobind::arg("y") = nanobind::none(), + nanobind::arg("z") = nanobind::none(), + nanobind::arg("w") = nanobind::none()) + .def("release_object", &Python_JavaArray_Release_Object) + .def("__str__", &PyJavaArray_Str); +} + +template +nanobind::object read_array_type(Stream &stream, PyJavaArray* array) +{ + if constexpr(std::is_same::value) + { + std::size_t length = stream.read(); + PyObject* list = create_java_list(length); + + for (std::size_t i = 0; i < length; ++i) + { + PyList_SetItem(list, i, python_create_object(array, stream.read()).release().ptr()); + } + + return nanobind::steal(list); + } + else if constexpr(std::is_same::value) + { + std::size_t length = stream.read(); + PyObject* list = create_java_list(length); + + for (std::size_t i = 0; i < length; ++i) + { + PyList_SetItem(list, i, python_create_array(array, stream.read(), length).release().ptr()); + } + + return nanobind::steal(list); + } + else + { + return nanobind::cast(stream.read>()); + } +} + +nanobind::object read_array_type(Stream &stream, PyJavaArray* object, ReflectionType type, std::size_t dimensions) +{ + if (dimensions == 0) + { + std::size_t size = stream.read(); + if (size == 0) + { + return nanobind::none(); + } + + switch(type) + { + case ReflectionType::CHAR: return nanobind::cast(std::string(1, stream.read())); + case ReflectionType::BYTE: return nanobind::int_(stream.read()); + case ReflectionType::BOOL: return 
nanobind::bool_(stream.read()); + case ReflectionType::SHORT: return nanobind::int_(stream.read()); + case ReflectionType::INT: return nanobind::int_(stream.read()); + case ReflectionType::LONG: return nanobind::int_(stream.read()); + case ReflectionType::FLOAT: return nanobind::float_(stream.read()); + case ReflectionType::DOUBLE: return nanobind::float_(stream.read()); + case ReflectionType::STRING: return nanobind::cast(stream.read()); + case ReflectionType::OBJECT: return python_create_object(object, stream.read()); + case ReflectionType::ARRAY: return python_create_array(object, stream.read(), 0); + default: return nanobind::none(); + } + } + + if (dimensions == 1) + { + switch(type) + { + case ReflectionType::CHAR: return read_array_type(stream, object); + case ReflectionType::BYTE: return read_array_type(stream, object); + case ReflectionType::BOOL: return read_array_type(stream, object); + case ReflectionType::SHORT: return read_array_type(stream, object); + case ReflectionType::INT: return read_array_type(stream, object); + case ReflectionType::LONG: return read_array_type(stream, object); + case ReflectionType::FLOAT: return read_array_type(stream, object); + case ReflectionType::DOUBLE: return read_array_type(stream, object); + case ReflectionType::STRING: return read_array_type(stream, object); + case ReflectionType::OBJECT: return read_array_type(stream, object); + case ReflectionType::ARRAY: return read_array_type(stream, object); + default: return nanobind::none(); + } + } + + std::size_t length = stream.read(); + PyObject* list = create_java_list(length); + + for (std::size_t i = 0; i < length; ++i) + { + PyList_SetItem(list, i, read_array_type(stream, object, type, dimensions - 1).release().ptr()); + } + + return nanobind::steal(list); +} +#else int PyJavaArray_Clear(PyObject* object) { PyJavaArray* py_java_array = reinterpret_cast(object); @@ -22,15 +330,15 @@ int PyJavaArray_Clear(PyObject* object) void PyJavaArray_Dealloc(PyObject* object) { - 
PyJavaArray* py_java_object = reinterpret_cast(object); - if (py_java_object->eios && py_java_object->array) + EIOS* eios = PythonUnwrapEIOS(reinterpret_cast(object)->eios); + jarray array = reinterpret_cast(object)->array; + + if (eios && array) { - Reflect_Release_Object(PythonUnwrapEIOS(py_java_object->eios), py_java_object->array); + eios->control_center->reflect_release_object(array); } - // PyObject_GC_UnTrack(object); PyJavaArray_Clear(object); - //PyObject_Del(object); // NO GC! python->PyObject_Free(object); } @@ -365,7 +673,7 @@ PyObject* Python_JavaArray_Release_Object(PyJavaArray* self, PyObject* args[], P template PyObject* read_array_type(Stream &stream, PyJavaArray* object) { - extern PyObject* create_java_list(PyEIOS* eios, Py_ssize_t length); + extern PyObject* create_java_list(Py_ssize_t length); if constexpr(std::is_same::value) { @@ -378,7 +686,7 @@ PyObject* read_array_type(Stream &stream, PyJavaArray* object) else if constexpr(std::is_same::value) { std::size_t length = stream.read(); - PyObject* list = create_java_list(PythonGetEIOS(reinterpret_cast(object)), length); //python->PyList_New(length); + PyObject* list = create_java_list(length); //python->PyList_New(length); for (std::size_t i = 0; i < length; ++i) { python->PyList_SetItem(list, i, python_create_object(object, stream.read())); @@ -388,7 +696,7 @@ PyObject* read_array_type(Stream &stream, PyJavaArray* object) else if constexpr(std::is_same::value) { std::size_t length = stream.read(); - PyObject* list = create_java_list(PythonGetEIOS(reinterpret_cast(object)), length); //python->PyList_New(length); + PyObject* list = create_java_list(length); //python->PyList_New(length); for (std::size_t i = 0; i < length; ++i) { python->PyList_SetItem(list, i, python_create_array(object, stream.read(), length)); @@ -477,4 +785,5 @@ PyObject* read_array_type(Stream &stream, PyJavaArray* object, ReflectionType ty } return result; -} \ No newline at end of file +} +#endif \ No newline at end of 
file diff --git a/RemoteInput/Plugin/Python/PythonJavaArray.hxx b/RemoteInput/Plugin/Python/PythonJavaArray.hxx index 72991c6..e4cd39c 100644 --- a/RemoteInput/Plugin/Python/PythonJavaArray.hxx +++ b/RemoteInput/Plugin/Python/PythonJavaArray.hxx @@ -6,23 +6,16 @@ #define REMOTEINPUT_PYTHONJAVAARRAY_HXX #include "PythonCommon.hxx" -#include "JNI_Common.hxx" - -#ifdef __cplusplus -extern "C" -{ -#endif +#if defined(USE_PYBIND11) +void declare_python_java_array(nanobind::module_ &module); +#else PyObject* Python_JavaArray_GetLength(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_JavaArray_Get1D(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length, PyObject* kwnames) noexcept; PyObject* Python_JavaArray_Get2D(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length, PyObject* kwnames) noexcept; PyObject* Python_JavaArray_Get3D(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length, PyObject* kwnames) noexcept; PyObject* Python_JavaArray_Get4D(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length, PyObject* kwnames) noexcept; PyObject* Python_JavaArray_Release_Object(PyJavaArray* self, PyObject* args[], Py_ssize_t args_length) noexcept; - - -#ifdef __cplusplus -} #endif #endif //REMOTEINPUT_PYTHONJAVAARRAY_HXX diff --git a/RemoteInput/Plugin/Python/PythonJavaList.cxx b/RemoteInput/Plugin/Python/PythonJavaList.cxx index 34e1c71..5eb4255 100644 --- a/RemoteInput/Plugin/Python/PythonJavaList.cxx +++ b/RemoteInput/Plugin/Python/PythonJavaList.cxx @@ -3,8 +3,81 @@ // #include "PythonJavaList.hxx" -#include "Python.hxx" #include "PythonCommon.hxx" + +#if defined(USE_PYBIND11) +void (*PyList_tp_dealloc)(PyObject*) = nullptr; + +void PyList_Dealloc(PyObject* self) +{ + PyListObject* list = reinterpret_cast(self); + Py_ssize_t size = reinterpret_cast(list)->ob_size; //Py_SIZE(py_java_list); + + if (list->ob_item && size > 0) + { + PyEIOS* eios = nullptr; + PyObject** items = list->ob_item; + + std::vector java_objects; 
+ java_objects.reserve(size); + + for (Py_ssize_t i = 0; i < size; ++i) + { + PyObject* object = items[i]; + if (object->ob_refcnt == 1) //Py_REFCNT + { + PyRemoteInputType type = GetPythonObjectType(object); + if (type == PyRemoteInputType::JAVA_OBJECT) + { + PyJavaObject* py_java_object = reinterpret_cast(object); + java_objects.push_back(py_java_object->object); + py_java_object->object = nullptr; + + if (!eios) + { + eios = py_java_object->eios; + } + } + else if (type == PyRemoteInputType::JAVA_ARRAY) + { + PyJavaArray* py_java_array = reinterpret_cast(object); + java_objects.push_back(py_java_array->array); + py_java_array->array = nullptr; + + if (!eios) + { + eios = py_java_array->eios; + } + } + } + } + + if (!java_objects.empty() && eios) + { + eios->native_eios->control_center->reflect_release_objects(&java_objects[0], java_objects.size()); + } + } + + if (PyList_tp_dealloc) + { + PyList_tp_dealloc(self); + } +} + +PyObject* create_java_list(Py_ssize_t length) +{ + PyObject* list = PyList_New(length); + static PyTypeObject* type = reinterpret_cast(Py_TYPE(list)); + static destructor original_des = type->tp_dealloc; + PyList_tp_dealloc = original_des; + if (PyList_tp_dealloc) + { + type->tp_dealloc = PyList_Dealloc; + } + + return list; +} +#else #include extern std::unique_ptr python; @@ -37,7 +110,7 @@ void PyList_Dealloc(PyObject* self) PyObject* object = items[i]; if (object->ob_refcnt == 1) //Py_REFCNT { - PyRemoteInputType type = GetObjectType(object); + PyRemoteInputType type = GetPythonObjectType(object); if (type == PyRemoteInputType::JAVA_OBJECT) { PyJavaObject* py_java_object = reinterpret_cast(object); @@ -75,7 +148,7 @@ void PyList_Dealloc(PyObject* self) } } -PyObject* create_java_list(PyEIOS* eios, Py_ssize_t length) +PyObject* create_java_list(Py_ssize_t length) { #if defined(Py_LIMITED_API) struct PyTypeObject @@ -93,7 +166,8 @@ PyObject* create_java_list(PyEIOS* eios, Py_ssize_t length) PyList_tp_dealloc = original_des; if 
(PyList_tp_dealloc) { - reinterpret_cast(type)->tp_dealloc = PyList_Dealloc; + type->tp_dealloc = PyList_Dealloc; } return list; -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonJavaList.hxx b/RemoteInput/Plugin/Python/PythonJavaList.hxx index 46f91c4..f5eb940 100644 --- a/RemoteInput/Plugin/Python/PythonJavaList.hxx +++ b/RemoteInput/Plugin/Python/PythonJavaList.hxx @@ -5,9 +5,8 @@ #ifndef REMOTEINPUT_PYTHONJAVALIST_HXX #define REMOTEINPUT_PYTHONJAVALIST_HXX -#include "Python.hxx" +#include "PythonCommon.hxx" -struct PyEIOS; -PyObject* create_java_list(PyEIOS* eios, Py_ssize_t length); +PyObject* create_java_list(Py_ssize_t length); #endif //REMOTEINPUT_PYTHONJAVALIST_HXX diff --git a/RemoteInput/Plugin/Python/PythonJavaObject.cxx b/RemoteInput/Plugin/Python/PythonJavaObject.cxx index c3394c0..6333068 100644 --- a/RemoteInput/Plugin/Python/PythonJavaObject.cxx +++ b/RemoteInput/Plugin/Python/PythonJavaObject.cxx @@ -3,11 +3,192 @@ // #include "PythonJavaObject.hxx" -#include "NativePlugin.hxx" + #include #include #include +#if defined(USE_PYBIND11) +nanobind::object Python_Reflect_Object(PyJavaObject* self, const std::string& cls, const std::string& field, const std::string& desc) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + jobject result = eios->control_center->reflect_object({object, cls, field, desc}); + return python_create_object(self, result); +} + +nanobind::object Python_Reflect_IsSame_Object(PyJavaObject* self, const std::shared_ptr& other) noexcept +{ + EIOS* eios = self->eios->native_eios; + jboolean result = eios->control_center->reflect_is_objects_equal(self->object, other->object); + return nanobind::bool_(result); +} + +nanobind::object Python_Reflect_InstanceOf(PyJavaObject* self, const std::string& cls) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + jboolean result = 
eios->control_center->reflect_instance_of(object, cls); + return nanobind::bool_(result); +} + +nanobind::object Python_Reflect_Bool(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + jboolean result = eios->control_center->reflect_boolean({object, cls, field, "Z"}); + return nanobind::bool_(result); +} + +nanobind::object Python_Reflect_Char(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + char result = eios->control_center->reflect_char({object, cls, field, "C"}); + return nanobind::cast(std::string(1, result)); +} + +nanobind::object Python_Reflect_Byte(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + std::uint8_t result = eios->control_center->reflect_byte({object, cls, field, "B"}); + return nanobind::int_(result); +} + +nanobind::object Python_Reflect_Short(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + std::int16_t result = eios->control_center->reflect_short({object, cls, field, "S"}); + return nanobind::int_(result); +} + +nanobind::object Python_Reflect_Int(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + std::int32_t result = eios->control_center->reflect_int({object, cls, field, "I"}); + return nanobind::int_(result); +} + +nanobind::object Python_Reflect_Long(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + std::int64_t result = eios->control_center->reflect_long({object, cls, field, "J"}); + return 
nanobind::int_(result); +} + +nanobind::object Python_Reflect_Float(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + float result = eios->control_center->reflect_float({object, cls, field, "F"}); + return nanobind::float_(result); +} + +nanobind::object Python_Reflect_Double(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + double result = eios->control_center->reflect_double({object, cls, field, "D"}); + return nanobind::float_(result); +} + +nanobind::object Python_Reflect_String(PyJavaObject* self, const std::string& cls, const std::string& field) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + std::string result = eios->control_center->reflect_string({object, cls, field, "Ljava/lang/String;"}); + return nanobind::cast(result); +} + +// MARK: - Array Functions + +nanobind::object Python_Reflect_Array(PyJavaObject* self, const std::string& cls, const std::string& field, const std::string& desc) noexcept +{ + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + + std::size_t array_size = 0; + jarray array = eios->control_center->reflect_array({object, cls, field, desc}, &array_size); + return python_create_array(self, array, array_size); +} + +void Python_JavaObject_Release_Object(PyJavaObject* self) noexcept +{ + if (self->eios && self->object) + { + EIOS* eios = self->eios->native_eios; + jobject object = self->object; + + eios->control_center->reflect_release_object(object); + + self->eios = nullptr; + self->object = nullptr; + } +} + +nanobind::object PyJavaObject_Str(PyJavaObject* self) +{ + auto eios = self->eios; + jobject object = self->object; + std::ostringstream stream; + + std::ios state(nullptr); + state.copyfmt(stream); + + stream << std::setfill('0') << std::uppercase << std::hex; 
+ stream << "JavaObject("; + stream << "0x" << reinterpret_cast(object); + stream << "): "; + stream.copyfmt(state); + + stream << "{"<<"\n"; + stream<< " eios: " << eios << "\n"; + stream<< " object: " << object << "\n"; + stream<< "}"; + + return nanobind::cast(stream.str()); +} + +void declare_python_java_object(nanobind::module_ &module) +{ + nanobind::class_(module, "JavaObject") + // .def(pybind11::init<>()) // Cannot instantiate from Python + .def_ro("eios", &PyJavaObject::eios) + .def_ro("object", &PyJavaObject::object) + .def("reflect_object", &Python_Reflect_Object, + nanobind::arg("cls"), nanobind::arg("field"), nanobind::arg("desc")) + .def("is_same_object", &Python_Reflect_IsSame_Object, + nanobind::arg("other")) + .def("instance_of", &Python_Reflect_InstanceOf, + nanobind::arg("cls")) + .def("reflect_bool", &Python_Reflect_Bool, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_char", &Python_Reflect_Char, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_byte", &Python_Reflect_Byte, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_short", &Python_Reflect_Short, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_int", &Python_Reflect_Int, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_long", &Python_Reflect_Long, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_float", &Python_Reflect_Float, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_double", &Python_Reflect_Double, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_string", &Python_Reflect_String, + nanobind::arg("cls"), nanobind::arg("field")) + .def("reflect_array", &Python_Reflect_Array, + nanobind::arg("cls"), nanobind::arg("field"), nanobind::arg("desc")) + .def("__del__", &Python_JavaObject_Release_Object) + .def("__str__", &PyJavaObject_Str); +} +#else int PyJavaObject_Clear(PyObject* object) { PyJavaObject* py_java_object = reinterpret_cast(object); @@ -22,15 +203,15 @@ 
int PyJavaObject_Clear(PyObject* object) void PyJavaObject_Dealloc(PyObject* object) { - PyJavaObject* py_java_object = reinterpret_cast(object); - if (py_java_object->eios && py_java_object->object) + EIOS* eios = PythonUnwrapEIOS(reinterpret_cast(object)->eios); + jobject java_object = reinterpret_cast(object)->object; + + if (eios && java_object) { - Reflect_Release_Object(PythonUnwrapEIOS(py_java_object->eios), py_java_object->object); + eios->control_center->reflect_release_object(java_object); } - // PyObject_GC_UnTrack(object); PyJavaObject_Clear(object); - //PyObject_Del(object); // NO GC! python->PyObject_Free(object); } @@ -293,4 +474,5 @@ PyObject* Python_JavaObject_Release_Object(PyJavaObject* self, PyObject* args[], (python->Py_INCREF)(python->Py_GetNone_Object()); return python->Py_GetNone_Object(); -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonJavaObject.hxx b/RemoteInput/Plugin/Python/PythonJavaObject.hxx index 217592c..1b6c108 100644 --- a/RemoteInput/Plugin/Python/PythonJavaObject.hxx +++ b/RemoteInput/Plugin/Python/PythonJavaObject.hxx @@ -6,13 +6,10 @@ #define REMOTEINPUT_PYTHONJAVAOBJECT_HXX #include "PythonCommon.hxx" -#include "JNI_Common.hxx" - -#ifdef __cplusplus -extern "C" -{ -#endif +#if defined(USE_PYBIND11) +void declare_python_java_object(nanobind::module_ &module); +#else PyObject* Python_Reflect_Object(PyJavaObject* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_Reflect_IsSame_Object(PyJavaObject* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_Reflect_InstanceOf(PyJavaObject* self, PyObject* args[], Py_ssize_t args_length) noexcept; @@ -27,9 +24,6 @@ PyObject* Python_Reflect_Double(PyJavaObject* self, PyObject* args[], Py_ssize_t PyObject* Python_Reflect_String(PyJavaObject* self, PyObject* args[], Py_ssize_t args_length) noexcept; PyObject* Python_Reflect_Array(PyJavaObject* self, PyObject* args[], Py_ssize_t 
args_length) noexcept; PyObject* Python_JavaObject_Release_Object(PyJavaObject* self, PyObject* args[], Py_ssize_t args_length) noexcept; - -#ifdef __cplusplus -} #endif #endif //REMOTEINPUT_PYTHONJAVAOBJECT_HXX diff --git a/RemoteInput/Plugin/Python/PythonMacros.hxx b/RemoteInput/Plugin/Python/PythonMacros.hxx new file mode 100644 index 0000000..7308532 --- /dev/null +++ b/RemoteInput/Plugin/Python/PythonMacros.hxx @@ -0,0 +1,53 @@ +// +// Created by Brandon on 2024-09-17. +// + +#ifndef REMOTEINPUT_PYTHONMACROS_HXX +#define REMOTEINPUT_PYTHONMACROS_HXX + +#if !defined(USE_PYBIND11) +// On Windows, Python is really badly implemented, +// so we must include `math.h` and not `cmath` to get the _hpyot symbol +#include +#include +#include +#include "object.h" + +#if defined(_WIN32) || defined(_WIN64) + #include "structmember.h" +#elif defined(__linux__) && (defined(__x86_64__) || defined(__i386__)) + #include "structmember.h" +#elif defined(__APPLE__) + #if __has_include() + #include /* Python.framework */ + #else + #include "structmember.h" + #endif +#elif defined(__aarch64__) || defined(__arm__) + #include "structmember.h" +#endif + +#if defined(_WIN32) || defined(_WIN64) + #include +#else + #include +#endif // defined + +#if defined(Py_LIMITED_API) + #ifndef HAS_PYTHON_VERSION + #define HAS_PYTHON_VERSION(MIN_VERSION) ((Py_LIMITED_API >= MIN_VERSION) && (PY_VERSION_HEX >= MIN_VERSION)) + #endif +#else + #ifndef HAS_PYTHON_VERSION + #define HAS_PYTHON_VERSION(MIN_VERSION) (PY_VERSION_HEX >= MIN_VERSION) + #endif +#endif + +#if HAS_PYTHON_VERSION(0x03070000) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x0080 // Python 3.7 incorrectly labels this as Py_LIMITED_API + #endif +#endif +#endif + +#endif //REMOTEINPUT_PYTHONMACROS_HXX diff --git a/RemoteInput/Plugin/Python/PythonPlugin.cxx b/RemoteInput/Plugin/Python/PythonPlugin.cxx index 68d46c7..a4f123b 100644 --- a/RemoteInput/Plugin/Python/PythonPlugin.cxx +++ b/RemoteInput/Plugin/Python/PythonPlugin.cxx @@ -7,9 
+7,51 @@ // #include "PythonPlugin.hxx" + +#include "PythonCommon.hxx" #include "PythonEIOS.hxx" #include "PythonJavaObject.hxx" -#include "PythonJavaList.hxx" +#include "PythonJavaArray.hxx" + +#if defined(USE_PYBIND11) +void PrintPythonVersionInfo() +{ + fprintf(stdout, "RUNNING WITH: %s\n", PY_VERSION); + fprintf(stdout, "COMPILED WITH: %d.%d.%d\n", PY_MAJOR_VERSION, PY_MINOR_VERSION, PY_MICRO_VERSION); + fflush(stdout); +} + +NB_MODULE(remote_input, module) { + #if defined(DEBUG) + PrintPythonVersionInfo(); + #endif + + // Register enums + nanobind::enum_(module, "ImageFormat") + .value("BGR_BGRA", ImageFormat::BGR_BGRA) + .value("BGRA", ImageFormat::BGRA) + .value("RGBA", ImageFormat::RGBA) + .value("ARGB", ImageFormat::ARGB) + .value("ABGR", ImageFormat::ABGR); + + nanobind::enum_(module, "ReflectType") + .value("BOOLEAN", ReflectionType::BOOL) + .value("CHAR", ReflectionType::CHAR) + .value("BYTE", ReflectionType::BYTE) + .value("SHORT", ReflectionType::SHORT) + .value("INT", ReflectionType::INT) + .value("LONG", ReflectionType::LONG) + .value("FLOAT", ReflectionType::FLOAT) + .value("DOUBLE", ReflectionType::DOUBLE) + .value("STRING", ReflectionType::STRING) + .value("OBJECT", ReflectionType::OBJECT) + .value("ARRAY", ReflectionType::ARRAY); + + declare_python_eios(module); + declare_python_java_object(module); + declare_python_java_array(module); +} +#else #include "Python.hxx" #include @@ -25,7 +67,6 @@ static struct PyMethodDef RemoteInputMethods[] = {nullptr} /* SENTINEL */ }; -#if PY_MAJOR_VERSION >= 3 static struct PyModuleDef RemoteInputModule = { PyModuleDef_HEAD_INIT, @@ -71,14 +112,14 @@ void PrintPythonVersionInfo() if (Py_Version > 0) { - fprintf(stdout, "Running with Python: %lu.%lu.%lu\n", Py_Version >> 24 & 0xFF, Py_Version >> 16 & 0xFF, Py_Version >> 8 & 0xFF); + fprintf(stdout, "RUNNING WITH: %lu.%lu.%lu\n", Py_Version >> 24 & 0xFF, Py_Version >> 16 & 0xFF, Py_Version >> 8 & 0xFF); } else { fprintf(stdout, "RUNNING WITH: %s\n", 
python->Py_GetVersion()); } - fprintf(stdout, "Compiled with Python: %d.%d.%d\n", PY_MAJOR_VERSION, PY_MINOR_VERSION, PY_MICRO_VERSION); + fprintf(stdout, "COMPILED WITH: %d.%d.%d\n", PY_MAJOR_VERSION, PY_MINOR_VERSION, PY_MICRO_VERSION); #endif fflush(stdout); @@ -179,46 +220,6 @@ PyObject* PyInit_remote_input() {"ARRAY", static_cast(ReflectionType::ARRAY)}, }); - fprintf(stderr, "LOADED!\n"); return module; } -#else -PyMODINIT_FUNC MODINIT(remote_input)() -{ - fprintf(stderr, "LOADED!\n"); - - PyObject* PyEIOS_Type = PyType_FromSpec(&PyEIOS_Spec); - if (PyType_Ready(reinterpret_cast(PyEIOS_Type)) < 0) - { - return nullptr; - } - - PyObject* PyJavaObject_Type = PyType_FromSpec(&PyJavaObject_Spec); - if (PyType_Ready(reinterpret_cast(PyJavaObject_Type)) < 0) - { - return nullptr; - } - - PyObject* module = PyModule_Create(&RemoteInputModule); - - Py_INCREF(PyEIOS_Type); - if (PyModule_AddObject(module, "EIOS", PyEIOS_Type)) - { - Py_DECREF(PyEIOS_Type); - return nullptr; - } - - Py_INCREF(PyJavaObject_Type); - if (PyModule_AddObject(module, "JavaObject", PyJavaObject_Type)) - { - Py_DECREF(PyJavaObject_Type); - return nullptr; - } - - return module; - - //PyModule_New("remote_input"); - PyObject* module = Py_InitModule("remote_input", RemoteInputMethods); -} -#endif - +#endif \ No newline at end of file diff --git a/RemoteInput/Plugin/Python/PythonPlugin.hxx b/RemoteInput/Plugin/Python/PythonPlugin.hxx index 338e114..32b814b 100644 --- a/RemoteInput/Plugin/Python/PythonPlugin.hxx +++ b/RemoteInput/Plugin/Python/PythonPlugin.hxx @@ -11,6 +11,8 @@ #include "Plugin.hxx" +#ifndef USE_PYBIND11 + //On Windows, Python is really badly implemented, so we must include `math.h` and not `cmath` to get the _hpyot symbol #include #include @@ -33,4 +35,6 @@ extern "C" EXPORT PyObject* PyInit_remote_input(); } +#endif + #endif //REMOTEINPUT_PYTHONPLUGIN_HXX diff --git a/RemoteInput/Thirdparty/nanobind/CMakeLists.txt b/RemoteInput/Thirdparty/nanobind/CMakeLists.txt new file mode 
100644 index 0000000..c47b471 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/CMakeLists.txt @@ -0,0 +1,170 @@ +cmake_minimum_required(VERSION 3.15...3.27) + +# --------------------------------------------------------------------------- +# Read the project version from nanobind.h +# --------------------------------------------------------------------------- + +file(STRINGS "include/nanobind/nanobind.h" _nanobind_h_version REGEX "^#define NB_VERSION_.*$") +string(REGEX MATCH "#define NB_VERSION_MAJOR ([0-9]+)" _ "${_nanobind_h_version}") +set(_major ${CMAKE_MATCH_1}) +string(REGEX MATCH "#define NB_VERSION_MINOR ([0-9]+)" _ "${_nanobind_h_version}") +set(_minor ${CMAKE_MATCH_1}) +string(REGEX MATCH "#define NB_VERSION_PATCH ([0-9]+)" _ "${_nanobind_h_version}") +set(_patch ${CMAKE_MATCH_1}) + +project(nanobind LANGUAGES NONE VERSION "${_major}.${_minor}.${_patch}") + +# --------------------------------------------------------------------------- +# Only build tests by default if this is the top-level CMake project +# --------------------------------------------------------------------------- + +if (CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME) + set(NB_MASTER_PROJECT ON) +else() + set(NB_MASTER_PROJECT OFF) +endif() + +option(NB_CREATE_INSTALL_RULES "Create installation rules" ${NB_MASTER_PROJECT}) +option(NB_USE_SUBMODULE_DEPS "Use the nanobind dependencies shipped as a git submodule of this repository" ON) + +option(NB_TEST "Compile nanobind tests?" ${NB_MASTER_PROJECT}) +option(NB_TEST_STABLE_ABI "Test the stable ABI interface?" OFF) +option(NB_TEST_SHARED_BUILD "Build a shared nanobind library for the test suite?" OFF) +option(NB_TEST_CUDA "Force the use of the CUDA/NVCC compiler for testing purposes" OFF) +option(NB_TEST_FREE_THREADED "Build free-threaded extensions for the test suite?" ON) + +if (NOT MSVC) + option(NB_TEST_SANITIZE "Build tests with address and undefined behavior sanitizers?" 
OFF) +endif() + +# --------------------------------------------------------------------------- +# Do a release build if nothing was specified +# --------------------------------------------------------------------------- + +if (NB_MASTER_PROJECT AND NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) + message(STATUS "nanobind: setting build type to 'Release' as none was specified.") + set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" + "MinSizeRel" "RelWithDebInfo") +endif() + +# --------------------------------------------------------------------------- +# Check whether all dependencies are present +# --------------------------------------------------------------------------- + +if (NB_USE_SUBMODULE_DEPS AND NOT IS_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/ext/robin_map/include") + message(FATAL_ERROR "The nanobind dependencies are missing! " + "You probably did not clone the project with --recursive. 
It is possible to recover " + "by invoking\n$ git submodule update --init --recursive") +endif() + +# --------------------------------------------------------------------------- +# Installation rules +# --------------------------------------------------------------------------- +if(NB_CREATE_INSTALL_RULES AND NOT CMAKE_SKIP_INSTALL_RULES) + # Silence warning in GNUInstallDirs due to no enabled languages + set(CMAKE_INSTALL_LIBDIR "") + include(GNUInstallDirs) + set(NB_INSTALL_DATADIR "nanobind" + CACHE PATH "Installation path for read-only architecture-independent nanobind data files") + + # Normally these would be configurable by the user, but we can't allow that + # because the lookup paths are hard-coded in 'cmake/nanobind-config.cmake' + set(NB_INSTALL_INC_DIR "${NB_INSTALL_DATADIR}/include") + set(NB_INSTALL_SRC_DIR "${NB_INSTALL_DATADIR}/src") + set(NB_INSTALL_EXT_DIR "${NB_INSTALL_DATADIR}/ext") + set(NB_INSTALL_MODULE_DIR "${NB_INSTALL_DATADIR}") + set(NB_INSTALL_CMAKE_DIR "${NB_INSTALL_DATADIR}/cmake") + + install( + DIRECTORY include/ + DESTINATION "${NB_INSTALL_INC_DIR}" + ) + + install( + DIRECTORY src/ + DESTINATION "${NB_INSTALL_SRC_DIR}" + PATTERN "*.py" EXCLUDE + ) + + install( + DIRECTORY src/ + DESTINATION "${NB_INSTALL_MODULE_DIR}" + FILES_MATCHING PATTERN "*\.py" + ) + + if(NB_USE_SUBMODULE_DEPS) + install( + FILES ext/robin_map/include/tsl/robin_map.h + ext/robin_map/include/tsl/robin_hash.h + ext/robin_map/include/tsl/robin_growth_policy.h + DESTINATION "${NB_INSTALL_EXT_DIR}/robin_map/include/tsl" + ) + endif() + + install( + FILES cmake/nanobind-config.cmake + cmake/darwin-ld-cpython.sym + cmake/darwin-ld-pypy.sym + DESTINATION "${NB_INSTALL_CMAKE_DIR}" + ) + + include(CMakePackageConfigHelpers) + write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake + COMPATIBILITY SameMajorVersion + ) + install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake + DESTINATION 
"${NB_INSTALL_CMAKE_DIR}" + ) +endif() + +# Return early to skip finding needless dependencies if the user only wishes to +# install nanobind +if (NB_MASTER_PROJECT AND NOT NB_TEST) + return() +else() + enable_language(CXX) +endif() + +# --------------------------------------------------------------------------- +# Compile with a few more compiler warnings turned on +# --------------------------------------------------------------------------- + +if (MSVC) + if (CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + else() + add_compile_options(/W4) + endif() +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + add_compile_options(-Wall -Wextra -Wno-unused-local-typedefs) +endif() + +# --------------------------------------------------------------------------- +# Find the Python interpreter and development libraries +# --------------------------------------------------------------------------- + +if (NOT TARGET Python::Module OR NOT TARGET Python::Interpreter) + set(Python_FIND_FRAMEWORK LAST) # Prefer Brew/Conda to Apple framework python + + if (CMAKE_VERSION VERSION_LESS 3.18) + set(NB_PYTHON_DEV_MODULE Development) + else() + set(NB_PYTHON_DEV_MODULE Development.Module) + endif() + + find_package(Python 3.8 + REQUIRED COMPONENTS Interpreter ${NB_PYTHON_DEV_MODULE} + OPTIONAL_COMPONENTS Development.SABIModule) +endif() + +# --------------------------------------------------------------------------- +# Include nanobind cmake functionality +# --------------------------------------------------------------------------- +include(cmake/nanobind-config.cmake) + +if (NB_TEST) + add_subdirectory(tests) +endif() diff --git a/RemoteInput/Thirdparty/nanobind/LICENSE b/RemoteInput/Thirdparty/nanobind/LICENSE new file mode 100644 index 0000000..3d3426d --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2022 Wenzel Jakob , All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/RemoteInput/Thirdparty/nanobind/README.md b/RemoteInput/Thirdparty/nanobind/README.md new file mode 100644 index 0000000..19abaf1 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/README.md @@ -0,0 +1,59 @@ +# nanobind: tiny and efficient C++/Python bindings + +[![Documentation](https://img.shields.io/readthedocs/nanobind/latest)](https://nanobind.readthedocs.io/en/latest/) +[![Continuous Integration](https://img.shields.io/github/actions/workflow/status/wjakob/nanobind/ci.yml?label=tests)](https://github.com/wjakob/nanobind/actions/workflows/ci.yml) +[![](https://img.shields.io/pypi/v/nanobind.svg?color=brightgreen)](https://pypi.org/pypi/nanobind/) +![](https://img.shields.io/pypi/l/nanobind.svg?color=brightgreen) +[![](https://img.shields.io/badge/Example-Link-brightgreen)](https://github.com/wjakob/nanobind_example) +[![](https://img.shields.io/badge/Changelog-Link-brightgreen)](https://nanobind.readthedocs.io/en/latest/changelog.html) + +

+<p align="center">
+<picture>
+  <source media="(prefers-color-scheme: dark)" srcset="https://rgl.s3.eu-central-1.amazonaws.com/media/uploads/wjakob/2023/03/28/nanobind_logo_dark_1.png">
+  <img alt="nanobind logo" src="https://rgl.s3.eu-central-1.amazonaws.com/media/uploads/wjakob/2023/03/27/nanobind_logo.jpg" width="60%">
+</picture>
+</p>

+ +_nanobind_ is a small binding library that exposes C++ types in Python and vice +versa. It is reminiscent of +[Boost.Python](https://www.boost.org/doc/libs/1_64_0/libs/python/doc/html) and +[pybind11](https://github.com/pybind/pybind11) and uses near-identical syntax. +In contrast to these existing tools, nanobind is more efficient: bindings +compile in a shorter amount of time, produce smaller binaries, and have better +runtime performance. + +More concretely, +[benchmarks](https://nanobind.readthedocs.io/en/latest/benchmark.html) show up +to **~4× faster** compile time, **~5× smaller** binaries, and **~10× lower** +runtime overheads compared to pybind11. nanobind also outperforms Cython in +important metrics (**3-12×** binary size reduction, **1.6-4×** compilation time +reduction, similar runtime performance). + +## Documentation + +Please see the following links for tutorial and reference documentation in +[HTML](https://nanobind.readthedocs.io/en/latest/) and +[PDF](https://nanobind.readthedocs.io/_/downloads/en/latest/pdf/) formats. + +## License and attribution + +All material in this repository is licensed under a three-clause [BSD +license](LICENSE). + +Please use the following BibTeX template to cite nanobind in scientific +discourse: + +```bibtex +@misc{nanobind, + author = {Wenzel Jakob}, + year = {2022}, + note = {https://github.com/wjakob/nanobind}, + title = {nanobind: tiny and efficient C++/Python bindings} +} +``` + +The nanobind logo was designed by [AndoTwin Studio](https://andotwinstudio.com) +(high-resolution download: +[light](https://rgl.s3.eu-central-1.amazonaws.com/media/uploads/wjakob/2023/03/27/nanobind_logo.jpg), +[dark](https://rgl.s3.eu-central-1.amazonaws.com/media/uploads/wjakob/2023/03/28/nanobind_logo_dark_1.png)). 
diff --git a/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols-pypy.py b/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols-pypy.py new file mode 100644 index 0000000..805490d --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols-pypy.py @@ -0,0 +1,28 @@ +from urllib.request import urlopen +import tarfile +import subprocess + +funcs: "set[str]" = set() + +files = [ + ('https://downloads.python.org/pypy/pypy3.9-v7.3.11-macos_arm64.tar.bz2', 'pypy3.9-v7.3.11-macos_arm64/bin/libpypy3.9-c.dylib') +] + +for f in files: + fs = urlopen(f[0]) + ft = tarfile.open(fileobj=fs, mode="r|bz2") + success = False + for member in ft: # move to the next file each loop + if member.name == f[1]: + ft.extract(member, path='tmp') + success = True + assert success + + out = subprocess.check_output(['nm', '-gjU', 'tmp/' + f[1]]) + for line in out.decode().split('\n'): + if line.startswith('_Py') or line.startswith('__Py'): + funcs.add(line) + +with open("darwin-ld-pypy.sym", "w") as f: + for func in sorted(list(funcs)): + f.write(f'-U _{func}\n') diff --git a/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols.py b/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols.py new file mode 100644 index 0000000..ad92da4 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/cmake/collect-symbols.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# +# This script collects a list of symbols that are considered to be part of the +# CPython API. The result is used to inform the macOS linker that it's fine for +# those symbols to be undefined when an extension module is linked, as they +# will be provided when the extension module is loaded into the interpreter. 
+ +from urllib.request import urlopen +import re + +funcs: "set[str]" = set() + +for ver in ['3.7', '3.8', '3.9']: + url = f'https://raw.githubusercontent.com/python/cpython/{ver}/PC/python3.def' + output = urlopen(url).read().decode('utf-8') + for match in re.findall(r" (.*)=.*", output): + funcs.add(match) + +for ver in ['3.10', '3.11', 'main']: + url = f'https://raw.githubusercontent.com/python/cpython/{ver}/PC/python3dll.c' + output = urlopen(url).read().decode('utf-8') + for match in re.findall(r"EXPORT_FUNC\((.*)\)", output): + funcs.add(match) + +funcs.remove('name') + +# Add a few more functions that nanobind uses and which aren't in the above list +funcs |= { + 'PyFrame_GetBack', + 'PyGILState_Check', + 'PyObject_LengthHint', + 'Py_CompileStringExFlags', + '_PyInterpreterState_Get', + '_PyObject_MakeTpCall', + '_PyObject_NextNotImplemented', + '_Py_CheckFunctionResult', + '_Py_RefTotal' +} + +with open("darwin-ld-cpython.sym", "w") as f: + for func in sorted(list(funcs)): + f.write(f'-U _{func}\n') diff --git a/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-cpython.sym b/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-cpython.sym new file mode 100644 index 0000000..850dd38 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-cpython.sym @@ -0,0 +1,925 @@ +-U _PyAIter_Check +-U _PyArg_Parse +-U _PyArg_ParseTuple +-U _PyArg_ParseTupleAndKeywords +-U _PyArg_UnpackTuple +-U _PyArg_VaParse +-U _PyArg_VaParseTupleAndKeywords +-U _PyArg_ValidateKeywordArguments +-U _PyBaseObject_Type +-U _PyBool_FromLong +-U _PyBool_Type +-U _PyBuffer_FillContiguousStrides +-U _PyBuffer_FillInfo +-U _PyBuffer_FromContiguous +-U _PyBuffer_GetPointer +-U _PyBuffer_IsContiguous +-U _PyBuffer_Release +-U _PyBuffer_SizeFromFormat +-U _PyBuffer_ToContiguous +-U _PyByteArrayIter_Type +-U _PyByteArray_AsString +-U _PyByteArray_Concat +-U _PyByteArray_FromObject +-U _PyByteArray_FromStringAndSize +-U _PyByteArray_Resize +-U _PyByteArray_Size +-U _PyByteArray_Type +-U 
_PyBytesIter_Type +-U _PyBytes_AsString +-U _PyBytes_AsStringAndSize +-U _PyBytes_Concat +-U _PyBytes_ConcatAndDel +-U _PyBytes_DecodeEscape +-U _PyBytes_FromFormat +-U _PyBytes_FromFormatV +-U _PyBytes_FromObject +-U _PyBytes_FromString +-U _PyBytes_FromStringAndSize +-U _PyBytes_Repr +-U _PyBytes_Size +-U _PyBytes_Type +-U _PyCFunction_Call +-U _PyCFunction_ClearFreeList +-U _PyCFunction_GetFlags +-U _PyCFunction_GetFunction +-U _PyCFunction_GetSelf +-U _PyCFunction_New +-U _PyCFunction_NewEx +-U _PyCFunction_Type +-U _PyCMethod_New +-U _PyCallIter_New +-U _PyCallIter_Type +-U _PyCallable_Check +-U _PyCapsule_GetContext +-U _PyCapsule_GetDestructor +-U _PyCapsule_GetName +-U _PyCapsule_GetPointer +-U _PyCapsule_Import +-U _PyCapsule_IsValid +-U _PyCapsule_New +-U _PyCapsule_SetContext +-U _PyCapsule_SetDestructor +-U _PyCapsule_SetName +-U _PyCapsule_SetPointer +-U _PyCapsule_Type +-U _PyClassMethodDescr_Type +-U _PyCodec_BackslashReplaceErrors +-U _PyCodec_Decode +-U _PyCodec_Decoder +-U _PyCodec_Encode +-U _PyCodec_Encoder +-U _PyCodec_IgnoreErrors +-U _PyCodec_IncrementalDecoder +-U _PyCodec_IncrementalEncoder +-U _PyCodec_KnownEncoding +-U _PyCodec_LookupError +-U _PyCodec_NameReplaceErrors +-U _PyCodec_Register +-U _PyCodec_RegisterError +-U _PyCodec_ReplaceErrors +-U _PyCodec_StreamReader +-U _PyCodec_StreamWriter +-U _PyCodec_StrictErrors +-U _PyCodec_Unregister +-U _PyCodec_XMLCharRefReplaceErrors +-U _PyComplex_AsCComplex +-U _PyComplex_FromCComplex +-U _PyComplex_FromDoubles +-U _PyComplex_ImagAsDouble +-U _PyComplex_RealAsDouble +-U _PyComplex_Type +-U _PyDescr_NewClassMethod +-U _PyDescr_NewGetSet +-U _PyDescr_NewMember +-U _PyDescr_NewMethod +-U _PyDictItems_Type +-U _PyDictIterItem_Type +-U _PyDictIterKey_Type +-U _PyDictIterValue_Type +-U _PyDictKeys_Type +-U _PyDictProxy_New +-U _PyDictProxy_Type +-U _PyDictValues_Type +-U _PyDict_Clear +-U _PyDict_Contains +-U _PyDict_Copy +-U _PyDict_DelItem +-U _PyDict_DelItemString +-U _PyDict_GetItem +-U 
_PyDict_GetItemString +-U _PyDict_GetItemWithError +-U _PyDict_Items +-U _PyDict_Keys +-U _PyDict_Merge +-U _PyDict_MergeFromSeq2 +-U _PyDict_New +-U _PyDict_Next +-U _PyDict_SetItem +-U _PyDict_SetItemString +-U _PyDict_Size +-U _PyDict_Type +-U _PyDict_Update +-U _PyDict_Values +-U _PyEllipsis_Type +-U _PyEnum_Type +-U _PyErr_BadArgument +-U _PyErr_BadInternalCall +-U _PyErr_CheckSignals +-U _PyErr_Clear +-U _PyErr_Display +-U _PyErr_ExceptionMatches +-U _PyErr_Fetch +-U _PyErr_Format +-U _PyErr_FormatV +-U _PyErr_GetExcInfo +-U _PyErr_GetHandledException +-U _PyErr_GetRaisedException +-U _PyErr_GivenExceptionMatches +-U _PyErr_NewException +-U _PyErr_NewExceptionWithDoc +-U _PyErr_NoMemory +-U _PyErr_NormalizeException +-U _PyErr_Occurred +-U _PyErr_Print +-U _PyErr_PrintEx +-U _PyErr_ProgramText +-U _PyErr_ResourceWarning +-U _PyErr_Restore +-U _PyErr_SetExcFromWindowsErr +-U _PyErr_SetExcFromWindowsErrWithFilename +-U _PyErr_SetExcFromWindowsErrWithFilenameObject +-U _PyErr_SetExcFromWindowsErrWithFilenameObjects +-U _PyErr_SetExcInfo +-U _PyErr_SetFromErrno +-U _PyErr_SetFromErrnoWithFilename +-U _PyErr_SetFromErrnoWithFilenameObject +-U _PyErr_SetFromErrnoWithFilenameObjects +-U _PyErr_SetFromWindowsErr +-U _PyErr_SetFromWindowsErrWithFilename +-U _PyErr_SetHandledException +-U _PyErr_SetImportError +-U _PyErr_SetImportErrorSubclass +-U _PyErr_SetInterrupt +-U _PyErr_SetInterruptEx +-U _PyErr_SetNone +-U _PyErr_SetObject +-U _PyErr_SetRaisedException +-U _PyErr_SetString +-U _PyErr_SyntaxLocation +-U _PyErr_SyntaxLocationEx +-U _PyErr_WarnEx +-U _PyErr_WarnExplicit +-U _PyErr_WarnFormat +-U _PyErr_WriteUnraisable +-U _PyEval_AcquireLock +-U _PyEval_AcquireThread +-U _PyEval_CallFunction +-U _PyEval_CallMethod +-U _PyEval_CallObjectWithKeywords +-U _PyEval_EvalCode +-U _PyEval_EvalCodeEx +-U _PyEval_EvalFrame +-U _PyEval_EvalFrameEx +-U _PyEval_GetBuiltins +-U _PyEval_GetCallStats +-U _PyEval_GetFrame +-U _PyEval_GetFuncDesc +-U _PyEval_GetFuncName +-U 
_PyEval_GetGlobals +-U _PyEval_GetLocals +-U _PyEval_InitThreads +-U _PyEval_ReInitThreads +-U _PyEval_ReleaseLock +-U _PyEval_ReleaseThread +-U _PyEval_RestoreThread +-U _PyEval_SaveThread +-U _PyEval_ThreadsInitialized +-U _PyExc_ArithmeticError +-U _PyExc_AssertionError +-U _PyExc_AttributeError +-U _PyExc_BaseException +-U _PyExc_BlockingIOError +-U _PyExc_BrokenPipeError +-U _PyExc_BufferError +-U _PyExc_BytesWarning +-U _PyExc_ChildProcessError +-U _PyExc_ConnectionAbortedError +-U _PyExc_ConnectionError +-U _PyExc_ConnectionRefusedError +-U _PyExc_ConnectionResetError +-U _PyExc_DeprecationWarning +-U _PyExc_EOFError +-U _PyExc_EnvironmentError +-U _PyExc_Exception +-U _PyExc_FileExistsError +-U _PyExc_FileNotFoundError +-U _PyExc_FloatingPointError +-U _PyExc_FutureWarning +-U _PyExc_GeneratorExit +-U _PyExc_IOError +-U _PyExc_ImportError +-U _PyExc_ImportWarning +-U _PyExc_IndentationError +-U _PyExc_IndexError +-U _PyExc_InterruptedError +-U _PyExc_IsADirectoryError +-U _PyExc_KeyError +-U _PyExc_KeyboardInterrupt +-U _PyExc_LookupError +-U _PyExc_MemoryError +-U _PyExc_ModuleNotFoundError +-U _PyExc_NameError +-U _PyExc_NotADirectoryError +-U _PyExc_NotImplementedError +-U _PyExc_OSError +-U _PyExc_OverflowError +-U _PyExc_PendingDeprecationWarning +-U _PyExc_PermissionError +-U _PyExc_ProcessLookupError +-U _PyExc_RecursionError +-U _PyExc_ReferenceError +-U _PyExc_ResourceWarning +-U _PyExc_RuntimeError +-U _PyExc_RuntimeWarning +-U _PyExc_StopAsyncIteration +-U _PyExc_StopIteration +-U _PyExc_SyntaxError +-U _PyExc_SyntaxWarning +-U _PyExc_SystemError +-U _PyExc_SystemExit +-U _PyExc_TabError +-U _PyExc_TimeoutError +-U _PyExc_TypeError +-U _PyExc_UnboundLocalError +-U _PyExc_UnicodeDecodeError +-U _PyExc_UnicodeEncodeError +-U _PyExc_UnicodeError +-U _PyExc_UnicodeTranslateError +-U _PyExc_UnicodeWarning +-U _PyExc_UserWarning +-U _PyExc_ValueError +-U _PyExc_Warning +-U _PyExc_WindowsError +-U _PyExc_ZeroDivisionError +-U _PyExceptionClass_Name +-U 
_PyException_GetCause +-U _PyException_GetContext +-U _PyException_GetTraceback +-U _PyException_SetCause +-U _PyException_SetContext +-U _PyException_SetTraceback +-U _PyFile_FromFd +-U _PyFile_GetLine +-U _PyFile_WriteObject +-U _PyFile_WriteString +-U _PyFilter_Type +-U _PyFloat_AsDouble +-U _PyFloat_FromDouble +-U _PyFloat_FromString +-U _PyFloat_GetInfo +-U _PyFloat_GetMax +-U _PyFloat_GetMin +-U _PyFloat_Type +-U _PyFrame_GetBack +-U _PyFrame_GetCode +-U _PyFrame_GetLineNumber +-U _PyFrozenSet_New +-U _PyFrozenSet_Type +-U _PyGC_Collect +-U _PyGC_Disable +-U _PyGC_Enable +-U _PyGC_IsEnabled +-U _PyGILState_Check +-U _PyGILState_Ensure +-U _PyGILState_GetThisThreadState +-U _PyGILState_Release +-U _PyGetSetDescr_Type +-U _PyImport_AddModule +-U _PyImport_AddModuleObject +-U _PyImport_AppendInittab +-U _PyImport_Cleanup +-U _PyImport_ExecCodeModule +-U _PyImport_ExecCodeModuleEx +-U _PyImport_ExecCodeModuleObject +-U _PyImport_ExecCodeModuleWithPathnames +-U _PyImport_GetImporter +-U _PyImport_GetMagicNumber +-U _PyImport_GetMagicTag +-U _PyImport_GetModule +-U _PyImport_GetModuleDict +-U _PyImport_Import +-U _PyImport_ImportFrozenModule +-U _PyImport_ImportFrozenModuleObject +-U _PyImport_ImportModule +-U _PyImport_ImportModuleLevel +-U _PyImport_ImportModuleLevelObject +-U _PyImport_ImportModuleNoBlock +-U _PyImport_ReloadModule +-U _PyIndex_Check +-U _PyInterpreterState_Clear +-U _PyInterpreterState_Delete +-U _PyInterpreterState_Get +-U _PyInterpreterState_GetDict +-U _PyInterpreterState_GetID +-U _PyInterpreterState_New +-U _PyIter_Check +-U _PyIter_Next +-U _PyIter_Send +-U _PyListIter_Type +-U _PyListRevIter_Type +-U _PyList_Append +-U _PyList_AsTuple +-U _PyList_GetItem +-U _PyList_GetSlice +-U _PyList_Insert +-U _PyList_New +-U _PyList_Reverse +-U _PyList_SetItem +-U _PyList_SetSlice +-U _PyList_Size +-U _PyList_Sort +-U _PyList_Type +-U _PyLongRangeIter_Type +-U _PyLong_AsDouble +-U _PyLong_AsLong +-U _PyLong_AsLongAndOverflow +-U _PyLong_AsLongLong 
+-U _PyLong_AsLongLongAndOverflow +-U _PyLong_AsSize_t +-U _PyLong_AsSsize_t +-U _PyLong_AsUnsignedLong +-U _PyLong_AsUnsignedLongLong +-U _PyLong_AsUnsignedLongLongMask +-U _PyLong_AsUnsignedLongMask +-U _PyLong_AsVoidPtr +-U _PyLong_FromDouble +-U _PyLong_FromLong +-U _PyLong_FromLongLong +-U _PyLong_FromSize_t +-U _PyLong_FromSsize_t +-U _PyLong_FromString +-U _PyLong_FromUnsignedLong +-U _PyLong_FromUnsignedLongLong +-U _PyLong_FromVoidPtr +-U _PyLong_GetInfo +-U _PyLong_Type +-U _PyMap_Type +-U _PyMapping_Check +-U _PyMapping_GetItemString +-U _PyMapping_HasKey +-U _PyMapping_HasKeyString +-U _PyMapping_Items +-U _PyMapping_Keys +-U _PyMapping_Length +-U _PyMapping_SetItemString +-U _PyMapping_Size +-U _PyMapping_Values +-U _PyMarshal_ReadObjectFromString +-U _PyMarshal_WriteObjectToString +-U _PyMem_Calloc +-U _PyMem_Free +-U _PyMem_Malloc +-U _PyMem_Realloc +-U _PyMemberDescr_Type +-U _PyMember_GetOne +-U _PyMember_SetOne +-U _PyMemoryView_FromBuffer +-U _PyMemoryView_FromMemory +-U _PyMemoryView_FromObject +-U _PyMemoryView_GetContiguous +-U _PyMemoryView_Type +-U _PyMethodDescr_Type +-U _PyModuleDef_Init +-U _PyModuleDef_Type +-U _PyModule_AddFunctions +-U _PyModule_AddIntConstant +-U _PyModule_AddObject +-U _PyModule_AddObjectRef +-U _PyModule_AddStringConstant +-U _PyModule_AddType +-U _PyModule_Create2 +-U _PyModule_ExecDef +-U _PyModule_FromDefAndSpec2 +-U _PyModule_GetDef +-U _PyModule_GetDict +-U _PyModule_GetFilename +-U _PyModule_GetFilenameObject +-U _PyModule_GetName +-U _PyModule_GetNameObject +-U _PyModule_GetState +-U _PyModule_New +-U _PyModule_NewObject +-U _PyModule_SetDocString +-U _PyModule_Type +-U _PyNullImporter_Type +-U _PyNumber_Absolute +-U _PyNumber_Add +-U _PyNumber_And +-U _PyNumber_AsSsize_t +-U _PyNumber_Check +-U _PyNumber_Divmod +-U _PyNumber_Float +-U _PyNumber_FloorDivide +-U _PyNumber_InPlaceAdd +-U _PyNumber_InPlaceAnd +-U _PyNumber_InPlaceFloorDivide +-U _PyNumber_InPlaceLshift +-U _PyNumber_InPlaceMatrixMultiply +-U 
_PyNumber_InPlaceMultiply +-U _PyNumber_InPlaceOr +-U _PyNumber_InPlacePower +-U _PyNumber_InPlaceRemainder +-U _PyNumber_InPlaceRshift +-U _PyNumber_InPlaceSubtract +-U _PyNumber_InPlaceTrueDivide +-U _PyNumber_InPlaceXor +-U _PyNumber_Index +-U _PyNumber_Invert +-U _PyNumber_Long +-U _PyNumber_Lshift +-U _PyNumber_MatrixMultiply +-U _PyNumber_Multiply +-U _PyNumber_Negative +-U _PyNumber_Or +-U _PyNumber_Positive +-U _PyNumber_Power +-U _PyNumber_Remainder +-U _PyNumber_Rshift +-U _PyNumber_Subtract +-U _PyNumber_ToBase +-U _PyNumber_TrueDivide +-U _PyNumber_Xor +-U _PyODictItems_Type +-U _PyODictIter_Type +-U _PyODictKeys_Type +-U _PyODictValues_Type +-U _PyODict_DelItem +-U _PyODict_New +-U _PyODict_SetItem +-U _PyODict_Type +-U _PyOS_AfterFork +-U _PyOS_CheckStack +-U _PyOS_FSPath +-U _PyOS_InitInterrupts +-U _PyOS_InputHook +-U _PyOS_InterruptOccurred +-U _PyOS_ReadlineFunctionPointer +-U _PyOS_double_to_string +-U _PyOS_getsig +-U _PyOS_mystricmp +-U _PyOS_mystrnicmp +-U _PyOS_setsig +-U _PyOS_snprintf +-U _PyOS_string_to_double +-U _PyOS_strtol +-U _PyOS_strtoul +-U _PyOS_vsnprintf +-U _PyObject_ASCII +-U _PyObject_AsCharBuffer +-U _PyObject_AsFileDescriptor +-U _PyObject_AsReadBuffer +-U _PyObject_AsWriteBuffer +-U _PyObject_Bytes +-U _PyObject_Call +-U _PyObject_CallFunction +-U _PyObject_CallFunctionObjArgs +-U _PyObject_CallMethod +-U _PyObject_CallMethodObjArgs +-U _PyObject_CallNoArgs +-U _PyObject_CallObject +-U _PyObject_Calloc +-U _PyObject_CheckBuffer +-U _PyObject_CheckReadBuffer +-U _PyObject_ClearWeakRefs +-U _PyObject_CopyData +-U _PyObject_DelAttr +-U _PyObject_DelAttrString +-U _PyObject_DelItem +-U _PyObject_DelItemString +-U _PyObject_Dir +-U _PyObject_Format +-U _PyObject_Free +-U _PyObject_GetTypeData +-U _PyObject_GC_Del +-U _PyObject_GC_IsFinalized +-U _PyObject_GC_IsTracked +-U _PyObject_GC_Track +-U _PyObject_GC_UnTrack +-U _PyObject_GenericGetAttr +-U _PyObject_GenericGetDict +-U _PyObject_GenericSetAttr +-U _PyObject_GenericSetDict 
+-U _PyObject_GetAIter +-U _PyObject_GetAttr +-U _PyObject_GetAttrString +-U _PyObject_GetBuffer +-U _PyObject_GetItem +-U _PyObject_GetIter +-U _PyObject_HasAttr +-U _PyObject_HasAttrString +-U _PyObject_Hash +-U _PyObject_HashNotImplemented +-U _PyObject_Init +-U _PyObject_InitVar +-U _PyObject_IsInstance +-U _PyObject_IsSubclass +-U _PyObject_IsTrue +-U _PyObject_Length +-U _PyObject_LengthHint +-U _PyObject_Malloc +-U _PyObject_Not +-U _PyObject_Realloc +-U _PyObject_Repr +-U _PyObject_RichCompare +-U _PyObject_RichCompareBool +-U _PyObject_SelfIter +-U _PyObject_SetAttr +-U _PyObject_SetAttrString +-U _PyObject_SetItem +-U _PyObject_Size +-U _PyObject_Str +-U _PyObject_Type +-U _PyObject_Vectorcall +-U _PyObject_VectorcallMethod +-U _PyParser_SimpleParseFileFlags +-U _PyParser_SimpleParseStringFlags +-U _PyParser_SimpleParseStringFlagsFilename +-U _PyProperty_Type +-U _PyRangeIter_Type +-U _PyRange_Type +-U _PyReversed_Type +-U _PySeqIter_New +-U _PySeqIter_Type +-U _PySequence_Check +-U _PySequence_Concat +-U _PySequence_Contains +-U _PySequence_Count +-U _PySequence_DelItem +-U _PySequence_DelSlice +-U _PySequence_Fast +-U _PySequence_GetItem +-U _PySequence_GetSlice +-U _PySequence_In +-U _PySequence_InPlaceConcat +-U _PySequence_InPlaceRepeat +-U _PySequence_Index +-U _PySequence_Length +-U _PySequence_List +-U _PySequence_Repeat +-U _PySequence_SetItem +-U _PySequence_SetSlice +-U _PySequence_Size +-U _PySequence_Tuple +-U _PySetIter_Type +-U _PySet_Add +-U _PySet_Clear +-U _PySet_Contains +-U _PySet_Discard +-U _PySet_New +-U _PySet_Pop +-U _PySet_Size +-U _PySet_Type +-U _PySlice_AdjustIndices +-U _PySlice_GetIndices +-U _PySlice_GetIndicesEx +-U _PySlice_New +-U _PySlice_Type +-U _PySlice_Unpack +-U _PySortWrapper_Type +-U _PyState_AddModule +-U _PyState_FindModule +-U _PyState_RemoveModule +-U _PyStructSequence_GetItem +-U _PyStructSequence_New +-U _PyStructSequence_NewType +-U _PyStructSequence_SetItem +-U _PySuper_Type +-U _PySys_AddWarnOption +-U 
_PySys_AddWarnOptionUnicode +-U _PySys_AddXOption +-U _PySys_FormatStderr +-U _PySys_FormatStdout +-U _PySys_GetObject +-U _PySys_GetXOptions +-U _PySys_HasWarnOptions +-U _PySys_ResetWarnOptions +-U _PySys_SetArgv +-U _PySys_SetArgvEx +-U _PySys_SetObject +-U _PySys_SetPath +-U _PySys_WriteStderr +-U _PySys_WriteStdout +-U _PyThreadState_Clear +-U _PyThreadState_Delete +-U _PyThreadState_DeleteCurrent +-U _PyThreadState_Get +-U _PyThreadState_GetDict +-U _PyThreadState_GetFrame +-U _PyThreadState_GetID +-U _PyThreadState_GetInterpreter +-U _PyThreadState_New +-U _PyThreadState_SetAsyncExc +-U _PyThreadState_Swap +-U _PyThread_GetInfo +-U _PyThread_ReInitTLS +-U _PyThread_acquire_lock +-U _PyThread_acquire_lock_timed +-U _PyThread_allocate_lock +-U _PyThread_create_key +-U _PyThread_delete_key +-U _PyThread_delete_key_value +-U _PyThread_exit_thread +-U _PyThread_free_lock +-U _PyThread_get_key_value +-U _PyThread_get_stacksize +-U _PyThread_get_thread_ident +-U _PyThread_get_thread_native_id +-U _PyThread_init_thread +-U _PyThread_release_lock +-U _PyThread_set_key_value +-U _PyThread_set_stacksize +-U _PyThread_start_new_thread +-U _PyThread_tss_alloc +-U _PyThread_tss_create +-U _PyThread_tss_delete +-U _PyThread_tss_free +-U _PyThread_tss_get +-U _PyThread_tss_is_created +-U _PyThread_tss_set +-U _PyTraceBack_Here +-U _PyTraceBack_Print +-U _PyTraceBack_Type +-U _PyTupleIter_Type +-U _PyTuple_ClearFreeList +-U _PyTuple_GetItem +-U _PyTuple_GetSlice +-U _PyTuple_New +-U _PyTuple_Pack +-U _PyTuple_SetItem +-U _PyTuple_Size +-U _PyTuple_Type +-U _PyType_ClearCache +-U _PyType_FromMetaclass +-U _PyType_FromModuleAndSpec +-U _PyType_FromSpec +-U _PyType_FromSpecWithBases +-U _PyType_GenericAlloc +-U _PyType_GenericNew +-U _PyType_GetFlags +-U _PyType_GetModule +-U _PyType_GetModuleState +-U _PyType_GetName +-U _PyType_GetQualName +-U _PyType_GetSlot +-U _PyType_IsSubtype +-U _PyType_Modified +-U _PyType_Ready +-U _PyType_Type +-U _PyType_GetTypeDataSize +-U 
_PyUnicodeDecodeError_Create +-U _PyUnicodeDecodeError_GetEncoding +-U _PyUnicodeDecodeError_GetEnd +-U _PyUnicodeDecodeError_GetObject +-U _PyUnicodeDecodeError_GetReason +-U _PyUnicodeDecodeError_GetStart +-U _PyUnicodeDecodeError_SetEnd +-U _PyUnicodeDecodeError_SetReason +-U _PyUnicodeDecodeError_SetStart +-U _PyUnicodeEncodeError_GetEncoding +-U _PyUnicodeEncodeError_GetEnd +-U _PyUnicodeEncodeError_GetObject +-U _PyUnicodeEncodeError_GetReason +-U _PyUnicodeEncodeError_GetStart +-U _PyUnicodeEncodeError_SetEnd +-U _PyUnicodeEncodeError_SetReason +-U _PyUnicodeEncodeError_SetStart +-U _PyUnicodeIter_Type +-U _PyUnicodeTranslateError_GetEnd +-U _PyUnicodeTranslateError_GetObject +-U _PyUnicodeTranslateError_GetReason +-U _PyUnicodeTranslateError_GetStart +-U _PyUnicodeTranslateError_SetEnd +-U _PyUnicodeTranslateError_SetReason +-U _PyUnicodeTranslateError_SetStart +-U _PyUnicode_Append +-U _PyUnicode_AppendAndDel +-U _PyUnicode_AsASCIIString +-U _PyUnicode_AsCharmapString +-U _PyUnicode_AsDecodedObject +-U _PyUnicode_AsDecodedUnicode +-U _PyUnicode_AsEncodedObject +-U _PyUnicode_AsEncodedString +-U _PyUnicode_AsEncodedUnicode +-U _PyUnicode_AsLatin1String +-U _PyUnicode_AsMBCSString +-U _PyUnicode_AsRawUnicodeEscapeString +-U _PyUnicode_AsUCS4 +-U _PyUnicode_AsUCS4Copy +-U _PyUnicode_AsUTF16String +-U _PyUnicode_AsUTF32String +-U _PyUnicode_AsUTF8AndSize +-U _PyUnicode_AsUTF8String +-U _PyUnicode_AsUnicodeEscapeString +-U _PyUnicode_AsWideChar +-U _PyUnicode_AsWideCharString +-U _PyUnicode_BuildEncodingMap +-U _PyUnicode_ClearFreeList +-U _PyUnicode_Compare +-U _PyUnicode_CompareWithASCIIString +-U _PyUnicode_Concat +-U _PyUnicode_Contains +-U _PyUnicode_Count +-U _PyUnicode_Decode +-U _PyUnicode_DecodeASCII +-U _PyUnicode_DecodeCharmap +-U _PyUnicode_DecodeCodePageStateful +-U _PyUnicode_DecodeFSDefault +-U _PyUnicode_DecodeFSDefaultAndSize +-U _PyUnicode_DecodeLatin1 +-U _PyUnicode_DecodeLocale +-U _PyUnicode_DecodeLocaleAndSize +-U _PyUnicode_DecodeMBCS +-U 
_PyUnicode_DecodeMBCSStateful +-U _PyUnicode_DecodeRawUnicodeEscape +-U _PyUnicode_DecodeUTF16 +-U _PyUnicode_DecodeUTF16Stateful +-U _PyUnicode_DecodeUTF32 +-U _PyUnicode_DecodeUTF32Stateful +-U _PyUnicode_DecodeUTF7 +-U _PyUnicode_DecodeUTF7Stateful +-U _PyUnicode_DecodeUTF8 +-U _PyUnicode_DecodeUTF8Stateful +-U _PyUnicode_DecodeUnicodeEscape +-U _PyUnicode_EncodeCodePage +-U _PyUnicode_EncodeFSDefault +-U _PyUnicode_EncodeLocale +-U _PyUnicode_FSConverter +-U _PyUnicode_FSDecoder +-U _PyUnicode_Find +-U _PyUnicode_FindChar +-U _PyUnicode_Format +-U _PyUnicode_FromEncodedObject +-U _PyUnicode_FromFormat +-U _PyUnicode_FromFormatV +-U _PyUnicode_FromObject +-U _PyUnicode_FromOrdinal +-U _PyUnicode_FromString +-U _PyUnicode_FromStringAndSize +-U _PyUnicode_FromWideChar +-U _PyUnicode_GetDefaultEncoding +-U _PyUnicode_GetLength +-U _PyUnicode_GetSize +-U _PyUnicode_InternFromString +-U _PyUnicode_InternImmortal +-U _PyUnicode_InternInPlace +-U _PyUnicode_IsIdentifier +-U _PyUnicode_Join +-U _PyUnicode_Partition +-U _PyUnicode_RPartition +-U _PyUnicode_RSplit +-U _PyUnicode_ReadChar +-U _PyUnicode_Replace +-U _PyUnicode_Resize +-U _PyUnicode_RichCompare +-U _PyUnicode_Split +-U _PyUnicode_Splitlines +-U _PyUnicode_Substring +-U _PyUnicode_Tailmatch +-U _PyUnicode_Translate +-U _PyUnicode_Type +-U _PyUnicode_WriteChar +-U _PyVectorcall_Call +-U _PyVectorcall_NARGS +-U _PyWeakref_GetObject +-U _PyWeakref_NewProxy +-U _PyWeakref_NewRef +-U _PyWrapperDescr_Type +-U _PyWrapper_New +-U _PyZip_Type +-U _Py_AddPendingCall +-U _Py_AtExit +-U _Py_BuildValue +-U _Py_BytesMain +-U _Py_CompileString +-U _Py_CompileStringExFlags +-U _Py_DecRef +-U _Py_DecodeLocale +-U _Py_EncodeLocale +-U _Py_EndInterpreter +-U _Py_EnterRecursiveCall +-U _Py_Exit +-U _Py_FatalError +-U _Py_FileSystemDefaultEncodeErrors +-U _Py_FileSystemDefaultEncoding +-U _Py_Finalize +-U _Py_FinalizeEx +-U _Py_GenericAlias +-U _Py_GenericAliasType +-U _Py_GetArgcArgv +-U _Py_GetBuildInfo +-U _Py_GetCompiler +-U 
_Py_GetCopyright +-U _Py_GetExecPrefix +-U _Py_GetPath +-U _Py_GetPlatform +-U _Py_GetPrefix +-U _Py_GetProgramFullPath +-U _Py_GetProgramName +-U _Py_GetPythonHome +-U _Py_GetRecursionLimit +-U _Py_GetVersion +-U _Py_HasFileSystemDefaultEncoding +-U _Py_IncRef +-U _Py_Initialize +-U _Py_InitializeEx +-U _Py_Is +-U _Py_IsFalse +-U _Py_IsFinalizing +-U _Py_IsInitialized +-U _Py_IsNone +-U _Py_IsTrue +-U _Py_LeaveRecursiveCall +-U _Py_Main +-U _Py_MakePendingCalls +-U _Py_NewInterpreter +-U _Py_NewRef +-U _Py_ReprEnter +-U _Py_ReprLeave +-U _Py_SetPath +-U _Py_SetProgramName +-U _Py_SetPythonHome +-U _Py_SetRecursionLimit +-U _Py_SymtableString +-U _Py_UTF8Mode +-U _Py_VaBuildValue +-U _Py_XNewRef +-U __PyArg_ParseTupleAndKeywords_SizeT +-U __PyArg_ParseTuple_SizeT +-U __PyArg_Parse_SizeT +-U __PyArg_VaParseTupleAndKeywords_SizeT +-U __PyArg_VaParse_SizeT +-U __PyErr_BadInternalCall +-U __PyInterpreterState_Get +-U __PyObject_CallFunction_SizeT +-U __PyObject_CallMethod_SizeT +-U __PyObject_GC_Malloc +-U __PyObject_GC_New +-U __PyObject_GC_NewVar +-U __PyObject_GC_Resize +-U __PyObject_MakeTpCall +-U __PyObject_New +-U __PyObject_NewVar +-U __PyObject_NextNotImplemented +-U __PyState_AddModule +-U __PyThreadState_Init +-U __PyThreadState_Prealloc +-U __PyTrash_delete_later +-U __PyTrash_delete_nesting +-U __PyTrash_deposit_object +-U __PyTrash_destroy_chain +-U __PyTrash_thread_deposit_object +-U __PyTrash_thread_destroy_chain +-U __PyWeakref_CallableProxyType +-U __PyWeakref_ProxyType +-U __PyWeakref_RefType +-U __Py_IsFinalizing +-U __Py_BuildValue_SizeT +-U __Py_CheckFunctionResult +-U __Py_CheckRecursionLimit +-U __Py_CheckRecursiveCall +-U __Py_Dealloc +-U __Py_DecRef +-U __Py_EllipsisObject +-U __Py_FalseStruct +-U __Py_IncRef +-U __Py_NegativeRefcount +-U __Py_NoneStruct +-U __Py_NotImplementedStruct +-U __Py_RefTotal +-U __Py_SwappedOp +-U __Py_TrueStruct +-U __Py_VaBuildValue_SizeT +-U _Py_Version +-U __Py_MergeZeroLocalRefcount +-U __Py_DecRefShared +-U 
__Py_DecRefSharedDebug +-U __Py_DECREF_DecRefTotal +-U __Py_INCREF_IncRefTotal +-U __PyObject_GetDictPtr +-U _PyList_GetItemRef +-U _PyDict_GetItemRef +-U _PyDict_GetItemStringRef +-U _PyDict_SetDefault +-U _PyDict_SetDefaultRef +-U _PyWeakref_GetRef +-U _PyImport_AddModuleRef +-U _PyUnstable_Module_SetGIL +-U _PyMutex_Unlock +-U _PyMutex_Lock +-U _PyObject_IS_GC +-U _PyCriticalSection_Begin +-U _PyCriticalSection_End +-U _PyCriticalSection2_Begin +-U _PyCriticalSection2_End +-U _PyUnicode_AsUTF8 diff --git a/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-pypy.sym b/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-pypy.sym new file mode 100644 index 0000000..878b560 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/cmake/darwin-ld-pypy.sym @@ -0,0 +1,964 @@ +-U _PyArg_ValidateKeywordArguments +-U _PyModule_AddType +-U _PyPyAnySet_Check +-U _PyPyAnySet_CheckExact +-U _PyPyArg_Parse +-U _PyPyArg_ParseTuple +-U _PyPyArg_ParseTupleAndKeywords +-U _PyPyArg_UnpackTuple +-U _PyPyArg_VaParse +-U _PyPyArg_VaParseTupleAndKeywords +-U _PyPyBaseObject_Type +-U _PyPyBool_FromLong +-U _PyPyBool_Type +-U _PyPyBuffer_FillInfo +-U _PyPyBuffer_FromContiguous +-U _PyPyBuffer_GetPointer +-U _PyPyBuffer_IsContiguous +-U _PyPyBuffer_Release +-U _PyPyBuffer_ToContiguous +-U _PyPyBufferable_Type +-U _PyPyByteArray_AsString +-U _PyPyByteArray_Check +-U _PyPyByteArray_CheckExact +-U _PyPyByteArray_Concat +-U _PyPyByteArray_FromObject +-U _PyPyByteArray_FromStringAndSize +-U _PyPyByteArray_Resize +-U _PyPyByteArray_Size +-U _PyPyByteArray_Type +-U _PyPyBytes_AS_STRING +-U _PyPyBytes_AsString +-U _PyPyBytes_AsStringAndSize +-U _PyPyBytes_Concat +-U _PyPyBytes_ConcatAndDel +-U _PyPyBytes_FromFormat +-U _PyPyBytes_FromFormatV +-U _PyPyBytes_FromObject +-U _PyPyBytes_FromString +-U _PyPyBytes_FromStringAndSize +-U _PyPyBytes_Size +-U _PyPyBytes_Type +-U _PyPyCFunction_Call +-U _PyPyCFunction_Check +-U _PyPyCFunction_GetFunction +-U _PyPyCFunction_Type +-U _PyPyCFunction_NewEx +-U 
_PyPyCMethod_New +-U _PyPyCallIter_New +-U _PyPyCallable_Check +-U _PyPyCapsule_GetContext +-U _PyPyCapsule_GetDestructor +-U _PyPyCapsule_GetName +-U _PyPyCapsule_GetPointer +-U _PyPyCapsule_Import +-U _PyPyCapsule_IsValid +-U _PyPyCapsule_New +-U _PyPyCapsule_SetContext +-U _PyPyCapsule_SetDestructor +-U _PyPyCapsule_SetName +-U _PyPyCapsule_SetPointer +-U _PyPyCapsule_Type +-U _PyPyCell_Type +-U _PyPyClassMethodDescr_Type +-U _PyPyClassMethod_New +-U _PyPyClassMethod_Type +-U _PyPyCode_Addr2Line +-U _PyPyCode_Check +-U _PyPyCode_CheckExact +-U _PyPyCode_GetNumFree +-U _PyPyCode_New +-U _PyPyCode_NewEmpty +-U _PyPyCodec_Decode +-U _PyPyCodec_Decoder +-U _PyPyCodec_Encode +-U _PyPyCodec_Encoder +-U _PyPyCodec_IncrementalDecoder +-U _PyPyCodec_IncrementalEncoder +-U _PyPyComplex_AsCComplex +-U _PyPyComplex_Check +-U _PyPyComplex_CheckExact +-U _PyPyComplex_FromCComplex +-U _PyPyComplex_FromDoubles +-U _PyPyComplex_ImagAsDouble +-U _PyPyComplex_RealAsDouble +-U _PyPyComplex_Type +-U _PyPyContextVar_Get +-U _PyPyContextVar_New +-U _PyPyContextVar_Set +-U _PyPyCoro_Check +-U _PyPyCoro_CheckExact +-U _PyPyDateTimeAPI +-U _PyPyDateTime_Check +-U _PyPyDateTime_CheckExact +-U _PyPyDateTime_DATE_GET_HOUR +-U _PyPyDateTime_DATE_GET_MICROSECOND +-U _PyPyDateTime_DATE_GET_MINUTE +-U _PyPyDateTime_DATE_GET_SECOND +-U _PyPyDateTime_DELTA_GET_DAYS +-U _PyPyDateTime_DELTA_GET_MICROSECONDS +-U _PyPyDateTime_DELTA_GET_SECONDS +-U _PyPyDateTime_FromTimestamp +-U _PyPyDateTime_GET_DAY +-U _PyPyDateTime_GET_FOLD +-U _PyPyDateTime_GET_MONTH +-U _PyPyDateTime_GET_YEAR +-U _PyPyDateTime_TIME_GET_FOLD +-U _PyPyDateTime_TIME_GET_HOUR +-U _PyPyDateTime_TIME_GET_MICROSECOND +-U _PyPyDateTime_TIME_GET_MINUTE +-U _PyPyDateTime_TIME_GET_SECOND +-U _PyPyDate_Check +-U _PyPyDate_CheckExact +-U _PyPyDate_FromTimestamp +-U _PyPyDelta_Check +-U _PyPyDelta_CheckExact +-U _PyPyDescr_NewClassMethod +-U _PyPyDescr_NewGetSet +-U _PyPyDescr_NewMethod +-U _PyPyDictKeys_Type +-U _PyPyDictProxy_Check +-U 
_PyPyDictProxy_CheckExact +-U _PyPyDictProxy_New +-U _PyPyDictProxy_Type +-U _PyPyDictValues_Type +-U _PyPyDict_Clear +-U _PyPyDict_Contains +-U _PyPyDict_Copy +-U _PyPyDict_DelItem +-U _PyPyDict_DelItemString +-U _PyPyDict_GetItem +-U _PyPyDict_GetItemString +-U _PyPyDict_GetItemWithError +-U _PyPyDict_Items +-U _PyPyDict_Keys +-U _PyPyDict_Merge +-U _PyPyDict_New +-U _PyPyDict_Next +-U _PyPyDict_SetDefault +-U _PyPyDict_SetItem +-U _PyPyDict_SetItemString +-U _PyPyDict_Size +-U _PyPyDict_Type +-U _PyPyDict_Update +-U _PyPyDict_Values +-U _PyPyErr_BadArgument +-U _PyPyErr_BadInternalCall +-U _PyPyErr_CheckSignals +-U _PyPyErr_Clear +-U _PyPyErr_Display +-U _PyPyErr_ExceptionMatches +-U _PyPyErr_Fetch +-U _PyPyErr_Format +-U _PyPyErr_GetExcInfo +-U _PyPyErr_GivenExceptionMatches +-U _PyPyErr_NewException +-U _PyPyErr_NewExceptionWithDoc +-U _PyPyErr_NoMemory +-U _PyPyErr_NormalizeException +-U _PyPyErr_Occurred +-U _PyPyErr_Print +-U _PyPyErr_PrintEx +-U _PyPyErr_Restore +-U _PyPyErr_SetExcInfo +-U _PyPyErr_SetFromErrno +-U _PyPyErr_SetFromErrnoWithFilename +-U _PyPyErr_SetFromErrnoWithFilenameObject +-U _PyPyErr_SetFromErrnoWithFilenameObjects +-U _PyPyErr_SetInterrupt +-U _PyPyErr_SetNone +-U _PyPyErr_SetObject +-U _PyPyErr_SetString +-U _PyPyErr_Warn +-U _PyPyErr_WarnEx +-U _PyPyErr_WarnExplicit +-U _PyPyErr_WarnFormat +-U _PyPyErr_WriteUnraisable +-U _PyPyEval_AcquireThread +-U _PyPyEval_CallFunction +-U _PyPyEval_CallMethod +-U _PyPyEval_CallObjectWithKeywords +-U _PyPyEval_EvalCode +-U _PyPyEval_GetBuiltins +-U _PyPyEval_GetFrame +-U _PyPyEval_GetGlobals +-U _PyPyEval_GetLocals +-U _PyPyEval_InitThreads +-U _PyPyEval_MergeCompilerFlags +-U _PyPyEval_ReleaseThread +-U _PyPyEval_RestoreThread +-U _PyPyEval_SaveThread +-U _PyPyEval_ThreadsInitialized +-U _PyPyExc_ArithmeticError +-U _PyPyExc_AssertionError +-U _PyPyExc_AttributeError +-U _PyPyExc_BaseException +-U _PyPyExc_BlockingIOError +-U _PyPyExc_BrokenPipeError +-U _PyPyExc_BufferError +-U 
_PyPyExc_BytesWarning +-U _PyPyExc_ChildProcessError +-U _PyPyExc_ConnectionAbortedError +-U _PyPyExc_ConnectionError +-U _PyPyExc_ConnectionRefusedError +-U _PyPyExc_ConnectionResetError +-U _PyPyExc_DeprecationWarning +-U _PyPyExc_EOFError +-U _PyPyExc_Exception +-U _PyPyExc_FileExistsError +-U _PyPyExc_FileNotFoundError +-U _PyPyExc_FloatingPointError +-U _PyPyExc_FutureWarning +-U _PyPyExc_GeneratorExit +-U _PyPyExc_ImportError +-U _PyPyExc_ImportWarning +-U _PyPyExc_IndentationError +-U _PyPyExc_IndexError +-U _PyPyExc_InterruptedError +-U _PyPyExc_IsADirectoryError +-U _PyPyExc_KeyError +-U _PyPyExc_KeyboardInterrupt +-U _PyPyExc_LookupError +-U _PyPyExc_MemoryError +-U _PyPyExc_ModuleNotFoundError +-U _PyPyExc_NameError +-U _PyPyExc_NotADirectoryError +-U _PyPyExc_NotImplementedError +-U _PyPyExc_OSError +-U _PyPyExc_OverflowError +-U _PyPyExc_PendingDeprecationWarning +-U _PyPyExc_PermissionError +-U _PyPyExc_ProcessLookupError +-U _PyPyExc_RecursionError +-U _PyPyExc_ReferenceError +-U _PyPyExc_ResourceWarning +-U _PyPyExc_RuntimeError +-U _PyPyExc_RuntimeWarning +-U _PyPyExc_StopAsyncIteration +-U _PyPyExc_StopIteration +-U _PyPyExc_SyntaxError +-U _PyPyExc_SyntaxWarning +-U _PyPyExc_SystemError +-U _PyPyExc_SystemExit +-U _PyPyExc_TabError +-U _PyPyExc_TimeoutError +-U _PyPyExc_TypeError +-U _PyPyExc_UnboundLocalError +-U _PyPyExc_UnicodeDecodeError +-U _PyPyExc_UnicodeEncodeError +-U _PyPyExc_UnicodeError +-U _PyPyExc_UnicodeTranslateError +-U _PyPyExc_UnicodeWarning +-U _PyPyExc_UserWarning +-U _PyPyExc_ValueError +-U _PyPyExc_Warning +-U _PyPyExc_ZeroDivisionError +-U _PyPyExceptionInstance_Class +-U _PyPyException_GetCause +-U _PyPyException_GetContext +-U _PyPyException_GetTraceback +-U _PyPyException_SetCause +-U _PyPyException_SetContext +-U _PyPyException_SetTraceback +-U _PyPyFile_FromFd +-U _PyPyFile_FromString +-U _PyPyFile_GetLine +-U _PyPyFile_WriteObject +-U _PyPyFile_WriteString +-U _PyPyFloat_AS_DOUBLE +-U _PyPyFloat_AsDouble +-U 
_PyPyFloat_Check +-U _PyPyFloat_CheckExact +-U _PyPyFloat_FromDouble +-U _PyPyFloat_FromString +-U _PyPyFloat_Type +-U _PyPyFrame_New +-U _PyPyFrozenSet_Check +-U _PyPyFrozenSet_CheckExact +-U _PyPyFrozenSet_New +-U _PyPyFrozenSet_Type +-U _PyPyFunction_Check +-U _PyPyFunction_CheckExact +-U _PyPyFunction_GetCode +-U _PyPyFunction_Type +-U _PyPyGILState_Check +-U _PyPyGILState_Ensure +-U _PyPyGILState_Release +-U _PyPyGen_Check +-U _PyPyGen_CheckExact +-U _PyPyGetSetDescr_Type +-U _PyPyImport_AddModule +-U _PyPyImport_ExecCodeModule +-U _PyPyImport_ExecCodeModuleEx +-U _PyPyImport_GetModule +-U _PyPyImport_GetModuleDict +-U _PyPyImport_Import +-U _PyPyImport_ImportModule +-U _PyPyImport_ImportModuleLevelObject +-U _PyPyImport_ImportModuleNoBlock +-U _PyPyImport_ReloadModule +-U _PyPyIndex_Check +-U _PyPyInstanceMethod_Check +-U _PyPyInstanceMethod_Function +-U _PyPyInstanceMethod_GET_FUNCTION +-U _PyPyInstanceMethod_New +-U _PyPyInstanceMethod_Type +-U _PyPyInterpreterState_GetID +-U _PyPyInterpreterState_Head +-U _PyPyInterpreterState_Next +-U _PyPyIter_Check +-U _PyPyIter_Next +-U _PyPyList_Append +-U _PyPyList_AsTuple +-U _PyPyList_GET_ITEM +-U _PyPyList_GET_SIZE +-U _PyPyList_GetItem +-U _PyPyList_GetSlice +-U _PyPyList_Insert +-U _PyPyList_New +-U _PyPyList_Reverse +-U _PyPyList_SET_ITEM +-U _PyPyList_SetItem +-U _PyPyList_SetSlice +-U _PyPyList_Size +-U _PyPyList_Sort +-U _PyPyList_Type +-U _PyPyLong_AsDouble +-U _PyPyLong_AsLong +-U _PyPyLong_AsLongAndOverflow +-U _PyPyLong_AsLongLong +-U _PyPyLong_AsLongLongAndOverflow +-U _PyPyLong_AsSize_t +-U _PyPyLong_AsSsize_t +-U _PyPyLong_AsUnsignedLong +-U _PyPyLong_AsUnsignedLongLong +-U _PyPyLong_AsUnsignedLongLongMask +-U _PyPyLong_AsUnsignedLongMask +-U _PyPyLong_AsVoidPtr +-U _PyPyLong_FromDouble +-U _PyPyLong_FromLong +-U _PyPyLong_FromLongLong +-U _PyPyLong_FromSize_t +-U _PyPyLong_FromSsize_t +-U _PyPyLong_FromString +-U _PyPyLong_FromUnicode +-U _PyPyLong_FromUnicodeObject +-U _PyPyLong_FromUnsignedLong +-U 
_PyPyLong_FromUnsignedLongLong +-U _PyPyLong_FromVoidPtr +-U _PyPyLong_Type +-U _PyPyMapping_Check +-U _PyPyMapping_GetItemString +-U _PyPyMapping_HasKey +-U _PyPyMapping_HasKeyString +-U _PyPyMapping_Items +-U _PyPyMapping_Keys +-U _PyPyMapping_Length +-U _PyPyMapping_SetItemString +-U _PyPyMapping_Size +-U _PyPyMapping_Values +-U _PyPyMarshal_ReadObjectFromString +-U _PyPyMarshal_WriteObjectToString +-U _PyPyMem_Calloc +-U _PyPyMem_Free +-U _PyPyMem_Malloc +-U _PyPyMem_RawCalloc +-U _PyPyMem_RawFree +-U _PyPyMem_RawMalloc +-U _PyPyMem_RawRealloc +-U _PyPyMem_Realloc +-U _PyPyMemberDescr_Type +-U _PyPyMember_GetOne +-U _PyPyMember_SetOne +-U _PyPyMemoryView_Check +-U _PyPyMemoryView_CheckExact +-U _PyPyMemoryView_FromBuffer +-U _PyPyMemoryView_FromMemory +-U _PyPyMemoryView_FromObject +-U _PyPyMemoryView_GetContiguous +-U _PyPyMemoryView_Type +-U _PyPyMethodDescr_Check +-U _PyPyMethodDescr_CheckExact +-U _PyPyMethodDescr_Type +-U _PyPyMethod_Check +-U _PyPyMethod_CheckExact +-U _PyPyMethod_Function +-U _PyPyMethod_New +-U _PyPyMethod_Self +-U _PyPyMethod_Type +-U _PyPyModuleDef_Init +-U _PyPyModule_AddFunctions +-U _PyPyModule_AddIntConstant +-U _PyPyModule_AddObject +-U _PyPyModule_AddStringConstant +-U _PyPyModule_Check +-U _PyPyModule_CheckExact +-U _PyPyModule_Create2 +-U _PyPyModule_ExecDef +-U _PyPyModule_GetDef +-U _PyPyModule_GetDict +-U _PyPyModule_GetName +-U _PyPyModule_GetState +-U _PyPyModule_New +-U _PyPyModule_NewObject +-U _PyPyModule_Type +-U _PyPyNumber_Absolute +-U _PyPyNumber_Add +-U _PyPyNumber_And +-U _PyPyNumber_AsSsize_t +-U _PyPyNumber_Check +-U _PyPyNumber_Divide +-U _PyPyNumber_Divmod +-U _PyPyNumber_Float +-U _PyPyNumber_FloorDivide +-U _PyPyNumber_InPlaceAdd +-U _PyPyNumber_InPlaceAnd +-U _PyPyNumber_InPlaceDivide +-U _PyPyNumber_InPlaceFloorDivide +-U _PyPyNumber_InPlaceLshift +-U _PyPyNumber_InPlaceMatrixMultiply +-U _PyPyNumber_InPlaceMultiply +-U _PyPyNumber_InPlaceOr +-U _PyPyNumber_InPlacePower +-U _PyPyNumber_InPlaceRemainder 
+-U _PyPyNumber_InPlaceRshift +-U _PyPyNumber_InPlaceSubtract +-U _PyPyNumber_InPlaceTrueDivide +-U _PyPyNumber_InPlaceXor +-U _PyPyNumber_Index +-U _PyPyNumber_Invert +-U _PyPyNumber_Long +-U _PyPyNumber_Lshift +-U _PyPyNumber_MatrixMultiply +-U _PyPyNumber_Multiply +-U _PyPyNumber_Negative +-U _PyPyNumber_Or +-U _PyPyNumber_Positive +-U _PyPyNumber_Power +-U _PyPyNumber_Remainder +-U _PyPyNumber_Rshift +-U _PyPyNumber_Subtract +-U _PyPyNumber_ToBase +-U _PyPyNumber_TrueDivide +-U _PyPyNumber_Xor +-U _PyPyOS_AfterFork +-U _PyPyOS_FSPath +-U _PyPyOS_InputHook +-U _PyPyOS_InterruptOccurred +-U _PyPyOS_double_to_string +-U _PyPyOS_getsig +-U _PyPyOS_setsig +-U _PyPyOS_snprintf +-U _PyPyOS_string_to_double +-U _PyPyOS_vsnprintf +-U _PyPyObject_ASCII +-U _PyPyObject_AsCharBuffer +-U _PyPyObject_AsFileDescriptor +-U _PyPyObject_AsReadBuffer +-U _PyPyObject_AsWriteBuffer +-U _PyPyObject_Bytes +-U _PyPyObject_Call +-U _PyPyObject_CallFinalizerFromDealloc +-U _PyPyObject_CallFunction +-U _PyPyObject_CallFunctionObjArgs +-U _PyPyObject_CallMethod +-U _PyPyObject_CallMethodNoArgs +-U _PyPyObject_CallMethodObjArgs +-U _PyPyObject_CallMethodOneArg +-U _PyPyObject_CallNoArgs +-U _PyPyObject_CallObject +-U _PyPyObject_CallOneArg +-U _PyPyObject_Calloc +-U _PyPyObject_CheckReadBuffer +-U _PyPyObject_ClearWeakRefs +-U _PyPyObject_Del +-U _PyPyObject_DelAttr +-U _PyPyObject_DelAttrString +-U _PyPyObject_DelItem +-U _PyPyObject_DelItemString +-U _PyPyObject_Dir +-U _PyPyObject_Format +-U _PyPyObject_Free +-U _PyPyObject_GC_Del +-U _PyPyObject_GenericGetAttr +-U _PyPyObject_GenericGetDict +-U _PyPyObject_GenericSetAttr +-U _PyPyObject_GenericSetDict +-U _PyPyObject_GetAttr +-U _PyPyObject_GetAttrString +-U _PyPyObject_GetBuffer +-U _PyPyObject_GetItem +-U _PyPyObject_GetIter +-U _PyPyObject_HasAttr +-U _PyPyObject_HasAttrString +-U _PyPyObject_Hash +-U _PyPyObject_HashNotImplemented +-U _PyPyObject_Init +-U _PyPyObject_InitVar +-U _PyPyObject_IsInstance +-U _PyPyObject_IsSubclass +-U 
_PyPyObject_IsTrue +-U _PyPyObject_LengthHint +-U _PyPyObject_Malloc +-U _PyPyObject_Not +-U _PyPyObject_Print +-U _PyPyObject_Realloc +-U _PyPyObject_Repr +-U _PyPyObject_RichCompare +-U _PyPyObject_RichCompareBool +-U _PyPyObject_SelfIter +-U _PyPyObject_SetAttr +-U _PyPyObject_SetAttrString +-U _PyPyObject_SetItem +-U _PyPyObject_Size +-U _PyPyObject_Str +-U _PyPyObject_Type +-U _PyPyObject_Unicode +-U _PyPyObject_Vectorcall +-U _PyPyObject_VectorcallDict +-U _PyPyObject_VectorcallMethod +-U _PyPyProperty_Type +-U _PyPyRange_Type +-U _PyPyReversed_Type +-U _PyPyRun_File +-U _PyPyRun_SimpleString +-U _PyPyRun_String +-U _PyPyRun_StringFlags +-U _PyPySeqIter_New +-U _PyPySequence_Check +-U _PyPySequence_Concat +-U _PyPySequence_Contains +-U _PyPySequence_DelItem +-U _PyPySequence_DelSlice +-U _PyPySequence_Fast +-U _PyPySequence_Fast_GET_ITEM +-U _PyPySequence_Fast_GET_SIZE +-U _PyPySequence_Fast_ITEMS +-U _PyPySequence_GetItem +-U _PyPySequence_GetSlice +-U _PyPySequence_ITEM +-U _PyPySequence_InPlaceConcat +-U _PyPySequence_InPlaceRepeat +-U _PyPySequence_Index +-U _PyPySequence_Length +-U _PyPySequence_List +-U _PyPySequence_Repeat +-U _PyPySequence_SetItem +-U _PyPySequence_SetSlice +-U _PyPySequence_Size +-U _PyPySequence_Tuple +-U _PyPySet_Add +-U _PyPySet_Check +-U _PyPySet_CheckExact +-U _PyPySet_Clear +-U _PyPySet_Contains +-U _PyPySet_Discard +-U _PyPySet_GET_SIZE +-U _PyPySet_New +-U _PyPySet_Pop +-U _PyPySet_Size +-U _PyPySet_Type +-U _PyPySlice_AdjustIndices +-U _PyPySlice_GetIndices +-U _PyPySlice_GetIndicesEx +-U _PyPySlice_New +-U _PyPySlice_Type +-U _PyPySlice_Unpack +-U _PyPyState_AddModule +-U _PyPyState_RemoveModule +-U _PyPyStaticMethod_New +-U _PyPyStaticMethod_Type +-U _PyPyStructSequence_GetItem +-U _PyPyStructSequence_InitType +-U _PyPyStructSequence_InitType2 +-U _PyPyStructSequence_New +-U _PyPyStructSequence_NewType +-U _PyPyStructSequence_SetItem +-U _PyPyStructSequence_UnnamedField +-U _PyPySys_GetObject +-U _PyPySys_SetObject +-U 
_PyPySys_WriteStderr +-U _PyPySys_WriteStdout +-U _PyPyTZInfo_Check +-U _PyPyTZInfo_CheckExact +-U _PyPyThreadState_Clear +-U _PyPyThreadState_Delete +-U _PyPyThreadState_DeleteCurrent +-U _PyPyThreadState_Get +-U _PyPyThreadState_GetDict +-U _PyPyThreadState_New +-U _PyPyThreadState_SetAsyncExc +-U _PyPyThreadState_Swap +-U _PyPyThread_ReInitTLS +-U _PyPyThread_acquire_lock +-U _PyPyThread_allocate_lock +-U _PyPyThread_create_key +-U _PyPyThread_delete_key +-U _PyPyThread_delete_key_value +-U _PyPyThread_exit_thread +-U _PyPyThread_free_lock +-U _PyPyThread_get_key_value +-U _PyPyThread_get_thread_ident +-U _PyPyThread_init_thread +-U _PyPyThread_release_lock +-U _PyPyThread_set_key_value +-U _PyPyThread_start_new_thread +-U _PyPyTime_Check +-U _PyPyTime_CheckExact +-U _PyPyTraceBack_Check +-U _PyPyTraceBack_Here +-U _PyPyTraceBack_Print +-U _PyPyTraceBack_Type +-U _PyPyTraceMalloc_Track +-U _PyPyTraceMalloc_Untrack +-U _PyPyTuple_GetItem +-U _PyPyTuple_GetSlice +-U _PyPyTuple_New +-U _PyPyTuple_Pack +-U _PyPyTuple_SetItem +-U _PyPyTuple_Size +-U _PyPyTuple_Type +-U _PyPyType_FromModuleAndSpec +-U _PyPyType_FromSpec +-U _PyPyType_FromSpecWithBases +-U _PyPyType_GenericAlloc +-U _PyPyType_GenericNew +-U _PyPyType_GetModule +-U _PyPyType_GetModuleState +-U _PyPyType_GetSlot +-U _PyPyType_IsSubtype +-U _PyPyType_Modified +-U _PyPyType_Ready +-U _PyPyType_Type +-U _PyPyUnicode_Append +-U _PyPyUnicode_AppendAndDel +-U _PyPyUnicode_AsASCIIString +-U _PyPyUnicode_AsEncodedObject +-U _PyPyUnicode_AsEncodedString +-U _PyPyUnicode_AsLatin1String +-U _PyPyUnicode_AsUCS4 +-U _PyPyUnicode_AsUCS4Copy +-U _PyPyUnicode_AsUTF16String +-U _PyPyUnicode_AsUTF32String +-U _PyPyUnicode_AsUTF8 +-U _PyPyUnicode_AsUTF8AndSize +-U _PyPyUnicode_AsUTF8String +-U _PyPyUnicode_AsUnicode +-U _PyPyUnicode_AsUnicodeAndSize +-U _PyPyUnicode_AsUnicodeEscapeString +-U _PyPyUnicode_AsWideChar +-U _PyPyUnicode_AsWideCharString +-U _PyPyUnicode_Check +-U _PyPyUnicode_CheckExact +-U _PyPyUnicode_Compare 
+-U _PyPyUnicode_CompareWithASCIIString +-U _PyPyUnicode_Concat +-U _PyPyUnicode_Contains +-U _PyPyUnicode_Count +-U _PyPyUnicode_Decode +-U _PyPyUnicode_DecodeASCII +-U _PyPyUnicode_DecodeFSDefault +-U _PyPyUnicode_DecodeFSDefaultAndSize +-U _PyPyUnicode_DecodeLatin1 +-U _PyPyUnicode_DecodeLocale +-U _PyPyUnicode_DecodeLocaleAndSize +-U _PyPyUnicode_DecodeUTF16 +-U _PyPyUnicode_DecodeUTF32 +-U _PyPyUnicode_DecodeUTF8 +-U _PyPyUnicode_EncodeASCII +-U _PyPyUnicode_EncodeDecimal +-U _PyPyUnicode_EncodeFSDefault +-U _PyPyUnicode_EncodeLatin1 +-U _PyPyUnicode_EncodeLocale +-U _PyPyUnicode_EncodeUTF8 +-U _PyPyUnicode_FSConverter +-U _PyPyUnicode_FSDecoder +-U _PyPyUnicode_Find +-U _PyPyUnicode_FindChar +-U _PyPyUnicode_Format +-U _PyPyUnicode_FromEncodedObject +-U _PyPyUnicode_FromFormat +-U _PyPyUnicode_FromFormatV +-U _PyPyUnicode_FromKindAndData +-U _PyPyUnicode_FromObject +-U _PyPyUnicode_FromOrdinal +-U _PyPyUnicode_FromString +-U _PyPyUnicode_FromStringAndSize +-U _PyPyUnicode_FromUnicode +-U _PyPyUnicode_FromWideChar +-U _PyPyUnicode_GetDefaultEncoding +-U _PyPyUnicode_GetLength +-U _PyPyUnicode_GetMax +-U _PyPyUnicode_GetSize +-U _PyPyUnicode_InternFromString +-U _PyPyUnicode_InternInPlace +-U _PyPyUnicode_Join +-U _PyPyUnicode_New +-U _PyPyUnicode_ReadChar +-U _PyPyUnicode_Replace +-U _PyPyUnicode_Resize +-U _PyPyUnicode_Split +-U _PyPyUnicode_Splitlines +-U _PyPyUnicode_Substring +-U _PyPyUnicode_Tailmatch +-U _PyPyUnicode_TransformDecimalToASCII +-U _PyPyUnicode_Type +-U _PyPyUnicode_WriteChar +-U _PyPyVectorcall_Call +-U _PyPyWeakref_Check +-U _PyPyWeakref_CheckProxy +-U _PyPyWeakref_CheckRef +-U _PyPyWeakref_CheckRefExact +-U _PyPyWeakref_GET_OBJECT +-U _PyPyWeakref_GetObject +-U _PyPyWeakref_LockObject +-U _PyPyWeakref_NewProxy +-U _PyPyWeakref_NewRef +-U _PyPyWrapperDescr_Type +-U _PyPy_AddPendingCall +-U _PyPy_AtExit +-U _PyPy_BuildValue +-U _PyPy_BytesWarningFlag +-U _PyPy_CompileStringFlags +-U _PyPy_DebugFlag +-U _PyPy_DecRef +-U 
_PyPy_DontWriteBytecodeFlag +-U _PyPy_EnterRecursiveCall +-U _PyPy_FatalError +-U _PyPy_FindMethod +-U _PyPy_FrozenFlag +-U _PyPy_GenericAlias +-U _PyPy_GetProgramName +-U _PyPy_GetRecursionLimit +-U _PyPy_GetVersion +-U _PyPy_HashRandomizationFlag +-U _PyPy_IgnoreEnvironmentFlag +-U _PyPy_IncRef +-U _PyPy_InspectFlag +-U _PyPy_InteractiveFlag +-U _PyPy_IsInitialized +-U _PyPy_IsolatedFlag +-U _PyPy_LeaveRecursiveCall +-U _PyPy_MakePendingCalls +-U _PyPy_NoSiteFlag +-U _PyPy_NoUserSiteDirectory +-U _PyPy_OptimizeFlag +-U _PyPy_QuietFlag +-U _PyPy_ReprEnter +-U _PyPy_ReprLeave +-U _PyPy_SetRecursionLimit +-U _PyPy_UNICODE_COPY +-U _PyPy_UNICODE_ISALNUM +-U _PyPy_UNICODE_ISALPHA +-U _PyPy_UNICODE_ISDECIMAL +-U _PyPy_UNICODE_ISDIGIT +-U _PyPy_UNICODE_ISLINEBREAK +-U _PyPy_UNICODE_ISLOWER +-U _PyPy_UNICODE_ISNUMERIC +-U _PyPy_UNICODE_ISSPACE +-U _PyPy_UNICODE_ISTITLE +-U _PyPy_UNICODE_ISUPPER +-U _PyPy_UNICODE_TODECIMAL +-U _PyPy_UNICODE_TODIGIT +-U _PyPy_UNICODE_TOLOWER +-U _PyPy_UNICODE_TONUMERIC +-U _PyPy_UNICODE_TOTITLE +-U _PyPy_UNICODE_TOUPPER +-U _PyPy_UnbufferedStdioFlag +-U _PyPy_VaBuildValue +-U _PyPy_VerboseFlag +-U _PySlice_AdjustIndices +-U _PyState_FindModule +-U _PyThread_tss_alloc +-U _PyThread_tss_create +-U _PyThread_tss_delete +-U _PyThread_tss_free +-U _PyThread_tss_get +-U _PyThread_tss_is_created +-U _PyThread_tss_set +-U _PyType_GetFlags +-U _Py_FileSystemDefaultEncoding +-U __PyArg_BadArgument +-U __PyArg_CheckPositional +-U __PyArg_NoKeywords +-U __PyArg_NoKwnames +-U __PyArg_NoPositional +-U __PyArg_ParseStack +-U __PyArg_ParseStackAndKeywords +-U __PyArg_ParseStackAndKeywords_SizeT +-U __PyArg_ParseStack_SizeT +-U __PyArg_ParseTupleAndKeywordsFast +-U __PyArg_ParseTupleAndKeywordsFast_SizeT +-U __PyArg_UnpackKeywords +-U __PyArg_UnpackStack +-U __PyArg_VaParseTupleAndKeywordsFast +-U __PyArg_VaParseTupleAndKeywordsFast_SizeT +-U __PyExc_ArithmeticError +-U __PyExc_AssertionError +-U __PyExc_AttributeError +-U __PyExc_BaseException +-U 
__PyExc_BlockingIOError +-U __PyExc_BrokenPipeError +-U __PyExc_BufferError +-U __PyExc_BytesWarning +-U __PyExc_ChildProcessError +-U __PyExc_ConnectionAbortedError +-U __PyExc_ConnectionError +-U __PyExc_ConnectionRefusedError +-U __PyExc_ConnectionResetError +-U __PyExc_DeprecationWarning +-U __PyExc_EOFError +-U __PyExc_Exception +-U __PyExc_FileExistsError +-U __PyExc_FileNotFoundError +-U __PyExc_FloatingPointError +-U __PyExc_FutureWarning +-U __PyExc_GeneratorExit +-U __PyExc_ImportError +-U __PyExc_ImportWarning +-U __PyExc_IndentationError +-U __PyExc_IndexError +-U __PyExc_InterruptedError +-U __PyExc_IsADirectoryError +-U __PyExc_KeyError +-U __PyExc_KeyboardInterrupt +-U __PyExc_LookupError +-U __PyExc_MemoryError +-U __PyExc_ModuleNotFoundError +-U __PyExc_NameError +-U __PyExc_NotADirectoryError +-U __PyExc_NotImplementedError +-U __PyExc_OSError +-U __PyExc_OverflowError +-U __PyExc_PendingDeprecationWarning +-U __PyExc_PermissionError +-U __PyExc_ProcessLookupError +-U __PyExc_RecursionError +-U __PyExc_ReferenceError +-U __PyExc_ResourceWarning +-U __PyExc_RuntimeError +-U __PyExc_RuntimeWarning +-U __PyExc_StopAsyncIteration +-U __PyExc_StopIteration +-U __PyExc_SyntaxError +-U __PyExc_SyntaxWarning +-U __PyExc_SystemError +-U __PyExc_SystemExit +-U __PyExc_TabError +-U __PyExc_TimeoutError +-U __PyExc_TypeError +-U __PyExc_UnboundLocalError +-U __PyExc_UnicodeDecodeError +-U __PyExc_UnicodeEncodeError +-U __PyExc_UnicodeError +-U __PyExc_UnicodeTranslateError +-U __PyExc_UnicodeWarning +-U __PyExc_UserWarning +-U __PyExc_ValueError +-U __PyExc_Warning +-U __PyExc_ZeroDivisionError +-U __PyLong_AsTime_t +-U __PyLong_FromTime_t +-U __PyPyArg_ParseTupleAndKeywords_SizeT +-U __PyPyArg_ParseTuple_SizeT +-U __PyPyArg_Parse_SizeT +-U __PyPyArg_VaParseTupleAndKeywords_SizeT +-U __PyPyArg_VaParse_SizeT +-U __PyPyBytes_Eq +-U __PyPyBytes_Join +-U __PyPyBytes_Resize +-U __PyPyComplex_AsCComplex +-U __PyPyComplex_FromCComplex +-U 
__PyPyDateTime_FromDateAndTime +-U __PyPyDateTime_FromDateAndTimeAndFold +-U __PyPyDateTime_FromTimestamp +-U __PyPyDateTime_Import +-U __PyPyDate_FromDate +-U __PyPyDate_FromTimestamp +-U __PyPyDelta_FromDelta +-U __PyPyDict_GetItemStringWithError +-U __PyPyDict_HasOnlyStringKeys +-U __PyPyErr_FormatFromCause +-U __PyPyErr_WriteUnraisableMsg +-U __PyPyEval_SliceIndex +-U __PyPyFloat_Unpack4 +-U __PyPyFloat_Unpack8 +-U __PyPyImport_AcquireLock +-U __PyPyImport_ReleaseLock +-U __PyPyList_Extend +-U __PyPyLong_AsByteArrayO +-U __PyPyLong_FromByteArray +-U __PyPyLong_NumBits +-U __PyPyLong_Sign +-U __PyPyNamespace_New +-U __PyPyNone_Type +-U __PyPyNotImplemented_Type +-U __PyPyObject_CallFunction_SizeT +-U __PyPyObject_CallMethod_SizeT +-U __PyPyObject_FastCall +-U __PyPyObject_GC_Malloc +-U __PyPyObject_GC_New +-U __PyPyObject_GC_NewVar +-U __PyPyObject_GetDictPtr +-U __PyPyObject_New +-U __PyPyObject_NewVar +-U __PyPyObject_Vectorcall +-U __PyPyPyGC_AddMemoryPressure +-U __PyPyPy_Free +-U __PyPyPy_Malloc +-U __PyPySet_Next +-U __PyPySet_NextEntry +-U __PyPyThreadState_UncheckedGet +-U __PyPyTimeZone_FromTimeZone +-U __PyPyTime_FromTime +-U __PyPyTime_FromTimeAndFold +-U __PyPyTuple_Resize +-U __PyPyType_Lookup +-U __PyPyUnicode_EQ +-U __PyPyUnicode_EqualToASCIIString +-U __PyPyUnicode_Ready +-U __PyPy_BuildValue_SizeT +-U __PyPy_Dealloc +-U __PyPy_EllipsisObject +-U __PyPy_FalseStruct +-U __PyPy_HashDouble +-U __PyPy_HashPointer +-U __PyPy_IsFinalizing +-U __PyPy_NoneStruct +-U __PyPy_NotImplementedStruct +-U __PyPy_PackageContext +-U __PyPy_RestoreSignals +-U __PyPy_TrueStruct +-U __PyPy_VaBuildValue_SizeT +-U __PyPy_get_PyOS_InputHook +-U __PyPy_get_capsule_type +-U __PyPy_object_dealloc +-U __PyPy_setfilesystemdefaultencoding +-U __PyPy_strhex +-U __PyPy_strhex_bytes +-U __PyPy_subtype_dealloc +-U __PyPy_tuple_dealloc +-U __PyPy_tuple_new +-U __PyTime_AsMicroseconds +-U __PyTime_AsMilliseconds +-U __PyTime_AsNanosecondsObject +-U __PyTime_AsSecondsDouble +-U 
__PyTime_AsTimeval +-U __PyTime_AsTimevalTime_t +-U __PyTime_AsTimeval_noraise +-U __PyTime_FromMillisecondsObject +-U __PyTime_FromNanoseconds +-U __PyTime_FromNanosecondsObject +-U __PyTime_FromSeconds +-U __PyTime_FromSecondsObject +-U __PyTime_GetMonotonicClock +-U __PyTime_GetMonotonicClockWithInfo +-U __PyTime_GetSystemClock +-U __PyTime_GetSystemClockWithInfo +-U __PyTime_Init +-U __PyTime_ObjectToTime_t +-U __PyTime_ObjectToTimespec +-U __PyTime_ObjectToTimeval +-U __PyTime_gmtime +-U __PyTime_localtime +-U __PyType_Name diff --git a/RemoteInput/Thirdparty/nanobind/cmake/nanobind-config.cmake b/RemoteInput/Thirdparty/nanobind/cmake/nanobind-config.cmake new file mode 100644 index 0000000..3ef847e --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/cmake/nanobind-config.cmake @@ -0,0 +1,472 @@ +include_guard(GLOBAL) + +if (NOT TARGET Python::Module) + message(FATAL_ERROR "You must invoke 'find_package(Python COMPONENTS Interpreter Development REQUIRED)' prior to including nanobind.") +endif() + +# Determine the right suffix for ordinary and stable ABI extensions. + +# We always need to know the extension +if(WIN32) + set(NB_SUFFIX_EXT ".pyd") +else() + set(NB_SUFFIX_EXT "${CMAKE_SHARED_MODULE_SUFFIX}") +endif() + +# Check if FindPython/scikit-build-core defined a SOABI/SOSABI variable +if(DEFINED SKBUILD_SOABI) + set(NB_SOABI "${SKBUILD_SOABI}") +elseif(DEFINED Python_SOABI) + set(NB_SOABI "${Python_SOABI}") +endif() + +if(DEFINED SKBUILD_SOSABI) + set(NB_SOSABI "${SKBUILD_SOSABI}") +elseif(DEFINED Python_SOSABI) + set(NB_SOSABI "${Python_SOSABI}") +endif() + +# PyPy sets an invalid SOABI (platform missing), causing older FindPythons to +# report an incorrect value. Only use it if it looks correct (X-X-X form). 
+if(DEFINED NB_SOABI AND "${NB_SOABI}" MATCHES ".+-.+-.+") + set(NB_SUFFIX ".${NB_SOABI}${NB_SUFFIX_EXT}") +endif() + +if(DEFINED NB_SOSABI) + if(NB_SOSABI STREQUAL "") + set(NB_SUFFIX_S "${NB_SUFFIX_EXT}") + else() + set(NB_SUFFIX_S ".${NB_SOSABI}${NB_SUFFIX_EXT}") + endif() +endif() + +# Extract Python version and extensions (e.g. free-threaded build) +string(REGEX REPLACE "[^-]*-([^-]*)-.*" "\\1" NB_ABI "${NB_SOABI}") + +# If either suffix is missing, call Python to compute it +if(NOT DEFINED NB_SUFFIX OR NOT DEFINED NB_SUFFIX_S) + # Query Python directly to get the right suffix. + execute_process( + COMMAND "${Python_EXECUTABLE}" "-c" + "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))" + RESULT_VARIABLE NB_SUFFIX_RET + OUTPUT_VARIABLE EXT_SUFFIX + OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(NB_SUFFIX_RET AND NOT NB_SUFFIX_RET EQUAL 0) + message(FATAL_ERROR "nanobind: Python sysconfig query to " + "find 'EXT_SUFFIX' property failed!") + endif() + + if(NOT DEFINED NB_SUFFIX) + set(NB_SUFFIX "${EXT_SUFFIX}") + endif() + + if(NOT DEFINED NB_SUFFIX_S) + get_filename_component(NB_SUFFIX_EXT "${EXT_SUFFIX}" LAST_EXT) + if(WIN32) + set(NB_SUFFIX_S "${NB_SUFFIX_EXT}") + else() + set(NB_SUFFIX_S ".abi3${NB_SUFFIX_EXT}") + endif() + endif() +endif() + +# Stash these for later use +set(NB_SUFFIX ${NB_SUFFIX} CACHE INTERNAL "") +set(NB_SUFFIX_S ${NB_SUFFIX_S} CACHE INTERNAL "") +set(NB_ABI ${NB_ABI} CACHE INTERNAL "") + +get_filename_component(NB_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(NB_DIR "${NB_DIR}" PATH) + +set(NB_DIR ${NB_DIR} CACHE INTERNAL "") +set(NB_OPT $<OR:$<CONFIG:Release>,$<CONFIG:MinSizeRel>> CACHE INTERNAL "") +set(NB_OPT_SIZE $<OR:$<CONFIG:Release>,$<CONFIG:MinSizeRel>,$<CONFIG:RelWithDebInfo>> CACHE INTERNAL "") + +# --------------------------------------------------------------------------- +# Helper function to handle undefined CPython API symbols on macOS +# --------------------------------------------------------------------------- + +function (nanobind_link_options name) + if (APPLE) + if (Python_INTERPRETER_ID
STREQUAL "PyPy") + set(NB_LINKER_RESPONSE_FILE darwin-ld-pypy.sym) + else() + set(NB_LINKER_RESPONSE_FILE darwin-ld-cpython.sym) + endif() + target_link_options(${name} PRIVATE "-Wl,@${NB_DIR}/cmake/${NB_LINKER_RESPONSE_FILE}") + endif() +endfunction() + +# --------------------------------------------------------------------------- +# Create shared/static library targets for nanobind's non-templated core +# --------------------------------------------------------------------------- + +function (nanobind_build_library TARGET_NAME) + if (TARGET ${TARGET_NAME}) + return() + endif() + + if (TARGET_NAME MATCHES "-static") + set (TARGET_TYPE STATIC) + else() + set (TARGET_TYPE SHARED) + endif() + + add_library(${TARGET_NAME} ${TARGET_TYPE} + EXCLUDE_FROM_ALL + ${NB_DIR}/include/nanobind/make_iterator.h + ${NB_DIR}/include/nanobind/nanobind.h + ${NB_DIR}/include/nanobind/nb_accessor.h + ${NB_DIR}/include/nanobind/nb_attr.h + ${NB_DIR}/include/nanobind/nb_call.h + ${NB_DIR}/include/nanobind/nb_cast.h + ${NB_DIR}/include/nanobind/nb_class.h + ${NB_DIR}/include/nanobind/nb_defs.h + ${NB_DIR}/include/nanobind/nb_descr.h + ${NB_DIR}/include/nanobind/nb_enums.h + ${NB_DIR}/include/nanobind/nb_error.h + ${NB_DIR}/include/nanobind/nb_func.h + ${NB_DIR}/include/nanobind/nb_lib.h + ${NB_DIR}/include/nanobind/nb_misc.h + ${NB_DIR}/include/nanobind/nb_python.h + ${NB_DIR}/include/nanobind/nb_traits.h + ${NB_DIR}/include/nanobind/nb_tuple.h + ${NB_DIR}/include/nanobind/nb_types.h + ${NB_DIR}/include/nanobind/ndarray.h + ${NB_DIR}/include/nanobind/trampoline.h + ${NB_DIR}/include/nanobind/typing.h + ${NB_DIR}/include/nanobind/operators.h + ${NB_DIR}/include/nanobind/stl/array.h + ${NB_DIR}/include/nanobind/stl/bind_map.h + ${NB_DIR}/include/nanobind/stl/bind_vector.h + ${NB_DIR}/include/nanobind/stl/detail + ${NB_DIR}/include/nanobind/stl/detail/nb_array.h + ${NB_DIR}/include/nanobind/stl/detail/nb_dict.h + ${NB_DIR}/include/nanobind/stl/detail/nb_list.h + 
${NB_DIR}/include/nanobind/stl/detail/nb_set.h + ${NB_DIR}/include/nanobind/stl/detail/traits.h + ${NB_DIR}/include/nanobind/stl/filesystem.h + ${NB_DIR}/include/nanobind/stl/function.h + ${NB_DIR}/include/nanobind/stl/list.h + ${NB_DIR}/include/nanobind/stl/map.h + ${NB_DIR}/include/nanobind/stl/optional.h + ${NB_DIR}/include/nanobind/stl/pair.h + ${NB_DIR}/include/nanobind/stl/set.h + ${NB_DIR}/include/nanobind/stl/shared_ptr.h + ${NB_DIR}/include/nanobind/stl/string.h + ${NB_DIR}/include/nanobind/stl/string_view.h + ${NB_DIR}/include/nanobind/stl/tuple.h + ${NB_DIR}/include/nanobind/stl/unique_ptr.h + ${NB_DIR}/include/nanobind/stl/unordered_map.h + ${NB_DIR}/include/nanobind/stl/unordered_set.h + ${NB_DIR}/include/nanobind/stl/variant.h + ${NB_DIR}/include/nanobind/stl/vector.h + ${NB_DIR}/include/nanobind/eigen/dense.h + ${NB_DIR}/include/nanobind/eigen/sparse.h + + ${NB_DIR}/src/buffer.h + ${NB_DIR}/src/hash.h + ${NB_DIR}/src/nb_internals.h + ${NB_DIR}/src/nb_internals.cpp + ${NB_DIR}/src/nb_func.cpp + ${NB_DIR}/src/nb_type.cpp + ${NB_DIR}/src/nb_enum.cpp + ${NB_DIR}/src/nb_ndarray.cpp + ${NB_DIR}/src/nb_static_property.cpp + ${NB_DIR}/src/common.cpp + ${NB_DIR}/src/error.cpp + ${NB_DIR}/src/trampoline.cpp + ${NB_DIR}/src/implicit.cpp + ) + + if (TARGET_TYPE STREQUAL "SHARED") + nanobind_link_options(${TARGET_NAME}) + target_compile_definitions(${TARGET_NAME} PRIVATE -DNB_BUILD) + target_compile_definitions(${TARGET_NAME} PUBLIC -DNB_SHARED) + nanobind_lto(${TARGET_NAME}) + + nanobind_strip(${TARGET_NAME}) + elseif(NOT WIN32 AND NOT APPLE) + target_compile_options(${TARGET_NAME} PUBLIC $<${NB_OPT_SIZE}:-ffunction-sections -fdata-sections>) + target_link_options(${TARGET_NAME} PUBLIC $<${NB_OPT_SIZE}:-Wl,--gc-sections>) + endif() + + set_target_properties(${TARGET_NAME} PROPERTIES + POSITION_INDEPENDENT_CODE ON) + + if (MSVC) + # Do not complain about vsnprintf + target_compile_definitions(${TARGET_NAME} PRIVATE -D_CRT_SECURE_NO_WARNINGS) + else() + # 
Generally needed to handle type punning in Python code + target_compile_options(${TARGET_NAME} PRIVATE -fno-strict-aliasing) + endif() + + if (WIN32) + if (${TARGET_NAME} MATCHES "-abi3") + target_link_libraries(${TARGET_NAME} PUBLIC Python::SABIModule) + else() + target_link_libraries(${TARGET_NAME} PUBLIC Python::Module) + endif() + endif() + + if (TARGET_NAME MATCHES "-ft") + target_compile_definitions(${TARGET_NAME} PUBLIC NB_FREE_THREADED) + endif() + + # Nanobind performs many assertion checks -- detailed error messages aren't + # included in Release/MinSizeRel modes + target_compile_definitions(${TARGET_NAME} PRIVATE + $<${NB_OPT_SIZE}:NB_COMPACT_ASSERTIONS>) + + # If nanobind was installed without submodule dependencies, then the + # dependencies directory won't exist and we need to find them. + # However, if the directory _does_ exist, then the user is free to choose + # whether nanobind uses them (based on `NB_USE_SUBMODULE_DEPS`), with a + # preference to choose them if `NB_USE_SUBMODULE_DEPS` is not defined + if (NOT IS_DIRECTORY ${NB_DIR}/ext/robin_map/include OR + (DEFINED NB_USE_SUBMODULE_DEPS AND NOT NB_USE_SUBMODULE_DEPS)) + include(CMakeFindDependencyMacro) + find_dependency(tsl-robin-map) + target_link_libraries(${TARGET_NAME} PRIVATE tsl::robin_map) + else() + target_include_directories(${TARGET_NAME} PRIVATE + ${NB_DIR}/ext/robin_map/include) + endif() + + target_include_directories(${TARGET_NAME} PUBLIC + ${Python_INCLUDE_DIRS} + ${NB_DIR}/include) + + target_compile_features(${TARGET_NAME} PUBLIC cxx_std_17) + nanobind_set_visibility(${TARGET_NAME}) +endfunction() + +# --------------------------------------------------------------------------- +# Define a convenience function for creating nanobind targets +# --------------------------------------------------------------------------- + +function(nanobind_opt_size name) + if (MSVC) + target_compile_options(${name} PRIVATE $<${NB_OPT_SIZE}:$<$<COMPILE_LANGUAGE:CXX>:/Os>>) + else() + target_compile_options(${name}
PRIVATE $<${NB_OPT_SIZE}:$<$<COMPILE_LANGUAGE:CXX>:-Os>>) + endif() +endfunction() + +function(nanobind_disable_stack_protector name) + if (NOT MSVC) + # The stack protector affects binding size negatively (+8% on Linux in my + # benchmarks). Protecting from stack smashing in a Python VM seems in any + # case futile, so let's get rid of it by default in optimized modes. + target_compile_options(${name} PRIVATE $<${NB_OPT}:-fno-stack-protector>) + endif() +endfunction() + +function(nanobind_extension name) + set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${NB_SUFFIX}") +endfunction() + +function(nanobind_extension_abi3 name) + set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${NB_SUFFIX_S}") +endfunction() + +function (nanobind_lto name) + set_target_properties(${name} PROPERTIES + INTERPROCEDURAL_OPTIMIZATION_RELEASE ON + INTERPROCEDURAL_OPTIMIZATION_MINSIZEREL ON) +endfunction() + +function (nanobind_compile_options name) + if (MSVC) + target_compile_options(${name} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:/bigobj /MP>) + endif() +endfunction() + +function (nanobind_strip name) + if (APPLE) + target_link_options(${name} PRIVATE $<${NB_OPT}:-Wl,-dead_strip -Wl,-x -Wl,-S>) + elseif (NOT WIN32) + target_link_options(${name} PRIVATE $<${NB_OPT}:-Wl,-s>) + endif() +endfunction() + +function (nanobind_set_visibility name) + set_target_properties(${name} PROPERTIES CXX_VISIBILITY_PRESET hidden) +endfunction() + +function (nanobind_musl_static_libcpp name) + if ("$ENV{AUDITWHEEL_PLAT}" MATCHES "musllinux") + target_link_options(${name} PRIVATE -static-libstdc++ -static-libgcc) + endif() +endfunction() + +function(nanobind_add_module name) + cmake_parse_arguments(PARSE_ARGV 1 ARG + "STABLE_ABI;FREE_THREADED;NB_STATIC;NB_SHARED;PROTECT_STACK;LTO;NOMINSIZE;NOSTRIP;MUSL_DYNAMIC_LIBCPP" + "NB_DOMAIN" "") + + add_library(${name} MODULE ${ARG_UNPARSED_ARGUMENTS}) + + nanobind_compile_options(${name}) + nanobind_link_options(${name}) + set_target_properties(${name} PROPERTIES LINKER_LANGUAGE CXX) + + if
(ARG_NB_SHARED AND ARG_NB_STATIC) + message(FATAL_ERROR "NB_SHARED and NB_STATIC cannot be specified at the same time!") + elseif (NOT ARG_NB_SHARED) + set(ARG_NB_STATIC TRUE) + endif() + + # Stable ABI builds require CPython >= 3.12 and Python::SABIModule + if ((Python_VERSION VERSION_LESS 3.12) OR + (NOT Python_INTERPRETER_ID STREQUAL "Python") OR + (NOT TARGET Python::SABIModule)) + set(ARG_STABLE_ABI FALSE) + endif() + + if (NB_ABI MATCHES "t") + set(ARG_STABLE_ABI FALSE) + else(ARG_STABLE_ABI) + set(ARG_FREE_THREADED FALSE) + endif() + + set(libname "nanobind") + if (ARG_NB_STATIC) + set(libname "${libname}-static") + endif() + + if (ARG_STABLE_ABI) + set(libname "${libname}-abi3") + endif() + + if (ARG_FREE_THREADED) + set(libname "${libname}-ft") + endif() + + if (ARG_NB_DOMAIN AND ARG_NB_SHARED) + set(libname ${libname}-${ARG_NB_DOMAIN}) + endif() + + nanobind_build_library(${libname}) + + if (ARG_NB_DOMAIN) + target_compile_definitions(${name} PRIVATE NB_DOMAIN=${ARG_NB_DOMAIN}) + endif() + + if (ARG_STABLE_ABI) + target_compile_definitions(${libname} PUBLIC -DPy_LIMITED_API=0x030C0000) + nanobind_extension_abi3(${name}) + else() + nanobind_extension(${name}) + endif() + + if (ARG_FREE_THREADED) + target_compile_definitions(${name} PRIVATE NB_FREE_THREADED) + endif() + + target_link_libraries(${name} PRIVATE ${libname}) + + if (NOT ARG_PROTECT_STACK) + nanobind_disable_stack_protector(${name}) + endif() + + if (NOT ARG_NOMINSIZE) + nanobind_opt_size(${name}) + endif() + + if (NOT ARG_NOSTRIP) + nanobind_strip(${name}) + endif() + + if (ARG_LTO) + nanobind_lto(${name}) + endif() + + if (ARG_NB_STATIC AND NOT ARG_MUSL_DYNAMIC_LIBCPP) + nanobind_musl_static_libcpp(${name}) + endif() + + nanobind_set_visibility(${name}) +endfunction() + +function (nanobind_add_stub name) + cmake_parse_arguments(PARSE_ARGV 1 ARG "VERBOSE;INCLUDE_PRIVATE;EXCLUDE_DOCSTRINGS;INSTALL_TIME;EXCLUDE_FROM_ALL" "MODULE;OUTPUT;MARKER_FILE;COMPONENT;PATTERN_FILE" "PYTHON_PATH;DEPENDS") + 
+ if (EXISTS ${NB_DIR}/src/stubgen.py) + set(NB_STUBGEN "${NB_DIR}/src/stubgen.py") + elseif (EXISTS ${NB_DIR}/stubgen.py) + set(NB_STUBGEN "${NB_DIR}/stubgen.py") + else() + message(FATAL_ERROR "nanobind_add_stub(): could not locate 'stubgen.py'!") + endif() + + if (NOT ARG_VERBOSE) + list(APPEND NB_STUBGEN_ARGS -q) + else() + set(NB_STUBGEN_EXTRA USES_TERMINAL) + endif() + + if (ARG_INCLUDE_PRIVATE) + list(APPEND NB_STUBGEN_ARGS -P) + endif() + + if (ARG_EXCLUDE_DOCSTRINGS) + list(APPEND NB_STUBGEN_ARGS -D) + endif() + + foreach (TMP IN LISTS ARG_PYTHON_PATH) + list(APPEND NB_STUBGEN_ARGS -i "${TMP}") + endforeach() + + if (ARG_PATTERN_FILE) + list(APPEND NB_STUBGEN_ARGS -p "${ARG_PATTERN_FILE}") + endif() + + if (ARG_MARKER_FILE) + list(APPEND NB_STUBGEN_ARGS -M "${ARG_MARKER_FILE}") + list(APPEND NB_STUBGEN_OUTPUTS "${ARG_MARKER_FILE}") + endif() + + if (NOT ARG_MODULE) + message(FATAL_ERROR "nanobind_add_stub(): a 'MODULE' argument must be specified!") + else() + list(APPEND NB_STUBGEN_ARGS -m "${ARG_MODULE}") + endif() + + if (NOT ARG_OUTPUT) + message(FATAL_ERROR "nanobind_add_stub(): an 'OUTPUT' argument must be specified!") + else() + list(APPEND NB_STUBGEN_ARGS -o "${ARG_OUTPUT}") + list(APPEND NB_STUBGEN_OUTPUTS "${ARG_OUTPUT}") + endif() + + file(TO_CMAKE_PATH ${Python_EXECUTABLE} NB_Python_EXECUTABLE) + + set(NB_STUBGEN_CMD "${NB_Python_EXECUTABLE}" "${NB_STUBGEN}" ${NB_STUBGEN_ARGS}) + + if (NOT ARG_INSTALL_TIME) + add_custom_command( + OUTPUT ${NB_STUBGEN_OUTPUTS} + COMMAND ${NB_STUBGEN_CMD} + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" + DEPENDS ${ARG_DEPENDS} "${NB_STUBGEN}" "${ARG_PATTERN_FILE}" + ${NB_STUBGEN_EXTRA} + ) + add_custom_target(${name} ALL DEPENDS ${NB_STUBGEN_OUTPUTS}) + else() + set(NB_STUBGEN_EXTRA "") + if (ARG_COMPONENT) + list(APPEND NB_STUBGEN_EXTRA COMPONENT ${ARG_COMPONENT}) + endif() + if (ARG_EXCLUDE_FROM_ALL) + list(APPEND NB_STUBGEN_EXTRA EXCLUDE_FROM_ALL) + endif() + # \${CMAKE_INSTALL_PREFIX} has same effect as $ + 
# This is for compatibility with CMake < 3.27. + # For more info: https://github.com/wjakob/nanobind/issues/420#issuecomment-1971353531 + install(CODE "set(CMD \"${NB_STUBGEN_CMD}\")\nexecute_process(\n COMMAND \$\{CMD\}\n WORKING_DIRECTORY \"\${CMAKE_INSTALL_PREFIX}\"\n)" ${NB_STUBGEN_EXTRA}) + endif() +endfunction() diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/CMakeLists.txt b/RemoteInput/Thirdparty/nanobind/ext/robin_map/CMakeLists.txt new file mode 100644 index 0000000..fab865a --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/CMakeLists.txt @@ -0,0 +1,86 @@ +cmake_minimum_required(VERSION 3.5) + +project(tsl-robin-map VERSION 1.3.0 LANGUAGES CXX) + +include(GNUInstallDirs) + + +add_library(robin_map INTERFACE) +# Use tsl::robin_map as target, more consistent with other libraries conventions (Boost, Qt, ...) +add_library(tsl::robin_map ALIAS robin_map) + +target_include_directories(robin_map INTERFACE + "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>" + "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>") + +list(APPEND headers "${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_growth_policy.h" + "${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_hash.h" + "${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_map.h" + "${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_set.h") +target_sources(robin_map INTERFACE "$<BUILD_INTERFACE:${headers}>") + +if(MSVC) + target_sources(robin_map INTERFACE + "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/tsl-robin-map.natvis>" + "$<INSTALL_INTERFACE:${CMAKE_INSTALL_DATAROOTDIR}/tsl-robin-map.natvis>") +endif() + + + + +set(IS_SUBPROJECT TRUE) +if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) + set(IS_SUBPROJECT FALSE) +endif() + +# Installation +if(NOT IS_SUBPROJECT) + include(CMakePackageConfigHelpers) + + ## Install include directory and potential natvis file + install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/tsl" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") + + if(MSVC) + install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/tsl-robin-map.natvis" + DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}") + endif() + + + + ## Create and install tsl-robin-mapConfig.cmake + configure_package_config_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/tsl-robin-mapConfig.cmake.in" 
"${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfig.cmake" + INSTALL_DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map") + + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfig.cmake" + DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map") + + + ## Create local tsl-robin-mapTargets.cmake + export(TARGETS robin_map NAMESPACE tsl:: FILE "${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapTargets.cmake") + + ## Create and install global tsl-robin-mapTargets.cmake + install(TARGETS robin_map + EXPORT tsl-robin-mapTargets) + + install(EXPORT tsl-robin-mapTargets + NAMESPACE tsl:: + DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map") + + + + ## Create and install tsl-robin-mapConfigVersion.cmake + # tsl-robin-map is header-only and does not depend on the architecture. + # Remove CMAKE_SIZEOF_VOID_P from tsl-robin-mapConfigVersion.cmake so that a + # tsl-robin-mapConfig.cmake generated for a 64 bit target can be used for 32 bit + # targets and vice versa. + set(CMAKE_SIZEOF_VOID_P_BACKUP ${CMAKE_SIZEOF_VOID_P}) + unset(CMAKE_SIZEOF_VOID_P) + write_basic_package_version_file("${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfigVersion.cmake" + COMPATIBILITY SameMajorVersion) + set(CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P_BACKUP}) + + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map") +endif() diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/LICENSE b/RemoteInput/Thirdparty/nanobind/ext/robin_map/LICENSE new file mode 100644 index 0000000..e9c5ae9 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Thibaut Goetghebuer-Planchon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/README.md b/RemoteInput/Thirdparty/nanobind/ext/robin_map/README.md new file mode 100644 index 0000000..09fc544 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/README.md @@ -0,0 +1,521 @@ +[![CI](https://github.com/Tessil/robin-map/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/Tessil/robin-map/actions/workflows/ci.yml) + +## A C++ implementation of a fast hash map and hash set using robin hood hashing + +The robin-map library is a C++ implementation of a fast hash map and hash set using open-addressing and linear robin hood hashing with backward shift deletion to resolve collisions. + +Four classes are provided: `tsl::robin_map`, `tsl::robin_set`, `tsl::robin_pg_map` and `tsl::robin_pg_set`. The first two are faster and use a power of two growth policy, the last two use a prime growth policy instead and are able to cope better with a poor hash function. Use the prime version if there is a chance of repeating patterns in the lower bits of your hash (e.g. you are storing pointers with an identity hash function). See [GrowthPolicy](#growth-policy) for details. 
+ +A **benchmark** of `tsl::robin_map` against other hash maps may be found [here](https://tessil.github.io/2016/08/29/benchmark-hopscotch-map.html). This page also gives some advices on which hash table structure you should try for your use case (useful if you are a bit lost with the multiple hash tables implementations in the `tsl` namespace). + +### Key features + +- Header-only library, just add the [include](include/) directory to your include path and you are ready to go. If you use CMake, you can also use the `tsl::robin_map` exported target from the [CMakeLists.txt](CMakeLists.txt). +- Fast hash table, check the [benchmark](https://tessil.github.io/2016/08/29/benchmark-hopscotch-map.html) for some numbers. +- Support for move-only and non-default constructible key/value. +- Support for heterogeneous lookups allowing the usage of `find` with a type different than `Key` (e.g. if you have a map that uses `std::unique_ptr` as key, you can use a `foo*` or a `std::uintptr_t` as key parameter to `find` without constructing a `std::unique_ptr`, see [example](#heterogeneous-lookups)). +- No need to reserve any sentinel value from the keys. +- Possibility to store the hash value alongside the stored key-value for faster rehash and lookup if the hash or the key equal functions are expensive to compute. Note that hash may be stored even if not asked explicitly when the library can detect that it will have no impact on the size of the structure in memory due to alignment. See the [StoreHash](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) template parameter for details. +- If the hash is known before a lookup, it is possible to pass it as parameter to speed-up the lookup (see `precalculated_hash` parameter in [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#a35021b11aabb61820236692a54b3a0f8)). 
+- Support for efficient serialization and deserialization (see [example](#serialization) and the `serialize/deserialize` methods in the [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html) for details). +- The library can be used with exceptions disabled (through `-fno-exceptions` option on Clang and GCC, without an `/EH` option on MSVC or simply by defining `TSL_NO_EXCEPTIONS`). `std::terminate` is used in replacement of the `throw` instruction when exceptions are disabled. +- API closely similar to `std::unordered_map` and `std::unordered_set`. + +### Differences compared to `std::unordered_map` + +`tsl::robin_map` tries to have an interface similar to `std::unordered_map`, but some differences exist. +- The **strong exception guarantee only holds** if the following statement is true `std::is_nothrow_swappable::value && std::is_nothrow_move_constructible::value` (where `value_type` is `Key` for `tsl::robin_set` and `std::pair` for `tsl::robin_map`). Otherwise if an exception is thrown during the swap or the move, the structure may end up in a undefined state. Note that per the standard, a `value_type` with a noexcept copy constructor and no move constructor also satisfies this condition and will thus guarantee the strong exception guarantee for the structure (see [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) for details). +- The type `Key`, and also `T` in case of map, must be swappable. They must also be copy and/or move constructible. +- Iterator invalidation doesn't behave in the same way, any operation modifying the hash table invalidate them (see [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) for details). +- References and pointers to keys or values in the map are invalidated in the same way as iterators to these keys-values. 
+- For iterators of `tsl::robin_map`, `operator*()` and `operator->()` return a reference and a pointer to `const std::pair` instead of `std::pair` making the value `T` not modifiable. To modify the value you have to call the `value()` method of the iterator to get a mutable reference. Example: +```c++ +tsl::robin_map map = {{1, 1}, {2, 1}, {3, 1}}; +for(auto it = map.begin(); it != map.end(); ++it) { + //it->second = 2; // Illegal + it.value() = 2; // Ok +} +``` +- No support for some buckets related methods (like `bucket_size`, `bucket`, ...). + +These differences also apply between `std::unordered_set` and `tsl::robin_set`. + +Thread-safety guarantees are the same as `std::unordered_map/set` (i.e. possible to have multiple readers with no writer). + +### Growth policy + +The library supports multiple growth policies through the `GrowthPolicy` template parameter. Three policies are provided by the library but you can easily implement your own if needed. + +* **[tsl::rh::power_of_two_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1power__of__two__growth__policy.html)** Default policy used by `tsl::robin_map/set`. This policy keeps the size of the bucket array of the hash table to a power of two. This constraint allows the policy to avoid the usage of the slow modulo operation to map a hash to a bucket, instead of hash % 2n, it uses hash & (2n - 1) (see [fast modulo](https://en.wikipedia.org/wiki/Modulo_operation#Performance_issues)). Fast but this may cause a lot of collisions with a poor hash function as the modulo with a power of two only masks the most significant bits in the end. +* **[tsl::rh::prime_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1prime__growth__policy.html)** Default policy used by `tsl::robin_pg_map/set`. The policy keeps the size of the bucket array of the hash table to a prime number. 
When mapping a hash to a bucket, using a prime number as modulo will result in a better distribution of the hash across the buckets even with a poor hash function. To allow the compiler to optimize the modulo operation, the policy use a lookup table with constant primes modulos (see [API](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1prime__growth__policy.html#details) for details). Slower than `tsl::rh::power_of_two_growth_policy` but more secure. +* **[tsl::rh::mod_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1mod__growth__policy.html)** The policy grows the map by a customizable growth factor passed in parameter. It then just use the modulo operator to map a hash to a bucket. Slower but more flexible. + + +To implement your own policy, you have to implement the following interface. + +```c++ +struct custom_policy { + // Called on hash table construction and rehash, min_bucket_count_in_out is the minimum buckets + // that the hash table needs. The policy can change it to a higher number of buckets if needed + // and the hash table will use this value as bucket count. If 0 bucket is asked, then the value + // must stay at 0. + explicit custom_policy(std::size_t& min_bucket_count_in_out); + + // Return the bucket [0, bucket_count()) to which the hash belongs. + // If bucket_count() is 0, it must always return 0. + std::size_t bucket_for_hash(std::size_t hash) const noexcept; + + // Return the number of buckets that should be used on next growth + std::size_t next_bucket_count() const; + + // Maximum number of buckets supported by the policy + std::size_t max_bucket_count() const; + + // Reset the growth policy as if the policy was created with a bucket count of 0. + // After a clear, the policy must always return 0 when bucket_for_hash() is called. + void clear() noexcept; +} +``` + +### Installation + +To use robin-map, just add the [include](include/) directory to your include path. It is a **header-only** library. 
+ +If you use CMake, you can also use the `tsl::robin_map` exported target from the [CMakeLists.txt](CMakeLists.txt) with `target_link_libraries`. +```cmake +# Example where the robin-map project is stored in a third-party directory +add_subdirectory(third-party/robin-map) +target_link_libraries(your_target PRIVATE tsl::robin_map) +``` + +If the project has been installed through `make install`, you can also use `find_package(tsl-robin-map REQUIRED)` instead of `add_subdirectory`. + +The library is available in [vcpkg](https://github.com/Microsoft/vcpkg/tree/master/ports/robin-map) and [conan](https://conan.io/center/tsl-robin-map). It's also present in [Debian](https://packages.debian.org/buster/robin-map-dev), [Ubuntu](https://packages.ubuntu.com/disco/robin-map-dev) and [Fedora](https://apps.fedoraproject.org/packages/robin-map-devel) package repositories. + +The code should work with any C++17 standard-compliant compiler. + +To run the tests you will need the Boost Test library and CMake. + +```bash +git clone https://github.com/Tessil/robin-map.git +cd robin-map/tests +mkdir build +cd build +cmake .. +cmake --build . +./tsl_robin_map_tests +``` + +### Usage + +The API can be found [here](https://tessil.github.io/robin-map/). + +All methods are not documented yet, but they replicate the behavior of the ones in `std::unordered_map` and `std::unordered_set`, except if specified otherwise. + + +### Example + +```c++ +#include +#include +#include +#include +#include + +int main() { + tsl::robin_map map = {{"a", 1}, {"b", 2}}; + map["c"] = 3; + map["d"] = 4; + + map.insert({"e", 5}); + map.erase("b"); + + for(auto it = map.begin(); it != map.end(); ++it) { + //it->second += 2; // Not valid. + it.value() += 2; + } + + // {d, 6} {a, 3} {e, 7} {c, 5} + for(const auto& key_value : map) { + std::cout << "{" << key_value.first << ", " << key_value.second << "}" << std::endl; + } + + + if(map.find("a") != map.end()) { + std::cout << "Found \"a\"." 
<< std::endl; + } + + const std::size_t precalculated_hash = std::hash()("a"); + // If we already know the hash beforehand, we can pass it in parameter to speed-up lookups. + if(map.find("a", precalculated_hash) != map.end()) { + std::cout << "Found \"a\" with hash " << precalculated_hash << "." << std::endl; + } + + + /* + * Calculating the hash and comparing two std::string may be slow. + * We can store the hash of each std::string in the hash map to make + * the inserts and lookups faster by setting StoreHash to true. + */ + tsl::robin_map, + std::equal_to, + std::allocator>, + true> map2; + + map2["a"] = 1; + map2["b"] = 2; + + // {a, 1} {b, 2} + for(const auto& key_value : map2) { + std::cout << "{" << key_value.first << ", " << key_value.second << "}" << std::endl; + } + + + + + tsl::robin_set set; + set.insert({1, 9, 0}); + set.insert({2, -1, 9}); + + // {0} {1} {2} {9} {-1} + for(const auto& key : set) { + std::cout << "{" << key << "}" << std::endl; + } +} +``` + +#### Heterogeneous lookups + +Heterogeneous overloads allow the usage of other types than `Key` for lookup and erase operations as long as the used types are hashable and comparable to `Key`. + +To activate the heterogeneous overloads in `tsl::robin_map/set`, the qualified-id `KeyEqual::is_transparent` must be valid. It works the same way as for [`std::map::find`](http://en.cppreference.com/w/cpp/container/map/find). You can either use [`std::equal_to<>`](http://en.cppreference.com/w/cpp/utility/functional/equal_to_void) or define your own function object. + +Both `KeyEqual` and `Hash` will need to be able to deal with the different types. + +```c++ +#include +#include +#include +#include + + +struct employee { + employee(int id, std::string name) : m_id(id), m_name(std::move(name)) { + } + + // Either we include the comparators in the class and we use `std::equal_to<>`... 
+ friend bool operator==(const employee& empl, int empl_id) { + return empl.m_id == empl_id; + } + + friend bool operator==(int empl_id, const employee& empl) { + return empl_id == empl.m_id; + } + + friend bool operator==(const employee& empl1, const employee& empl2) { + return empl1.m_id == empl2.m_id; + } + + + int m_id; + std::string m_name; +}; + +// ... or we implement a separate class to compare employees. +struct equal_employee { + using is_transparent = void; + + bool operator()(const employee& empl, int empl_id) const { + return empl.m_id == empl_id; + } + + bool operator()(int empl_id, const employee& empl) const { + return empl_id == empl.m_id; + } + + bool operator()(const employee& empl1, const employee& empl2) const { + return empl1.m_id == empl2.m_id; + } +}; + +struct hash_employee { + std::size_t operator()(const employee& empl) const { + return std::hash()(empl.m_id); + } + + std::size_t operator()(int id) const { + return std::hash()(id); + } +}; + + +int main() { + // Use std::equal_to<> which will automatically deduce and forward the parameters + tsl::robin_map> map; + map.insert({employee(1, "John Doe"), 2001}); + map.insert({employee(2, "Jane Doe"), 2002}); + map.insert({employee(3, "John Smith"), 2003}); + + // John Smith 2003 + auto it = map.find(3); + if(it != map.end()) { + std::cout << it->first.m_name << " " << it->second << std::endl; + } + + map.erase(1); + + + + // Use a custom KeyEqual which has an is_transparent member type + tsl::robin_map map2; + map2.insert({employee(4, "Johnny Doe"), 2004}); + + // 2004 + std::cout << map2.at(4) << std::endl; +} +``` + +#### Serialization + +The library provides an efficient way to serialize and deserialize a map or a set so that it can be saved to a file or send through the network. +To do so, it requires the user to provide a function object for both serialization and deserialization. 
+ +```c++ +struct serializer { + // Must support the following types for U: std::int16_t, std::uint32_t, + // std::uint64_t, float and std::pair if a map is used or Key for + // a set. + template + void operator()(const U& value); +}; +``` + +```c++ +struct deserializer { + // Must support the following types for U: std::int16_t, std::uint32_t, + // std::uint64_t, float and std::pair if a map is used or Key for + // a set. + template + U operator()(); +}; +``` + +Note that the implementation leaves binary compatibility (endianness, float binary representation, size of int, ...) of the types it serializes/deserializes in the hands of the provided function objects if compatibility is required. + +More details regarding the `serialize` and `deserialize` methods can be found in the [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html). + +```c++ +#include +#include +#include +#include +#include + + +class serializer { +public: + serializer(const char* file_name) { + m_ostream.exceptions(m_ostream.badbit | m_ostream.failbit); + m_ostream.open(file_name, std::ios::binary); + } + + template::value>::type* = nullptr> + void operator()(const T& value) { + m_ostream.write(reinterpret_cast(&value), sizeof(T)); + } + + void operator()(const std::pair& value) { + (*this)(value.first); + (*this)(value.second); + } + +private: + std::ofstream m_ostream; +}; + +class deserializer { +public: + deserializer(const char* file_name) { + m_istream.exceptions(m_istream.badbit | m_istream.failbit | m_istream.eofbit); + m_istream.open(file_name, std::ios::binary); + } + + template + T operator()() { + T value; + deserialize(value); + + return value; + } + +private: + template::value>::type* = nullptr> + void deserialize(T& value) { + m_istream.read(reinterpret_cast(&value), sizeof(T)); + } + + void deserialize(std::pair& value) { + deserialize(value.first); + deserialize(value.second); + } + +private: + std::ifstream m_istream; +}; + + +int main() { + const tsl::robin_map 
map = {{1, -1}, {2, -2}, {3, -3}, {4, -4}}; + + + const char* file_name = "robin_map.data"; + { + serializer serial(file_name); + map.serialize(serial); + } + + { + deserializer dserial(file_name); + auto map_deserialized = tsl::robin_map::deserialize(dserial); + + assert(map == map_deserialized); + } + + { + deserializer dserial(file_name); + + /** + * If the serialized and deserialized map are hash compatibles (see conditions in API), + * setting the argument to true speed-up the deserialization process as we don't have + * to recalculate the hash of each key. We also know how much space each bucket needs. + */ + const bool hash_compatible = true; + auto map_deserialized = + tsl::robin_map::deserialize(dserial, hash_compatible); + + assert(map == map_deserialized); + } +} +``` + +##### Serialization with Boost Serialization and compression with zlib + +It is possible to use a serialization library to avoid the boilerplate. + +The following example uses Boost Serialization with the Boost zlib compression stream to reduce the size of the resulting serialized file. The example requires C++20 due to the usage of the template parameter list syntax in lambdas, but it can be adapted to less recent versions. 
+ +```c++ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace boost { namespace serialization { + template + void serialize(Archive & ar, tsl::robin_map& map, const unsigned int version) { + split_free(ar, map, version); + } + + template + void save(Archive & ar, const tsl::robin_map& map, const unsigned int /*version*/) { + auto serializer = [&ar](const auto& v) { ar & v; }; + map.serialize(serializer); + } + + template + void load(Archive & ar, tsl::robin_map& map, const unsigned int /*version*/) { + auto deserializer = [&ar]() { U u; ar & u; return u; }; + map = tsl::robin_map::deserialize(deserializer); + } +}} + + +int main() { + tsl::robin_map map = {{1, -1}, {2, -2}, {3, -3}, {4, -4}}; + + + const char* file_name = "robin_map.data"; + { + std::ofstream ofs; + ofs.exceptions(ofs.badbit | ofs.failbit); + ofs.open(file_name, std::ios::binary); + + boost::iostreams::filtering_ostream fo; + fo.push(boost::iostreams::zlib_compressor()); + fo.push(ofs); + + boost::archive::binary_oarchive oa(fo); + + oa << map; + } + + { + std::ifstream ifs; + ifs.exceptions(ifs.badbit | ifs.failbit | ifs.eofbit); + ifs.open(file_name, std::ios::binary); + + boost::iostreams::filtering_istream fi; + fi.push(boost::iostreams::zlib_decompressor()); + fi.push(ifs); + + boost::archive::binary_iarchive ia(fi); + + tsl::robin_map map_deserialized; + ia >> map_deserialized; + + assert(map == map_deserialized); + } +} +``` + +#### Performance pitfalls + +Two potential performance pitfalls involving `tsl::robin_map` and +`tsl::robin_set` are noteworthy: + +1. *Bad hashes*. Hash functions that produce many collisions can lead to the + following surprising behavior: when the number of collisions exceeds a + certain threshold, the hash table will automatically expand to fix the + problem. 
However, in degenerate cases, this expansion might have _no effect_ + on the collision count, causing a failure mode where a linear sequence of + insertion leads to exponential storage growth. + + This case has mainly been observed when using the default power-of-two + growth strategy with the default STL `std::hash` for arithmetic types + `T`, which is often an identity! See issue + [#39](https://github.com/Tessil/robin-map/issues/39) for an example. The + solution is simple: use a better hash function and/or `tsl::robin_pg_set` / + `tsl::robin_pg_map`. + +2. *Element erasure and low load factors*. `tsl::robin_map` and + `tsl::robin_set` mirror the STL map/set API, which exposes an `iterator + erase(iterator)` method that removes an element at a certain position, + returning a valid iterator that points to the next element. + + Constructing this new iterator object requires walking to the next nonempty + bucket in the table, which can be a expensive operation when the hash table + has a low *load factor* (i.e., when `capacity()` is much larger then + `size()`). + + The `erase()` method furthermore never shrinks & re-hashes the table as + this is not permitted by the specification of this function. A linear + sequence of random removals without intermediate insertions can then lead to + a degenerate case with quadratic runtime cost. + + In such cases, an iterator return value is often not even needed, so the + cost is entirely unnecessary. Both `tsl::robin_set` and `tsl::robin_map` + therefore provide an alternative erasure method `void erase_fast(iterator)` + that does not return an iterator to avoid having to find the next element. + +### License + +The code is licensed under the MIT license, see the [LICENSE file](LICENSE) for details. 
diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/cmake/tsl-robin-mapConfig.cmake.in b/RemoteInput/Thirdparty/nanobind/ext/robin_map/cmake/tsl-robin-mapConfig.cmake.in new file mode 100644 index 0000000..d2ce233 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/cmake/tsl-robin-mapConfig.cmake.in @@ -0,0 +1,9 @@ +# This module sets the following variables: +# * tsl-robin-map_FOUND - true if tsl-robin-map found on the system +# * tsl-robin-map_INCLUDE_DIRS - the directory containing tsl-robin-map headers +@PACKAGE_INIT@ + +if(NOT TARGET tsl::robin_map) + include("${CMAKE_CURRENT_LIST_DIR}/tsl-robin-mapTargets.cmake") + get_target_property(tsl-robin-map_INCLUDE_DIRS tsl::robin_map INTERFACE_INCLUDE_DIRECTORIES) +endif() diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_growth_policy.h b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_growth_policy.h new file mode 100644 index 0000000..2dc9c40 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_growth_policy.h @@ -0,0 +1,415 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_GROWTH_POLICY_H +#define TSL_ROBIN_GROWTH_POLICY_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// A change of the major version indicates an API and/or ABI break (change of +// in-memory layout of the data structure) +#define TSL_RH_VERSION_MAJOR 1 +// A change of the minor version indicates the addition of a feature without +// impact on the API/ABI +#define TSL_RH_VERSION_MINOR 3 +// A change of the patch version indicates a bugfix without additional +// functionality +#define TSL_RH_VERSION_PATCH 0 + +#ifdef TSL_DEBUG +#define tsl_rh_assert(expr) assert(expr) +#else +#define tsl_rh_assert(expr) (static_cast(0)) +#endif + +/** + * If exceptions are enabled, throw the exception passed in parameter, otherwise + * call std::terminate. + */ +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || \ + (defined(_MSC_VER) && defined(_CPPUNWIND))) && \ + !defined(TSL_NO_EXCEPTIONS) +#define TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg) +#else +#define TSL_RH_NO_EXCEPTIONS +#ifdef TSL_DEBUG +#include +#define TSL_RH_THROW_OR_TERMINATE(ex, msg) \ + do { \ + std::cerr << msg << std::endl; \ + std::terminate(); \ + } while (0) +#else +#define TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate() +#endif +#endif + +#if defined(__GNUC__) || defined(__clang__) +#define TSL_RH_LIKELY(exp) (__builtin_expect(!!(exp), true)) +#else +#define TSL_RH_LIKELY(exp) (exp) +#endif + +#define TSL_RH_UNUSED(x) static_cast(x) + +namespace tsl { +namespace rh { + +/** + * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a + * power of two. 
It allows the table to use a mask operation instead of a modulo + * operation to map a hash to a bucket. + * + * GrowthFactor must be a power of two >= 2. + */ +template +class power_of_two_growth_policy { + public: + /** + * Called on the hash table creation and on rehash. The number of buckets for + * the table is passed in parameter. This number is a minimum, the policy may + * update this value with a higher value if needed (but not lower). + * + * If 0 is given, min_bucket_count_in_out must still be 0 after the policy + * creation and bucket_for_hash must always return 0 in this case. + */ + explicit power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) { + if (min_bucket_count_in_out > max_bucket_count()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + if (min_bucket_count_in_out > 0) { + min_bucket_count_in_out = + round_up_to_power_of_two(min_bucket_count_in_out); + m_mask = min_bucket_count_in_out - 1; + } else { + m_mask = 0; + } + } + + /** + * Return the bucket [0, bucket_count()) to which the hash belongs. + * If bucket_count() is 0, it must always return 0. + */ + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash & m_mask; + } + + /** + * Return the number of buckets that should be used on next growth. + */ + std::size_t next_bucket_count() const { + if ((m_mask + 1) > max_bucket_count() / GrowthFactor) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + return (m_mask + 1) * GrowthFactor; + } + + /** + * Return the maximum number of buckets supported by the policy. + */ + std::size_t max_bucket_count() const { + // Largest power of two. + return (std::numeric_limits::max() / 2) + 1; + } + + /** + * Reset the growth policy as if it was created with a bucket count of 0. + * After a clear, the policy must always return 0 when bucket_for_hash is + * called. 
+ */ + void clear() noexcept { m_mask = 0; } + + private: + static std::size_t round_up_to_power_of_two(std::size_t value) { + if (is_power_of_two(value)) { + return value; + } + + if (value == 0) { + return 1; + } + + --value; + for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { + value |= value >> i; + } + + return value + 1; + } + + static constexpr bool is_power_of_two(std::size_t value) { + return value != 0 && (value & (value - 1)) == 0; + } + + protected: + static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, + "GrowthFactor must be a power of two >= 2."); + + std::size_t m_mask; +}; + +/** + * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo + * to map a hash to a bucket. Slower but it can be useful if you want a slower + * growth. + */ +template > +class mod_growth_policy { + public: + explicit mod_growth_policy(std::size_t& min_bucket_count_in_out) { + if (min_bucket_count_in_out > max_bucket_count()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + if (min_bucket_count_in_out > 0) { + m_mod = min_bucket_count_in_out; + } else { + m_mod = 1; + } + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash % m_mod; + } + + std::size_t next_bucket_count() const { + if (m_mod == max_bucket_count()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + const double next_bucket_count = + std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR); + if (!std::isnormal(next_bucket_count)) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + if (next_bucket_count > double(max_bucket_count())) { + return max_bucket_count(); + } else { + return std::size_t(next_bucket_count); + } + } + + std::size_t max_bucket_count() const { return MAX_BUCKET_COUNT; } + + void clear() noexcept { m_mod = 1; } + + private: + static constexpr 
double REHASH_SIZE_MULTIPLICATION_FACTOR = + 1.0 * GrowthFactor::num / GrowthFactor::den; + static const std::size_t MAX_BUCKET_COUNT = + std::size_t(double(std::numeric_limits::max() / + REHASH_SIZE_MULTIPLICATION_FACTOR)); + + static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, + "Growth factor should be >= 1.1."); + + std::size_t m_mod; +}; + +namespace detail { + +#if SIZE_MAX >= ULLONG_MAX +#define TSL_RH_NB_PRIMES 51 +#elif SIZE_MAX >= ULONG_MAX +#define TSL_RH_NB_PRIMES 40 +#else +#define TSL_RH_NB_PRIMES 23 +#endif + +inline constexpr std::array PRIMES = {{ + 1u, + 5u, + 17u, + 29u, + 37u, + 53u, + 67u, + 79u, + 97u, + 131u, + 193u, + 257u, + 389u, + 521u, + 769u, + 1031u, + 1543u, + 2053u, + 3079u, + 6151u, + 12289u, + 24593u, + 49157u, +#if SIZE_MAX >= ULONG_MAX + 98317ul, + 196613ul, + 393241ul, + 786433ul, + 1572869ul, + 3145739ul, + 6291469ul, + 12582917ul, + 25165843ul, + 50331653ul, + 100663319ul, + 201326611ul, + 402653189ul, + 805306457ul, + 1610612741ul, + 3221225473ul, + 4294967291ul, +#endif +#if SIZE_MAX >= ULLONG_MAX + 6442450939ull, + 12884901893ull, + 25769803751ull, + 51539607551ull, + 103079215111ull, + 206158430209ull, + 412316860441ull, + 824633720831ull, + 1649267441651ull, + 3298534883309ull, + 6597069766657ull, +#endif +}}; + +template +static constexpr std::size_t mod(std::size_t hash) { + return hash % PRIMES[IPrime]; +} + +// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for +// faster modulo as the compiler can optimize the modulo code better with a +// constant known at the compilation. 
+inline constexpr std::array + MOD_PRIME = {{ + &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, + &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, &mod<11>, + &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>, + &mod<18>, &mod<19>, &mod<20>, &mod<21>, &mod<22>, +#if SIZE_MAX >= ULONG_MAX + &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>, + &mod<29>, &mod<30>, &mod<31>, &mod<32>, &mod<33>, &mod<34>, + &mod<35>, &mod<36>, &mod<37>, &mod<38>, &mod<39>, +#endif +#if SIZE_MAX >= ULLONG_MAX + &mod<40>, &mod<41>, &mod<42>, &mod<43>, &mod<44>, &mod<45>, + &mod<46>, &mod<47>, &mod<48>, &mod<49>, &mod<50>, +#endif + }}; + +} // namespace detail + +/** + * Grow the hash table by using prime numbers as bucket count. Slower than + * tsl::rh::power_of_two_growth_policy in general but will probably distribute + * the values around better in the buckets with a poor hash function. + * + * To allow the compiler to optimize the modulo operation, a lookup table is + * used with constant primes numbers. + * + * With a switch the code would look like: + * \code + * switch(iprime) { // iprime is the current prime of the hash table + * case 0: hash % 5ul; + * break; + * case 1: hash % 17ul; + * break; + * case 2: hash % 29ul; + * break; + * ... + * } + * \endcode + * + * Due to the constant variable in the modulo the compiler is able to optimize + * the operation by a series of multiplications, substractions and shifts. + * + * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) + * * 5' in a 64 bits environment. 
+ */ +class prime_growth_policy { + public: + explicit prime_growth_policy(std::size_t& min_bucket_count_in_out) { + auto it_prime = std::lower_bound( + detail::PRIMES.begin(), detail::PRIMES.end(), min_bucket_count_in_out); + if (it_prime == detail::PRIMES.end()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + m_iprime = static_cast( + std::distance(detail::PRIMES.begin(), it_prime)); + if (min_bucket_count_in_out > 0) { + min_bucket_count_in_out = *it_prime; + } else { + min_bucket_count_in_out = 0; + } + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return detail::MOD_PRIME[m_iprime](hash); + } + + std::size_t next_bucket_count() const { + if (m_iprime + 1 >= detail::PRIMES.size()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The hash table exceeds its maximum size."); + } + + return detail::PRIMES[m_iprime + 1]; + } + + std::size_t max_bucket_count() const { return detail::PRIMES.back(); } + + void clear() noexcept { m_iprime = 0; } + + private: + unsigned int m_iprime; + + static_assert(std::numeric_limits::max() >= + detail::PRIMES.size(), + "The type of m_iprime is not big enough."); +}; + +} // namespace rh +} // namespace tsl + +#endif diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_hash.h b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_hash.h new file mode 100644 index 0000000..78043e4 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_hash.h @@ -0,0 +1,1586 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to 
permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_HASH_H +#define TSL_ROBIN_HASH_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "robin_growth_policy.h" + +namespace tsl { + +namespace detail_robin_hash { + +template +struct make_void { + using type = void; +}; + +template +struct has_is_transparent : std::false_type {}; + +template +struct has_is_transparent::type> + : std::true_type {}; + +template +struct is_power_of_two_policy : std::false_type {}; + +template +struct is_power_of_two_policy> + : std::true_type {}; + +template +static T numeric_cast(U value, + const char* error_message = "numeric_cast() failed.") { + T ret = static_cast(value); + if (static_cast(ret) != value) { + TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message); + } + + const bool is_same_signedness = + (std::is_unsigned::value && std::is_unsigned::value) || + (std::is_signed::value && std::is_signed::value); + if (!is_same_signedness && (ret < T{}) != (value < U{})) { + TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message); + } + + TSL_RH_UNUSED(error_message); + + return ret; +} + +template +static T deserialize_value(Deserializer& deserializer) { + // MSVC < 2017 
is not conformant, circumvent the problem by removing the + // template keyword +#if defined(_MSC_VER) && _MSC_VER < 1910 + return deserializer.Deserializer::operator()(); +#else + return deserializer.Deserializer::template operator()(); +#endif +} + +/** + * Fixed size type used to represent size_type values on serialization. Need to + * be big enough to represent a std::size_t on 32 and 64 bits platforms, and + * must be the same size on both platforms. + */ +using slz_size_type = std::uint64_t; +static_assert(std::numeric_limits::max() >= + std::numeric_limits::max(), + "slz_size_type must be >= std::size_t"); + +using truncated_hash_type = std::uint32_t; + +/** + * Helper class that stores a truncated hash if StoreHash is true and nothing + * otherwise. + */ +template +class bucket_entry_hash { + public: + bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { return true; } + + truncated_hash_type truncated_hash() const noexcept { return 0; } + + protected: + void set_hash(truncated_hash_type /*hash*/) noexcept {} +}; + +template <> +class bucket_entry_hash { + public: + bool bucket_hash_equal(std::size_t hash) const noexcept { + return m_hash == truncated_hash_type(hash); + } + + truncated_hash_type truncated_hash() const noexcept { return m_hash; } + + protected: + void set_hash(truncated_hash_type hash) noexcept { + m_hash = truncated_hash_type(hash); + } + + private: + truncated_hash_type m_hash; +}; + +/** + * Each bucket entry has: + * - A value of type `ValueType`. + * - An integer to store how far the value of the bucket, if any, is from its + * ideal bucket (ex: if the current bucket 5 has the value 'foo' and + * `hash('foo') % nb_buckets` == 3, `dist_from_ideal_bucket()` will return 2 as + * the current value of the bucket is two buckets away from its ideal bucket) If + * there is no value in the bucket (i.e. `empty()` is true) + * `dist_from_ideal_bucket()` will be < 0. 
+ * - A marker which tells us if the bucket is the last bucket of the bucket + * array (useful for the iterator of the hash table). + * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also + * stored in the bucket. If the size of the hash is more than 32 bits, it is + * truncated. We don't store the full hash as storing the hash is a potential + * opportunity to use the unused space due to the alignment of the bucket_entry + * structure. We can thus potentially store the hash without any extra space + * (which would not be possible with 64 bits of the hash). + */ +template +class bucket_entry : public bucket_entry_hash { + using bucket_hash = bucket_entry_hash; + + public: + using value_type = ValueType; + using distance_type = std::int16_t; + + bucket_entry() noexcept + : bucket_hash(), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(false) { + tsl_rh_assert(empty()); + } + + bucket_entry(bool last_bucket) noexcept + : bucket_hash(), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(last_bucket) { + tsl_rh_assert(empty()); + } + + bucket_entry(const bucket_entry& other) noexcept( + std::is_nothrow_copy_constructible::value) + : bucket_hash(other), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(other.m_last_bucket) { + if (!other.empty()) { + ::new (static_cast(std::addressof(m_value))) + value_type(other.value()); + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + } + tsl_rh_assert(empty() == other.empty()); + } + + /** + * Never really used, but still necessary as we must call resize on an empty + * `std::vector`. and we need to support move-only types. See + * robin_hash constructor for details. 
+ */ + bucket_entry(bucket_entry&& other) noexcept( + std::is_nothrow_move_constructible::value) + : bucket_hash(std::move(other)), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(other.m_last_bucket) { + if (!other.empty()) { + ::new (static_cast(std::addressof(m_value))) + value_type(std::move(other.value())); + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + } + tsl_rh_assert(empty() == other.empty()); + } + + bucket_entry& operator=(const bucket_entry& other) noexcept( + std::is_nothrow_copy_constructible::value) { + if (this != &other) { + clear(); + + bucket_hash::operator=(other); + if (!other.empty()) { + ::new (static_cast(std::addressof(m_value))) + value_type(other.value()); + } + + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + m_last_bucket = other.m_last_bucket; + } + + return *this; + } + + bucket_entry& operator=(bucket_entry&&) = delete; + + ~bucket_entry() noexcept { clear(); } + + void clear() noexcept { + if (!empty()) { + destroy_value(); + m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; + } + } + + bool empty() const noexcept { + return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; + } + + value_type& value() noexcept { + tsl_rh_assert(!empty()); + return *std::launder( + reinterpret_cast(std::addressof(m_value))); + } + + const value_type& value() const noexcept { + tsl_rh_assert(!empty()); + return *std::launder( + reinterpret_cast(std::addressof(m_value))); + } + + distance_type dist_from_ideal_bucket() const noexcept { + return m_dist_from_ideal_bucket; + } + + bool last_bucket() const noexcept { return m_last_bucket; } + + void set_as_last_bucket() noexcept { m_last_bucket = true; } + + template + void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket, + truncated_hash_type hash, + Args&&... 
value_type_args) { + tsl_rh_assert(dist_from_ideal_bucket >= 0); + tsl_rh_assert(empty()); + + ::new (static_cast(std::addressof(m_value))) + value_type(std::forward(value_type_args)...); + this->set_hash(hash); + m_dist_from_ideal_bucket = dist_from_ideal_bucket; + + tsl_rh_assert(!empty()); + } + + void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket, + truncated_hash_type& hash, value_type& value) { + tsl_rh_assert(!empty()); + tsl_rh_assert(dist_from_ideal_bucket > m_dist_from_ideal_bucket); + + using std::swap; + swap(value, this->value()); + swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket); + + if (StoreHash) { + const truncated_hash_type tmp_hash = this->truncated_hash(); + this->set_hash(hash); + hash = tmp_hash; + } else { + // Avoid warning of unused variable if StoreHash is false + TSL_RH_UNUSED(hash); + } + } + + static truncated_hash_type truncate_hash(std::size_t hash) noexcept { + return truncated_hash_type(hash); + } + + private: + void destroy_value() noexcept { + tsl_rh_assert(!empty()); + value().~value_type(); + } + + public: + static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1; + static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 8192; + static_assert(DIST_FROM_IDEAL_BUCKET_LIMIT <= + std::numeric_limits::max() - 1, + "DIST_FROM_IDEAL_BUCKET_LIMIT must be <= " + "std::numeric_limits::max() - 1."); + + private: + distance_type m_dist_from_ideal_bucket; + bool m_last_bucket; + alignas(value_type) unsigned char m_value[sizeof(value_type)]; +}; + +/** + * Internal common class used by `robin_map` and `robin_set`. + * + * ValueType is what will be stored by `robin_hash` (usually `std::pair` + * for map and `Key` for set). + * + * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in + * parameter and returns a reference to the key. + * + * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in + * parameter and returns a reference to the value. 
`ValueSelect` should be void + * if there is no value (in a set for example). + * + * The strong exception guarantee only holds if the expression + * `std::is_nothrow_swappable::value && + * std::is_nothrow_move_constructible::value` is true. + * + * Behaviour is undefined if the destructor of `ValueType` throws. + */ +template +class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { + private: + template + using has_mapped_type = + typename std::integral_constant::value>; + + static_assert( + noexcept(std::declval().bucket_for_hash(std::size_t(0))), + "GrowthPolicy::bucket_for_hash must be noexcept."); + static_assert(noexcept(std::declval().clear()), + "GrowthPolicy::clear must be noexcept."); + + public: + template + class robin_iterator; + + using key_type = typename KeySelect::key_type; + using value_type = ValueType; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using hasher = Hash; + using key_equal = KeyEqual; + using allocator_type = Allocator; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + using iterator = robin_iterator; + using const_iterator = robin_iterator; + + private: + /** + * Either store the hash because we are asked by the `StoreHash` template + * parameter or store the hash because it doesn't cost us anything in size and + * can be used to speed up rehash. + */ + static constexpr bool STORE_HASH = + StoreHash || + ((sizeof(tsl::detail_robin_hash::bucket_entry) == + sizeof(tsl::detail_robin_hash::bucket_entry)) && + (sizeof(std::size_t) == sizeof(truncated_hash_type) || + is_power_of_two_policy::value) && + // Don't store the hash for primitive types with default hash. + (!std::is_arithmetic::value || + !std::is_same>::value)); + + /** + * Only use the stored hash on lookup if we are explicitly asked. We are not + * sure how slow the KeyEqual operation is. 
An extra comparison may slow + * things down with a fast KeyEqual. + */ + static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash; + + /** + * We can only use the hash on rehash if the size of the hash type is the same + * as the stored one or if we use a power of two modulo. In the case of the + * power of two modulo, we just mask the least significant bytes, we just have + * to check that the truncated_hash_type didn't truncated more bytes. + */ + static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) { + if (STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) { + TSL_RH_UNUSED(bucket_count); + return true; + } else if (STORE_HASH && is_power_of_two_policy::value) { + return bucket_count == 0 || + (bucket_count - 1) <= + std::numeric_limits::max(); + } else { + TSL_RH_UNUSED(bucket_count); + return false; + } + } + + using bucket_entry = + tsl::detail_robin_hash::bucket_entry; + using distance_type = typename bucket_entry::distance_type; + + using buckets_allocator = typename std::allocator_traits< + allocator_type>::template rebind_alloc; + using buckets_container_type = std::vector; + + public: + /** + * The 'operator*()' and 'operator->()' methods return a const reference and + * const pointer respectively to the stored value type. + * + * In case of a map, to get a mutable reference to the value associated to a + * key (the '.second' in the stored pair), you have to call 'value()'. + * + * The main reason for this is that if we returned a `std::pair&` + * instead of a `const std::pair&`, the user may modify the key which + * will put the map in a undefined state. 
+ */ + template + class robin_iterator { + friend class robin_hash; + + private: + using bucket_entry_ptr = + typename std::conditional::type; + + robin_iterator(bucket_entry_ptr bucket) noexcept : m_bucket(bucket) {} + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const typename robin_hash::value_type; + using difference_type = std::ptrdiff_t; + using reference = value_type&; + using pointer = value_type*; + + robin_iterator() noexcept {} + + // Copy constructor from iterator to const_iterator. + template ::type* = nullptr> + robin_iterator(const robin_iterator& other) noexcept + : m_bucket(other.m_bucket) {} + + robin_iterator(const robin_iterator& other) = default; + robin_iterator(robin_iterator&& other) = default; + robin_iterator& operator=(const robin_iterator& other) = default; + robin_iterator& operator=(robin_iterator&& other) = default; + + const typename robin_hash::key_type& key() const { + return KeySelect()(m_bucket->value()); + } + + template ::value && + IsConst>::type* = nullptr> + const typename U::value_type& value() const { + return U()(m_bucket->value()); + } + + template ::value && + !IsConst>::type* = nullptr> + typename U::value_type& value() const { + return U()(m_bucket->value()); + } + + reference operator*() const { return m_bucket->value(); } + + pointer operator->() const { return std::addressof(m_bucket->value()); } + + robin_iterator& operator++() { + while (true) { + if (m_bucket->last_bucket()) { + ++m_bucket; + return *this; + } + + ++m_bucket; + if (!m_bucket->empty()) { + return *this; + } + } + } + + robin_iterator operator++(int) { + robin_iterator tmp(*this); + ++*this; + + return tmp; + } + + friend bool operator==(const robin_iterator& lhs, + const robin_iterator& rhs) { + return lhs.m_bucket == rhs.m_bucket; + } + + friend bool operator!=(const robin_iterator& lhs, + const robin_iterator& rhs) { + return !(lhs == rhs); + } + + private: + bucket_entry_ptr m_bucket; + }; + + public: + 
robin_hash(size_type bucket_count, const Hash& hash, const KeyEqual& equal, + const Allocator& alloc, + float min_load_factor = DEFAULT_MIN_LOAD_FACTOR, + float max_load_factor = DEFAULT_MAX_LOAD_FACTOR) + : Hash(hash), + KeyEqual(equal), + GrowthPolicy(bucket_count), + m_buckets_data(bucket_count, alloc), + m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr() + : m_buckets_data.data()), + m_bucket_count(bucket_count), + m_nb_elements(0), + m_grow_on_next_insert(false), + m_try_shrink_on_next_insert(false) { + if (bucket_count > max_bucket_count()) { + TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The map exceeds its maximum bucket count."); + } + + if (m_bucket_count > 0) { + tsl_rh_assert(!m_buckets_data.empty()); + m_buckets_data.back().set_as_last_bucket(); + } + + this->min_load_factor(min_load_factor); + this->max_load_factor(max_load_factor); + } + + robin_hash(const robin_hash& other) + : Hash(other), + KeyEqual(other), + GrowthPolicy(other), + m_buckets_data(other.m_buckets_data), + m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr() + : m_buckets_data.data()), + m_bucket_count(other.m_bucket_count), + m_nb_elements(other.m_nb_elements), + m_load_threshold(other.m_load_threshold), + m_min_load_factor(other.m_min_load_factor), + m_max_load_factor(other.m_max_load_factor), + m_grow_on_next_insert(other.m_grow_on_next_insert), + m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) {} + + robin_hash(robin_hash&& other) noexcept( + std::is_nothrow_move_constructible< + Hash>::value&& std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_constructible::value) + : Hash(std::move(static_cast(other))), + KeyEqual(std::move(static_cast(other))), + GrowthPolicy(std::move(static_cast(other))), + m_buckets_data(std::move(other.m_buckets_data)), + m_buckets(m_buckets_data.empty() ? 
static_empty_bucket_ptr() + : m_buckets_data.data()), + m_bucket_count(other.m_bucket_count), + m_nb_elements(other.m_nb_elements), + m_load_threshold(other.m_load_threshold), + m_min_load_factor(other.m_min_load_factor), + m_max_load_factor(other.m_max_load_factor), + m_grow_on_next_insert(other.m_grow_on_next_insert), + m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) { + other.clear_and_shrink(); + } + + robin_hash& operator=(const robin_hash& other) { + if (&other != this) { + Hash::operator=(other); + KeyEqual::operator=(other); + GrowthPolicy::operator=(other); + + m_buckets_data = other.m_buckets_data; + m_buckets = m_buckets_data.empty() ? static_empty_bucket_ptr() + : m_buckets_data.data(); + m_bucket_count = other.m_bucket_count; + m_nb_elements = other.m_nb_elements; + + m_load_threshold = other.m_load_threshold; + m_min_load_factor = other.m_min_load_factor; + m_max_load_factor = other.m_max_load_factor; + + m_grow_on_next_insert = other.m_grow_on_next_insert; + m_try_shrink_on_next_insert = other.m_try_shrink_on_next_insert; + } + + return *this; + } + + robin_hash& operator=(robin_hash&& other) { + other.swap(*this); + other.clear_and_shrink(); + + return *this; + } + + allocator_type get_allocator() const { + return m_buckets_data.get_allocator(); + } + + /* + * Iterators + */ + iterator begin() noexcept { + std::size_t i = 0; + while (i < m_bucket_count && m_buckets[i].empty()) { + i++; + } + + return iterator(m_buckets + i); + } + + const_iterator begin() const noexcept { return cbegin(); } + + const_iterator cbegin() const noexcept { + std::size_t i = 0; + while (i < m_bucket_count && m_buckets[i].empty()) { + i++; + } + + return const_iterator(m_buckets + i); + } + + iterator end() noexcept { return iterator(m_buckets + m_bucket_count); } + + const_iterator end() const noexcept { return cend(); } + + const_iterator cend() const noexcept { + return const_iterator(m_buckets + m_bucket_count); + } + + /* + * Capacity + */ + bool 
empty() const noexcept { return m_nb_elements == 0; } + + size_type size() const noexcept { return m_nb_elements; } + + size_type max_size() const noexcept { return m_buckets_data.max_size(); } + + /* + * Modifiers + */ + void clear() noexcept { + if (m_min_load_factor > 0.0f) { + clear_and_shrink(); + } else { + for (auto& bucket : m_buckets_data) { + bucket.clear(); + } + + m_nb_elements = 0; + m_grow_on_next_insert = false; + } + } + + template + std::pair insert(P&& value) { + return insert_impl(KeySelect()(value), std::forward

(value)); + } + + template + iterator insert_hint(const_iterator hint, P&& value) { + if (hint != cend() && + compare_keys(KeySelect()(*hint), KeySelect()(value))) { + return mutable_iterator(hint); + } + + return insert(std::forward

(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + if (std::is_base_of< + std::forward_iterator_tag, + typename std::iterator_traits::iterator_category>::value) { + const auto nb_elements_insert = std::distance(first, last); + const size_type nb_free_buckets = m_load_threshold - size(); + tsl_rh_assert(m_load_threshold >= size()); + + if (nb_elements_insert > 0 && + nb_free_buckets < size_type(nb_elements_insert)) { + reserve(size() + size_type(nb_elements_insert)); + } + } + + for (; first != last; ++first) { + insert(*first); + } + } + + template + std::pair insert_or_assign(K&& key, M&& obj) { + auto it = try_emplace(std::forward(key), std::forward(obj)); + if (!it.second) { + it.first.value() = std::forward(obj); + } + + return it; + } + + template + iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) { + if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { + auto it = mutable_iterator(hint); + it.value() = std::forward(obj); + + return it; + } + + return insert_or_assign(std::forward(key), std::forward(obj)).first; + } + + template + std::pair emplace(Args&&... args) { + return insert(value_type(std::forward(args)...)); + } + + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return insert_hint(hint, value_type(std::forward(args)...)); + } + + template + std::pair try_emplace(K&& key, Args&&... args) { + return insert_impl(key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + } + + template + iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... 
args) { + if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { + return mutable_iterator(hint); + } + + return try_emplace(std::forward(key), std::forward(args)...).first; + } + + void erase_fast(iterator pos) { + erase_from_bucket(pos); + } + + /** + * Here to avoid `template size_type erase(const K& key)` being used + * when we use an `iterator` instead of a `const_iterator`. + */ + iterator erase(iterator pos) { + erase_from_bucket(pos); + + /** + * Erase bucket used a backward shift after clearing the bucket. + * Check if there is a new value in the bucket, if not get the next + * non-empty. + */ + if (pos.m_bucket->empty()) { + ++pos; + } + + return pos; + } + + iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); } + + iterator erase(const_iterator first, const_iterator last) { + if (first == last) { + return mutable_iterator(first); + } + + auto first_mutable = mutable_iterator(first); + auto last_mutable = mutable_iterator(last); + for (auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) { + if (!it->empty()) { + it->clear(); + m_nb_elements--; + } + } + + if (last_mutable == end()) { + m_try_shrink_on_next_insert = true; + return end(); + } + + /* + * Backward shift on the values which come after the deleted values. + * We try to move the values closer to their ideal bucket. 
+ */ + std::size_t icloser_bucket = + static_cast(first_mutable.m_bucket - m_buckets); + std::size_t ito_move_closer_value = + static_cast(last_mutable.m_bucket - m_buckets); + tsl_rh_assert(ito_move_closer_value > icloser_bucket); + + const std::size_t ireturn_bucket = + ito_move_closer_value - + std::min( + ito_move_closer_value - icloser_bucket, + std::size_t( + m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); + + while (ito_move_closer_value < m_bucket_count && + m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) { + icloser_bucket = + ito_move_closer_value - + std::min( + ito_move_closer_value - icloser_bucket, + std::size_t( + m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); + + tsl_rh_assert(m_buckets[icloser_bucket].empty()); + const distance_type new_distance = distance_type( + m_buckets[ito_move_closer_value].dist_from_ideal_bucket() - + (ito_move_closer_value - icloser_bucket)); + m_buckets[icloser_bucket].set_value_of_empty_bucket( + new_distance, m_buckets[ito_move_closer_value].truncated_hash(), + std::move(m_buckets[ito_move_closer_value].value())); + m_buckets[ito_move_closer_value].clear(); + + ++icloser_bucket; + ++ito_move_closer_value; + } + + m_try_shrink_on_next_insert = true; + + return iterator(m_buckets + ireturn_bucket); + } + + template + size_type erase(const K& key) { + return erase(key, hash_key(key)); + } + + template + size_type erase(const K& key, std::size_t hash) { + auto it = find(key, hash); + if (it != end()) { + erase_from_bucket(it); + return 1; + } else { + return 0; + } + } + + void swap(robin_hash& other) { + using std::swap; + + swap(static_cast(*this), static_cast(other)); + swap(static_cast(*this), static_cast(other)); + swap(static_cast(*this), static_cast(other)); + swap(m_buckets_data, other.m_buckets_data); + swap(m_buckets, other.m_buckets); + swap(m_bucket_count, other.m_bucket_count); + swap(m_nb_elements, other.m_nb_elements); + swap(m_load_threshold, other.m_load_threshold); 
+ swap(m_min_load_factor, other.m_min_load_factor); + swap(m_max_load_factor, other.m_max_load_factor); + swap(m_grow_on_next_insert, other.m_grow_on_next_insert); + swap(m_try_shrink_on_next_insert, other.m_try_shrink_on_next_insert); + } + + /* + * Lookup + */ + template ::value>::type* = nullptr> + typename U::value_type& at(const K& key) { + return at(key, hash_key(key)); + } + + template ::value>::type* = nullptr> + typename U::value_type& at(const K& key, std::size_t hash) { + return const_cast( + static_cast(this)->at(key, hash)); + } + + template ::value>::type* = nullptr> + const typename U::value_type& at(const K& key) const { + return at(key, hash_key(key)); + } + + template ::value>::type* = nullptr> + const typename U::value_type& at(const K& key, std::size_t hash) const { + auto it = find(key, hash); + if (it != cend()) { + return it.value(); + } else { + TSL_RH_THROW_OR_TERMINATE(std::out_of_range, "Couldn't find key."); + } + } + + template ::value>::type* = nullptr> + typename U::value_type& operator[](K&& key) { + return try_emplace(std::forward(key)).first.value(); + } + + template + size_type count(const K& key) const { + return count(key, hash_key(key)); + } + + template + size_type count(const K& key, std::size_t hash) const { + if (find(key, hash) != cend()) { + return 1; + } else { + return 0; + } + } + + template + iterator find(const K& key) { + return find_impl(key, hash_key(key)); + } + + template + iterator find(const K& key, std::size_t hash) { + return find_impl(key, hash); + } + + template + const_iterator find(const K& key) const { + return find_impl(key, hash_key(key)); + } + + template + const_iterator find(const K& key, std::size_t hash) const { + return find_impl(key, hash); + } + + template + bool contains(const K& key) const { + return contains(key, hash_key(key)); + } + + template + bool contains(const K& key, std::size_t hash) const { + return count(key, hash) != 0; + } + + template + std::pair equal_range(const K& key) { + 
return equal_range(key, hash_key(key)); + } + + template + std::pair equal_range(const K& key, std::size_t hash) { + iterator it = find(key, hash); + return std::make_pair(it, (it == end()) ? it : std::next(it)); + } + + template + std::pair equal_range(const K& key) const { + return equal_range(key, hash_key(key)); + } + + template + std::pair equal_range( + const K& key, std::size_t hash) const { + const_iterator it = find(key, hash); + return std::make_pair(it, (it == cend()) ? it : std::next(it)); + } + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_bucket_count; } + + size_type max_bucket_count() const { + return std::min(GrowthPolicy::max_bucket_count(), + m_buckets_data.max_size()); + } + + /* + * Hash policy + */ + float load_factor() const { + if (bucket_count() == 0) { + return 0; + } + + return float(m_nb_elements) / float(bucket_count()); + } + + float min_load_factor() const { return m_min_load_factor; } + + float max_load_factor() const { return m_max_load_factor; } + + void min_load_factor(float ml) { + m_min_load_factor = std::clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR), + float(MAXIMUM_MIN_LOAD_FACTOR)); + } + + void max_load_factor(float ml) { + m_max_load_factor = std::clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR), + float(MAXIMUM_MAX_LOAD_FACTOR)); + m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor); + tsl_rh_assert(bucket_count() == 0 || m_load_threshold < bucket_count()); + } + + void rehash(size_type count_) { + count_ = std::max(count_, + size_type(std::ceil(float(size()) / max_load_factor()))); + rehash_impl(count_); + } + + void reserve(size_type count_) { + rehash(size_type(std::ceil(float(count_) / max_load_factor()))); + } + + /* + * Observers + */ + hasher hash_function() const { return static_cast(*this); } + + key_equal key_eq() const { return static_cast(*this); } + + /* + * Other + */ + iterator mutable_iterator(const_iterator pos) { + return iterator(const_cast(pos.m_bucket)); + } + + 
template + void serialize(Serializer& serializer) const { + serialize_impl(serializer); + } + + template + void deserialize(Deserializer& deserializer, bool hash_compatible) { + deserialize_impl(deserializer, hash_compatible); + } + + private: + template + std::size_t hash_key(const K& key) const { + return Hash::operator()(key); + } + + template + bool compare_keys(const K1& key1, const K2& key2) const { + return KeyEqual::operator()(key1, key2); + } + + std::size_t bucket_for_hash(std::size_t hash) const { + const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash); + tsl_rh_assert(bucket < m_bucket_count || + (bucket == 0 && m_bucket_count == 0)); + + return bucket; + } + + template ::value>::type* = + nullptr> + std::size_t next_bucket(std::size_t index) const noexcept { + tsl_rh_assert(index < bucket_count()); + + return (index + 1) & this->m_mask; + } + + template ::value>::type* = + nullptr> + std::size_t next_bucket(std::size_t index) const noexcept { + tsl_rh_assert(index < bucket_count()); + + index++; + return (index != bucket_count()) ? 
index : 0; + } + + template + iterator find_impl(const K& key, std::size_t hash) { + return mutable_iterator( + static_cast(this)->find(key, hash)); + } + + template + const_iterator find_impl(const K& key, std::size_t hash) const { + std::size_t ibucket = bucket_for_hash(hash); + distance_type dist_from_ideal_bucket = 0; + + while (dist_from_ideal_bucket <= + m_buckets[ibucket].dist_from_ideal_bucket()) { + if (TSL_RH_LIKELY( + (!USE_STORED_HASH_ON_LOOKUP || + m_buckets[ibucket].bucket_hash_equal(hash)) && + compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) { + return const_iterator(m_buckets + ibucket); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + return cend(); + } + + void erase_from_bucket(iterator pos) { + pos.m_bucket->clear(); + m_nb_elements--; + + /** + * Backward shift, swap the empty bucket, previous_ibucket, with the values + * on its right, ibucket, until we cross another empty bucket or if the + * other bucket has a distance_from_ideal_bucket == 0. + * + * We try to move the values closer to their ideal bucket. + */ + std::size_t previous_ibucket = + static_cast(pos.m_bucket - m_buckets); + std::size_t ibucket = next_bucket(previous_ibucket); + + while (m_buckets[ibucket].dist_from_ideal_bucket() > 0) { + tsl_rh_assert(m_buckets[previous_ibucket].empty()); + + const distance_type new_distance = + distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1); + m_buckets[previous_ibucket].set_value_of_empty_bucket( + new_distance, m_buckets[ibucket].truncated_hash(), + std::move(m_buckets[ibucket].value())); + m_buckets[ibucket].clear(); + + previous_ibucket = ibucket; + ibucket = next_bucket(ibucket); + } + m_try_shrink_on_next_insert = true; + } + + template + std::pair insert_impl(const K& key, + Args&&... 
value_type_args) { + const std::size_t hash = hash_key(key); + + std::size_t ibucket = bucket_for_hash(hash); + distance_type dist_from_ideal_bucket = 0; + + while (dist_from_ideal_bucket <= + m_buckets[ibucket].dist_from_ideal_bucket()) { + if ((!USE_STORED_HASH_ON_LOOKUP || + m_buckets[ibucket].bucket_hash_equal(hash)) && + compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) { + return std::make_pair(iterator(m_buckets + ibucket), false); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + while (rehash_on_extreme_load(dist_from_ideal_bucket)) { + ibucket = bucket_for_hash(hash); + dist_from_ideal_bucket = 0; + + while (dist_from_ideal_bucket <= + m_buckets[ibucket].dist_from_ideal_bucket()) { + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + } + + if (m_buckets[ibucket].empty()) { + m_buckets[ibucket].set_value_of_empty_bucket( + dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), + std::forward(value_type_args)...); + } else { + insert_value(ibucket, dist_from_ideal_bucket, + bucket_entry::truncate_hash(hash), + std::forward(value_type_args)...); + } + + m_nb_elements++; + /* + * The value will be inserted in ibucket in any case, either because it was + * empty or by stealing the bucket (robin hood). + */ + return std::make_pair(iterator(m_buckets + ibucket), true); + } + + template + void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, Args&&... 
value_type_args) { + value_type value(std::forward(value_type_args)...); + insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); + } + + void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type&& value) { + insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); + } + + /* + * We don't use `value_type&& value` as last argument due to a bug in MSVC + * when `value_type` is a pointer, The compiler is not able to see the + * difference between `std::string*` and `std::string*&&` resulting in a + * compilation error. + * + * The `value` will be in a moved state at the end of the function. + */ + void insert_value_impl(std::size_t ibucket, + distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type& value) { + tsl_rh_assert(dist_from_ideal_bucket > + m_buckets[ibucket].dist_from_ideal_bucket()); + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, + value); + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + + while (!m_buckets[ibucket].empty()) { + if (dist_from_ideal_bucket > + m_buckets[ibucket].dist_from_ideal_bucket()) { + if (dist_from_ideal_bucket > + bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) { + /** + * The number of probes is really high, rehash the map on the next + * insert. Difficult to do now as rehash may throw an exception. 
+ */ + m_grow_on_next_insert = true; + } + + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, + hash, value); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, + std::move(value)); + } + + void rehash_impl(size_type count_) { + robin_hash new_table(count_, static_cast(*this), + static_cast(*this), get_allocator(), + m_min_load_factor, m_max_load_factor); + tsl_rh_assert(size() <= new_table.m_load_threshold); + + const bool use_stored_hash = + USE_STORED_HASH_ON_REHASH(new_table.bucket_count()); + for (auto& bucket : m_buckets_data) { + if (bucket.empty()) { + continue; + } + + const std::size_t hash = + use_stored_hash ? bucket.truncated_hash() + : new_table.hash_key(KeySelect()(bucket.value())); + + new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0, + bucket_entry::truncate_hash(hash), + std::move(bucket.value())); + } + + new_table.m_nb_elements = m_nb_elements; + new_table.swap(*this); + } + + void clear_and_shrink() noexcept { + GrowthPolicy::clear(); + m_buckets_data.clear(); + m_buckets = static_empty_bucket_ptr(); + m_bucket_count = 0; + m_nb_elements = 0; + m_load_threshold = 0; + m_grow_on_next_insert = false; + m_try_shrink_on_next_insert = false; + } + + void insert_value_on_rehash(std::size_t ibucket, + distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type&& value) { + while (true) { + if (dist_from_ideal_bucket > + m_buckets[ibucket].dist_from_ideal_bucket()) { + if (m_buckets[ibucket].empty()) { + m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, + hash, std::move(value)); + return; + } else { + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, + hash, value); + } + } + + dist_from_ideal_bucket++; + ibucket = next_bucket(ibucket); + } + } + + /** + * Grow the table if m_grow_on_next_insert is true or we reached the + * max_load_factor. 
Shrink the table if m_try_shrink_on_next_insert is true + * (an erase occurred) and we're below the min_load_factor. + * + * Return true if the table has been rehashed. + */ + bool rehash_on_extreme_load(distance_type curr_dist_from_ideal_bucket) { + if (m_grow_on_next_insert || + curr_dist_from_ideal_bucket > + bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT || + size() >= m_load_threshold) { + rehash_impl(GrowthPolicy::next_bucket_count()); + m_grow_on_next_insert = false; + + return true; + } + + if (m_try_shrink_on_next_insert) { + m_try_shrink_on_next_insert = false; + if (m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) { + reserve(size() + 1); + + return true; + } + } + + return false; + } + + template + void serialize_impl(Serializer& serializer) const { + const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION; + serializer(version); + + // Indicate if the truncated hash of each bucket is stored. Use a + // std::int16_t instead of a bool to avoid the need for the serializer to + // support an extra 'bool' type. 
+ const std::int16_t hash_stored_for_bucket = + static_cast(STORE_HASH); + serializer(hash_stored_for_bucket); + + const slz_size_type nb_elements = m_nb_elements; + serializer(nb_elements); + + const slz_size_type bucket_count = m_buckets_data.size(); + serializer(bucket_count); + + const float min_load_factor = m_min_load_factor; + serializer(min_load_factor); + + const float max_load_factor = m_max_load_factor; + serializer(max_load_factor); + + for (const bucket_entry& bucket : m_buckets_data) { + if (bucket.empty()) { + const std::int16_t empty_bucket = + bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; + serializer(empty_bucket); + } else { + const std::int16_t dist_from_ideal_bucket = + bucket.dist_from_ideal_bucket(); + serializer(dist_from_ideal_bucket); + if (STORE_HASH) { + const std::uint32_t truncated_hash = bucket.truncated_hash(); + serializer(truncated_hash); + } + serializer(bucket.value()); + } + } + } + + template + void deserialize_impl(Deserializer& deserializer, bool hash_compatible) { + tsl_rh_assert(m_buckets_data.empty()); // Current hash table must be empty + + const slz_size_type version = + deserialize_value(deserializer); + // For now we only have one version of the serialization protocol. + // If it doesn't match there is a problem with the file. + if (version != SERIALIZATION_PROTOCOL_VERSION) { + TSL_RH_THROW_OR_TERMINATE(std::runtime_error, + "Can't deserialize the ordered_map/set. " + "The protocol version header is invalid."); + } + + const bool hash_stored_for_bucket = + deserialize_value(deserializer) ? 
true : false; + if (hash_compatible && STORE_HASH != hash_stored_for_bucket) { + TSL_RH_THROW_OR_TERMINATE( + std::runtime_error, + "Can't deserialize a map with a different StoreHash " + "than the one used during the serialization when " + "hash compatibility is used"); + } + + const slz_size_type nb_elements = + deserialize_value(deserializer); + const slz_size_type bucket_count_ds = + deserialize_value(deserializer); + const float min_load_factor = deserialize_value(deserializer); + const float max_load_factor = deserialize_value(deserializer); + + if (min_load_factor < MINIMUM_MIN_LOAD_FACTOR || + min_load_factor > MAXIMUM_MIN_LOAD_FACTOR) { + TSL_RH_THROW_OR_TERMINATE( + std::runtime_error, + "Invalid min_load_factor. Check that the serializer " + "and deserializer support floats correctly as they " + "can be converted implicitly to ints."); + } + + if (max_load_factor < MINIMUM_MAX_LOAD_FACTOR || + max_load_factor > MAXIMUM_MAX_LOAD_FACTOR) { + TSL_RH_THROW_OR_TERMINATE( + std::runtime_error, + "Invalid max_load_factor. 
Check that the serializer " + "and deserializer support floats correctly as they " + "can be converted implicitly to ints."); + } + + this->min_load_factor(min_load_factor); + this->max_load_factor(max_load_factor); + + if (bucket_count_ds == 0) { + tsl_rh_assert(nb_elements == 0); + return; + } + + if (!hash_compatible) { + reserve(numeric_cast(nb_elements, + "Deserialized nb_elements is too big.")); + for (slz_size_type ibucket = 0; ibucket < bucket_count_ds; ibucket++) { + const distance_type dist_from_ideal_bucket = + deserialize_value(deserializer); + if (dist_from_ideal_bucket != + bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) { + if (hash_stored_for_bucket) { + TSL_RH_UNUSED(deserialize_value(deserializer)); + } + + insert(deserialize_value(deserializer)); + } + } + + tsl_rh_assert(nb_elements == size()); + } else { + m_bucket_count = numeric_cast( + bucket_count_ds, "Deserialized bucket_count is too big."); + + GrowthPolicy::operator=(GrowthPolicy(m_bucket_count)); + // GrowthPolicy should not modify the bucket count we got from + // deserialization + if (m_bucket_count != bucket_count_ds) { + TSL_RH_THROW_OR_TERMINATE(std::runtime_error, + "The GrowthPolicy is not the same even " + "though hash_compatible is true."); + } + + m_nb_elements = numeric_cast( + nb_elements, "Deserialized nb_elements is too big."); + m_buckets_data.resize(m_bucket_count); + m_buckets = m_buckets_data.data(); + + for (bucket_entry& bucket : m_buckets_data) { + const distance_type dist_from_ideal_bucket = + deserialize_value(deserializer); + if (dist_from_ideal_bucket != + bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) { + truncated_hash_type truncated_hash = 0; + if (hash_stored_for_bucket) { + tsl_rh_assert(hash_stored_for_bucket); + truncated_hash = deserialize_value(deserializer); + } + + bucket.set_value_of_empty_bucket( + dist_from_ideal_bucket, truncated_hash, + deserialize_value(deserializer)); + } + } + + if (!m_buckets_data.empty()) { + 
m_buckets_data.back().set_as_last_bucket(); + } + } + } + + public: + static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0; + + static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f; + static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f; + static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f; + + static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f; + static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f; + static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f; + + static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR, + "MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR"); + static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR, + "MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR"); + static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR, + "MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR"); + + private: + /** + * Protocol version currenlty used for serialization. + */ + static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1; + + /** + * Return an always valid pointer to an static empty bucket_entry with + * last_bucket() == true. + */ + bucket_entry* static_empty_bucket_ptr() noexcept { + static bucket_entry empty_bucket(true); + tsl_rh_assert(empty_bucket.empty()); + return &empty_bucket; + } + + private: + buckets_container_type m_buckets_data; + + /** + * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points + * to static_empty_bucket_ptr. This variable is useful to avoid the cost of + * checking if m_buckets_data is empty when trying to find an element. + * + * TODO Remove m_buckets_data and only use a pointer instead of a + * pointer+vector to save some space in the robin_hash object. Manage the + * Allocator manually. + */ + bucket_entry* m_buckets; + + /** + * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit + * slower. 
+ */ + size_type m_bucket_count; + + size_type m_nb_elements; + + size_type m_load_threshold; + + float m_min_load_factor; + float m_max_load_factor; + + bool m_grow_on_next_insert; + + /** + * We can't shrink down the map on erase operations as the erase methods need + * to return the next iterator. Shrinking the map would invalidate all the + * iterators and we could not return the next iterator in a meaningful way, On + * erase, we thus just indicate on erase that we should try to shrink the hash + * table on the next insert if we go below the min_load_factor. + */ + bool m_try_shrink_on_next_insert; +}; + +} // namespace detail_robin_hash + +} // namespace tsl + +#endif diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_map.h b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_map.h new file mode 100644 index 0000000..b594810 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_map.h @@ -0,0 +1,815 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_MAP_H +#define TSL_ROBIN_MAP_H + +#include +#include +#include +#include +#include +#include + +#include "robin_hash.h" + +namespace tsl { + +/** + * Implementation of a hash map using open-addressing and the robin hood hashing + * algorithm with backward shift deletion. + * + * For operations modifying the hash map (insert, erase, rehash, ...), the + * strong exception guarantee is only guaranteed when the expression + * `std::is_nothrow_swappable>::value && + * std::is_nothrow_move_constructible>::value` is true, + * otherwise if an exception is thrown during the swap or the move, the hash map + * may end up in a undefined state. Per the standard a `Key` or `T` with a + * noexcept copy constructor and no move constructor also satisfies the + * `std::is_nothrow_move_constructible>::value` criterion (and + * will thus guarantee the strong exception for the map). + * + * When `StoreHash` is true, 32 bits of the hash are stored alongside the + * values. It can improve the performance during lookups if the `KeyEqual` + * function takes time (if it engenders a cache-miss for example) as we then + * compare the stored hashes before comparing the keys. When + * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also + * speed-up the rehash process as we can avoid to recalculate the hash. When it + * is detected that storing the hash will not incur any memory penalty due to + * alignment (i.e. 
`sizeof(tsl::detail_robin_hash::bucket_entry) == sizeof(tsl::detail_robin_hash::bucket_entry)`) + * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored + * even if `StoreHash` is false so that we can speed-up the rehash (but it will + * not be used on lookups unless `StoreHash` is true). + * + * `GrowthPolicy` defines how the map grows and consequently how a hash value is + * mapped to a bucket. By default the map uses + * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of + * buckets to a power of two and uses a mask to map the hash to a bucket instead + * of the slow modulo. Other growth policies are available and you may define + * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the + * interface. + * + * `std::pair` must be swappable. + * + * `Key` and `T` must be copy and/or move constructible. + * + * If the destructor of `Key` or `T` throws an exception, the behaviour of the + * class is undefined. + * + * Iterators invalidation: + * - clear, operator=, reserve, rehash: always invalidate the iterators. + * - insert, emplace, emplace_hint, operator[]: if there is an effective + * insert, invalidate the iterators. + * - erase: always invalidate the iterators. 
+ */ +template , + class KeyEqual = std::equal_to, + class Allocator = std::allocator>, + bool StoreHash = false, + class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> +class robin_map { + private: + template + using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; + + class KeySelect { + public: + using key_type = Key; + + const key_type& operator()( + const std::pair& key_value) const noexcept { + return key_value.first; + } + + key_type& operator()(std::pair& key_value) noexcept { + return key_value.first; + } + }; + + class ValueSelect { + public: + using value_type = T; + + const value_type& operator()( + const std::pair& key_value) const noexcept { + return key_value.second; + } + + value_type& operator()(std::pair& key_value) noexcept { + return key_value.second; + } + }; + + using ht = detail_robin_hash::robin_hash, KeySelect, + ValueSelect, Hash, KeyEqual, + Allocator, StoreHash, GrowthPolicy>; + + public: + using key_type = typename ht::key_type; + using mapped_type = T; + using value_type = typename ht::value_type; + using size_type = typename ht::size_type; + using difference_type = typename ht::difference_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using allocator_type = typename ht::allocator_type; + using reference = typename ht::reference; + using const_reference = typename ht::const_reference; + using pointer = typename ht::pointer; + using const_pointer = typename ht::const_pointer; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + + public: + /* + * Constructors + */ + robin_map() : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {} + + explicit robin_map(size_type bucket_count, const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()) + : m_ht(bucket_count, hash, equal, alloc) {} + + robin_map(size_type bucket_count, const Allocator& alloc) + : robin_map(bucket_count, Hash(), KeyEqual(), 
alloc) {} + + robin_map(size_type bucket_count, const Hash& hash, const Allocator& alloc) + : robin_map(bucket_count, hash, KeyEqual(), alloc) {} + + explicit robin_map(const Allocator& alloc) + : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {} + + template + robin_map(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()) + : robin_map(bucket_count, hash, equal, alloc) { + insert(first, last); + } + + template + robin_map(InputIt first, InputIt last, size_type bucket_count, + const Allocator& alloc) + : robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} + + template + robin_map(InputIt first, InputIt last, size_type bucket_count, + const Hash& hash, const Allocator& alloc) + : robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) {} + + robin_map(std::initializer_list init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()) + : robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) {} + + robin_map(std::initializer_list init, size_type bucket_count, + const Allocator& alloc) + : robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), + alloc) {} + + robin_map(std::initializer_list init, size_type bucket_count, + const Hash& hash, const Allocator& alloc) + : robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), + alloc) {} + + robin_map& operator=(std::initializer_list ilist) { + m_ht.clear(); + + m_ht.reserve(ilist.size()); + m_ht.insert(ilist.begin(), ilist.end()); + + return *this; + } + + allocator_type get_allocator() const { return m_ht.get_allocator(); } + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return 
m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + std::pair insert(const value_type& value) { + return m_ht.insert(value); + } + + template ::value>::type* = nullptr> + std::pair insert(P&& value) { + return m_ht.emplace(std::forward

(value)); + } + + std::pair insert(value_type&& value) { + return m_ht.insert(std::move(value)); + } + + iterator insert(const_iterator hint, const value_type& value) { + return m_ht.insert_hint(hint, value); + } + + template ::value>::type* = nullptr> + iterator insert(const_iterator hint, P&& value) { + return m_ht.emplace_hint(hint, std::forward

(value)); + } + + iterator insert(const_iterator hint, value_type&& value) { + return m_ht.insert_hint(hint, std::move(value)); + } + + template + void insert(InputIt first, InputIt last) { + m_ht.insert(first, last); + } + + void insert(std::initializer_list ilist) { + m_ht.insert(ilist.begin(), ilist.end()); + } + + template + std::pair insert_or_assign(const key_type& k, M&& obj) { + return m_ht.insert_or_assign(k, std::forward(obj)); + } + + template + std::pair insert_or_assign(key_type&& k, M&& obj) { + return m_ht.insert_or_assign(std::move(k), std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) { + return m_ht.insert_or_assign(hint, k, std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) { + return m_ht.insert_or_assign(hint, std::move(k), std::forward(obj)); + } + + /** + * Due to the way elements are stored, emplace will need to move or copy the + * key-value once. The method is equivalent to + * insert(value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + std::pair emplace(Args&&... args) { + return m_ht.emplace(std::forward(args)...); + } + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy + * the key-value once. The method is equivalent to insert(hint, + * value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return m_ht.emplace_hint(hint, std::forward(args)...); + } + + template + std::pair try_emplace(const key_type& k, Args&&... args) { + return m_ht.try_emplace(k, std::forward(args)...); + } + + template + std::pair try_emplace(key_type&& k, Args&&... 
args) { + return m_ht.try_emplace(std::move(k), std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) { + return m_ht.try_emplace_hint(hint, k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) { + return m_ht.try_emplace_hint(hint, std::move(k), + std::forward(args)...); + } + + iterator erase(iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { + return m_ht.erase(first, last); + } + size_type erase(const key_type& key) { return m_ht.erase(key); } + + /** + * Erase the element at position 'pos'. In contrast to the regular erase() + * function, erase_fast() does not return an iterator. This allows it to be + * faster especially in hash tables with a low load factor, where finding the + * next nonempty bucket would be costly. + */ + void erase_fast(iterator pos) { return m_ht.erase_fast(pos); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup to the value if you already have the hash. + */ + size_type erase(const key_type& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type erase(const K& key) { + return m_ht.erase(key); + } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup to the value if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type erase(const K& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + void swap(robin_map& other) { other.m_ht.swap(m_ht); } + + /* + * Lookup + */ + T& at(const Key& key) { return m_ht.at(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + T& at(const Key& key, std::size_t precalculated_hash) { + return m_ht.at(key, precalculated_hash); + } + + const T& at(const Key& key) const { return m_ht.at(key); } + + /** + * @copydoc at(const Key& key, std::size_t precalculated_hash) + */ + const T& at(const Key& key, std::size_t precalculated_hash) const { + return m_ht.at(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + T& at(const K& key) { + return m_ht.at(key); + } + + /** + * @copydoc at(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + T& at(const K& key, std::size_t precalculated_hash) { + return m_ht.at(key, precalculated_hash); + } + + /** + * @copydoc at(const K& key) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const T& at(const K& key) const { + return m_ht.at(key); + } + + /** + * @copydoc at(const K& key, std::size_t precalculated_hash) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const T& at(const K& key, std::size_t precalculated_hash) const { + return m_ht.at(key, precalculated_hash); + } + + T& operator[](const Key& key) { return m_ht[key]; } + T& operator[](Key&& key) { return m_ht[std::move(key)]; } + + size_type count(const Key& key) const { return m_ht.count(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + size_type count(const Key& key, std::size_t precalculated_hash) const { + return m_ht.count(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type count(const K& key) const { + return m_ht.count(key); + } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type count(const K& key, std::size_t precalculated_hash) const { + return m_ht.count(key, precalculated_hash); + } + + iterator find(const Key& key) { return m_ht.find(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + iterator find(const Key& key, std::size_t precalculated_hash) { + return m_ht.find(key, precalculated_hash); + } + + const_iterator find(const Key& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + const_iterator find(const Key& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + iterator find(const K& key) { + return m_ht.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + iterator find(const K& key, std::size_t precalculated_hash) { + return m_ht.find(key, precalculated_hash); + } + + /** + * @copydoc find(const K& key) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const_iterator find(const K& key) const { + return m_ht.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const_iterator find(const K& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + bool contains(const Key& key) const { return m_ht.contains(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + bool contains(const Key& key, std::size_t precalculated_hash) const { + return m_ht.contains(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + bool contains(const K& key) const { + return m_ht.contains(key); + } + + /** + * @copydoc contains(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + bool contains(const K& key, std::size_t precalculated_hash) const { + return m_ht.contains(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) { + return m_ht.equal_range(key); + } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + std::pair equal_range(const Key& key, + std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) const { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + std::pair equal_range( + const Key& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key) { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key, + std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key) const { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range( + const K& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + + float min_load_factor() const { return m_ht.min_load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + + /** + * Set the `min_load_factor` to `ml`. When the `load_factor` of the map goes + * below `min_load_factor` after some erase operations, the map will be + * shrunk when an insertion occurs. The erase method itself never shrinks + * the map. + * + * The default value of `min_load_factor` is 0.0f, the map never shrinks by + * default. + */ + void min_load_factor(float ml) { m_ht.min_load_factor(ml); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count_) { m_ht.rehash(count_); } + void reserve(size_type count_) { m_ht.reserve(count_); } + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + /* + * Other + */ + + /** + * Convert a const_iterator to an iterator. 
+ */ + iterator mutable_iterator(const_iterator pos) { + return m_ht.mutable_iterator(pos); + } + + /** + * Serialize the map through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the + * following call: + * - `template void operator()(const U& value);` where the types + * `std::int16_t`, `std::uint32_t`, `std::uint64_t`, `float` and + * `std::pair` must be supported for U. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for + * floats, ...) of the types it serializes in the hands of the `Serializer` + * function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + /** + * Deserialize a previously serialized map through the `deserializer` + * parameter. + * + * The `deserializer` parameter must be a function object that supports the + * following call: + * - `template U operator()();` where the types `std::int16_t`, + * `std::uint32_t`, `std::uint64_t`, `float` and `std::pair` must be + * supported for U. + * + * If the deserialized hash map type is hash compatible with the serialized + * map, the deserialization process can be sped up by setting + * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and + * GrowthPolicy must behave the same way than the ones used on the serialized + * map and the StoreHash must have the same value. The `std::size_t` must also + * be of the same size as the one on the platform used to serialize the map. + * If these criteria are not met, the behaviour is undefined with + * `hash_compatible` sets to true. + * + * The behaviour is undefined if the type `Key` and `T` of the `robin_map` are + * not the same as the types used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for + * floats, size of int, ...) 
of the types it deserializes in the hands of the + * `Deserializer` function object if compatibility is required. + */ + template + static robin_map deserialize(Deserializer& deserializer, + bool hash_compatible = false) { + robin_map map(0); + map.m_ht.deserialize(deserializer, hash_compatible); + + return map; + } + + friend bool operator==(const robin_map& lhs, const robin_map& rhs) { + if (lhs.size() != rhs.size()) { + return false; + } + + for (const auto& element_lhs : lhs) { + const auto it_element_rhs = rhs.find(element_lhs.first); + if (it_element_rhs == rhs.cend() || + element_lhs.second != it_element_rhs->second) { + return false; + } + } + + return true; + } + + friend bool operator!=(const robin_map& lhs, const robin_map& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(robin_map& lhs, robin_map& rhs) { lhs.swap(rhs); } + + private: + ht m_ht; +}; + +/** + * Same as `tsl::robin_map`. + */ +template , + class KeyEqual = std::equal_to, + class Allocator = std::allocator>, + bool StoreHash = false> +using robin_pg_map = robin_map; + +} // end namespace tsl + +#endif diff --git a/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_set.h b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_set.h new file mode 100644 index 0000000..e115007 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/ext/robin_map/include/tsl/robin_set.h @@ -0,0 +1,668 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this 
permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_SET_H +#define TSL_ROBIN_SET_H + +#include +#include +#include +#include +#include +#include + +#include "robin_hash.h" + +namespace tsl { + +/** + * Implementation of a hash set using open-addressing and the robin hood hashing + * algorithm with backward shift deletion. + * + * For operations modifying the hash set (insert, erase, rehash, ...), the + * strong exception guarantee is only guaranteed when the expression + * `std::is_nothrow_swappable::value && + * std::is_nothrow_move_constructible::value` is true, otherwise if an + * exception is thrown during the swap or the move, the hash set may end up in a + * undefined state. Per the standard a `Key` with a noexcept copy constructor + * and no move constructor also satisfies the + * `std::is_nothrow_move_constructible::value` criterion (and will thus + * guarantee the strong exception for the set). + * + * When `StoreHash` is true, 32 bits of the hash are stored alongside the + * values. It can improve the performance during lookups if the `KeyEqual` + * function takes time (or engenders a cache-miss for example) as we then + * compare the stored hashes before comparing the keys. When + * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also + * speed-up the rehash process as we can avoid to recalculate the hash. 
When it + * is detected that storing the hash will not incur any memory penalty due to + * alignment (i.e. `sizeof(tsl::detail_robin_hash::bucket_entry) == sizeof(tsl::detail_robin_hash::bucket_entry)`) + * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored + * even if `StoreHash` is false so that we can speed-up the rehash (but it will + * not be used on lookups unless `StoreHash` is true). + * + * `GrowthPolicy` defines how the set grows and consequently how a hash value is + * mapped to a bucket. By default the set uses + * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of + * buckets to a power of two and uses a mask to set the hash to a bucket instead + * of the slow modulo. Other growth policies are available and you may define + * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the + * interface. + * + * `Key` must be swappable. + * + * `Key` must be copy and/or move constructible. + * + * If the destructor of `Key` throws an exception, the behaviour of the class is + * undefined. + * + * Iterators invalidation: + * - clear, operator=, reserve, rehash: always invalidate the iterators. + * - insert, emplace, emplace_hint, operator[]: if there is an effective + * insert, invalidate the iterators. + * - erase: always invalidate the iterators. 
+ */ +template , + class KeyEqual = std::equal_to, + class Allocator = std::allocator, bool StoreHash = false, + class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> +class robin_set { + private: + template + using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; + + class KeySelect { + public: + using key_type = Key; + + const key_type& operator()(const Key& key) const noexcept { return key; } + + key_type& operator()(Key& key) noexcept { return key; } + }; + + using ht = detail_robin_hash::robin_hash; + + public: + using key_type = typename ht::key_type; + using value_type = typename ht::value_type; + using size_type = typename ht::size_type; + using difference_type = typename ht::difference_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using allocator_type = typename ht::allocator_type; + using reference = typename ht::reference; + using const_reference = typename ht::const_reference; + using pointer = typename ht::pointer; + using const_pointer = typename ht::const_pointer; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + + /* + * Constructors + */ + robin_set() : robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE) {} + + explicit robin_set(size_type bucket_count, const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()) + : m_ht(bucket_count, hash, equal, alloc) {} + + robin_set(size_type bucket_count, const Allocator& alloc) + : robin_set(bucket_count, Hash(), KeyEqual(), alloc) {} + + robin_set(size_type bucket_count, const Hash& hash, const Allocator& alloc) + : robin_set(bucket_count, hash, KeyEqual(), alloc) {} + + explicit robin_set(const Allocator& alloc) + : robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {} + + template + robin_set(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(), + const Allocator& 
alloc = Allocator()) + : robin_set(bucket_count, hash, equal, alloc) { + insert(first, last); + } + + template + robin_set(InputIt first, InputIt last, size_type bucket_count, + const Allocator& alloc) + : robin_set(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} + + template + robin_set(InputIt first, InputIt last, size_type bucket_count, + const Hash& hash, const Allocator& alloc) + : robin_set(first, last, bucket_count, hash, KeyEqual(), alloc) {} + + robin_set(std::initializer_list init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()) + : robin_set(init.begin(), init.end(), bucket_count, hash, equal, alloc) {} + + robin_set(std::initializer_list init, size_type bucket_count, + const Allocator& alloc) + : robin_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), + alloc) {} + + robin_set(std::initializer_list init, size_type bucket_count, + const Hash& hash, const Allocator& alloc) + : robin_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), + alloc) {} + + robin_set& operator=(std::initializer_list ilist) { + m_ht.clear(); + + m_ht.reserve(ilist.size()); + m_ht.insert(ilist.begin(), ilist.end()); + + return *this; + } + + allocator_type get_allocator() const { return m_ht.get_allocator(); } + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + + /* + * Modifiers + */ + void clear() 
noexcept { m_ht.clear(); } + + std::pair insert(const value_type& value) { + return m_ht.insert(value); + } + + std::pair insert(value_type&& value) { + return m_ht.insert(std::move(value)); + } + + iterator insert(const_iterator hint, const value_type& value) { + return m_ht.insert_hint(hint, value); + } + + iterator insert(const_iterator hint, value_type&& value) { + return m_ht.insert_hint(hint, std::move(value)); + } + + template + void insert(InputIt first, InputIt last) { + m_ht.insert(first, last); + } + + void insert(std::initializer_list ilist) { + m_ht.insert(ilist.begin(), ilist.end()); + } + + /** + * Due to the way elements are stored, emplace will need to move or copy the + * key-value once. The method is equivalent to + * insert(value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + std::pair emplace(Args&&... args) { + return m_ht.emplace(std::forward(args)...); + } + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy + * the key-value once. The method is equivalent to insert(hint, + * value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return m_ht.emplace_hint(hint, std::forward(args)...); + } + + iterator erase(iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { + return m_ht.erase(first, last); + } + size_type erase(const key_type& key) { return m_ht.erase(key); } + + /** + * Erase the element at position 'pos'. In contrast to the regular erase() + * function, erase_fast() does not return an iterator. This allows it to be + * faster especially in hash sets with a low load factor, where finding the + * next nonempty bucket would be costly. 
+ */ + void erase_fast(iterator pos) { return m_ht.erase_fast(pos); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup to the value if you already have the hash. + */ + size_type erase(const key_type& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type erase(const K& key) { + return m_ht.erase(key); + } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup to the value if you already have the hash. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type erase(const K& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + void swap(robin_set& other) { other.m_ht.swap(m_ht); } + + /* + * Lookup + */ + size_type count(const Key& key) const { return m_ht.count(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + size_type count(const Key& key, std::size_t precalculated_hash) const { + return m_ht.count(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type count(const K& key) const { + return m_ht.count(key); + } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + size_type count(const K& key, std::size_t precalculated_hash) const { + return m_ht.count(key, precalculated_hash); + } + + iterator find(const Key& key) { return m_ht.find(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + iterator find(const Key& key, std::size_t precalculated_hash) { + return m_ht.find(key, precalculated_hash); + } + + const_iterator find(const Key& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + const_iterator find(const Key& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + iterator find(const K& key) { + return m_ht.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + iterator find(const K& key, std::size_t precalculated_hash) { + return m_ht.find(key, precalculated_hash); + } + + /** + * @copydoc find(const K& key) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const_iterator find(const K& key) const { + return m_ht.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + const_iterator find(const K& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + bool contains(const Key& key) const { return m_ht.contains(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + bool contains(const Key& key, std::size_t precalculated_hash) const { + return m_ht.contains(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + bool contains(const K& key) const { + return m_ht.contains(key); + } + + /** + * @copydoc contains(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + bool contains(const K& key, std::size_t precalculated_hash) const { + return m_ht.contains(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) { + return m_ht.equal_range(key); + } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + std::pair equal_range(const Key& key, + std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) const { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + std::pair equal_range( + const Key& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key) { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key, + std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range(const K& key) const { + return m_ht.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template < + class K, class KE = KeyEqual, + typename std::enable_if::value>::type* = nullptr> + std::pair equal_range( + const K& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + + float min_load_factor() const { return m_ht.min_load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + + /** + * Set the `min_load_factor` to `ml`. When the `load_factor` of the set goes + * below `min_load_factor` after some erase operations, the set will be + * shrunk when an insertion occurs. The erase method itself never shrinks + * the set. + * + * The default value of `min_load_factor` is 0.0f, the set never shrinks by + * default. + */ + void min_load_factor(float ml) { m_ht.min_load_factor(ml); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count_) { m_ht.rehash(count_); } + void reserve(size_type count_) { m_ht.reserve(count_); } + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + /* + * Other + */ + + /** + * Convert a const_iterator to an iterator. 
+ */ + iterator mutable_iterator(const_iterator pos) { + return m_ht.mutable_iterator(pos); + } + + friend bool operator==(const robin_set& lhs, const robin_set& rhs) { + if (lhs.size() != rhs.size()) { + return false; + } + + for (const auto& element_lhs : lhs) { + const auto it_element_rhs = rhs.find(element_lhs); + if (it_element_rhs == rhs.cend()) { + return false; + } + } + + return true; + } + + /** + * Serialize the set through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the + * following call: + * - `template void operator()(const U& value);` where the types + * `std::int16_t`, `std::uint32_t`, `std::uint64_t`, `float` and `Key` must be + * supported for U. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for + * floats, ...) of the types it serializes in the hands of the `Serializer` + * function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + /** + * Deserialize a previously serialized set through the `deserializer` + * parameter. + * + * The `deserializer` parameter must be a function object that supports the + * following call: + * - `template U operator()();` where the types `std::int16_t`, + * `std::uint32_t`, `std::uint64_t`, `float` and `Key` must be supported for + * U. + * + * If the deserialized hash set type is hash compatible with the serialized + * set, the deserialization process can be sped up by setting + * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and + * GrowthPolicy must behave the same way than the ones used on the serialized + * set and the StoreHash must have the same value. The `std::size_t` must also + * be of the same size as the one on the platform used to serialize the set. + * If these criteria are not met, the behaviour is undefined with + * `hash_compatible` sets to true. 
+ * + * The behaviour is undefined if the type `Key` of the `robin_set` is not the + * same as the type used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for + * floats, size of int, ...) of the types it deserializes in the hands of the + * `Deserializer` function object if compatibility is required. + */ + template + static robin_set deserialize(Deserializer& deserializer, + bool hash_compatible = false) { + robin_set set(0); + set.m_ht.deserialize(deserializer, hash_compatible); + + return set; + } + + friend bool operator!=(const robin_set& lhs, const robin_set& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(robin_set& lhs, robin_set& rhs) { lhs.swap(rhs); } + + private: + ht m_ht; +}; + +/** + * Same as `tsl::robin_set`. + */ +template , + class KeyEqual = std::equal_to, + class Allocator = std::allocator, bool StoreHash = false> +using robin_pg_set = robin_set; + +} // end namespace tsl + +#endif diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/dense.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/dense.h new file mode 100644 index 0000000..afc943e --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/dense.h @@ -0,0 +1,486 @@ +/* + nanobind/eigen/dense.h: type casters for dense Eigen + vectors and matrices + + Copyright (c) 2023 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include +#include + +static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 1), + "Eigen matrix support in nanobind requires Eigen >= 3.3.1"); + +NAMESPACE_BEGIN(NB_NAMESPACE) + +/// Function argument types that are compatible with various array flavors +using DStride = Eigen::Stride; +template using DRef = Eigen::Ref; +template using DMap = Eigen::Map; + +NAMESPACE_BEGIN(detail) + +/// Determine the number of dimensions of the given Eigen type +template +constexpr int ndim_v = bool(T::IsVectorAtCompileTime) ? 1 : 2; + +/// Extract the compile-time strides of the given Eigen type +template struct stride { + using type = Eigen::Stride<0, 0>; +}; + +template struct stride> { + using type = StrideType; +}; + +template struct stride> { + using type = StrideType; +}; + +template using stride_t = typename stride::type; + +/** \brief Identify types with a contiguous memory representation. + * + * This includes all specializations of ``Eigen::Matrix``/``Eigen::Array`` and + * certain specializations of ``Eigen::Map`` and ``Eigen::Ref``. Note: Eigen + * interprets a compile-time stride of 0 as contiguous. 
+ */ +template +constexpr bool is_contiguous_v = + (stride_t::InnerStrideAtCompileTime == 0 || + stride_t::InnerStrideAtCompileTime == 1) && + (ndim_v == 1 || stride_t::OuterStrideAtCompileTime == 0 || + (stride_t::OuterStrideAtCompileTime != Eigen::Dynamic && + int(stride_t::OuterStrideAtCompileTime) == int(T::InnerSizeAtCompileTime))); + +/// Identify types with a static or dynamic layout that support contiguous storage +template +constexpr bool can_map_contiguous_memory_v = + (stride_t::InnerStrideAtCompileTime == 0 || + stride_t::InnerStrideAtCompileTime == 1 || + stride_t::InnerStrideAtCompileTime == Eigen::Dynamic) && + (ndim_v == 1 || stride_t::OuterStrideAtCompileTime == 0 || + stride_t::OuterStrideAtCompileTime == Eigen::Dynamic || + int(stride_t::OuterStrideAtCompileTime) == int(T::InnerSizeAtCompileTime)); + +/* This type alias builds the most suitable 'ndarray' for the given Eigen type. + In particular, it + + - matches the underlying scalar type + - matches the number of dimensions (i.e. whether the type is a vector/matrix) + - matches the shape (if the row/column count is known at compile time) + - matches the in-memory ordering when the Eigen type is contiguous. + + This is helpful because type_caster> will then perform the + necessary conversion steps (if given incompatible input) to enable data + exchange with Eigen. + + A limitation of this approach is that ndarray does not support compile-time + strides besides c_contig and f_contig. If an Eigen type requires + non-contiguous strides (at compile-time) and we are given an ndarray with + unsuitable strides (at run-time), type casting will fail. Note, however, that + this is rather unusual, since the default stride type of Eigen::Map requires + contiguous memory, and the one of Eigen::Ref requires a contiguous inner + stride, while handling any outer stride. 
+*/ + +template +using array_for_eigen_t = ndarray< + Scalar, + numpy, + std::conditional_t< + ndim_v == 1, + shape, + shape>, + std::conditional_t< + is_contiguous_v, + std::conditional_t< + ndim_v == 1 || T::IsRowMajor, + c_contig, + f_contig>, + unused>>; + +/// Any kind of Eigen class +template constexpr bool is_eigen_v = is_base_of_template_v; + +/// Detects Eigen::Array, Eigen::Matrix, etc. +template constexpr bool is_eigen_plain_v = is_base_of_template_v; + +/// Detect Eigen::SparseMatrix +template constexpr bool is_eigen_sparse_v = is_base_of_template_v; + +/// Detects expression templates +template constexpr bool is_eigen_xpr_v = + is_eigen_v && !is_eigen_plain_v && !is_eigen_sparse_v && + !std::is_base_of_v, T>; + +template +struct type_caster && + is_ndarray_scalar_v>> { + using Scalar = typename T::Scalar; + using NDArray = array_for_eigen_t; + using NDArrayCaster = make_caster; + + NB_TYPE_CASTER(T, NDArrayCaster::Name) + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + // We're in any case making a copy, so non-writable inputs area also okay + using NDArrayConst = array_for_eigen_t; + make_caster caster; + if (!caster.from_python(src, flags, cleanup)) + return false; + + const NDArrayConst &array = caster.value; + if constexpr (ndim_v == 1) + value.resize(array.shape(0)); + else + value.resize(array.shape(0), array.shape(1)); + + // The layout is contiguous & compatible thanks to array_for_eigen_t + memcpy(value.data(), array.data(), array.size() * sizeof(Scalar)); + + return true; + } + + static handle from_cpp(T &&v, rv_policy policy, cleanup_list *cleanup) noexcept { + if (policy == rv_policy::automatic || + policy == rv_policy::automatic_reference) + policy = rv_policy::move; + + return from_cpp((const T &) v, policy, cleanup); + } + + static handle from_cpp(const T &v, rv_policy policy, cleanup_list *cleanup) noexcept { + size_t shape[ndim_v]; + int64_t strides[ndim_v]; + + if constexpr (ndim_v == 1) { + shape[0] 
= v.size(); + strides[0] = v.innerStride(); + } else { + shape[0] = v.rows(); + shape[1] = v.cols(); + strides[0] = v.rowStride(); + strides[1] = v.colStride(); + } + + void *ptr = (void *) v.data(); + + switch (policy) { + case rv_policy::automatic: + policy = rv_policy::copy; + break; + + case rv_policy::automatic_reference: + policy = rv_policy::reference; + break; + + case rv_policy::move: + // Don't bother moving when the data is static or occupies <1KB + if ((T::SizeAtCompileTime != Eigen::Dynamic || + (size_t) v.size() < (1024 / sizeof(Scalar)))) + policy = rv_policy::copy; + break; + + default: // leave policy unchanged + break; + } + + object owner; + if (policy == rv_policy::move) { + T *temp = new T(std::move(v)); + owner = capsule(temp, [](void *p) noexcept { delete (T *) p; }); + ptr = temp->data(); + policy = rv_policy::reference; + } else if (policy == rv_policy::reference_internal && cleanup->self()) { + owner = borrow(cleanup->self()); + policy = rv_policy::reference; + } + + object o = steal(NDArrayCaster::from_cpp( + NDArray(ptr, ndim_v, shape, owner, strides), + policy, cleanup)); + + return o.release(); + } +}; + +/// Caster for Eigen expression templates +template +struct type_caster && + is_ndarray_scalar_v>> { + using Array = Eigen::Array; + using Caster = make_caster; + static constexpr auto Name = Caster::Name; + template using Cast = T; + template static constexpr bool can_cast() { return true; } + + /// Generating an expression template from a Python object is, of course, not possible + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete; + + template + static handle from_cpp(T2 &&v, rv_policy policy, cleanup_list *cleanup) noexcept { + return Caster::from_cpp(std::forward(v), policy, cleanup); + } +}; + +/** \brief Type caster for ``Eigen::Map`` + + The ``Eigen::Map<..>`` type exists to efficiently access memory provided by a + caller. 
Given that, the nanobind type caster refuses to turn incompatible + inputs into a ``Eigen::Map`` when this would require an implicit + conversion. +*/ + +template +struct type_caster, + enable_if_t && + is_ndarray_scalar_v>> { + using Map = Eigen::Map; + using NDArray = + array_for_eigen_t, + const typename Map::Scalar, + typename Map::Scalar>>; + using NDArrayCaster = type_caster; + static constexpr auto Name = NDArrayCaster::Name; + template using Cast = Map; + template static constexpr bool can_cast() { return true; } + + NDArrayCaster caster; + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + // Disable implicit conversions + return from_python_(src, flags & ~(uint8_t)cast_flags::convert, cleanup); + } + + bool from_python_(handle src, uint8_t flags, cleanup_list* cleanup) noexcept { + if (!caster.from_python(src, flags, cleanup)) + return false; + + // Check for memory layout compatibility of non-contiguous 'Map' types + if constexpr (!is_contiguous_v) { + // Dynamic inner strides support any input, check the fixed case + if constexpr (StrideType::InnerStrideAtCompileTime != Eigen::Dynamic) { + // A compile-time stride of 0 implies "contiguous" .. + int64_t is_expected = StrideType::InnerStrideAtCompileTime == 0 + ? 1 /* .. and equals 1 for the inner stride */ + : StrideType::InnerStrideAtCompileTime, + is_actual = caster.value.stride( + (ndim_v != 1 && T::IsRowMajor) ? 1 : 0); + + if (is_expected != is_actual) + return false; + } + + // Analogous check for the outer strides + if constexpr (ndim_v == 2 && StrideType::OuterStrideAtCompileTime != Eigen::Dynamic) { + int64_t os_expected = StrideType::OuterStrideAtCompileTime == 0 + ? caster.value.shape(T::IsRowMajor ? 1 : 0) + : StrideType::OuterStrideAtCompileTime, + os_actual = caster.value.stride(T::IsRowMajor ? 
0 : 1); + + if (os_expected != os_actual) + return false; + } + } + return true; + } + + static handle from_cpp(const Map &v, rv_policy policy, cleanup_list *cleanup) noexcept { + size_t shape[ndim_v]; + int64_t strides[ndim_v]; + + if constexpr (ndim_v == 1) { + shape[0] = v.size(); + strides[0] = v.innerStride(); + } else { + shape[0] = v.rows(); + shape[1] = v.cols(); + strides[0] = v.rowStride(); + strides[1] = v.colStride(); + } + + return NDArrayCaster::from_cpp( + NDArray((void *) v.data(), ndim_v, shape, handle(), strides), + (policy == rv_policy::automatic || + policy == rv_policy::automatic_reference) + ? rv_policy::reference + : policy, + cleanup); + } + + StrideType strides() const { + constexpr int IS = StrideType::InnerStrideAtCompileTime, + OS = StrideType::OuterStrideAtCompileTime; + + int64_t inner = caster.value.stride(0), + outer; + + if constexpr (ndim_v == 1) + outer = caster.value.shape(0); + else + outer = caster.value.stride(1); + + (void) inner; (void) outer; + if constexpr (ndim_v == 2 && T::IsRowMajor) + std::swap(inner, outer); + + // Eigen may expect a stride of 0 to avoid an assertion failure + if constexpr (IS == 0) + inner = 0; + + if constexpr (OS == 0) + outer = 0; + + if constexpr (std::is_same_v>) + return StrideType(inner); + else if constexpr (std::is_same_v>) + return StrideType(outer); + else + return StrideType(outer, inner); + } + + operator Map() { + NDArray &t = caster.value; + if constexpr (ndim_v == 1) + return Map(t.data(), t.shape(0), strides()); + else + return Map(t.data(), t.shape(0), t.shape(1), strides()); + } +}; + +/** \brief Caster for Eigen::Ref + + Compared to the ``Eigen::Map`` type caster above, the reference caster + accepts a wider set of inputs when it is used in *constant reference* mode + (i.e., ``Eigen::Ref``). In this case, it performs stride conversions + (except for unusual non-contiguous strides) as well as conversions of the + underlying scalar type (if implicit conversions are enabled). 
+ + For non-constant references, the caster matches that of ``Eigen::Map`` and + requires an input with the expected layout (so that changes can propagate to + the caller). +*/ +template +struct type_caster, + enable_if_t && + is_ndarray_scalar_v>> { + using Ref = Eigen::Ref; + + /// Potentially convert strides/dtype when casting constant references + static constexpr bool MaybeConvert = + std::is_const_v && + // Restrict to contiguous 'T' (limitation in Eigen, see PR #215) + can_map_contiguous_memory_v; + + using NDArray = + array_for_eigen_t, + const typename Ref::Scalar, + typename Ref::Scalar>>; + using NDArrayCaster = type_caster; + + /// Eigen::Map caster with fixed strides + using Map = Eigen::Map; + using MapCaster = make_caster; + + // Extended version taking arbitrary strides + using DMap = Eigen::Map; + using DMapCaster = make_caster; + + /** + * The constructor of ``Ref`` uses one of two strategies + * depending on the input. It may either + * + * 1. Create a copy ``Ref::m_object`` (owned by Ref), or + * 2. Reference the existing input (non-owned). + * + * When the value below is ``true``, then it is guaranteed that + * ``Ref()`` owns the underlying data. + */ + static constexpr bool DMapConstructorOwnsData = + !Eigen::internal::traits::template match::type::value; + + static constexpr auto Name = + const_name(DMapCaster::Name, MapCaster::Name); + + template using Cast = Ref; + template static constexpr bool can_cast() { return true; } + + MapCaster caster; + struct Empty { }; + std::conditional_t dcaster; + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + // Try a direct cast without implicit conversion first + if (caster.from_python(src, flags, cleanup)) + return true; + + // Potentially convert strides/dtype when casting constant references + if constexpr (MaybeConvert) { + /* Generating an implicit copy requires some object to assume + ownership. 
During a function call, ``dcaster`` can serve that + role (this case is detected by checking whether ``flags`` has + the ``manual`` flag set). When used in other situations (e.g. + ``nb::cast()``), the created ``Eigen::Ref<..>`` must take + ownership of the copy. This is only guranteed to work if + DMapConstructorOwnsData. + + If neither of these is possible, we disable implicit + conversions. */ + + if ((flags & (uint8_t) cast_flags::manual) && + !DMapConstructorOwnsData) + flags &= ~(uint8_t) cast_flags::convert; + + if (dcaster.from_python_(src, flags, cleanup)) + return true; + } + + return false; + } + + static handle from_cpp(const Ref &v, rv_policy policy, cleanup_list *cleanup) noexcept { + // Copied from the Eigen::Map caster + + size_t shape[ndim_v]; + int64_t strides[ndim_v]; + + if constexpr (ndim_v == 1) { + shape[0] = v.size(); + strides[0] = v.innerStride(); + } else { + shape[0] = v.rows(); + shape[1] = v.cols(); + strides[0] = v.rowStride(); + strides[1] = v.colStride(); + } + + return NDArrayCaster::from_cpp( + NDArray((void *) v.data(), ndim_v, shape, handle(), strides), + (policy == rv_policy::automatic || + policy == rv_policy::automatic_reference) + ? rv_policy::reference + : policy, + cleanup); + } + + operator Ref() { + if constexpr (MaybeConvert) { + if (dcaster.caster.value.is_valid()) + return Ref(dcaster.operator DMap()); + } + + return Ref(caster.operator Map()); + } +}; + +NAMESPACE_END(detail) + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/sparse.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/sparse.h new file mode 100644 index 0000000..718fef2 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/eigen/sparse.h @@ -0,0 +1,178 @@ +/* + nanobind/eigen/sparse.h: type casters for sparse Eigen matrices + + Copyright (c) 2023 Henri Menke and Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +NAMESPACE_BEGIN(NB_NAMESPACE) + +NAMESPACE_BEGIN(detail) + +/// Detect Eigen::SparseMatrix +template constexpr bool is_eigen_sparse_matrix_v = + is_eigen_sparse_v && + !std::is_base_of_v, T>; + + +/// Caster for Eigen::SparseMatrix +template struct type_caster>> { + using Scalar = typename T::Scalar; + using StorageIndex = typename T::StorageIndex; + using Index = typename T::Index; + using SparseMap = Eigen::Map; + + static_assert(std::is_same_v>, + "nanobind: Eigen sparse caster only implemented for matrices"); + + static constexpr bool RowMajor = T::IsRowMajor; + + using ScalarNDArray = ndarray>; + using StorageIndexNDArray = ndarray>; + + using ScalarCaster = make_caster; + using StorageIndexCaster = make_caster; + + NB_TYPE_CASTER(T, const_name("scipy.sparse.csr_matrix[", + "scipy.sparse.csc_matrix[") + + make_caster::Name + const_name("]")) + + ScalarCaster data_caster; + StorageIndexCaster indices_caster, indptr_caster; + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + object obj = borrow(src); + try { + object matrix_type = module_::import_("scipy.sparse").attr(RowMajor ? 
"csr_matrix" : "csc_matrix"); + if (!obj.type().is(matrix_type)) + obj = matrix_type(obj); + } catch (const python_error &) { + return false; + } + + if (object data_o = obj.attr("data"); !data_caster.from_python(data_o, flags, cleanup)) + return false; + ScalarNDArray& values = data_caster.value; + + if (object indices_o = obj.attr("indices"); !indices_caster.from_python(indices_o, flags, cleanup)) + return false; + StorageIndexNDArray& inner_indices = indices_caster.value; + + if (object indptr_o = obj.attr("indptr"); !indptr_caster.from_python(indptr_o, flags, cleanup)) + return false; + StorageIndexNDArray& outer_indices = indptr_caster.value; + + object shape_o = obj.attr("shape"), nnz_o = obj.attr("nnz"); + Index rows, cols, nnz; + try { + if (len(shape_o) != 2) + return false; + rows = cast(shape_o[0]); + cols = cast(shape_o[1]); + nnz = cast(nnz_o); + } catch (const python_error &) { + return false; + } + + value = SparseMap(rows, cols, nnz, outer_indices.data(), inner_indices.data(), values.data()); + + return true; + } + + static handle from_cpp(T &&v, rv_policy policy, cleanup_list *cleanup) noexcept { + if (policy == rv_policy::automatic || + policy == rv_policy::automatic_reference) + policy = rv_policy::move; + + return from_cpp((const T &) v, policy, cleanup); + } + + static handle from_cpp(const T &v, rv_policy policy, cleanup_list *) noexcept { + if (!v.isCompressed()) { + PyErr_SetString(PyExc_ValueError, + "nanobind: unable to return an Eigen sparse matrix that is not in a compressed format. " + "Please call `.makeCompressed()` before returning the value on the C++ end."); + return handle(); + } + + object matrix_type; + try { + matrix_type = module_::import_("scipy.sparse").attr(RowMajor ? 
"csr_matrix" : "csc_matrix"); + } catch (python_error &e) { + e.restore(); + return handle(); + } + + const Index rows = v.rows(), cols = v.cols(); + const size_t data_shape[] = { (size_t) v.nonZeros() }; + const size_t outer_indices_shape[] = { (size_t) ((RowMajor ? rows : cols) + 1) }; + + T *src = std::addressof(const_cast(v)); + object owner; + if (policy == rv_policy::move) { + src = new T(std::move(v)); + owner = capsule(src, [](void *p) noexcept { delete (T *) p; }); + } + + ScalarNDArray data(src->valuePtr(), 1, data_shape, owner); + StorageIndexNDArray outer_indices(src->outerIndexPtr(), 1, outer_indices_shape, owner); + StorageIndexNDArray inner_indices(src->innerIndexPtr(), 1, data_shape, owner); + + try { + return matrix_type(nanobind::make_tuple( + std::move(data), std::move(inner_indices), std::move(outer_indices)), + nanobind::make_tuple(rows, cols)) + .release(); + } catch (python_error &e) { + e.restore(); + return handle(); + } + } +}; + + +/// Caster for Eigen::Map, still needs to be implemented. 
+template +struct type_caster, enable_if_t>> { + using Map = Eigen::Map; + using SparseMatrixCaster = type_caster; + static constexpr auto Name = SparseMatrixCaster::Name; + template using Cast = Map; + template static constexpr bool can_cast() { return true; } + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete; + + static handle from_cpp(const Map &v, rv_policy policy, cleanup_list *cleanup) noexcept = delete; +}; + + +/// Caster for Eigen::Ref, still needs to be implemented +template +struct type_caster, enable_if_t>> { + using Ref = Eigen::Ref; + using Map = Eigen::Map; + using MapCaster = make_caster; + static constexpr auto Name = MapCaster::Name; + template using Cast = Ref; + template static constexpr bool can_cast() { return true; } + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete; + + static handle from_cpp(const Ref &v, rv_policy policy, cleanup_list *cleanup) noexcept = delete; +}; + +NAMESPACE_END(detail) + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/eval.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/eval.h new file mode 100644 index 0000000..eb18f8e --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/eval.h @@ -0,0 +1,61 @@ +/* + nanobind/eval.h: Support for evaluating Python expressions and + statements from strings + + Adapted by Nico Schlömer from pybind11's eval.h. + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include + +NAMESPACE_BEGIN(NB_NAMESPACE) + +enum eval_mode { + // Evaluate a string containing an isolated expression + eval_expr = Py_eval_input, + + // Evaluate a string containing a single statement. Returns \c none + eval_single_statement = Py_single_input, + + // Evaluate a string containing a sequence of statement. 
Returns \c none + eval_statements = Py_file_input +}; + +template +object eval(const str &expr, handle global = handle(), handle local = handle()) { + if (!local.is_valid()) + local = global; + + // This used to be PyRun_String, but that function isn't in the stable ABI. + object codeobj = steal(Py_CompileString(expr.c_str(), "", start)); + if (!codeobj.is_valid()) + raise_python_error(); + + PyObject *result = PyEval_EvalCode(codeobj.ptr(), global.ptr(), local.ptr()); + if (!result) + raise_python_error(); + + return steal(result); +} + +template +object eval(const char (&s)[N], handle global = handle(), handle local = handle()) { + // Support raw string literals by removing common leading whitespace + str expr = (s[0] == '\n') ? str(module_::import_("textwrap").attr("dedent")(s)) : str(s); + return eval(expr, global, local); +} + +inline void exec(const str &expr, handle global = handle(), handle local = handle()) { + eval(expr, global, local); +} + +template +void exec(const char (&s)[N], handle global = handle(), handle local = handle()) { + eval(s, global, local); +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.h new file mode 100644 index 0000000..cb15464 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.h @@ -0,0 +1,261 @@ +/* + nanobind/intrusive/counter.h: Intrusive reference counting sample + implementation. + + Intrusive reference counting is a simple solution for various lifetime and + ownership-related issues that can arise in Python bindings of C++ code. The + implementation here represents one of many ways in which intrusive + reference counting can be realized and is included for convenience. + + The code in this file is designed to be truly minimal: it depends neither + on Python, nanobind, nor the STL. 
This enables its use in small projects + with a 100% optional Python interface. + + Two section of nanobind's documentation discuss intrusive reference + counting in general: + + - https://nanobind.readthedocs.io/en/latest/ownership.html + - https://nanobind.readthedocs.io/en/latest/ownership_adv.html + + Comments below are specific to this sample implementation. + + Copyright (c) 2023 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include + +// Override this definition to specify DLL export/import declarations +#if !defined(NB_INTRUSIVE_EXPORT) +# define NB_INTRUSIVE_EXPORT +#endif + +#if !defined(Py_PYTHON_H) +/* While the implementation below does not directly depend on Python, the + PyObject type occurs in a few function interfaces (in a fully opaque + manner). The lines below forward-declare it. */ +extern "C" { + struct _object; + typedef _object PyObject; +}; +#endif + +#if !defined(NAMESPACE_BEGIN) +# define NAMESPACE_BEGIN(name) namespace name { +#endif + +#if !defined(NAMESPACE_END) +# define NAMESPACE_END(name) } +#endif + +NAMESPACE_BEGIN(nanobind) + +/** \brief Simple intrusive reference counter. + * + * Intrusive reference counting is a simple solution for various lifetime and + * ownership-related issues that can arise in Python bindings of C++ code. The + * implementation here represents one of many ways in which intrusive reference + * counting can be realized and is included for convenience. + * + * The ``intrusive_counter`` class represents an atomic counter that can be + * increased (via ``inc_ref()``) or decreased (via ``dec_ref()``). When the + * counter reaches zero, the object should be deleted, which ``dec_ref()`` + * indicates by returning ``true``. + * + * In addition to this simple counting mechanism, ownership of the object can + * also be transferred to Python (via ``set_self_py()``). 
In this case, + * subsequent calls to ``inc_ref()`` and ``dec_ref()`` modify the reference + * count of the underlying Python object. The ``intrusive_counter`` class + * supports both cases using only ``sizeof(void*)`` bytes of storage. + * + * To incorporate intrusive reference counting into your own project, you would + * usually add an ``intrusive_counter``-typed member to the base class of an + * object hierarchy and expose it as follows: + * + * ```cpp + * #include + * + * class Object { + * public: + * void inc_ref() noexcept { m_ref_count.inc_ref(); } + * bool dec_ref() noexcept { return m_ref_count.dec_ref(); } + * + * // Important: must declare virtual destructor + * virtual ~Object() = default; + * + * void set_self_py(PyObject *self) noexcept { + * m_ref_count.set_self_py(self); + * } + * + * private: + * nb::intrusive_counter m_ref_count; + * }; + * + * // Convenience function for increasing the reference count of an instance + * inline void inc_ref(Object *o) noexcept { + * if (o) + * o->inc_ref(); + * } + * + * // Convenience function for decreasing the reference count of an instance + * // and potentially deleting it when the count reaches zero + * inline void dec_ref(Object *o) noexcept { + * if (o && o->dec_ref()) + * delete o; + * } + * ``` + * + * Alternatively, you could also inherit from ``intrusive_base``, which obviates + * the need for all of the above declarations: + * + * ```cpp + * class Object : public intrusive_base { + * public: + * // ... + * }; + * ``` + * + * When binding the base class in Python, you must indicate to nanobind that + * this type uses intrusive reference counting and expose the ``set_self_py`` + * member. This must only be done once, as the attribute is automatically + * inherited by subclasses. 
+ * + * ```cpp + * nb::class_( + * m, "Object", + * nb::intrusive_ptr( + * [](Object *o, PyObject *po) noexcept { o->set_self_py(po); })); + * ``` + * + * Also, somewhere in your binding initialization code, you must call + * + * ```cpp + * nb::intrusive_init( + * [](PyObject *o) noexcept { + * nb::gil_scoped_acquire guard; + * Py_INCREF(o); + * }, + * [](PyObject *o) noexcept { + * nb::gil_scoped_acquire guard; + * Py_DECREF(o); + * }); + * ``` + * + * For this all to compile, a single one of your .cpp files must include this + * header file from somewhere as follows: + * + * ```cpp + * #include + * ``` + * + * Calling the ``inc_ref()`` and ``dec_ref()`` members many times throughout + * the code can quickly become tedious. Nanobind also ships with a ``ref`` + * RAII helper class to help with this. + * + * ```cpp + * #include + * + * { + * ref x = new MyObject(); // <-- assigment to ref<..> automatically calls inc_ref() + * x->func(); // ref<..> can be used like a normal pointer + * } // <-- Destruction of ref<..> calls dec_ref(), deleting the instance in this example. + * ``` + * + * When the file ``nanobind/intrusive/ref.h`` is included following + * ``nanobind/nanobind.h``, it also exposes a custom type caster to bind + * functions taking or returning ``ref``-typed values. 
+ */ +struct NB_INTRUSIVE_EXPORT intrusive_counter { +public: + intrusive_counter() noexcept = default; + + // The counter value is not affected by copy/move assignment/construction + intrusive_counter(const intrusive_counter &) noexcept { } + intrusive_counter(intrusive_counter &&) noexcept { } + intrusive_counter &operator=(const intrusive_counter &) noexcept { return *this; } + intrusive_counter &operator=(intrusive_counter &&) noexcept { return *this; } + + /// Increase the object's reference count + void inc_ref() const noexcept; + + /// Decrease the object's reference count, return ``true`` if it should be deallocated + bool dec_ref() const noexcept; + + /// Return the Python object associated with this instance (or NULL) + PyObject *self_py() const noexcept; + + /// Set the Python object associated with this instance + void set_self_py(PyObject *self) noexcept; + +protected: + /** + * \brief Mutable counter. Note that the value ``1`` actually encodes + * a zero reference count (see the file ``counter.inl`` for details). 
+ */ + mutable uintptr_t m_state = 1; +}; + +static_assert( + sizeof(intrusive_counter) == sizeof(void *), + "The intrusive_counter class should always have the same size as a pointer."); + +/// Reference-counted base type of an object hierarchy +class NB_INTRUSIVE_EXPORT intrusive_base { +public: + /// Increase the object's reference count + void inc_ref() const noexcept { m_ref_count.inc_ref(); } + + /// Decrease the object's reference count, return ``true`` if it should be deallocated + bool dec_ref() const noexcept { return m_ref_count.dec_ref(); } + + /// Set the Python object associated with this instance + void set_self_py(PyObject *self) noexcept { m_ref_count.set_self_py(self); } + + /// Return the Python object associated with this instance (or NULL) + PyObject *self_py() const noexcept { return m_ref_count.self_py(); } + + /// Virtual destructor + virtual ~intrusive_base() = default; + +private: + mutable intrusive_counter m_ref_count; +}; + +/** + * \brief Increase the reference count of an intrusively reference-counted + * object ``o`` if ``o`` is non-NULL. + */ +inline void inc_ref(const intrusive_base *o) noexcept { + if (o) + o->inc_ref(); +} + +/** + * \brief Decrease the reference count and potentially delete an intrusively + * reference-counted object ``o`` if ``o`` is non-NULL. + */ +inline void dec_ref(const intrusive_base *o) noexcept { + if (o && o->dec_ref()) + delete o; +} + +/** + * \brief Install Python reference counting handlers + * + * The ``intrusive_counter`` class is designed so that the dependency on Python is + * *optional*: the code compiles in ordinary C++ projects, in which case the + * Python reference counting functionality will simply not be used. + * + * Python binding code must invoke ``intrusive_init`` once to supply two + * functions that increase and decrease the reference count of a Python object, + * while ensuring that the GIL is held. 
+ */ +extern NB_INTRUSIVE_EXPORT +void intrusive_init(void (*intrusive_inc_ref_py)(PyObject *) noexcept, + void (*intrusive_dec_ref_py)(PyObject *) noexcept); + +NAMESPACE_END(nanobind) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.inl b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.inl new file mode 100644 index 0000000..faf6a27 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/counter.inl @@ -0,0 +1,148 @@ +/* + nanobind/intrusive/counter.inl: Intrusive reference counting sample + implementation; see 'counter.h' for an explanation of the interface. + + Copyright (c) 2023 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "counter.h" +#include +#include + +NAMESPACE_BEGIN(nanobind) + +// The code below uses intrinsics for atomic operations. This is not as nice +// and portable as ``std::atomic`` but avoids pulling in large amounts of +// STL header code + +#if !defined(_MSC_VER) +#define NB_ATOMIC_LOAD(ptr) __atomic_load_n(ptr, 0) +#define NB_ATOMIC_STORE(ptr, v) __atomic_store_n(ptr, v, 0) +#define NB_ATOMIC_CMPXCHG(ptr, cmp, xchg) \ + __atomic_compare_exchange_n(ptr, cmp, xchg, true, 0, 0) +#else +extern "C" void *_InterlockedCompareExchangePointer( + void *volatile *Destination, + void *Exchange, void *Comparand); +#pragma intrinsic(_InterlockedCompareExchangePointer) + +#define NB_ATOMIC_LOAD(ptr) *((volatile const uintptr_t *) ptr) +#define NB_ATOMIC_STORE(ptr, v) *((volatile uintptr_t *) ptr) = v; +#define NB_ATOMIC_CMPXCHG(ptr, cmp, xchg) nb_cmpxchg(ptr, cmp, xchg) + +static bool nb_cmpxchg(uintptr_t *ptr, uintptr_t *cmp, uintptr_t xchg) { + uintptr_t cmpv = *cmp; + uintptr_t prev = (uintptr_t) _InterlockedCompareExchangePointer( + (void * volatile *) ptr, (void *) xchg, (void *) cmpv); + if (prev == cmpv) { + return true; + } else { + *cmp = prev; + return false; + } +} 
+#endif + +static void (*intrusive_inc_ref_py)(PyObject *) noexcept = nullptr, + (*intrusive_dec_ref_py)(PyObject *) noexcept = nullptr; + +void intrusive_init(void (*intrusive_inc_ref_py_)(PyObject *) noexcept, + void (*intrusive_dec_ref_py_)(PyObject *) noexcept) { + intrusive_inc_ref_py = intrusive_inc_ref_py_; + intrusive_dec_ref_py = intrusive_dec_ref_py_; +} + +/** A few implementation details: + * + * The ``intrusive_counter`` constructor sets the ``m_state`` field to ``1``, + * which indicates that the instance is owned by C++. Bits 2..63 of this + * field are used to store the actual reference count value. The + * ``inc_ref()`` and ``dec_ref()`` functions increment or decrement this + * number. When ``dec_ref()`` removes the last reference, the instance + * returns ``true`` to indicate that it should be deallocated using a + * *delete expression* that would typically be handled using a polymorphic + * destructor. + * + * When an class with intrusive reference counting is returned from C++ to + * Python, nanobind will invoke ``set_self_py()``, which hands ownership + * over to Python/nanobind. Any remaining references will be moved from the + * ``m_state`` field to the Python reference count. In this mode, + * ``inc_ref()`` and ``dec_ref()`` wrap Python reference counting + * primitives (``Py_INCREF()`` / ``Py_DECREF()``) which must be made + * available by calling the function ``intrusive_init`` once during module + * initialization. Note that the `m_state` field is also used to store a + * pointer to the `PyObject *`. Python instance pointers are always aligned + * (i.e. bit 1 is zero), which disambiguates between the two possible + * configurations. 
+ */ + +void intrusive_counter::inc_ref() const noexcept { + uintptr_t v = NB_ATOMIC_LOAD(&m_state); + + while (true) { + if (v & 1) { + if (!NB_ATOMIC_CMPXCHG(&m_state, &v, v + 2)) + continue; + } else { + intrusive_inc_ref_py((PyObject *) v); + } + + break; + } +} + +bool intrusive_counter::dec_ref() const noexcept { + uintptr_t v = NB_ATOMIC_LOAD(&m_state); + + while (true) { + if (v & 1) { + if (v == 1) { + fprintf(stderr, + "intrusive_counter::dec_ref(%p): reference count " + "underflow!", (void *) this); + abort(); + } + + if (!NB_ATOMIC_CMPXCHG(&m_state, &v, v - 2)) + continue; + + if (v == 3) + return true; + } else { + intrusive_dec_ref_py((PyObject *) v); + } + + return false; + } +} + +void intrusive_counter::set_self_py(PyObject *o) noexcept { + uintptr_t v = NB_ATOMIC_LOAD(&m_state); + + if (v & 1) { + v >>= 1; + for (uintptr_t i = 0; i < v; ++i) + intrusive_inc_ref_py(o); + + NB_ATOMIC_STORE(&m_state, (uintptr_t) o); + } else { + fprintf(stderr, + "intrusive_counter::set_self_py(%p): a Python object was " + "already present!", (void *) this); + abort(); + } +} + +PyObject *intrusive_counter::self_py() const noexcept { + uintptr_t v = NB_ATOMIC_LOAD(&m_state); + + if (v & 1) + return nullptr; + else + return (PyObject *) v; +} + +NAMESPACE_END(nanobind) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/ref.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/ref.h new file mode 100644 index 0000000..3f113b6 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/intrusive/ref.h @@ -0,0 +1,153 @@ +/* + nanobind/intrusive/ref.h: This file defines the ``ref`` RAII scoped + reference counting helper class. + + When included following ``nanobind/nanobind.h``, the code below also + exposes a custom type caster to bind functions taking or returning + ``ref``-typed values. + + Copyright (c) 2023 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "counter.h" + +NAMESPACE_BEGIN(nanobind) + +/** + * \brief RAII scoped reference counting helper class + * + * ``ref`` is a simple RAII wrapper class that encapsulates a pointer to an + * instance with intrusive reference counting. + * + * It takes care of increasing and decreasing the reference count as needed and + * deleting the instance when the count reaches zero. + * + * For this to work, compatible functions ``inc_ref()`` and ``dec_ref()`` must + * be defined before including this file. Default implementations for + * subclasses of the type ``intrusive_base`` are already provided as part of the + * file ``counter.h``. + */ +template class ref { +public: + /// Create a null reference + ref() = default; + + /// Construct a reference from a pointer + ref(T *ptr) : m_ptr(ptr) { inc_ref((intrusive_base *) m_ptr); } + + /// Copy a reference, increases the reference count + ref(const ref &r) : m_ptr(r.m_ptr) { inc_ref((intrusive_base *) m_ptr); } + + /// Move a reference witout changing the reference count + ref(ref &&r) noexcept : m_ptr(r.m_ptr) { r.m_ptr = nullptr; } + + /// Destroy this reference + ~ref() { dec_ref((intrusive_base *) m_ptr); } + + /// Move-assign another reference into this one + ref &operator=(ref &&r) noexcept { + dec_ref((intrusive_base *) m_ptr); + m_ptr = r.m_ptr; + r.m_ptr = nullptr; + return *this; + } + + /// Copy-assign another reference into this one + ref &operator=(const ref &r) { + inc_ref((intrusive_base *) r.m_ptr); + dec_ref((intrusive_base *) m_ptr); + m_ptr = r.m_ptr; + return *this; + } + + /// Overwrite this reference with a pointer to another object + ref &operator=(T *ptr) { + inc_ref((intrusive_base *) ptr); + dec_ref((intrusive_base *) m_ptr); + m_ptr = ptr; + return *this; + } + + /// Clear the currently stored reference + void reset() { + dec_ref((intrusive_base *) m_ptr); + m_ptr = 
nullptr; + } + + /// Compare this reference with another reference + bool operator==(const ref &r) const { return m_ptr == r.m_ptr; } + + /// Compare this reference with another reference + bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; } + + /// Compare this reference with a pointer + bool operator==(const T *ptr) const { return m_ptr == ptr; } + + /// Compare this reference with a pointer + bool operator!=(const T *ptr) const { return m_ptr != ptr; } + + /// Access the object referenced by this reference + T *operator->() { return m_ptr; } + + /// Access the object referenced by this reference + const T *operator->() const { return m_ptr; } + + /// Return a C++ reference to the referenced object + T &operator*() { return *m_ptr; } + + /// Return a const C++ reference to the referenced object + const T &operator*() const { return *m_ptr; } + + /// Return a pointer to the referenced object + operator T *() { return m_ptr; } + + /// Return a const pointer to the referenced object + operator const T *() const { return m_ptr; } + + /// Return a pointer to the referenced object + T *get() { return m_ptr; } + + /// Return a const pointer to the referenced object + const T *get() const { return m_ptr; } + +private: + T *m_ptr = nullptr; +}; + +// Registar a type caster for ``ref`` if nanobind was previously #included +#if defined(NB_VERSION_MAJOR) +NAMESPACE_BEGIN(detail) +template struct type_caster> { + using Caster = make_caster; + static constexpr bool IsClass = true; + NB_TYPE_CASTER(ref, Caster::Name) + + bool from_python(handle src, uint8_t flags, + cleanup_list *cleanup) noexcept { + Caster caster; + if (!caster.from_python(src, flags, cleanup)) + return false; + + value = Value(caster.operator T *()); + return true; + } + + static handle from_cpp(const ref &value, rv_policy policy, + cleanup_list *cleanup) noexcept { + if constexpr (std::is_base_of_v) + if (policy != rv_policy::copy && policy != rv_policy::move && value.get()) + if (PyObject* obj 
= value->self_py()) + return handle(obj).inc_ref(); + + return Caster::from_cpp(value.get(), policy, cleanup); + } +}; +NAMESPACE_END(detail) +#endif + +NAMESPACE_END(nanobind) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/make_iterator.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/make_iterator.h new file mode 100644 index 0000000..9d54689 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/make_iterator.h @@ -0,0 +1,155 @@ +/* + nanobind/make_iterator.h: nb::make_[key,value_]iterator() + + This implementation is a port from pybind11 with minimal adjustments. + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include +#include + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +/* There are a large number of apparently unused template arguments because + each combination requires a separate nb::class_ registration. */ +template +struct iterator_state { + Iterator it; + Sentinel end; + bool first_or_done; +}; + +template +struct remove_rvalue_ref { using type = T; }; +template +struct remove_rvalue_ref { using type = T; }; + +// Note: these helpers take the iterator by non-const reference because some +// iterators in the wild can't be dereferenced when const. +template struct iterator_access { + using result_type = decltype(*std::declval()); + result_type operator()(Iterator &it) const { return *it; } +}; + +template struct iterator_key_access { + // Note double parens in decltype((...)) to capture the value category + // as well. This will be lvalue if the iterator's operator* returned an + // lvalue reference, and xvalue if the iterator's operator* returned an + // object (or rvalue reference but that's unlikely). decltype of an xvalue + // produces T&&, but we want to return a value T from operator() in that + // case, in order to avoid creating a Python object that references a + // C++ temporary. 
Thus, pass the result through remove_rvalue_ref. + using result_type = typename remove_rvalue_ref< + decltype(((*std::declval()).first))>::type; + result_type operator()(Iterator &it) const { return (*it).first; } +}; + +template struct iterator_value_access { + using result_type = typename remove_rvalue_ref< + decltype(((*std::declval()).second))>::type; + result_type operator()(Iterator &it) const { return (*it).second; } +}; + +template +typed make_iterator_impl(handle scope, const char *name, + Iterator &&first, Sentinel &&last, + Extra &&...extra) { + using State = iterator_state; + + static_assert( + !detail::is_base_caster_v> || + detail::is_copy_constructible_v || + (Policy != rv_policy::automatic_reference && + Policy != rv_policy::copy), + "make_iterator_impl(): the generated __next__ would copy elements, so the " + "element type must be copy-constructible"); + + if (!type().is_valid()) { + class_(scope, name) + .def("__iter__", [](handle h) { return h; }) + .def("__next__", + [](State &s) -> ValueType { + if (!s.first_or_done) + ++s.it; + else + s.first_or_done = false; + + if (s.it == s.end) { + s.first_or_done = true; + throw stop_iteration(); + } + + return Access()(s.it); + }, + std::forward(extra)..., + Policy); + } + + return borrow>(cast(State{ + std::forward(first), std::forward(last), true })); +} + +NAMESPACE_END(detail) + +/// Makes a python iterator from a first and past-the-end C++ InputIterator. +template ::result_type, + typename... Extra> +auto make_iterator(handle scope, const char *name, Iterator &&first, Sentinel &&last, Extra &&...extra) { + return detail::make_iterator_impl, Policy, + Iterator, Sentinel, ValueType, Extra...>( + scope, name, std::forward(first), + std::forward(last), std::forward(extra)...); +} + +/// Makes an iterator over the keys (`.first`) of a iterator over pairs from a +/// first and past-the-end InputIterator. +template ::result_type, + typename... 
Extra> +auto make_key_iterator(handle scope, const char *name, Iterator &&first, + Sentinel &&last, Extra &&...extra) { + return detail::make_iterator_impl, + Policy, Iterator, Sentinel, KeyType, + Extra...>( + scope, name, std::forward(first), + std::forward(last), std::forward(extra)...); +} + +/// Makes an iterator over the values (`.second`) of a iterator over pairs from a +/// first and past-the-end InputIterator. +template ::result_type, + typename... Extra> +auto make_value_iterator(handle scope, const char *name, Iterator &&first, Sentinel &&last, Extra &&...extra) { + return detail::make_iterator_impl, + Policy, Iterator, Sentinel, ValueType, + Extra...>( + scope, name, std::forward(first), + std::forward(last), std::forward(extra)...); +} + +/// Makes an iterator over values of a container supporting `std::begin()`/`std::end()` +template +auto make_iterator(handle scope, const char *name, Type &value, Extra &&...extra) { + return make_iterator(scope, name, std::begin(value), + std::end(value), + std::forward(extra)...); +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nanobind.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nanobind.h new file mode 100644 index 0000000..7198785 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nanobind.h @@ -0,0 +1,58 @@ +/* + nanobind/nanobind.h: Main include file for core nanobind components + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#if __cplusplus < 201703L && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L) +# error The nanobind library requires C++17! +#endif + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4702) // unreachable code (e.g. when binding a noreturn function) + // The next two lines disable warnings that are "just noise" according to Stephan T. 
Lavavej (a MSFT STL maintainer) +# pragma warning(disable: 4275) // non dll-interface class 'std::exception' used as base for dll-interface class [..] +# pragma warning(disable: 4251) // [..] needs to have a dll-interface to be used by clients of class [..] +#endif + +#define NB_VERSION_MAJOR 2 +#define NB_VERSION_MINOR 2 +#define NB_VERSION_PATCH 0 +#define NB_VERSION_DEV 1 // A value > 0 indicates a development release + +// Core C++ headers that nanobind depends on +#include +#include +#include +#include +#include +#include +#include + +// Implementation. The nb_*.h files should only be included through nanobind.h +#include "nb_python.h" +#include "nb_defs.h" +#include "nb_enums.h" +#include "nb_traits.h" +#include "nb_tuple.h" +#include "nb_lib.h" +#include "nb_descr.h" +#include "nb_types.h" +#include "nb_accessor.h" +#include "nb_error.h" +#include "nb_attr.h" +#include "nb_cast.h" +#include "nb_misc.h" +#include "nb_call.h" +#include "nb_func.h" +#include "nb_class.h" + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_accessor.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_accessor.h new file mode 100644 index 0000000..f690360 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_accessor.h @@ -0,0 +1,225 @@ +/* + nanobind/nb_accessor.h: Accessor helper class for .attr(), operator[] + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +template class accessor : public api> { + template friend void nanobind::del(accessor &); + template friend void nanobind::del(accessor &&); +public: + static constexpr auto Name = const_name("object"); + + template + accessor(handle obj, Key &&key) + : m_base(obj.ptr()), m_key(std::move(key)) { } + accessor(const accessor &) = delete; + accessor(accessor &&) = delete; + ~accessor() { + if constexpr (Impl::cache_dec_ref) + Py_XDECREF(m_cache); + } + + template accessor& operator=(T &&value); + + template > = 0> + operator T() const { return borrow(ptr()); } + NB_INLINE PyObject *ptr() const { + Impl::get(m_base, m_key, &m_cache); + return m_cache; + } + NB_INLINE handle base() const { return m_base; } + NB_INLINE object key() const { return steal(Impl::key(m_key)); } + +private: + NB_INLINE void del () { Impl::del(m_base, m_key); } + +private: + PyObject *m_base; + mutable PyObject *m_cache{nullptr}; + typename Impl::key_type m_key; +}; + +struct str_attr { + static constexpr bool cache_dec_ref = true; + using key_type = const char *; + + NB_INLINE static void get(PyObject *obj, const char *key, PyObject **cache) { + detail::getattr_or_raise(obj, key, cache); + } + + NB_INLINE static void set(PyObject *obj, const char *key, PyObject *v) { + setattr(obj, key, v); + } + + NB_INLINE static PyObject *key(const char *key) { + return PyUnicode_InternFromString(key); + } +}; + +struct obj_attr { + static constexpr bool cache_dec_ref = true; + using key_type = handle; + + NB_INLINE static void get(PyObject *obj, handle key, PyObject **cache) { + detail::getattr_or_raise(obj, key.ptr(), cache); + } + + NB_INLINE static void set(PyObject *obj, handle key, PyObject *v) { + setattr(obj, key.ptr(), v); + } + + NB_INLINE static PyObject *key(handle key) { + Py_INCREF(key.ptr()); + return key.ptr(); + } +}; + +struct str_item { + static constexpr bool cache_dec_ref = true; + using key_type = const char *; + + 
NB_INLINE static void get(PyObject *obj, const char *key, PyObject **cache) { + detail::getitem_or_raise(obj, key, cache); + } + + NB_INLINE static void set(PyObject *obj, const char *key, PyObject *v) { + setitem(obj, key, v); + } + + NB_INLINE static void del(PyObject *obj, const char *key) { + delitem(obj, key); + } +}; + +struct obj_item { + static constexpr bool cache_dec_ref = true; + using key_type = handle; + + NB_INLINE static void get(PyObject *obj, handle key, PyObject **cache) { + detail::getitem_or_raise(obj, key.ptr(), cache); + } + + NB_INLINE static void set(PyObject *obj, handle key, PyObject *v) { + setitem(obj, key.ptr(), v); + } + + NB_INLINE static void del(PyObject *obj, handle key) { + delitem(obj, key.ptr()); + } +}; + +struct num_item { + static constexpr bool cache_dec_ref = true; + using key_type = Py_ssize_t; + + NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) { + detail::getitem_or_raise(obj, index, cache); + } + + NB_INLINE static void set(PyObject *obj, Py_ssize_t index, PyObject *v) { + setitem(obj, index, v); + } + + NB_INLINE static void del(PyObject *obj, Py_ssize_t index) { + delitem(obj, index); + } +}; + +struct num_item_list { + #if defined(Py_GIL_DISABLED) + static constexpr bool cache_dec_ref = true; + #else + static constexpr bool cache_dec_ref = false; + #endif + + using key_type = Py_ssize_t; + + NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) { + #if defined(Py_GIL_DISABLED) + *cache = PyList_GetItemRef(obj, index); + #else + *cache = NB_LIST_GET_ITEM(obj, index); + #endif + } + + NB_INLINE static void set(PyObject *obj, Py_ssize_t index, PyObject *v) { +#if defined(Py_LIMITED_API) || defined(NB_FREE_THREADED) + Py_INCREF(v); + PyList_SetItem(obj, index, v); +#else + PyObject *old = NB_LIST_GET_ITEM(obj, index); + Py_INCREF(v); + NB_LIST_SET_ITEM(obj, index, v); + Py_DECREF(old); +#endif + } + + NB_INLINE static void del(PyObject *obj, Py_ssize_t index) { + 
delitem(obj, index); + } +}; + +struct num_item_tuple { + static constexpr bool cache_dec_ref = false; + using key_type = Py_ssize_t; + + NB_INLINE static void get(PyObject *obj, Py_ssize_t index, PyObject **cache) { + *cache = NB_TUPLE_GET_ITEM(obj, index); + } + + template static void set(Ts...) { + static_assert(false_v, "tuples are immutable!"); + } +}; + +template accessor api::attr(handle key) const { + return { derived(), borrow(key) }; +} + +template accessor api::attr(const char *key) const { + return { derived(), key }; +} + +template accessor api::doc() const { + return { derived(), "__doc__" }; +} + +template accessor api::operator[](handle key) const { + return { derived(), borrow(key) }; +} + +template accessor api::operator[](const char *key) const { + return { derived(), key }; +} + +template +template >> +accessor api::operator[](T index) const { + return { derived(), (Py_ssize_t) index }; +} + +NAMESPACE_END(detail) + +template >> +detail::accessor list::operator[](T index) const { + return { derived(), (Py_ssize_t) index }; +} + +template >> +detail::accessor tuple::operator[](T index) const { + return { derived(), (Py_ssize_t) index }; +} + +template str str::format(Args&&... args) { + return steal( + derived().attr("format")((detail::forward_t) args...).release()); +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_attr.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_attr.h new file mode 100644 index 0000000..0476824 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_attr.h @@ -0,0 +1,433 @@ +/* + nanobind/nb_attr.h: Annotations for function and class declarations + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) + +struct scope { + PyObject *value; + NB_INLINE scope(handle value) : value(value.ptr()) {} +}; + +struct name { + const char *value; + NB_INLINE name(const char *value) : value(value) {} +}; + +struct arg_v; +struct arg_locked; +struct arg_locked_v; + +// Basic function argument descriptor (no default value, not locked) +struct arg { + NB_INLINE constexpr explicit arg(const char *name = nullptr) : name_(name), signature_(nullptr) { } + + // operator= can be used to provide a default value + template NB_INLINE arg_v operator=(T &&value) const; + + // Mutators that don't change default value or locked state + NB_INLINE arg &noconvert(bool value = true) { + convert_ = !value; + return *this; + } + NB_INLINE arg &none(bool value = true) { + none_ = value; + return *this; + } + NB_INLINE arg &sig(const char *value) { + signature_ = value; + return *this; + } + + // After lock(), this argument is locked + NB_INLINE arg_locked lock(); + + const char *name_, *signature_; + uint8_t convert_{ true }; + bool none_{ false }; +}; + +// Function argument descriptor with default value (not locked) +struct arg_v : arg { + object value; + NB_INLINE arg_v(const arg &base, object &&value) + : arg(base), value(std::move(value)) {} + + private: + // Inherited mutators would slice off the default, and are not generally needed + using arg::noconvert; + using arg::none; + using arg::sig; + using arg::lock; +}; + +// Function argument descriptor that is locked (no default value) +struct arg_locked : arg { + NB_INLINE constexpr explicit arg_locked(const char *name = nullptr) : arg(name) { } + NB_INLINE constexpr explicit arg_locked(const arg &base) : arg(base) { } + + // operator= can be used to provide a default value + template NB_INLINE arg_locked_v operator=(T &&value) const; + + // Mutators must be respecified in order to not slice off the locked status + NB_INLINE arg_locked &noconvert(bool value = true) { + convert_ = !value; + return *this; + } 
+ NB_INLINE arg_locked &none(bool value = true) { + none_ = value; + return *this; + } + NB_INLINE arg_locked &sig(const char *value) { + signature_ = value; + return *this; + } + + // Redundant extra lock() is allowed + NB_INLINE arg_locked &lock() { return *this; } +}; + +// Function argument descriptor that is potentially locked and has a default value +struct arg_locked_v : arg_locked { + object value; + NB_INLINE arg_locked_v(const arg_locked &base, object &&value) + : arg_locked(base), value(std::move(value)) {} + + private: + // Inherited mutators would slice off the default, and are not generally needed + using arg_locked::noconvert; + using arg_locked::none; + using arg_locked::sig; + using arg_locked::lock; +}; + +NB_INLINE arg_locked arg::lock() { return arg_locked{*this}; } + +template struct call_guard { + using type = detail::tuple; +}; + +struct dynamic_attr {}; +struct is_weak_referenceable {}; +struct is_method {}; +struct is_implicit {}; +struct is_operator {}; +struct is_arithmetic {}; +struct is_flag {}; +struct is_final {}; +struct is_generic {}; +struct kw_only {}; +struct lock_self {}; + +template struct keep_alive {}; +template struct supplement {}; +template struct intrusive_ptr { + intrusive_ptr(void (*set_self_py)(T *, PyObject *) noexcept) + : set_self_py(set_self_py) { } + void (*set_self_py)(T *, PyObject *) noexcept; +}; + +struct type_slots { + type_slots (const PyType_Slot *value) : value(value) { } + const PyType_Slot *value; +}; + +struct type_slots_callback { + using cb_t = void (*)(const detail::type_init_data *t, + PyType_Slot *&slots, size_t max_slots) noexcept; + type_slots_callback(cb_t callback) : callback(callback) { } + cb_t callback; +}; + +struct sig { + const char *value; + sig(const char *value) : value(value) { } +}; + +struct is_getter { }; + +NAMESPACE_BEGIN(literals) +constexpr arg operator"" _a(const char *name, size_t) { return arg(name); } +NAMESPACE_END(literals) + +NAMESPACE_BEGIN(detail) + +enum class 
func_flags : uint32_t { + /* Low 3 bits reserved for return value policy */ + + /// Did the user specify a name for this function, or is it anonymous? + has_name = (1 << 4), + /// Did the user specify a scope in which this function should be installed? + has_scope = (1 << 5), + /// Did the user specify a docstring? + has_doc = (1 << 6), + /// Did the user specify nb::arg/arg_v annotations for all arguments? + has_args = (1 << 7), + /// Does the function signature contain an *args-style argument? + has_var_args = (1 << 8), + /// Does the function signature contain an *kwargs-style argument? + has_var_kwargs = (1 << 9), + /// Is this function a method of a class? + is_method = (1 << 10), + /// Is this function a method called __init__? (automatically generated) + is_constructor = (1 << 11), + /// Can this constructor be used to perform an implicit conversion? + is_implicit = (1 << 12), + /// Is this function an arithmetic operator? + is_operator = (1 << 13), + /// When the function is GCed, do we need to call func_data_prelim::free_capture? + has_free = (1 << 14), + /// Should the func_new() call return a new reference? + return_ref = (1 << 15), + /// Does this overload specify a custom function signature (for docstrings, typing) + has_signature = (1 << 16), + /// Does this function have one or more nb::keep_alive() annotations? + has_keep_alive = (1 << 17) +}; + +enum cast_flags : uint8_t { + // Enable implicit conversions (code assumes this has value 1, don't reorder..) + convert = (1 << 0), + + // Passed to the 'self' argument in a constructor call (__init__) + construct = (1 << 1), + + // Indicates that the function dispatcher should accept 'None' arguments + accepts_none = (1 << 2), + + // Indicates that this cast is performed by nb::cast or nb::try_cast. + // This implies that objects added to the cleanup list may be + // released immediately after the caster's final output value is + // obtained, i.e., before it is used. 
+ manual = (1 << 3) +}; + + +struct arg_data { + const char *name; + const char *signature; + PyObject *name_py; + PyObject *value; + uint8_t flag; +}; + +template struct func_data_prelim { + // A small amount of space to capture data used by the function/closure + void *capture[3]; + + // Callback to clean up the 'capture' field + void (*free_capture)(void *); + + /// Implementation of the function call + PyObject *(*impl)(void *, PyObject **, uint8_t *, rv_policy, + cleanup_list *); + + /// Function signature description + const char *descr; + + /// C++ types referenced by 'descr' + const std::type_info **descr_types; + + /// Supplementary flags + uint32_t flags; + + /// Total number of parameters accepted by the C++ function; nb::args + /// and nb::kwargs parameters are counted as one each. If the + /// 'has_args' flag is set, then there is one arg_data structure + /// for each of these. + uint16_t nargs; + + /// Number of paramters to the C++ function that may be filled from + /// Python positional arguments without additional ceremony. nb::args and + /// nb::kwargs parameters are not counted in this total, nor are any + /// parameters after nb::args or after a nb::kw_only annotation. + /// The parameters counted here may be either named (nb::arg("name")) + /// or unnamed (nb::arg()). If unnamed, they are effectively positional-only. + /// nargs_pos is always <= nargs. + uint16_t nargs_pos; + + // ------- Extra fields ------- + + const char *name; + const char *doc; + PyObject *scope; + + // *WARNING*: nanobind regularly receives requests from users who run it + // through Clang-Tidy, or who compile with increased warnings levels, like + // + // -Wpedantic, -Wcast-qual, -Wsign-conversion, etc. + // + // (i.e., beyond -Wall -Wextra and /W4 that are currently already used) + // + // Their next step is to open a big pull request needed to silence all of + // the resulting messages. 
This comment is strategically placed here + // because the zero-length array construction below will almost certainly + // be flagged in this process. + // + // My policy on this is as follows: I am always happy to fix issues in the + // codebase. However, many of the resulting change requests are in the + // "ritual purification" category: things that cause churn, decrease + // readability, and which don't fix actual problems. It's a never-ending + // cycle because each new revision of such tooling adds further warnings + // and purification rites. + // + // So just to be clear: I do not wish to pepper this codebase with + // "const_cast" and #pragmas/comments to avoid warnings in external + // tooling just so those users can have a "silent" build. I don't think it + // is reasonable for them to impose their own style on this project. + // + // As a workaround it is likely possible to restrict the scope of style + // checks to particular C++ namespaces or source code locations. +#if defined(_MSC_VER) + // MSVC doesn't support zero-length arrays + arg_data args[Size == 0 ? 1 : Size]; +#else + // GCC and Clang do. 
+ arg_data args[Size]; +#endif +}; + +template +NB_INLINE void func_extra_apply(F &f, const name &name, size_t &) { + f.name = name.value; + f.flags |= (uint32_t) func_flags::has_name; +} + +template +NB_INLINE void func_extra_apply(F &f, const scope &scope, size_t &) { + f.scope = scope.value; + f.flags |= (uint32_t) func_flags::has_scope; +} + +template +NB_INLINE void func_extra_apply(F &f, const sig &s, size_t &) { + f.flags |= (uint32_t) func_flags::has_signature; + f.name = s.value; +} + +template +NB_INLINE void func_extra_apply(F &f, const char *doc, size_t &) { + f.doc = doc; + f.flags |= (uint32_t) func_flags::has_doc; +} + +template +NB_INLINE void func_extra_apply(F &f, is_method, size_t &) { + f.flags |= (uint32_t) func_flags::is_method; +} + +template +NB_INLINE void func_extra_apply(F &, is_getter, size_t &) { } + +template +NB_INLINE void func_extra_apply(F &f, is_implicit, size_t &) { + f.flags |= (uint32_t) func_flags::is_implicit; +} + +template +NB_INLINE void func_extra_apply(F &f, is_operator, size_t &) { + f.flags |= (uint32_t) func_flags::is_operator; +} + +template +NB_INLINE void func_extra_apply(F &f, rv_policy pol, size_t &) { + f.flags = (f.flags & ~0b111) | (uint16_t) pol; +} + +template +NB_INLINE void func_extra_apply(F &, std::nullptr_t, size_t &) { } + +template +NB_INLINE void func_extra_apply(F &f, const arg &a, size_t &index) { + uint8_t flag = 0; + if (a.none_) + flag |= (uint8_t) cast_flags::accepts_none; + if (a.convert_) + flag |= (uint8_t) cast_flags::convert; + + arg_data &arg = f.args[index]; + arg.flag = flag; + arg.name = a.name_; + arg.signature = a.signature_; + arg.value = nullptr; + index++; +} +// arg_locked will select the arg overload; the locking is added statically +// in nb_func.h + +template +NB_INLINE void func_extra_apply(F &f, const arg_v &a, size_t &index) { + arg_data &ad = f.args[index]; + func_extra_apply(f, (const arg &) a, index); + ad.value = a.value.ptr(); +} +template +NB_INLINE void 
func_extra_apply(F &f, const arg_locked_v &a, size_t &index) { + arg_data &ad = f.args[index]; + func_extra_apply(f, (const arg_locked &) a, index); + ad.value = a.value.ptr(); +} + +template +NB_INLINE void func_extra_apply(F &, kw_only, size_t &) {} + +template +NB_INLINE void func_extra_apply(F &, lock_self, size_t &) {} + +template +NB_INLINE void func_extra_apply(F &, call_guard, size_t &) {} + +template +NB_INLINE void func_extra_apply(F &f, nanobind::keep_alive, size_t &) { + f.flags |= (uint32_t) func_flags::has_keep_alive; +} + +template struct func_extra_info { + using call_guard = void; + static constexpr bool keep_alive = false; + static constexpr size_t nargs_locked = 0; +}; + +template struct func_extra_info + : func_extra_info { }; + +template +struct func_extra_info, Ts...> : func_extra_info { + static_assert(std::is_same_v::call_guard, void>, + "call_guard<> can only be specified once!"); + using call_guard = nanobind::call_guard; +}; + +template +struct func_extra_info, Ts...> : func_extra_info { + static constexpr bool keep_alive = true; +}; + +template +struct func_extra_info : func_extra_info { + static constexpr size_t nargs_locked = 1 + func_extra_info::nargs_locked; +}; + +template +struct func_extra_info : func_extra_info { + static constexpr size_t nargs_locked = 1 + func_extra_info::nargs_locked; +}; + +template +NB_INLINE void process_keep_alive(PyObject **, PyObject *, T *) { } + +template +NB_INLINE void +process_keep_alive(PyObject **args, PyObject *result, + nanobind::keep_alive *) { + keep_alive(Nurse == 0 ? result : args[Nurse - 1], + Patient == 0 ? 
result : args[Patient - 1]); +} + +NAMESPACE_END(detail) +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_call.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_call.h new file mode 100644 index 0000000..dfbeb45 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_call.h @@ -0,0 +1,150 @@ +/* + nanobind/nb_call.h: Functionality for calling Python functions from C++ + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 6255) // _alloca indicates failure by raising a stack overflow exception +#endif + +class kwargs_proxy : public handle { +public: + explicit kwargs_proxy(handle h) : handle(h) { } +}; + +class args_proxy : public handle { +public: + explicit args_proxy(handle h) : handle(h) { } + kwargs_proxy operator*() const { return kwargs_proxy(*this); } +}; + +template +args_proxy api::operator*() const { + return args_proxy(derived().ptr()); +} + +/// Implementation detail of api::operator() (call operator) +template +NB_INLINE void call_analyze(size_t &nargs, size_t &nkwargs, const T &value) { + using D = std::decay_t; + static_assert(!std::is_base_of_v, + "nb::arg().lock() may be used only when defining functions, " + "not when calling them"); + + if constexpr (std::is_same_v) + nkwargs++; + else if constexpr (std::is_same_v) + nargs += len(value); + else if constexpr (std::is_same_v) + nkwargs += len(value); + else + nargs += 1; + + (void) nargs; (void) nkwargs; (void) value; +} + +/// Implementation detail of api::operator() (call operator) +template +NB_INLINE void call_init(PyObject **args, PyObject *kwnames, size_t &nargs, + size_t &nkwargs, const size_t kwargs_offset, + T &&value) { + using D = std::decay_t; + + if constexpr (std::is_same_v) { + 
args[kwargs_offset + nkwargs] = value.value.release().ptr(); + NB_TUPLE_SET_ITEM(kwnames, nkwargs++, + PyUnicode_InternFromString(value.name_)); + } else if constexpr (std::is_same_v) { + for (size_t i = 0, l = len(value); i < l; ++i) + args[nargs++] = borrow(value[i]).release().ptr(); + } else if constexpr (std::is_same_v) { + PyObject *key, *entry; + Py_ssize_t pos = 0; + ft_object_guard guard(value); + while (PyDict_Next(value.ptr(), &pos, &key, &entry)) { + Py_INCREF(key); Py_INCREF(entry); + args[kwargs_offset + nkwargs] = entry; + NB_TUPLE_SET_ITEM(kwnames, nkwargs++, key); + } + } else { + args[nargs++] = + make_caster::from_cpp((forward_t) value, policy, nullptr).ptr(); + } + (void) args; (void) kwnames; (void) nargs; + (void) nkwargs; (void) kwargs_offset; +} + +#define NB_DO_VECTORCALL() \ + PyObject *base, **args_p; \ + if constexpr (method_call) { \ + base = derived().key().release().ptr(); \ + args[0] = derived().base().inc_ref().ptr(); \ + args_p = args; \ + nargs++; \ + } else { \ + base = derived().inc_ref().ptr(); \ + args[0] = nullptr; \ + args_p = args + 1; \ + } \ + nargs |= NB_VECTORCALL_ARGUMENTS_OFFSET; \ + return steal(obj_vectorcall(base, args_p, nargs, kwnames, method_call)) + +template +template +object api::operator()(Args &&...args_) const { + static constexpr bool method_call = + std::is_same_v> || + std::is_same_v>; + + if constexpr (((std::is_same_v || + std::is_same_v || + std::is_same_v) || ...)) { + // Complex call with keyword arguments, *args/**kwargs expansion, etc. + size_t nargs = 0, nkwargs = 0, nargs2 = 0, nkwargs2 = 0; + + // Determine storage requirements for positional and keyword args + (call_analyze(nargs, nkwargs, (const Args &) args_), ...); + + // Allocate memory on the stack + PyObject **args = + (PyObject **) alloca((nargs + nkwargs + 1) * sizeof(PyObject *)); + + PyObject *kwnames = + nkwargs ? 
PyTuple_New((Py_ssize_t) nkwargs) : nullptr; + + // Fill 'args' and 'kwnames' variables + (call_init(args + 1, kwnames, nargs2, nkwargs2, nargs, + (forward_t) args_), ...); + + NB_DO_VECTORCALL(); + } else { + // Simple version with only positional arguments + PyObject *args[sizeof...(Args) + 1], *kwnames = nullptr; + size_t nargs = 0; + + ((args[1 + nargs++] = + detail::make_caster::from_cpp( + (detail::forward_t) args_, policy, nullptr) + .ptr()), + ...); + + NB_DO_VECTORCALL(); + } +} + +#undef NB_DO_VECTORCALL + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + +NAMESPACE_END(detail) +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_cast.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_cast.h new file mode 100644 index 0000000..74ad6ce --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_cast.h @@ -0,0 +1,699 @@ +/* + nanobind/nb_cast.h: Type caster interface and essential type casters + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#define NB_TYPE_CASTER(Value_, descr) \ + using Value = Value_; \ + static constexpr auto Name = descr; \ + template using Cast = movable_cast_t; \ + template static constexpr bool can_cast() { return true; } \ + template , Value>> = 0> \ + static handle from_cpp(T_ *p, rv_policy policy, cleanup_list *list) { \ + if (!p) \ + return none().release(); \ + return from_cpp(*p, policy, list); \ + } \ + explicit operator Value*() { return &value; } \ + explicit operator Value&() { return (Value &) value; } \ + explicit operator Value&&() { return (Value &&) value; } \ + Value value; + +#define NB_MAKE_OPAQUE(...) 
\ + namespace nanobind::detail { \ + template <> class type_caster<__VA_ARGS__> \ + : public type_caster_base<__VA_ARGS__> { }; } + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +/** + * Type casters expose a member 'Cast' which users of a type caster must + * query to determine what the caster actually can (and prefers) to produce. + * The convenience alias ``cast_t`` defined below performs this query for a + * given type ``T``. + * + * Often ``cast_t`` is simply equal to ``T`` or ``T&``. More significant + * deviations are also possible, which could be due to one of the following + * two reasons: + * + * 1. Efficiency: most STL type casters create a local copy (``value`` member) + * of the value being cast. The caller should move this value to its + * intended destination instead of making further copies along the way. + * Consequently, ``cast_t>`` yields ``cast_t> + * &&`` to enable such behavior. + * + * 2. STL pairs may contain references, and such pairs aren't + * default-constructible. The STL pair caster therefore cannot create a local + * copy and must construct the pair on the fly, which in turns means that it + * cannot return references. Therefore, ``cast_t&>`` + * yields ``std::pair``. + */ + +/// Ask a type caster what flavors of a type it can actually produce -- may be different from 'T' +template using cast_t = typename make_caster::template Cast; + +/// This is a default choice for the 'Cast' type alias described above. It +/// prefers to return rvalue references to allow the caller to move the object. +template +using movable_cast_t = + std::conditional_t, intrinsic_t *, + std::conditional_t, + intrinsic_t &, intrinsic_t &&>>; + +/// This version is more careful about what the caller actually requested and +/// only moves when this was explicitly requested. 
It is the default for the +/// base type caster (i.e., types bound via ``nanobind::class_<..>``) +template +using precise_cast_t = + std::conditional_t, intrinsic_t *, + std::conditional_t, + intrinsic_t &&, intrinsic_t &>>; + +/// Many type casters delegate to another caster using the pattern: +/// ~~~ .cc +/// bool from_python(handle src, uint8_t flags, cleanup_list *cl) noexcept { +/// SomeCaster c; +/// if (!c.from_python(src, flags, cl)) return false; +/// /* do something with */ c.operator T(); +/// return true; +/// } +/// ~~~ +/// This function adjusts the flags to avoid issues where the resulting T object +/// refers into storage that will dangle after SomeCaster is destroyed, and +/// causes a static assertion failure if that's not sufficient. Use it like: +/// ~~~ .cc +/// if (!c.from_python(src, flags_for_local_caster(flags), cl)) +/// return false; +/// ~~~ +/// where the template argument T is the type you plan to extract. +template +NB_INLINE uint8_t flags_for_local_caster(uint8_t flags) noexcept { + using Caster = make_caster; + constexpr bool is_ref = std::is_pointer_v || std::is_reference_v; + if constexpr (is_base_caster_v) { + if constexpr (is_ref) { + /* References/pointers to a type produced by implicit conversions + refer to storage owned by the cleanup_list. In a nb::cast() call, + that storage will be released before the reference can be used; + to prevent dangling, don't allow implicit conversions there. */ + if (flags & ((uint8_t) cast_flags::manual)) + flags &= ~((uint8_t) cast_flags::convert); + } + } else { + /* Any pointer produced by a non-base caster will generally point + into storage owned by the caster, which won't live long enough. + Exception: the 'char' caster produces a result that points to + storage owned by the incoming Python 'str' object, so it's OK. 
*/ + static_assert(!is_ref || std::is_same_v || + (std::is_pointer_v && std::is_constructible_v), + "nanobind generally cannot produce objects that " + "contain interior pointers T* (or references T&) if " + "the pointee T is not handled by nanobind's regular " + "class binding mechanism. For example, you can write " + "a function that accepts int*, or std::vector, " + "but not std::vector."); + } + return flags; +} + +template +struct type_caster && !is_std_char_v>> { + NB_INLINE bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept { + if constexpr (std::is_floating_point_v) { + if constexpr (sizeof(T) == 8) + return detail::load_f64(src.ptr(), flags, &value); + else + return detail::load_f32(src.ptr(), flags, &value); + } else { + if constexpr (std::is_signed_v) { + if constexpr (sizeof(T) == 8) + return detail::load_i64(src.ptr(), flags, (int64_t *) &value); + else if constexpr (sizeof(T) == 4) + return detail::load_i32(src.ptr(), flags, (int32_t *) &value); + else if constexpr (sizeof(T) == 2) + return detail::load_i16(src.ptr(), flags, (int16_t *) &value); + else + return detail::load_i8(src.ptr(), flags, (int8_t *) &value); + } else { + if constexpr (sizeof(T) == 8) + return detail::load_u64(src.ptr(), flags, (uint64_t *) &value); + else if constexpr (sizeof(T) == 4) + return detail::load_u32(src.ptr(), flags, (uint32_t *) &value); + else if constexpr (sizeof(T) == 2) + return detail::load_u16(src.ptr(), flags, (uint16_t *) &value); + else + return detail::load_u8(src.ptr(), flags, (uint8_t *) &value); + } + } + } + + NB_INLINE static handle from_cpp(T src, rv_policy, cleanup_list *) noexcept { + if constexpr (std::is_floating_point_v) { + return PyFloat_FromDouble((double) src); + } else { + if constexpr (std::is_signed_v) { + if constexpr (sizeof(T) <= sizeof(long)) + return PyLong_FromLong((long) src); + else + return PyLong_FromLongLong((long long) src); + } else { + if constexpr (sizeof(T) <= sizeof(unsigned long)) + return 
PyLong_FromUnsignedLong((unsigned long) src); + else + return PyLong_FromUnsignedLongLong((unsigned long long) src); + } + } + } + + NB_TYPE_CASTER(T, const_name>("int", "float")) +}; + +template +struct type_caster>> { + NB_INLINE bool from_python(handle src, uint8_t flags, cleanup_list *) noexcept { + int64_t result; + bool rv = enum_from_python(&typeid(T), src.ptr(), &result, flags); + value = (T) result; + return rv; + } + + NB_INLINE static handle from_cpp(T src, rv_policy, cleanup_list *) noexcept { + return enum_from_cpp(&typeid(T), (int64_t) src); + } + + NB_TYPE_CASTER(T, const_name()) +}; + +template <> struct type_caster { + static constexpr auto Name = const_name("None"); +}; + +template <> struct type_caster { + template using Cast = void *; + template static constexpr bool can_cast() { return true; } + using Value = void*; + static constexpr auto Name = const_name("types.CapsuleType"); + explicit operator void *() { return value; } + Value value; + + bool from_python(handle src, uint8_t, cleanup_list *) noexcept { + if (src.is_none()) { + value = nullptr; + return true; + } else { + value = PyCapsule_GetPointer(src.ptr(), "nb_handle"); + if (!value) { + PyErr_Clear(); + return false; + } + return true; + } + } + + static handle from_cpp(void *ptr, rv_policy, cleanup_list *) noexcept { + if (ptr) + return PyCapsule_New(ptr, "nb_handle", nullptr); + else + return none().release(); + } +}; + +template struct none_caster { + bool from_python(handle src, uint8_t, cleanup_list *) noexcept { + if (src.is_none()) + return true; + return false; + } + + static handle from_cpp(T, rv_policy, cleanup_list *) noexcept { + return none().release(); + } + + NB_TYPE_CASTER(T, const_name("None")) +}; + +template <> struct type_caster : none_caster { }; + +template <> struct type_caster { + bool from_python(handle src, uint8_t, cleanup_list *) noexcept { + if (src.ptr() == Py_True) { + value = true; + return true; + } else if (src.ptr() == Py_False) { + value = false; + 
return true; + } else { + return false; + } + } + + static handle from_cpp(bool src, rv_policy, cleanup_list *) noexcept { + return handle(src ? Py_True : Py_False).inc_ref(); + } + + NB_TYPE_CASTER(bool, const_name("bool")) +}; + +template <> struct type_caster { + using Value = const char *; + Value value; + Py_ssize_t size; + static constexpr auto Name = const_name("str"); + template + using Cast = std::conditional_t, const char *, char>; + + bool from_python(handle src, uint8_t, cleanup_list *) noexcept { + value = PyUnicode_AsUTF8AndSize(src.ptr(), &size); + if (!value) { + PyErr_Clear(); + return false; + } + return true; + } + + static handle from_cpp(const char *value, rv_policy, + cleanup_list *) noexcept { + if (value == nullptr) { + PyObject* result = Py_None; + Py_INCREF(result); + return result; + } + return PyUnicode_FromString(value); + } + + static handle from_cpp(char value, rv_policy, cleanup_list *) noexcept { + return PyUnicode_FromStringAndSize(&value, 1); + } + + template + NB_INLINE bool can_cast() const noexcept { + return std::is_pointer_v || (value && size == 1); + } + + explicit operator const char *() { return value; } + + explicit operator char() { + if (can_cast()) + return value[0]; + else + throw next_overload(); + } +}; + +template struct type_caster> { + using Caster = make_caster; + using T2 = pointer_and_handle; + NB_TYPE_CASTER(T2, Caster::Name) + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + Caster c; + if (!c.from_python(src, flags_for_local_caster(flags), cleanup) || + !c.template can_cast()) + return false; + value.h = src; + value.p = c.operator T*(); + return true; + } +}; + +template struct typed_name { + static constexpr auto Name = type_caster::Name; +}; + +#if PY_VERSION_HEX < 0x03090000 +#define NB_TYPED_NAME_PYTHON38(type, name) \ + template <> struct typed_name { \ + static constexpr auto Name = detail::const_name(name); \ + }; + +NB_TYPED_NAME_PYTHON38(nanobind::tuple, 
NB_TYPING_TUPLE) +NB_TYPED_NAME_PYTHON38(list, NB_TYPING_LIST) +NB_TYPED_NAME_PYTHON38(set, NB_TYPING_SET) +NB_TYPED_NAME_PYTHON38(dict, NB_TYPING_DICT) +NB_TYPED_NAME_PYTHON38(type_object, NB_TYPING_TYPE) +#endif + +template struct type_caster> { + using Caster = make_caster; + using Typed = typed; + + NB_TYPE_CASTER(Typed, typed_name>::Name + const_name("[") + + concat(const_name>(const_name("..."), + make_caster::Name)...) + const_name("]")) + + bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { + Caster caster; + if (!caster.from_python(src, flags_for_local_caster(flags), cleanup) || + !caster.template can_cast()) + return false; + value = caster.operator cast_t(); + return true; + } + + static handle from_cpp(const Value &src, rv_policy policy, cleanup_list *cleanup) noexcept { + return Caster::from_cpp(src, policy, cleanup); + } +}; + +template +struct type_caster && !T::nb_typed>> { +public: + NB_TYPE_CASTER(T, T::Name) + + type_caster() : value(nullptr, ::nanobind::detail::steal_t()) { } + + bool from_python(handle src, uint8_t, cleanup_list *) noexcept { + if (!isinstance(src)) + return false; + + if constexpr (std::is_base_of_v) + value = borrow(src); + else + value = src; + + return true; + } + + static handle from_cpp(T&& src, rv_policy, cleanup_list *) noexcept { + if constexpr (std::is_base_of_v) + return src.release(); + else + return src.inc_ref(); + } + + static handle from_cpp(const T &src, rv_policy, cleanup_list *) noexcept { + return src.inc_ref(); + } +}; + +template NB_INLINE rv_policy infer_policy(rv_policy policy) { + if constexpr (is_pointer_v) { + if (policy == rv_policy::automatic) + policy = rv_policy::take_ownership; + else if (policy == rv_policy::automatic_reference) + policy = rv_policy::reference; + } else if constexpr (std::is_lvalue_reference_v) { + if (policy == rv_policy::automatic || + policy == rv_policy::automatic_reference) + policy = rv_policy::copy; + } else { + if (policy == 
rv_policy::automatic || + policy == rv_policy::automatic_reference || + policy == rv_policy::reference || + policy == rv_policy::reference_internal) + policy = rv_policy::move; + } + return policy; +} + +template struct type_hook : std::false_type { }; + +template struct type_caster_base : type_caster_base_tag { + using Type = Type_; + static constexpr auto Name = const_name(); + template using Cast = precise_cast_t; + + NB_INLINE bool from_python(handle src, uint8_t flags, + cleanup_list *cleanup) noexcept { + return nb_type_get(&typeid(Type), src.ptr(), flags, cleanup, + (void **) &value); + } + + template + NB_INLINE static handle from_cpp(T &&value, rv_policy policy, + cleanup_list *cleanup) noexcept { + Type *ptr; + if constexpr (is_pointer_v) + ptr = (Type *) value; + else + ptr = (Type *) &value; + + policy = infer_policy(policy); + const std::type_info *type = &typeid(Type); + + constexpr bool has_type_hook = + !std::is_base_of_v>; + if constexpr (has_type_hook) + type = type_hook::get(ptr); + + if constexpr (!std::is_polymorphic_v) { + return nb_type_put(type, ptr, policy, cleanup); + } else { + const std::type_info *type_p = + (!has_type_hook && ptr) ? 
&typeid(*ptr) : nullptr; + return nb_type_put_p(type, type_p, ptr, policy, cleanup); + } + } + + template + bool can_cast() const noexcept { + return std::is_pointer_v || (value != nullptr); + } + + operator Type*() { return value; } + + operator Type&() { + raise_next_overload_if_null(value); + return *value; + } + + operator Type&&() { + raise_next_overload_if_null(value); + return (Type &&) *value; + } + +private: + Type *value; +}; + +template +struct type_caster : type_caster_base { }; + +template +T cast_impl(handle h) { + using Caster = detail::make_caster; + + // A returned reference/pointer would usually refer into the type_caster + // object, which will be destroyed before the returned value can be used, + // so we prohibit it by default, with two exceptions that we know are safe: + // + // - If we're casting to a bound object type, the returned pointer points + // into storage owned by that object, not the type caster. Note this is + // only safe if we don't allow implicit conversions, because the pointer + // produced after an implicit conversion points into storage owned by + // a temporary object in the cleanup list, and we have to release those + // temporaries before we return. + // + // - If we're casting to const char*, the caster was provided by nanobind, + // and we know it will only accept Python 'str' objects, producing + // a pointer to storage owned by that object. + + constexpr bool is_ref = std::is_reference_v || std::is_pointer_v; + static_assert( + !is_ref || + is_base_caster_v || + std::is_same_v, + "nanobind::cast(): cannot return a reference to a temporary."); + + Caster caster; + bool rv; + if constexpr (Convert && !is_ref) { + // Release the values in the cleanup list only after we + // initialize the return object, since the initialization + // might access those temporaries. 
+ struct raii_cleanup { + cleanup_list list{nullptr}; + ~raii_cleanup() { list.release(); } + } cleanup; + rv = caster.from_python(h.ptr(), + ((uint8_t) cast_flags::convert) | + ((uint8_t) cast_flags::manual), + &cleanup.list); + if (!rv) + detail::raise_cast_error(); + return caster.operator cast_t(); + } else { + rv = caster.from_python(h.ptr(), (uint8_t) cast_flags::manual, nullptr); + if (!rv) + detail::raise_cast_error(); + return caster.operator cast_t(); + } +} + +template +bool try_cast_impl(handle h, T &out) noexcept { + using Caster = detail::make_caster; + + // See comments in cast_impl above + constexpr bool is_ref = std::is_reference_v || std::is_pointer_v; + static_assert( + !is_ref || + is_base_caster_v || + std::is_same_v, + "nanobind::try_cast(): cannot return a reference to a temporary."); + + Caster caster; + bool rv; + if constexpr (Convert && !is_ref) { + cleanup_list cleanup(nullptr); + rv = caster.from_python(h.ptr(), + ((uint8_t) cast_flags::convert) | + ((uint8_t) cast_flags::manual), + &cleanup) && + caster.template can_cast(); + if (rv) { + out = caster.operator cast_t(); + } + cleanup.release(); // 'from_python' is 'noexcept', so this always runs + } else { + rv = caster.from_python(h.ptr(), (uint8_t) cast_flags::manual, nullptr) && + caster.template can_cast(); + if (rv) { + out = caster.operator cast_t(); + } + } + + return rv; +} + +NAMESPACE_END(detail) + +template +NB_INLINE T cast(const detail::api &value, bool convert = true) { + if constexpr (std::is_same_v) { + (void) value; (void) convert; + return; + } else { + if (convert) + return detail::cast_impl(value); + else + return detail::cast_impl(value); + } +} + +template +NB_INLINE bool try_cast(const detail::api &value, T &out, bool convert = true) noexcept { + if (convert) + return detail::try_cast_impl(value, out); + else + return detail::try_cast_impl(value, out); +} + +template +object cast(T &&value, rv_policy policy = rv_policy::automatic_reference) { + handle h = 
detail::make_caster::from_cpp((detail::forward_t) value, + policy, nullptr); + if (!h.is_valid()) + detail::raise_cast_error(); + + return steal(h); +} + +template +object cast(T &&value, rv_policy policy, handle parent) { + detail::cleanup_list cleanup(parent.ptr()); + handle h = detail::make_caster::from_cpp((detail::forward_t) value, + policy, &cleanup); + + cleanup.release(); + + if (!h.is_valid()) + detail::raise_cast_error(); + + return steal(h); +} + +template object find(const T &value) noexcept { + return steal(detail::make_caster::from_cpp(value, rv_policy::none, nullptr)); +} + +template +tuple make_tuple(Args &&...args) { + tuple result = steal(PyTuple_New((Py_ssize_t) sizeof...(Args))); + + size_t nargs = 0; + PyObject *o = result.ptr(); + + (NB_TUPLE_SET_ITEM(o, nargs++, + detail::make_caster::from_cpp( + (detail::forward_t) args, policy, nullptr) + .ptr()), + ...); + + detail::tuple_check(o, sizeof...(Args)); + + return result; +} + +template arg_v arg::operator=(T &&value) const { + return arg_v(*this, cast((detail::forward_t) value)); +} +template arg_locked_v arg_locked::operator=(T &&value) const { + return arg_locked_v(*this, cast((detail::forward_t) value)); +} + +template template +detail::accessor& detail::accessor::operator=(T &&value) { + object result = cast((detail::forward_t) value); + Impl::set(m_base, m_key, result.ptr()); + return *this; +} + +template void list::append(T &&value) { + object o = nanobind::cast((detail::forward_t) value); + if (PyList_Append(m_ptr, o.ptr())) + raise_python_error(); +} + +template void list::insert(Py_ssize_t index, T &&value) { + object o = nanobind::cast((detail::forward_t) value); + if (PyList_Insert(m_ptr, index, o.ptr())) + raise_python_error(); +} + +template bool dict::contains(T&& key) const { + object o = nanobind::cast((detail::forward_t) key); + int rv = PyDict_Contains(m_ptr, o.ptr()); + if (rv == -1) + raise_python_error(); + return rv == 1; +} + +template bool set::contains(T&& key) const 
{ + object o = nanobind::cast((detail::forward_t) key); + int rv = PySet_Contains(m_ptr, o.ptr()); + if (rv == -1) + raise_python_error(); + return rv == 1; +} + +template void set::add(T&& key) { + object o = nanobind::cast((detail::forward_t) key); + int rv = PySet_Add(m_ptr, o.ptr()); + if (rv == -1) + raise_python_error(); +} + +template bool set::discard(T &&value) { + object o = nanobind::cast((detail::forward_t) value); + int rv = PySet_Discard(m_ptr, o.ptr()); + if (rv < 0) + raise_python_error(); + return rv == 1; +} + +template bool mapping::contains(T&& key) const { + object o = nanobind::cast((detail::forward_t) key); + int rv = PyMapping_HasKey(m_ptr, o.ptr()); + if (rv == -1) + raise_python_error(); + return rv == 1; +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_class.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_class.h new file mode 100644 index 0000000..e8da8d5 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_class.h @@ -0,0 +1,808 @@ +/* + nanobind/nb_class.h: Functionality for binding C++ classes/structs + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +/// Flags about a type that persist throughout its lifetime +enum class type_flags : uint32_t { + /// Does the type provide a C++ destructor? + is_destructible = (1 << 0), + + /// Does the type provide a C++ copy constructor? + is_copy_constructible = (1 << 1), + + /// Does the type provide a C++ move constructor? + is_move_constructible = (1 << 2), + + /// Is the 'destruct' field of the type_data structure set? + has_destruct = (1 << 4), + + /// Is the 'copy' field of the type_data structure set? + has_copy = (1 << 5), + + /// Is the 'move' field of the type_data structure set? 
+ has_move = (1 << 6), + + /// Internal: does the type maintain a list of implicit conversions? + has_implicit_conversions = (1 << 7), + + /// Is this a python type that extends a bound C++ type? + is_python_type = (1 << 8), + + /// This type does not permit subclassing from Python + is_final = (1 << 9), + + /// Instances of this type support dynamic attribute assignment + has_dynamic_attr = (1 << 10), + + /// The class uses an intrusive reference counting approach + intrusive_ptr = (1 << 11), + + /// Is this a class that inherits from enable_shared_from_this? + /// If so, type_data::keep_shared_from_this_alive is also set. + has_shared_from_this = (1 << 12), + + /// Instances of this type can be referenced by 'weakref' + is_weak_referenceable = (1 << 13), + + /// A custom signature override was specified + has_signature = (1 << 14), + + /// The class implements __class_getitem__ similar to typing.Generic + is_generic = (1 << 15), + + /// Does the type implement a custom __new__ operator? + has_new = (1 << 16) + + // Two more bits bits available without needing a larger reorganization +}; + +/// Flags about a type that are only relevant when it is being created. +/// These are currently stored in type_data::flags alongside the type_flags +/// for more efficient memory layout, but could move elsewhere if we run +/// out of flags. +enum class type_init_flags : uint32_t { + /// Is the 'supplement' field of the type_init_data structure set? + has_supplement = (1 << 19), + + /// Is the 'doc' field of the type_init_data structure set? + has_doc = (1 << 20), + + /// Is the 'base' field of the type_init_data structure set? + has_base = (1 << 21), + + /// Is the 'base_py' field of the type_init_data structure set? 
+ has_base_py = (1 << 22), + + /// This type provides extra PyType_Slot fields + has_type_slots = (1 << 23), + + all_init_flags = (0x1f << 19) +}; + +// See internals.h +struct nb_alias_chain; + +/// Information about a type that persists throughout its lifetime +struct type_data { + uint32_t size; + uint32_t align : 8; + uint32_t flags : 24; + const char *name; + const std::type_info *type; + PyTypeObject *type_py; + nb_alias_chain *alias_chain; +#if defined(Py_LIMITED_API) + PyObject* (*vectorcall)(PyObject *, PyObject * const*, size_t, PyObject *); +#endif + void *init; // Constructor nb_func + void (*destruct)(void *); + void (*copy)(void *, const void *); + void (*move)(void *, void *) noexcept; + union { + // Implicit conversions for C++ type bindings + struct { + const std::type_info **cpp; + bool (**py)(PyTypeObject *, PyObject *, cleanup_list *) noexcept; + } implicit; + + // Forward and reverse mappings for enumerations + struct { + void *fwd; + void *rev; + } enum_tbl; + }; + void (*set_self_py)(void *, PyObject *) noexcept; + bool (*keep_shared_from_this_alive)(PyObject *) noexcept; +#if defined(Py_LIMITED_API) + uint32_t dictoffset; + uint32_t weaklistoffset; +#endif +}; + +/// Information about a type that is only relevant when it is being created +struct type_init_data : type_data { + PyObject *scope; + const std::type_info *base; + PyTypeObject *base_py; + const char *doc; + const PyType_Slot *type_slots; + size_t supplement; +}; + +NB_INLINE void type_extra_apply(type_init_data &t, const handle &h) { + t.flags |= (uint32_t) type_init_flags::has_base_py; + t.base_py = (PyTypeObject *) h.ptr(); +} + +NB_INLINE void type_extra_apply(type_init_data &t, const char *doc) { + t.flags |= (uint32_t) type_init_flags::has_doc; + t.doc = doc; +} + +NB_INLINE void type_extra_apply(type_init_data &t, type_slots c) { + t.flags |= (uint32_t) type_init_flags::has_type_slots; + t.type_slots = c.value; +} + +template +NB_INLINE void type_extra_apply(type_init_data 
&t, intrusive_ptr ip) { + t.flags |= (uint32_t) type_flags::intrusive_ptr; + t.set_self_py = (void (*)(void *, PyObject *) noexcept) ip.set_self_py; +} + +NB_INLINE void type_extra_apply(type_init_data &t, is_final) { + t.flags |= (uint32_t) type_flags::is_final; +} + +NB_INLINE void type_extra_apply(type_init_data &t, dynamic_attr) { + t.flags |= (uint32_t) type_flags::has_dynamic_attr; +} + +NB_INLINE void type_extra_apply(type_init_data & t, is_weak_referenceable) { + t.flags |= (uint32_t) type_flags::is_weak_referenceable; +} + +NB_INLINE void type_extra_apply(type_init_data & t, is_generic) { + t.flags |= (uint32_t) type_flags::is_generic; +} + +NB_INLINE void type_extra_apply(type_init_data & t, const sig &s) { + t.flags |= (uint32_t) type_flags::has_signature; + t.name = s.value; +} + +template +NB_INLINE void type_extra_apply(type_init_data &t, supplement) { + static_assert(std::is_trivially_default_constructible_v, + "The supplement must be a POD (plain old data) type"); + static_assert(alignof(T) <= alignof(void *), + "The alignment requirement of the supplement is too high."); + t.flags |= (uint32_t) type_init_flags::has_supplement | (uint32_t) type_flags::is_final; + t.supplement = sizeof(T); +} + +enum class enum_flags : uint32_t { + /// Is this an arithmetic enumeration? + is_arithmetic = (1 << 1), + + /// Is the number type underlying the enumeration signed? + is_signed = (1 << 2), + + /// Is the underlying enumeration type Flag? 
+ is_flag = (1 << 3) +}; + +struct enum_init_data { + const std::type_info *type; + PyObject *scope; + const char *name; + const char *docstr; + uint32_t flags; +}; + +NB_INLINE void enum_extra_apply(enum_init_data &e, is_arithmetic) { + e.flags |= (uint32_t) enum_flags::is_arithmetic; +} + +NB_INLINE void enum_extra_apply(enum_init_data &e, is_flag) { + e.flags |= (uint32_t) enum_flags::is_flag; +} + +NB_INLINE void enum_extra_apply(enum_init_data &e, const char *doc) { + e.docstr = doc; +} + +template +NB_INLINE void enum_extra_apply(enum_init_data &, T) { + static_assert( + std::is_void_v, + "Invalid enum binding annotation. The implementation of " + "enums changed nanobind 2.0.0: only nb::is_arithmetic and " + "docstrings can be passed since this change."); +} + +template void wrap_copy(void *dst, const void *src) { + new ((T *) dst) T(*(const T *) src); +} + +template void wrap_move(void *dst, void *src) noexcept { + new ((T *) dst) T(std::move(*(T *) src)); +} + +template void wrap_destruct(void *value) noexcept { + ((T *) value)->~T(); +} + +template typename, typename...> +struct extract; + +template typename Pred> +struct extract { + using type = T; +}; + +template typename Pred, + typename Tv, typename... 
Ts> +struct extract { + using type = std::conditional_t< + Pred::value, + Tv, + typename extract::type + >; +}; + +template using is_alias = std::is_base_of; +template using is_base = std::is_base_of; + +enum op_id : int; +enum op_type : int; +struct undefined_t; +template struct op_; + +// The header file include/nanobind/stl/detail/traits.h extends this type trait +template +struct is_copy_constructible : std::is_copy_constructible { }; + +template +constexpr bool is_copy_constructible_v = is_copy_constructible::value; + +NAMESPACE_END(detail) + +// Low level access to nanobind type objects +inline bool type_check(handle h) { return detail::nb_type_check(h.ptr()); } +inline size_t type_size(handle h) { return detail::nb_type_size(h.ptr()); } +inline size_t type_align(handle h) { return detail::nb_type_align(h.ptr()); } +inline const std::type_info& type_info(handle h) { return *detail::nb_type_info(h.ptr()); } +template +inline T &type_supplement(handle h) { return *(T *) detail::nb_type_supplement(h.ptr()); } +inline str type_name(handle h) { return steal(detail::nb_type_name(h.ptr())); }; + +// Low level access to nanobind instance objects +inline bool inst_check(handle h) { return type_check(h.type()); } +inline str inst_name(handle h) { + return steal(detail::nb_inst_name(h.ptr())); +}; +inline object inst_alloc(handle h) { + return steal(detail::nb_inst_alloc((PyTypeObject *) h.ptr())); +} +inline object inst_alloc_zero(handle h) { + return steal(detail::nb_inst_alloc_zero((PyTypeObject *) h.ptr())); +} +inline object inst_take_ownership(handle h, void *p) { + return steal(detail::nb_inst_take_ownership((PyTypeObject *) h.ptr(), p)); +} +inline object inst_reference(handle h, void *p, handle parent = handle()) { + return steal(detail::nb_inst_reference((PyTypeObject *) h.ptr(), p, parent.ptr())); +} +inline void inst_zero(handle h) { detail::nb_inst_zero(h.ptr()); } +inline void inst_set_state(handle h, bool ready, bool destruct) { + 
detail::nb_inst_set_state(h.ptr(), ready, destruct); +} +inline std::pair inst_state(handle h) { + return detail::nb_inst_state(h.ptr()); +} +inline void inst_mark_ready(handle h) { inst_set_state(h, true, true); } +inline bool inst_ready(handle h) { return inst_state(h).first; } +inline void inst_destruct(handle h) { detail::nb_inst_destruct(h.ptr()); } +inline void inst_copy(handle dst, handle src) { detail::nb_inst_copy(dst.ptr(), src.ptr()); } +inline void inst_move(handle dst, handle src) { detail::nb_inst_move(dst.ptr(), src.ptr()); } +inline void inst_replace_copy(handle dst, handle src) { detail::nb_inst_replace_copy(dst.ptr(), src.ptr()); } +inline void inst_replace_move(handle dst, handle src) { detail::nb_inst_replace_move(dst.ptr(), src.ptr()); } +template T *inst_ptr(handle h) { return (T *) detail::nb_inst_ptr(h.ptr()); } +inline void *type_get_slot(handle h, int slot_id) { +#if NB_TYPE_GET_SLOT_IMPL + return detail::type_get_slot((PyTypeObject *) h.ptr(), slot_id); +#else + return PyType_GetSlot((PyTypeObject *) h.ptr(), slot_id); +#endif +} + + +template struct init { + template friend class class_; + NB_INLINE init() {} + +private: + template + NB_INLINE static void execute(Class &cl, const Extra&... extra) { + using Type = typename Class::Type; + using Alias = typename Class::Alias; + cl.def( + "__init__", + [](pointer_and_handle v, Args... args) { + if constexpr (!std::is_same_v && + std::is_constructible_v) { + if (!detail::nb_inst_python_derived(v.h.ptr())) { + new (v.p) Type{ (detail::forward_t) args... }; + return; + } + } + new ((void *) v.p) Alias{ (detail::forward_t) args... }; + }, + extra...); + } +}; + +template struct init_implicit { + template friend class class_; + NB_INLINE init_implicit() { } + +private: + template + NB_INLINE static void execute(Class &cl, const Extra&... 
extra) { + using Type = typename Class::Type; + using Alias = typename Class::Alias; + + cl.def( + "__init__", + [](pointer_and_handle v, Arg arg) { + if constexpr (!std::is_same_v && + std::is_constructible_v) { + if (!detail::nb_inst_python_derived(v.h.ptr())) { + new ((Type *) v.p) Type{ (detail::forward_t) arg }; + return; + } + } + new ((Alias *) v.p) Alias{ (detail::forward_t) arg }; + }, is_implicit(), extra...); + + using Caster = detail::make_caster; + + if constexpr (!detail::is_class_caster_v) { + detail::implicitly_convertible( + [](PyTypeObject *, PyObject *src, + detail::cleanup_list *cleanup) noexcept -> bool { + return Caster().from_python( + src, detail::cast_flags::convert, cleanup); + }, + &typeid(Type)); + } + } +}; + +namespace detail { + // This is 'inline' so we can define it in a header and not pay + // for it if unused, and also 'noinline' so we don't generate + // multiple copies and produce code bloat. + NB_NOINLINE inline void wrap_base_new(handle cls, bool do_wrap) { + if (PyCFunction_Check(cls.attr("__new__").ptr())) { + if (do_wrap) { + cpp_function_def( + [](handle type) { + if (!type_check(type)) + detail::raise_cast_error(); + return inst_alloc(type); + }, + scope(cls), name("__new__")); + } + } else { + if (!do_wrap) { + // We already defined the wrapper, so this zero-arg overload + // would be unreachable. Raise an error rather than hiding it. + raise("nanobind: %s must define its zero-argument __new__ " + "before any other overloads", type_name(cls).c_str()); + } + } + } +} + +template > +struct new_; + +template +struct new_ { + std::remove_reference_t func; + + new_(Func &&f) : func((detail::forward_t) f) {} + + template + NB_INLINE void execute(Class &cl, const Extra&... extra) { + // If this is the first __new__ overload we're defining, then wrap + // nanobind's built-in __new__ so we overload with it instead of + // replacing it; this is important for pickle support. 
+ // We can't do this if the user-provided __new__ takes no + // arguments, because it would make an ambiguous overload set. + detail::wrap_base_new(cl, sizeof...(Args) != 0); + + auto wrapper = [func = (detail::forward_t) func](handle, Args... args) { + return func((detail::forward_t) args...); + }; + + if constexpr ((std::is_base_of_v || ...)) { + // If any argument annotations are specified, add another for the + // extra class argument that we don't forward to Func, so visible + // arg() annotations stay aligned with visible function arguments. + cl.def_static("__new__", std::move(wrapper), arg("cls"), extra...); + } else { + cl.def_static("__new__", std::move(wrapper), extra...); + } + cl.def("__init__", [](handle, Args...) {}, extra...); + } +}; +template new_(Func&& f) -> new_; + +template struct for_setter { + T value; + for_setter(const T &value) : value(value) { } +}; + +template struct for_getter { + T value; + for_getter(const T &value) : value(value) { } +}; + +template for_getter(T) -> for_getter>; +template for_setter(T) -> for_setter>; + +namespace detail { + template auto filter_getter(const T &v) { return v; } + template auto filter_getter(const for_getter &v) { return v.value; } + template std::nullptr_t filter_getter(const for_setter &) { return nullptr; } + + template auto filter_setter(const T &v) { return v; } + template auto filter_setter(const for_setter &v) { return v.value; } + template std::nullptr_t filter_setter(const for_getter &) { return nullptr; } +} + +template +class class_ : public object { +public: + NB_OBJECT_DEFAULT(class_, object, "type", PyType_Check) + using Type = T; + using Base = typename detail::extract::type; + using Alias = typename detail::extract::type; + + static_assert(sizeof(Alias) < (1 << 24), "Instance size is too big!"); + static_assert(alignof(Alias) < (1 << 8), "Instance alignment is too big!"); + static_assert( + sizeof...(Ts) == !std::is_same_v + !std::is_same_v, + "nanobind::class_<> was invoked with 
extra arguments that could not be handled"); + + static_assert( + detail::is_base_caster_v>, + "You attempted to bind a type that is already intercepted by a type " + "caster. Having both at the same time is not allowed. Are you perhaps " + "binding an STL type, while at the same time including a matching " + "type caster from ? Or did you perhaps forget to " + "declare NB_MAKE_OPAQUE(..) to specifically disable the type caster " + "catch-all for a specific type? Please review the documentation " + "to learn about the difference between bindings and type casters."); + + template + NB_INLINE class_(handle scope, const char *name, const Extra &... extra) { + detail::type_init_data d; + + d.flags = 0; + d.align = (uint8_t) alignof(Alias); + d.size = (uint32_t) sizeof(Alias); + d.name = name; + d.scope = scope.ptr(); + d.type = &typeid(T); + + if constexpr (!std::is_same_v) { + d.base = &typeid(Base); + d.flags |= (uint32_t) detail::type_init_flags::has_base; + } + + if constexpr (detail::is_copy_constructible_v) { + d.flags |= (uint32_t) detail::type_flags::is_copy_constructible; + + if constexpr (!std::is_trivially_copy_constructible_v) { + d.flags |= (uint32_t) detail::type_flags::has_copy; + d.copy = detail::wrap_copy; + } + } + + if constexpr (std::is_move_constructible::value) { + d.flags |= (uint32_t) detail::type_flags::is_move_constructible; + + if constexpr (!std::is_trivially_move_constructible_v) { + d.flags |= (uint32_t) detail::type_flags::has_move; + d.move = detail::wrap_move; + } + } + + if constexpr (std::is_destructible_v) { + d.flags |= (uint32_t) detail::type_flags::is_destructible; + + if constexpr (!std::is_trivially_destructible_v) { + d.flags |= (uint32_t) detail::type_flags::has_destruct; + d.destruct = detail::wrap_destruct; + } + } + + if constexpr (detail::has_shared_from_this_v) { + d.flags |= (uint32_t) detail::type_flags::has_shared_from_this; + d.keep_shared_from_this_alive = [](PyObject *self) noexcept { + // weak_from_this().lock() is 
equivalent to shared_from_this(), + // except that it returns an empty shared_ptr instead of + // throwing an exception if there is no active shared_ptr + // for this object. (Added in C++17.) + if (auto sp = inst_ptr(self)->weak_from_this().lock()) { + detail::keep_alive(self, new auto(std::move(sp)), + [](void *p) noexcept { + delete (decltype(sp) *) p; + }); + return true; + } + return false; + }; + } + + (detail::type_extra_apply(d, extra), ...); + + m_ptr = detail::nb_type_new(&d); + } + + template + NB_INLINE class_ &def(const char *name_, Func &&f, const Extra &... extra) { + cpp_function_def((detail::forward_t) f, scope(*this), + name(name_), is_method(), extra...); + return *this; + } + + template + NB_INLINE class_ &def(init &&arg, const Extra &... extra) { + arg.execute(*this, extra...); + return *this; + } + + template + NB_INLINE class_ &def(init_implicit &&arg, const Extra &... extra) { + arg.execute(*this, extra...); + return *this; + } + + template + NB_INLINE class_ &def(new_ &&arg, const Extra &... extra) { + arg.execute(*this, extra...); + return *this; + } + + template + NB_INLINE class_ &def_static(const char *name_, Func &&f, + const Extra &... extra) { + static_assert( + !std::is_member_function_pointer_v, + "def_static(...) 
called with a non-static member function pointer"); + cpp_function_def((detail::forward_t) f, scope(*this), name(name_), + extra...); + return *this; + } + + template + NB_INLINE class_ &def_prop_rw(const char *name_, Getter &&getter, + Setter &&setter, const Extra &...extra) { + object get_p, set_p; + + if constexpr (!std::is_same_v) + get_p = cpp_function((detail::forward_t) getter, + is_method(), is_getter(), + rv_policy::reference_internal, + detail::filter_getter(extra)...); + + if constexpr (!std::is_same_v) + set_p = cpp_function((detail::forward_t) setter, + is_method(), detail::filter_setter(extra)...); + + detail::property_install(m_ptr, name_, get_p.ptr(), set_p.ptr()); + return *this; + } + + template + NB_INLINE class_ &def_prop_rw_static(const char *name_, Getter &&getter, + Setter &&setter, + const Extra &...extra) { + object get_p, set_p; + + if constexpr (!std::is_same_v) + get_p = cpp_function((detail::forward_t) getter, is_getter(), + rv_policy::reference, + detail::filter_getter(extra)...); + + if constexpr (!std::is_same_v) + set_p = cpp_function((detail::forward_t) setter, + detail::filter_setter(extra)...); + + detail::property_install_static(m_ptr, name_, get_p.ptr(), set_p.ptr()); + return *this; + } + + template + NB_INLINE class_ &def_prop_ro(const char *name_, Getter &&getter, + const Extra &...extra) { + return def_prop_rw(name_, getter, nullptr, extra...); + } + + template + NB_INLINE class_ &def_prop_ro_static(const char *name_, + Getter &&getter, + const Extra &...extra) { + return def_prop_rw_static(name_, getter, nullptr, extra...); + } + + template + NB_INLINE class_ &def_rw(const char *name, D C::*p, + const Extra &...extra) { + // Unions never satisfy is_base_of, thus the is_same alternative + static_assert(std::is_base_of_v || std::is_same_v, + "def_rw() requires a (base) class member!"); + + using Q = + std::conditional_t>, + const D &, D &&>; + + def_prop_rw(name, + [p](const T &c) -> const D & { return c.*p; }, + [p](T &c, Q 
value) { c.*p = (Q) value; }, + extra...); + + return *this; + } + + template + NB_INLINE class_ &def_rw_static(const char *name, D *p, + const Extra &...extra) { + using Q = + std::conditional_t>, + const D &, D &&>; + + def_prop_rw_static(name, + [p](handle) -> const D & { return *p; }, + [p](handle, Q value) { *p = (Q) value; }, extra...); + + return *this; + } + + template + NB_INLINE class_ &def_ro(const char *name, D C::*p, + const Extra &...extra) { + // Unions never satisfy is_base_of, thus the is_same alternative + static_assert(std::is_base_of_v || std::is_same_v, + "def_ro() requires a (base) class member!"); + + def_prop_ro(name, + [p](const T &c) -> const D & { return c.*p; }, extra...); + + return *this; + } + + template + NB_INLINE class_ &def_ro_static(const char *name, D *p, + const Extra &...extra) { + def_prop_ro_static(name, + [p](handle) -> const D & { return *p; }, extra...); + + return *this; + } + + template + class_ &def(const detail::op_ &op, const Extra&... extra) { + op.execute(*this, extra...); + return *this; + } + + template + class_ & def_cast(const detail::op_ &op, const Extra&... extra) { + op.execute_cast(*this, extra...); + return *this; + } +}; + +template class enum_ : public object { +public: + static_assert(std::is_enum_v, "nanobind::enum_<> requires an enumeration type!"); + + using Base = class_; + using Underlying = std::underlying_type_t; + + template + NB_INLINE enum_(handle scope, const char *name, const Extra &... extra) { + detail::enum_init_data ed { }; + ed.type = &typeid(T); + ed.scope = scope.ptr(); + ed.name = name; + ed.flags = std::is_signed_v + ? 
(uint32_t) detail::enum_flags::is_signed + : 0; + (detail::enum_extra_apply(ed, extra), ...); + m_ptr = detail::enum_create(&ed); + } + + NB_INLINE enum_ &value(const char *name, T value, const char *doc = nullptr) { + detail::enum_append(m_ptr, name, (int64_t) value, doc); + return *this; + } + + NB_INLINE enum_ &export_values() { detail::enum_export(m_ptr); return *this; } + + template + NB_INLINE enum_ &def(const char *name_, Func &&f, const Extra &... extra) { + cpp_function_def((detail::forward_t) f, scope(*this), + name(name_), is_method(), extra...); + return *this; + } + + template + NB_INLINE enum_ &def_static(const char *name_, Func &&f, + const Extra &... extra) { + static_assert( + !std::is_member_function_pointer_v, + "def_static(...) called with a non-static member function pointer"); + cpp_function_def((detail::forward_t) f, scope(*this), name(name_), + extra...); + return *this; + } + + template + NB_INLINE enum_ &def_prop_rw(const char *name_, Getter &&getter, + Setter &&setter, const Extra &...extra) { + object get_p, set_p; + + if constexpr (!std::is_same_v) + get_p = cpp_function((detail::forward_t) getter, + is_method(), is_getter(), + rv_policy::reference_internal, + detail::filter_getter(extra)...); + + if constexpr (!std::is_same_v) + set_p = cpp_function((detail::forward_t) setter, + is_method(), detail::filter_setter(extra)...); + + detail::property_install(m_ptr, name_, get_p.ptr(), set_p.ptr()); + return *this; + } + + + template + NB_INLINE enum_ &def_prop_ro(const char *name_, Getter &&getter, + const Extra &...extra) { + return def_prop_rw(name_, getter, nullptr, extra...); + } +}; + +template void implicitly_convertible() { + using Caster = detail::make_caster; + + if constexpr (detail::is_base_caster_v) { + detail::implicitly_convertible(&typeid(Source), &typeid(Target)); + } else { + detail::implicitly_convertible( + [](PyTypeObject *, PyObject *src, + detail::cleanup_list *cleanup) noexcept -> bool { + return 
Caster().from_python(src, detail::cast_flags::convert, + cleanup); + }, + &typeid(Target)); + } +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_defs.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_defs.h new file mode 100644 index 0000000..071e41f --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_defs.h @@ -0,0 +1,203 @@ +/* + nanobind/nb_defs.h: Preprocessor definitions used by the project + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#define NB_STRINGIFY(x) #x +#define NB_TOSTRING(x) NB_STRINGIFY(x) +#define NB_CONCAT(first, second) first##second +#define NB_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code + +#if !defined(NAMESPACE_BEGIN) +# define NAMESPACE_BEGIN(name) namespace name { +#endif + +#if !defined(NAMESPACE_END) +# define NAMESPACE_END(name) } +#endif + +#if defined(_WIN32) +# define NB_EXPORT __declspec(dllexport) +# define NB_IMPORT __declspec(dllimport) +# define NB_INLINE __forceinline +# define NB_NOINLINE __declspec(noinline) +# define NB_INLINE_LAMBDA +#else +# define NB_EXPORT __attribute__ ((visibility("default"))) +# define NB_IMPORT NB_EXPORT +# define NB_INLINE inline __attribute__((always_inline)) +# define NB_NOINLINE __attribute__((noinline)) +# if defined(__clang__) +# define NB_INLINE_LAMBDA __attribute__((always_inline)) +# else +# define NB_INLINE_LAMBDA +# endif +#endif + +#if defined(__GNUC__) && !defined(_WIN32) +# define NB_NAMESPACE nanobind __attribute__((visibility("hidden"))) +#else +# define NB_NAMESPACE nanobind +#endif + +#if defined(__GNUC__) +# define NB_UNLIKELY(x) __builtin_expect(bool(x), 0) +# define NB_LIKELY(x) __builtin_expect(bool(x), 1) +#else +# define NB_LIKELY(x) x +# define NB_UNLIKELY(x) x +#endif + +#if defined(NB_SHARED) +# if defined(NB_BUILD) +# define NB_CORE NB_EXPORT +# 
else +# define NB_CORE NB_IMPORT +# endif +#else +# define NB_CORE +#endif + +#if !defined(NB_SHARED) && defined(__GNUC__) && !defined(_WIN32) +# define NB_EXPORT_SHARED __attribute__ ((visibility("hidden"))) +#else +# define NB_EXPORT_SHARED +#endif + +#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L +# define NB_HAS_U8STRING +#endif + +#if defined(Py_TPFLAGS_HAVE_VECTORCALL) +# define NB_VECTORCALL PyObject_Vectorcall +# define NB_HAVE_VECTORCALL Py_TPFLAGS_HAVE_VECTORCALL +#elif defined(_Py_TPFLAGS_HAVE_VECTORCALL) +# define NB_VECTORCALL _PyObject_Vectorcall +# define NB_HAVE_VECTORCALL _Py_TPFLAGS_HAVE_VECTORCALL +#else +# define NB_HAVE_VECTORCALL (1UL << 11) +#endif + +#if defined(PY_VECTORCALL_ARGUMENTS_OFFSET) +# define NB_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET +# define NB_VECTORCALL_NARGS PyVectorcall_NARGS +#else +# define NB_VECTORCALL_ARGUMENTS_OFFSET ((size_t) 1 << (8 * sizeof(size_t) - 1)) +# define NB_VECTORCALL_NARGS(n) ((n) & ~NB_VECTORCALL_ARGUMENTS_OFFSET) +#endif + +#if PY_VERSION_HEX < 0x03090000 +# define NB_TYPING_ABC "typing." +# define NB_TYPING_TUPLE "typing.Tuple" +# define NB_TYPING_LIST "typing.List" +# define NB_TYPING_DICT "typing.Dict" +# define NB_TYPING_SET "typing.Set" +# define NB_TYPING_TYPE "typing.Type" +#else +# define NB_TYPING_ABC "collections.abc." 
+# define NB_TYPING_TUPLE "tuple" +# define NB_TYPING_LIST "list" +# define NB_TYPING_DICT "dict" +# define NB_TYPING_SET "set" +# define NB_TYPING_TYPE "type" +#endif + +#define NB_TYPING_SEQUENCE NB_TYPING_ABC "Sequence" +#define NB_TYPING_MAPPING NB_TYPING_ABC "Mapping" +#define NB_TYPING_CALLABLE NB_TYPING_ABC "Callable" +#define NB_TYPING_ITERATOR NB_TYPING_ABC "Iterator" +#define NB_TYPING_ITERABLE NB_TYPING_ABC "Iterable" + +#if PY_VERSION_HEX < 0x03090000 +# define NB_TYPING_ABSTRACT_SET "typing.AbstractSet" +#else +# define NB_TYPING_ABSTRACT_SET "collections.abc.Set" +#endif + +#if defined(Py_LIMITED_API) +# if PY_VERSION_HEX < 0x030C0000 || defined(PYPY_VERSION) +# error "nanobind can target Python's limited API, but this requires CPython >= 3.12" +# endif +# define NB_TUPLE_GET_SIZE PyTuple_Size +# define NB_TUPLE_GET_ITEM PyTuple_GetItem +# define NB_TUPLE_SET_ITEM PyTuple_SetItem +# define NB_LIST_GET_SIZE PyList_Size +# define NB_LIST_GET_ITEM PyList_GetItem +# define NB_LIST_SET_ITEM PyList_SetItem +# define NB_DICT_GET_SIZE PyDict_Size +# define NB_SET_GET_SIZE PySet_Size +#else +# define NB_TUPLE_GET_SIZE PyTuple_GET_SIZE +# define NB_TUPLE_GET_ITEM PyTuple_GET_ITEM +# define NB_TUPLE_SET_ITEM PyTuple_SET_ITEM +# define NB_LIST_GET_SIZE PyList_GET_SIZE +# define NB_LIST_GET_ITEM PyList_GET_ITEM +# define NB_LIST_SET_ITEM PyList_SET_ITEM +# define NB_DICT_GET_SIZE PyDict_GET_SIZE +# define NB_SET_GET_SIZE PySet_GET_SIZE +#endif + +#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x07030a00 +# error "nanobind requires a newer PyPy version (>= 7.3.10)" +#endif + +#if defined(NB_FREE_THREADED) && !defined(Py_GIL_DISABLED) +# error "Free-threaded extensions require a free-threaded version of Python" +#endif + +#if defined(NB_DOMAIN) +# define NB_DOMAIN_STR NB_TOSTRING(NB_DOMAIN) +#else +# define NB_DOMAIN_STR nullptr +#endif + +#if !defined(PYPY_VERSION) +# if PY_VERSION_HEX < 0x030A0000 +# define NB_TYPE_GET_SLOT_IMPL 1 // Custom implementation of 
nb::type_get_slot +# else +# define NB_TYPE_GET_SLOT_IMPL 0 +# endif +# if PY_VERSION_HEX < 0x030C0000 +# define NB_TYPE_FROM_METACLASS_IMPL 1 // Custom implementation of PyType_FromMetaclass +# else +# define NB_TYPE_FROM_METACLASS_IMPL 0 +# endif +#else +# define NB_TYPE_FROM_METACLASS_IMPL 1 +# define NB_TYPE_GET_SLOT_IMPL 1 +#endif + +#define NB_NONCOPYABLE(X) \ + X(const X &) = delete; \ + X &operator=(const X &) = delete; + + +#define NB_MODULE_IMPL(name) \ + extern "C" [[maybe_unused]] NB_EXPORT PyObject *PyInit_##name(); \ + extern "C" NB_EXPORT PyObject *PyInit_##name() + +#define NB_MODULE(name, variable) \ + static PyModuleDef NB_CONCAT(nanobind_module_def_, name); \ + [[maybe_unused]] static void NB_CONCAT(nanobind_init_, \ + name)(::nanobind::module_ &); \ + NB_MODULE_IMPL(name) { \ + nanobind::detail::init(NB_DOMAIN_STR); \ + nanobind::module_ m = \ + nanobind::steal(nanobind::detail::module_new( \ + NB_TOSTRING(name), &NB_CONCAT(nanobind_module_def_, name))); \ + try { \ + NB_CONCAT(nanobind_init_, name)(m); \ + return m.release().ptr(); \ + } catch (const std::exception &e) { \ + PyErr_SetString(PyExc_ImportError, e.what()); \ + return nullptr; \ + } \ + } \ + void NB_CONCAT(nanobind_init_, name)(::nanobind::module_ & (variable)) + diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_descr.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_descr.h new file mode 100644 index 0000000..4975e13 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_descr.h @@ -0,0 +1,155 @@ +/* + nanobind/nb_descr.h: Constexpr string class for function signatures + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +/// Helper type for concatenating type signatures at compile time +template +struct descr { + char text[N + 1]{'\0'}; + + constexpr descr() = default; + constexpr descr(char const (&s)[N+1]) : descr(s, std::make_index_sequence()) { } + + template + constexpr descr(char const (&s)[N+1], std::index_sequence) : text{s[Is]..., '\0'} { } + + template + constexpr descr(char c, Cs... cs) : text{c, static_cast(cs)..., '\0'} { } + + constexpr size_t type_count() const { return sizeof...(Ts); } + constexpr size_t size() const { return N; } + + NB_INLINE void put_types(const std::type_info **out) const { + size_t ctr = 0; + ((out[ctr++] = &typeid(Ts)), ...); + out[ctr++] = nullptr; + } +}; + +template +constexpr descr plus_impl(const descr &a, const descr &b, + std::index_sequence, std::index_sequence) { + return {a.text[Is1]..., b.text[Is2]...}; +} + +template +constexpr descr operator+(const descr &a, const descr &b) { + return plus_impl(a, b, std::make_index_sequence(), std::make_index_sequence()); +} + +template +constexpr descr const_name(char const(&text)[N]) { return descr(text); } +constexpr descr<0> const_name(char const(&)[1]) { return {}; } + +template +struct int_to_str : int_to_str {}; +template struct int_to_str<0, Digits...> { + static constexpr auto digits = descr(('0' + Digits)...); +}; + +constexpr auto const_name(char c) { return descr<1>(c); } + +// Ternary description (like std::conditional) +template +constexpr auto const_name(char const(&text1)[N1], char const(&text2)[N2]) { + (void) text1; (void) text2; + + if constexpr(B) + return const_name(text1); + else + return const_name(text2); +} + +template +constexpr auto const_name(const T1 &d1, const T2 &d2) { + (void) d1; (void) d2; + + if constexpr (B) + return d1; + else + return d2; +} + +// Use a different name based on whether the parameter is used as input or output +template +constexpr auto io_name(char const (&text1)[N1], char const 
(&text2)[N2]) { + return const_name('@') + const_name(text1) + const_name('@') + + const_name(text2) + const_name('@'); +} + +#if PY_VERSION_HEX < 0x030A0000 +template constexpr auto optional_name(const T &v) { + return const_name("typing.Optional[") + v + const_name("]"); +} +template constexpr auto union_name(const Ts&... vs) { + return const_name("typing.Union[") + concat(vs...) + const_name("]"); +} +#else +template constexpr auto optional_name(const T &v) { + return v + const_name(" | None"); +} +template constexpr auto union_name(const T &v) { + return v; +} +template +constexpr auto union_name(const T1 &v1, const T2 &v2, const Ts &...vs) { + return v1 + const_name(" | ") + union_name(v2, vs...); +} +#endif + +template +auto constexpr const_name() -> std::remove_cv_t::digits)> { + return int_to_str::digits; +} + +template constexpr descr<1, Type> const_name() { return {'%'}; } + +constexpr descr<0> concat() { return {}; } +constexpr descr<0> concat_maybe() { return {}; } + +template +constexpr descr concat(const descr &descr) { return descr; } + +template +constexpr descr concat_maybe(const descr &descr) { return descr; } + +template +constexpr auto concat(const descr &d, const Args &...args) + -> decltype(std::declval>() + concat(args...)) { + return d + const_name(", ") + concat(args...); +} + +template +constexpr auto concat_maybe(const descr<0> &, const descr<0> &, const Args &...args) + -> decltype(concat_maybe(args...)) { return concat_maybe(args...); } + +template +constexpr auto concat_maybe(const descr<0> &, const descr &arg, const Args &...args) + -> decltype(concat_maybe(arg, args...)) { return concat_maybe(arg, args...); } + +template +constexpr auto concat_maybe(const descr &arg, const descr<0> &, const Args &...args) + -> decltype(concat_maybe(arg, args...)) { return concat_maybe(arg, args...); } + +template = 0> +constexpr auto concat_maybe(const descr &arg0, const descr &arg1, const Args &...args) + -> decltype(concat(arg0, concat_maybe(arg1, 
args...))) { + return concat(arg0, concat_maybe(arg1, args...)); +} + +template +constexpr descr type_descr(const descr &descr) { + return const_name("{") + descr + const_name("}"); +} + +NAMESPACE_END(detail) +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_enums.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_enums.h new file mode 100644 index 0000000..cae5dcb --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_enums.h @@ -0,0 +1,26 @@ +/* + nanobind/nb_enums.h: enumerations used in nanobind (just rv_policy atm.) + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) + +// Approach used to cast a previously unknown C++ instance into a Python object +enum class rv_policy { + automatic, + automatic_reference, + take_ownership, + copy, + move, + reference, + reference_internal, + none + /* Note to self: nb_func.h assumes that this value fits into 3 bits, + hence no further policies can be added. */ +}; + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_error.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_error.h new file mode 100644 index 0000000..3abc960 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_error.h @@ -0,0 +1,152 @@ +/* + nanobind/nb_error.h: Python exception handling, binding of exceptions + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) + +/// RAII wrapper that temporarily clears any Python error state +#if PY_VERSION_HEX >= 0x030C0000 +struct error_scope { + error_scope() { value = PyErr_GetRaisedException(); } + ~error_scope() { PyErr_SetRaisedException(value); } +private: + PyObject *value; +}; +#else +struct error_scope { + error_scope() { PyErr_Fetch(&type, &value, &trace); } + ~error_scope() { PyErr_Restore(type, value, trace); } +private: + PyObject *type, *value, *trace; +}; +#endif + +/// Wraps a Python error state as a C++ exception +class NB_EXPORT python_error : public std::exception { +public: + NB_EXPORT_SHARED python_error(); + NB_EXPORT_SHARED python_error(const python_error &); + NB_EXPORT_SHARED python_error(python_error &&) noexcept; + NB_EXPORT_SHARED ~python_error() override; + + bool matches(handle exc) const noexcept { +#if PY_VERSION_HEX < 0x030C0000 + return PyErr_GivenExceptionMatches(m_type, exc.ptr()) != 0; +#else + return PyErr_GivenExceptionMatches(m_value, exc.ptr()) != 0; +#endif + } + + /// Move the error back into the Python domain. This may only be called + /// once, and you should not reraise the exception in C++ afterward. + NB_EXPORT_SHARED void restore() noexcept; + + /// Pass the error to Python's `sys.unraisablehook`, which prints + /// a traceback to `sys.stderr` by default but may be overridden. + /// The *context* should be some object whose repr() helps clarify where + /// the error occurred. Like `.restore()`, this consumes the error and + /// you should not reraise the exception in C++ afterward. 
+ void discard_as_unraisable(handle context) noexcept { + restore(); + PyErr_WriteUnraisable(context.ptr()); + } + + void discard_as_unraisable(const char *context) noexcept { + object context_s = steal(PyUnicode_FromString(context)); + discard_as_unraisable(context_s); + } + + handle value() const { return m_value; } + +#if PY_VERSION_HEX < 0x030C0000 + handle type() const { return m_type; } + object traceback() const { return borrow(m_traceback); } +#else + handle type() const { return value().type(); } + object traceback() const { return steal(PyException_GetTraceback(m_value)); } +#endif + [[deprecated]] + object trace() const { return traceback(); } + + NB_EXPORT_SHARED const char *what() const noexcept override; + +private: +#if PY_VERSION_HEX < 0x030C0000 + mutable PyObject *m_type = nullptr; + mutable PyObject *m_value = nullptr; + mutable PyObject *m_traceback = nullptr; +#else + mutable PyObject *m_value = nullptr; +#endif + mutable char *m_what = nullptr; +}; + +/// Thrown by nanobind::cast when casting fails +using cast_error = std::bad_cast; + +enum class exception_type { + runtime_error, stop_iteration, index_error, key_error, value_error, + type_error, buffer_error, import_error, attribute_error, next_overload +}; + +// Base interface used to expose common Python exceptions in C++ +class NB_EXPORT builtin_exception : public std::runtime_error { +public: + NB_EXPORT_SHARED builtin_exception(exception_type type, const char *what); + NB_EXPORT_SHARED builtin_exception(builtin_exception &&) = default; + NB_EXPORT_SHARED builtin_exception(const builtin_exception &) = default; + NB_EXPORT_SHARED ~builtin_exception(); + NB_EXPORT_SHARED exception_type type() const { return m_type; } +private: + exception_type m_type; +}; + +#define NB_EXCEPTION(name) \ + inline builtin_exception name(const char *what = nullptr) { \ + return builtin_exception(exception_type::name, what); \ + } + +NB_EXCEPTION(stop_iteration) +NB_EXCEPTION(index_error) 
+NB_EXCEPTION(key_error) +NB_EXCEPTION(value_error) +NB_EXCEPTION(type_error) +NB_EXCEPTION(buffer_error) +NB_EXCEPTION(import_error) +NB_EXCEPTION(attribute_error) +NB_EXCEPTION(next_overload) + +#undef NB_EXCEPTION + +inline void register_exception_translator(detail::exception_translator t, + void *payload = nullptr) { + detail::register_exception_translator(t, payload); +} + +template +class exception : public object { + NB_OBJECT_DEFAULT(exception, object, "Exception", PyExceptionClass_Check) + + exception(handle scope, const char *name, handle base = PyExc_Exception) + : object(detail::exception_new(scope.ptr(), name, base.ptr()), + detail::steal_t()) { + detail::register_exception_translator( + [](const std::exception_ptr &p, void *payload) { + try { + std::rethrow_exception(p); + } catch (T &e) { + PyErr_SetString((PyObject *) payload, e.what()); + } + }, m_ptr); + } +}; + +NB_CORE void chain_error(handle type, const char *fmt, ...) noexcept; +[[noreturn]] NB_CORE void raise_from(python_error &e, handle type, const char *fmt, ...); + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_func.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_func.h new file mode 100644 index 0000000..1cd98a1 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_func.h @@ -0,0 +1,406 @@ +/* + nanobind/nb_func.h: Functionality for binding C++ functions/methods + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +NAMESPACE_BEGIN(detail) + +template +bool from_python_keep_alive(Caster &c, PyObject **args, uint8_t *args_flags, + cleanup_list *cleanup, size_t index) { + size_t size_before = cleanup->size(); + if (!c.from_python(args[index], args_flags[index], cleanup)) + return false; + + // If an implicit conversion took place, update the 'args' array so that + // the keep_alive annotation can later process this change + size_t size_after = cleanup->size(); + if (size_after != size_before) + args[index] = (*cleanup)[size_after - 1]; + + return true; +} + +// Return the number of nb::arg and nb::arg_v types in the first I types Ts. +// Invoke with std::make_index_sequence() to provide +// an index pack 'Is' that parallels the types pack Ts. +template +constexpr size_t count_args_before_index(std::index_sequence) { + static_assert(sizeof...(Is) == sizeof...(Ts)); + return ((Is < I && std::is_base_of_v) + ... + 0); +} + +#if defined(NB_FREE_THREADED) +struct ft_args_collector { + PyObject **args; + handle h1; + handle h2; + size_t index = 0; + + NB_INLINE explicit ft_args_collector(PyObject **a) : args(a) {} + NB_INLINE void apply(arg_locked *) { + if (h1.ptr() == nullptr) + h1 = args[index]; + h2 = args[index]; + ++index; + } + NB_INLINE void apply(arg *) { ++index; } + NB_INLINE void apply(...) {} +}; + +struct ft_args_guard { + NB_INLINE void lock(const ft_args_collector& info) { + PyCriticalSection2_Begin(&cs, info.h1.ptr(), info.h2.ptr()); + } + ~ft_args_guard() { + PyCriticalSection2_End(&cs); + } + PyCriticalSection2 cs; +}; +#endif + +struct no_guard {}; + +template +NB_INLINE PyObject *func_create(Func &&func, Return (*)(Args...), + std::index_sequence is, + const Extra &...extra) { + using Info = func_extra_info; + + if constexpr (CheckGuard && !std::is_same_v) { + return func_create( + [func = (forward_t) func](Args... 
args) NB_INLINE_LAMBDA { + typename Info::call_guard::type g; + (void) g; + return func((forward_t) args...); + }, + (Return(*)(Args...)) nullptr, is, extra...); + } + + (void) is; + + // Detect locations of nb::args / nb::kwargs (if they exist). + // Find the first and last occurrence of each; we'll later make sure these + // match, in order to guarantee there's only one instance. + static constexpr size_t + args_pos_1 = index_1_v, args>...>, + args_pos_n = index_n_v, args>...>, + kwargs_pos_1 = index_1_v, kwargs>...>, + kwargs_pos_n = index_n_v, kwargs>...>, + nargs = sizeof...(Args); + + // Determine the number of nb::arg/nb::arg_v annotations + constexpr size_t nargs_provided = + (std::is_base_of_v + ... + 0); + constexpr bool is_method_det = + (std::is_same_v + ... + 0) != 0; + constexpr bool is_getter_det = + (std::is_same_v + ... + 0) != 0; + constexpr bool has_arg_annotations = nargs_provided > 0 && !is_getter_det; + + // Determine the number of potentially-locked function arguments + constexpr bool lock_self_det = + (std::is_same_v + ... + 0) != 0; + static_assert(Info::nargs_locked <= 2, + "At most two function arguments can be locked"); + static_assert(!(lock_self_det && !is_method_det), + "The nb::lock_self() annotation only applies to methods"); + + // Detect location of nb::kw_only annotation, if supplied. As with args/kwargs + // we find the first and last location and later verify they match each other. + // Note this is an index in Extra... while args/kwargs_pos_* are indices in + // Args... . 
+ constexpr size_t + kwonly_pos_1 = index_1_v...>, + kwonly_pos_n = index_n_v...>; + // Arguments after nb::args are implicitly keyword-only even if there is no + // nb::kw_only annotation + constexpr bool explicit_kw_only = kwonly_pos_1 != sizeof...(Extra); + constexpr bool implicit_kw_only = args_pos_1 + 1 < kwargs_pos_1; + + // A few compile-time consistency checks + static_assert(args_pos_1 == args_pos_n && kwargs_pos_1 == kwargs_pos_n, + "Repeated use of nb::kwargs or nb::args in the function signature!"); + static_assert(!has_arg_annotations || nargs_provided + is_method_det == nargs, + "The number of nb::arg annotations must match the argument count!"); + static_assert(kwargs_pos_1 == nargs || kwargs_pos_1 + 1 == nargs, + "nb::kwargs must be the last element of the function signature!"); + static_assert(args_pos_1 == nargs || args_pos_1 < kwargs_pos_1, + "nb::args must precede nb::kwargs if both are present!"); + static_assert(has_arg_annotations || (!implicit_kw_only && !explicit_kw_only), + "Keyword-only arguments must have names!"); + + // Find the index in Args... of the first keyword-only parameter. Since + // the 'self' parameter doesn't get a nb::arg annotation, we must adjust + // by 1 for methods. Note that nargs_before_kw_only is only used if + // a kw_only annotation exists (i.e., if explicit_kw_only is true); + // the conditional is just to save the compiler some effort otherwise. + constexpr size_t nargs_before_kw_only = + explicit_kw_only + ? is_method_det + count_args_before_index( + std::make_index_sequence()) + : nargs; + + if constexpr (explicit_kw_only) { + static_assert(kwonly_pos_1 == kwonly_pos_n, + "Repeated use of nb::kw_only annotation!"); + + // If both kw_only and *args are specified, kw_only must be + // immediately after the nb::arg for *args. 
+ static_assert(args_pos_1 == nargs || nargs_before_kw_only == args_pos_1 + 1, + "Arguments after nb::args are implicitly keyword-only; any " + "nb::kw_only() annotation must be positioned to reflect that!"); + + // If both kw_only and **kwargs are specified, kw_only must be + // before the nb::arg for **kwargs. + static_assert(nargs_before_kw_only < kwargs_pos_1, + "Variadic nb::kwargs are implicitly keyword-only; any " + "nb::kw_only() annotation must be positioned to reflect that!"); + } + + // Collect function signature information for the docstring + using cast_out = make_caster< + std::conditional_t, void_type, Return>>; + + // Compile-time function signature + static constexpr auto descr = + const_name("(") + + concat(type_descr( + make_caster>>::Name)...) + + const_name(") -> ") + cast_out::Name; + + // std::type_info for all function arguments + const std::type_info* descr_types[descr.type_count() + 1]; + descr.put_types(descr_types); + + // Auxiliary data structure to capture the provided function/closure + struct capture { + std::remove_reference_t func; + }; + + // The following temporary record will describe the function in detail + func_data_prelim f; + f.flags = (args_pos_1 < nargs ? (uint32_t) func_flags::has_var_args : 0) | + (kwargs_pos_1 < nargs ? (uint32_t) func_flags::has_var_kwargs : 0) | + (ReturnRef ? (uint32_t) func_flags::return_ref : 0) | + (has_arg_annotations ? (uint32_t) func_flags::has_args : 0); + + /* Store captured function inside 'func_data_prelim' if there is space. Issues + with aliasing are resolved via separate compilation of libnanobind. 
*/ + if constexpr (sizeof(capture) <= sizeof(f.capture)) { + capture *cap = (capture *) f.capture; + new (cap) capture{ (forward_t) func }; + + if constexpr (!std::is_trivially_destructible_v) { + f.flags |= (uint32_t) func_flags::has_free; + f.free_capture = [](void *p) { + ((capture *) p)->~capture(); + }; + } + } else { + void **cap = (void **) f.capture; + cap[0] = new capture{ (forward_t) func }; + + f.flags |= (uint32_t) func_flags::has_free; + f.free_capture = [](void *p) { + delete (capture *) ((void **) p)[0]; + }; + } + + f.impl = [](void *p, PyObject **args, uint8_t *args_flags, rv_policy policy, + cleanup_list *cleanup) NB_INLINE_LAMBDA -> PyObject * { + (void) p; (void) args; (void) args_flags; (void) policy; (void) cleanup; + + const capture *cap; + if constexpr (sizeof(capture) <= sizeof(f.capture)) + cap = (capture *) p; + else + cap = (capture *) ((void **) p)[0]; + + tuple...> in; + (void) in; + +#if defined(NB_FREE_THREADED) + std::conditional_t guard; + if constexpr (Info::nargs_locked) { + ft_args_collector collector{args}; + if constexpr (is_method_det) { + if constexpr (lock_self_det) + collector.apply((arg_locked *) nullptr); + else + collector.apply((arg *) nullptr); + } + (collector.apply((Extra *) nullptr), ...); + guard.lock(collector); + } +#endif + + if constexpr (Info::keep_alive) { + if ((!from_python_keep_alive(in.template get(), args, + args_flags, cleanup, Is) || ...)) + return NB_NEXT_OVERLOAD; + } else { + if ((!in.template get().from_python(args[Is], args_flags[Is], + cleanup) || ...)) + return NB_NEXT_OVERLOAD; + } + + PyObject *result; + if constexpr (std::is_void_v) { +#if defined(_WIN32) // temporary workaround for an internal compiler error in MSVC + cap->func(static_cast>(in.template get())...); +#else + cap->func(in.template get().operator cast_t()...); +#endif + result = Py_None; + Py_INCREF(result); + } else { +#if defined(_WIN32) // temporary workaround for an internal compiler error in MSVC + result = 
cast_out::from_cpp( + cap->func(static_cast>(in.template get())...), + policy, cleanup).ptr(); +#else + result = cast_out::from_cpp( + cap->func((in.template get()) + .operator cast_t()...), + policy, cleanup).ptr(); +#endif + } + + if constexpr (Info::keep_alive) + (process_keep_alive(args, result, (Extra *) nullptr), ...); + + return result; + }; + + f.descr = descr.text; + f.descr_types = descr_types; + f.nargs = nargs; + + // Set nargs_pos to the number of C++ function parameters (Args...) that + // can be filled from Python positional arguments in a one-to-one fashion. + // This ends at: + // - the location of the variadic *args parameter, if present; otherwise + // - the location of the first keyword-only parameter, if any; otherwise + // - the location of the variadic **kwargs parameter, if present; otherwise + // - the end of the parameter list + // It's correct to give *args priority over kw_only because we verified + // above that kw_only comes afterward if both are present. It's correct + // to give kw_only priority over **kwargs because we verified above that + // kw_only comes before if both are present. + f.nargs_pos = args_pos_1 < nargs ? args_pos_1 : + explicit_kw_only ? nargs_before_kw_only : + kwargs_pos_1 < nargs ? kwargs_pos_1 : nargs; + + // Fill remaining fields of 'f' + size_t arg_index = 0; + (void) arg_index; + (func_extra_apply(f, extra, arg_index), ...); + + return nb_func_new((const void *) &f); +} + +NAMESPACE_END(detail) + +// The initial template parameter to cpp_function/cpp_function_def is +// used by class_ to ensure that member pointers are treated as members +// of the class being defined; other users can safely leave it at its +// default of void. + +template +NB_INLINE object cpp_function(Return (*f)(Args...), const Extra&... extra) { + return steal(detail::func_create( + f, f, std::make_index_sequence(), extra...)); +} + +template +NB_INLINE void cpp_function_def(Return (*f)(Args...), const Extra&... 
extra) { + detail::func_create( + f, f, std::make_index_sequence(), extra...); +} + +/// Construct a cpp_function from a lambda function (pot. with internal state) +template < + typename = void, typename Func, typename... Extra, + detail::enable_if_t>> = 0> +NB_INLINE object cpp_function(Func &&f, const Extra &...extra) { + using am = detail::analyze_method::operator())>; + return steal(detail::func_create( + (detail::forward_t) f, (typename am::func *) nullptr, + std::make_index_sequence(), extra...)); +} + +template < + typename = void, typename Func, typename... Extra, + detail::enable_if_t>> = 0> +NB_INLINE void cpp_function_def(Func &&f, const Extra &...extra) { + using am = detail::analyze_method::operator())>; + detail::func_create( + (detail::forward_t) f, (typename am::func *) nullptr, + std::make_index_sequence(), extra...); +} + +/// Construct a cpp_function from a class method (non-const) +template +NB_INLINE object cpp_function(Return (Class::*f)(Args...), const Extra &...extra) { + using T = std::conditional_t, Class, Target>; + return steal(detail::func_create( + [f](T *c, Args... args) NB_INLINE_LAMBDA -> Return { + return (c->*f)((detail::forward_t) args...); + }, + (Return(*)(T *, Args...)) nullptr, + std::make_index_sequence(), extra...)); +} + +template +NB_INLINE void cpp_function_def(Return (Class::*f)(Args...), const Extra &...extra) { + using T = std::conditional_t, Class, Target>; + detail::func_create( + [f](T *c, Args... args) NB_INLINE_LAMBDA -> Return { + return (c->*f)((detail::forward_t) args...); + }, + (Return(*)(T *, Args...)) nullptr, + std::make_index_sequence(), extra...); +} + +/// Construct a cpp_function from a class method (const) +template +NB_INLINE object cpp_function(Return (Class::*f)(Args...) const, const Extra &...extra) { + using T = std::conditional_t, Class, Target>; + return steal(detail::func_create( + [f](const T *c, Args... 
args) NB_INLINE_LAMBDA -> Return { + return (c->*f)((detail::forward_t) args...); + }, + (Return(*)(const T *, Args...)) nullptr, + std::make_index_sequence(), extra...)); +} + +template +NB_INLINE void cpp_function_def(Return (Class::*f)(Args...) const, const Extra &...extra) { + using T = std::conditional_t, Class, Target>; + detail::func_create( + [f](const T *c, Args... args) NB_INLINE_LAMBDA -> Return { + return (c->*f)((detail::forward_t) args...); + }, + (Return(*)(const T *, Args...)) nullptr, + std::make_index_sequence(), extra...); +} + +template +module_ &module_::def(const char *name_, Func &&f, const Extra &...extra) { + cpp_function_def((detail::forward_t) f, scope(*this), + name(name_), extra...); + return *this; +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_lib.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_lib.h new file mode 100644 index 0000000..3541bce --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_lib.h @@ -0,0 +1,566 @@ +/* + nanobind/nb_lib.h: Interface to libnanobind.so + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) + +// Forward declarations for types in ndarray.h (1) +namespace dlpack { struct dltensor; struct dtype; } + +NAMESPACE_BEGIN(detail) + +// Forward declarations for types in ndarray.h (2) +struct ndarray_handle; +struct ndarray_config; + +/** + * Helper class to clean temporaries created by function dispatch. + * The first element serves a special role: it stores the 'self' + * object of method calls (for rv_policy::reference_internal). 
+ */ +struct NB_CORE cleanup_list { +public: + static constexpr uint32_t Small = 6; + + cleanup_list(PyObject *self) : + m_size{1}, + m_capacity{Small}, + m_data{m_local} { + m_local[0] = self; + } + + ~cleanup_list() = default; + + /// Append a single PyObject to the cleanup stack + NB_INLINE void append(PyObject *value) noexcept { + if (m_size >= m_capacity) + expand(); + m_data[m_size++] = value; + } + + NB_INLINE PyObject *self() const { + return m_local[0]; + } + + /// Decrease the reference count of all appended objects + void release() noexcept; + + /// Does the list contain any entries? (besides the 'self' argument) + bool used() { return m_size != 1; } + + /// Return the size of the cleanup stack + size_t size() const { return m_size; } + + /// Subscript operator + PyObject *operator[](size_t index) const { return m_data[index]; } + +protected: + /// Out of memory, expand.. + void expand() noexcept; + +protected: + uint32_t m_size; + uint32_t m_capacity; + PyObject **m_data; + PyObject *m_local[Small]; +}; + +// ======================================================================== + +/// Raise a runtime error with the given message +#if defined(__GNUC__) + __attribute__((noreturn, __format__ (__printf__, 1, 2))) +#else + [[noreturn]] +#endif +NB_CORE void raise(const char *fmt, ...); + +/// Raise a type error with the given message +#if defined(__GNUC__) + __attribute__((noreturn, __format__ (__printf__, 1, 2))) +#else + [[noreturn]] +#endif +NB_CORE void raise_type_error(const char *fmt, ...); + +/// Abort the process with a fatal error +#if defined(__GNUC__) + __attribute__((noreturn, __format__ (__printf__, 1, 2))) +#else + [[noreturn]] +#endif +NB_CORE void fail(const char *fmt, ...) 
noexcept; + +/// Raise nanobind::python_error after an error condition was found +[[noreturn]] NB_CORE void raise_python_error(); + +/// Raise nanobind::next_overload +NB_CORE void raise_next_overload_if_null(void *p); + +/// Raise nanobind::cast_error +[[noreturn]] NB_CORE void raise_cast_error(); + +// ======================================================================== + +NB_CORE void init(const char *domain); + +// ======================================================================== + +/// Convert a Python object into a Python unicode string +NB_CORE PyObject *str_from_obj(PyObject *o); + +/// Convert an UTF8 null-terminated C string into a Python unicode string +NB_CORE PyObject *str_from_cstr(const char *c); + +/// Convert an UTF8 C string + size into a Python unicode string +NB_CORE PyObject *str_from_cstr_and_size(const char *c, size_t n); + +// ======================================================================== + +/// Convert a Python object into a Python byte string +NB_CORE PyObject *bytes_from_obj(PyObject *o); + +/// Convert an UTF8 null-terminated C string into a Python byte string +NB_CORE PyObject *bytes_from_cstr(const char *c); + +/// Convert a memory region into a Python byte string +NB_CORE PyObject *bytes_from_cstr_and_size(const void *c, size_t n); + +// ======================================================================== + +/// Convert a Python object into a Python byte array +NB_CORE PyObject *bytearray_from_obj(PyObject *o); + +/// Convert a memory region into a Python byte array +NB_CORE PyObject *bytearray_from_cstr_and_size(const void *c, size_t n); + +// ======================================================================== + +/// Convert a Python object into a Python boolean object +NB_CORE PyObject *bool_from_obj(PyObject *o); + +/// Convert a Python object into a Python integer object +NB_CORE PyObject *int_from_obj(PyObject *o); + +/// Convert a Python object into a Python floating point object +NB_CORE PyObject 
*float_from_obj(PyObject *o); + +// ======================================================================== + +/// Convert a Python object into a Python list +NB_CORE PyObject *list_from_obj(PyObject *o); + +/// Convert a Python object into a Python tuple +NB_CORE PyObject *tuple_from_obj(PyObject *o); + +/// Convert a Python object into a Python set +NB_CORE PyObject *set_from_obj(PyObject *o); + +// ======================================================================== + +/// Get an object attribute or raise an exception +NB_CORE PyObject *getattr(PyObject *obj, const char *key); +NB_CORE PyObject *getattr(PyObject *obj, PyObject *key); + +/// Get an object attribute or return a default value (never raises) +NB_CORE PyObject *getattr(PyObject *obj, const char *key, PyObject *def) noexcept; +NB_CORE PyObject *getattr(PyObject *obj, PyObject *key, PyObject *def) noexcept; + +/// Get an object attribute or raise an exception. Skip if 'out' is non-null +NB_CORE void getattr_or_raise(PyObject *obj, const char *key, PyObject **out); +NB_CORE void getattr_or_raise(PyObject *obj, PyObject *key, PyObject **out); + +/// Set an object attribute or raise an exception +NB_CORE void setattr(PyObject *obj, const char *key, PyObject *value); +NB_CORE void setattr(PyObject *obj, PyObject *key, PyObject *value); + +/// Delete an object attribute or raise an exception +NB_CORE void delattr(PyObject *obj, const char *key); +NB_CORE void delattr(PyObject *obj, PyObject *key); + +// ======================================================================== + +/// Index into an object or raise an exception. 
Skip if 'out' is non-null +NB_CORE void getitem_or_raise(PyObject *obj, Py_ssize_t, PyObject **out); +NB_CORE void getitem_or_raise(PyObject *obj, const char *key, PyObject **out); +NB_CORE void getitem_or_raise(PyObject *obj, PyObject *key, PyObject **out); + +/// Set an item or raise an exception +NB_CORE void setitem(PyObject *obj, Py_ssize_t, PyObject *value); +NB_CORE void setitem(PyObject *obj, const char *key, PyObject *value); +NB_CORE void setitem(PyObject *obj, PyObject *key, PyObject *value); + +/// Delete an item or raise an exception +NB_CORE void delitem(PyObject *obj, Py_ssize_t); +NB_CORE void delitem(PyObject *obj, const char *key); +NB_CORE void delitem(PyObject *obj, PyObject *key); + +// ======================================================================== + +/// Determine the length of a Python object +NB_CORE size_t obj_len(PyObject *o); + +/// Try to roughly determine the length of a Python object +NB_CORE size_t obj_len_hint(PyObject *o) noexcept; + +/// Obtain a string representation of a Python object +NB_CORE PyObject* obj_repr(PyObject *o); + +/// Perform a comparison between Python objects and handle errors +NB_CORE bool obj_comp(PyObject *a, PyObject *b, int value); + +/// Perform an unary operation on a Python object with error handling +NB_CORE PyObject *obj_op_1(PyObject *a, PyObject* (*op)(PyObject*)); + +/// Perform an unary operation on a Python object with error handling +NB_CORE PyObject *obj_op_2(PyObject *a, PyObject *b, + PyObject *(*op)(PyObject *, PyObject *)); + +// Perform a vector function call +NB_CORE PyObject *obj_vectorcall(PyObject *base, PyObject *const *args, + size_t nargsf, PyObject *kwnames, + bool method_call); + +/// Create an iterator from 'o', raise an exception in case of errors +NB_CORE PyObject *obj_iter(PyObject *o); + +/// Advance the iterator 'o', raise an exception in case of errors +NB_CORE PyObject *obj_iter_next(PyObject *o); + +// 
======================================================================== + +// Conversion validity check done by nb::make_tuple +NB_CORE void tuple_check(PyObject *tuple, size_t nargs); + +// ======================================================================== + +// Append a single argument to a function call +NB_CORE void call_append_arg(PyObject *args, size_t &nargs, PyObject *value); + +// Append a variable-length sequence of arguments to a function call +NB_CORE void call_append_args(PyObject *args, size_t &nargs, PyObject *value); + +// Append a single keyword argument to a function call +NB_CORE void call_append_kwarg(PyObject *kwargs, const char *name, PyObject *value); + +// Append a variable-length dictionary of keyword arguments to a function call +NB_CORE void call_append_kwargs(PyObject *kwargs, PyObject *value); + +// ======================================================================== + +// If the given sequence has the size 'size', return a pointer to its contents. +// May produce a temporary. +NB_CORE PyObject **seq_get_with_size(PyObject *seq, size_t size, + PyObject **temp) noexcept; + +// Like the above, but return the size instead of checking it. 
+NB_CORE PyObject **seq_get(PyObject *seq, size_t *size, + PyObject **temp) noexcept; + +// ======================================================================== + +/// Create a new capsule object with a name +NB_CORE PyObject *capsule_new(const void *ptr, const char *name, + void (*cleanup)(void *) noexcept) noexcept; + +// ======================================================================== + +/// Create a Python function object for the given function record +NB_CORE PyObject *nb_func_new(const void *data) noexcept; + +// ======================================================================== + +/// Create a Python type object for the given type record +struct type_init_data; +NB_CORE PyObject *nb_type_new(const type_init_data *c) noexcept; + +/// Extract a pointer to a C++ type underlying a Python object, if possible +NB_CORE bool nb_type_get(const std::type_info *t, PyObject *o, uint8_t flags, + cleanup_list *cleanup, void **out) noexcept; + +/// Cast a C++ type instance into a Python object +NB_CORE PyObject *nb_type_put(const std::type_info *cpp_type, void *value, + rv_policy rvp, cleanup_list *cleanup, + bool *is_new = nullptr) noexcept; + +// Special version of nb_type_put for polymorphic classes +NB_CORE PyObject *nb_type_put_p(const std::type_info *cpp_type, + const std::type_info *cpp_type_p, void *value, + rv_policy rvp, cleanup_list *cleanup, + bool *is_new = nullptr) noexcept; + +// Special version of 'nb_type_put' for unique pointers and ownership transfer +NB_CORE PyObject *nb_type_put_unique(const std::type_info *cpp_type, + void *value, cleanup_list *cleanup, + bool cpp_delete) noexcept; + +// Special version of 'nb_type_put_unique' for polymorphic classes +NB_CORE PyObject *nb_type_put_unique_p(const std::type_info *cpp_type, + const std::type_info *cpp_type_p, + void *value, cleanup_list *cleanup, + bool cpp_delete) noexcept; + +/// Try to reliquish ownership from Python object to a unique_ptr; +/// return true if successful, false if 
not. (Failure is only +/// possible if `cpp_delete` is true.) +NB_CORE bool nb_type_relinquish_ownership(PyObject *o, bool cpp_delete) noexcept; + +/// Reverse the effects of nb_type_relinquish_ownership(). +NB_CORE void nb_type_restore_ownership(PyObject *o, bool cpp_delete) noexcept; + +/// Get a pointer to a user-defined 'extra' value associated with the nb_type t. +NB_CORE void *nb_type_supplement(PyObject *t) noexcept; + +/// Check if the given python object represents a nanobind type +NB_CORE bool nb_type_check(PyObject *t) noexcept; + +/// Return the size of the type wrapped by the given nanobind type object +NB_CORE size_t nb_type_size(PyObject *t) noexcept; + +/// Return the alignment of the type wrapped by the given nanobind type object +NB_CORE size_t nb_type_align(PyObject *t) noexcept; + +/// Return a unicode string representing the long-form name of the given type +NB_CORE PyObject *nb_type_name(PyObject *t) noexcept; + +/// Return a unicode string representing the long-form name of object's type +NB_CORE PyObject *nb_inst_name(PyObject *o) noexcept; + +/// Return the C++ type_info wrapped by the given nanobind type object +NB_CORE const std::type_info *nb_type_info(PyObject *t) noexcept; + +/// Get a pointer to the instance data of a nanobind instance (nb_inst) +NB_CORE void *nb_inst_ptr(PyObject *o) noexcept; + +/// Check if a Python type object wraps an instance of a specific C++ type +NB_CORE bool nb_type_isinstance(PyObject *obj, const std::type_info *t) noexcept; + +/// Search for the Python type object associated with a C++ type +NB_CORE PyObject *nb_type_lookup(const std::type_info *t) noexcept; + +/// Allocate an instance of type 't' +NB_CORE PyObject *nb_inst_alloc(PyTypeObject *t); + +/// Allocate an zero-initialized instance of type 't' +NB_CORE PyObject *nb_inst_alloc_zero(PyTypeObject *t); + +/// Allocate an instance of type 't' referencing the existing 'ptr' +NB_CORE PyObject *nb_inst_reference(PyTypeObject *t, void *ptr, + PyObject 
*parent); + +/// Allocate an instance of type 't' taking ownership of the existing 'ptr' +NB_CORE PyObject *nb_inst_take_ownership(PyTypeObject *t, void *ptr); + +/// Call the destructor of the given python object +NB_CORE void nb_inst_destruct(PyObject *o) noexcept; + +/// Zero-initialize a POD type and mark it as ready + to be destructed upon GC +NB_CORE void nb_inst_zero(PyObject *o) noexcept; + +/// Copy-construct 'dst' from 'src', mark it as ready and to be destructed (must have the same nb_type) +NB_CORE void nb_inst_copy(PyObject *dst, const PyObject *src) noexcept; + +/// Move-construct 'dst' from 'src', mark it as ready and to be destructed (must have the same nb_type) +NB_CORE void nb_inst_move(PyObject *dst, const PyObject *src) noexcept; + +/// Destruct 'dst', copy-construct 'dst' from 'src', mark ready and retain 'destruct' status (must have the same nb_type) +NB_CORE void nb_inst_replace_copy(PyObject *dst, const PyObject *src) noexcept; + +/// Destruct 'dst', move-construct 'dst' from 'src', mark ready and retain 'destruct' status (must have the same nb_type) +NB_CORE void nb_inst_replace_move(PyObject *dst, const PyObject *src) noexcept; + +/// Check if a particular instance uses a Python-derived type +NB_CORE bool nb_inst_python_derived(PyObject *o) noexcept; + +/// Overwrite the instance's ready/destruct flags +NB_CORE void nb_inst_set_state(PyObject *o, bool ready, bool destruct) noexcept; + +/// Query the 'ready' and 'destruct' flags of an instance +NB_CORE std::pair nb_inst_state(PyObject *o) noexcept; + +// ======================================================================== + +// Create and install a Python property object +NB_CORE void property_install(PyObject *scope, const char *name, + PyObject *getter, PyObject *setter) noexcept; + +NB_CORE void property_install_static(PyObject *scope, const char *name, + PyObject *getter, + PyObject *setter) noexcept; + +// ======================================================================== + 
+NB_CORE PyObject *get_override(void *ptr, const std::type_info *type, + const char *name, bool pure); + +// ======================================================================== + +// Ensure that 'patient' cannot be GCed while 'nurse' is alive +NB_CORE void keep_alive(PyObject *nurse, PyObject *patient); + +// Keep 'payload' alive until 'nurse' is GCed +NB_CORE void keep_alive(PyObject *nurse, void *payload, + void (*deleter)(void *) noexcept) noexcept; + + +// ======================================================================== + +/// Indicate to nanobind that an implicit constructor can convert 'src' -> 'dst' +NB_CORE void implicitly_convertible(const std::type_info *src, + const std::type_info *dst) noexcept; + +/// Register a callback to check if implicit conversion to 'dst' is possible +NB_CORE void implicitly_convertible(bool (*predicate)(PyTypeObject *, + PyObject *, + cleanup_list *), + const std::type_info *dst) noexcept; + +// ======================================================================== + +struct enum_init_data; + +/// Create a new enumeration type +NB_CORE PyObject *enum_create(enum_init_data *) noexcept; + +/// Append an entry to an enumeration +NB_CORE void enum_append(PyObject *tp, const char *name, + int64_t value, const char *doc) noexcept; + +// Query an enumeration's Python object -> integer value map +NB_CORE bool enum_from_python(const std::type_info *, PyObject *, int64_t *, + uint8_t flags) noexcept; + +// Query an enumeration's integer value -> Python object map +NB_CORE PyObject *enum_from_cpp(const std::type_info *, int64_t) noexcept; + +/// Export enum entries to the parent scope +NB_CORE void enum_export(PyObject *tp); + +// ======================================================================== + +/// Try to import a Python extension module, raises an exception upon failure +NB_CORE PyObject *module_import(const char *name); + +/// Try to import a Python extension module, raises an exception upon failure +NB_CORE 
PyObject *module_import(PyObject *name); + +/// Create a new extension module with the given name +NB_CORE PyObject *module_new(const char *name, PyModuleDef *def) noexcept; + +/// Create a submodule of an existing module +NB_CORE PyObject *module_new_submodule(PyObject *base, const char *name, + const char *doc) noexcept; + + +// ======================================================================== + +// Try to import a reference-counted ndarray object via DLPack +NB_CORE ndarray_handle *ndarray_import(PyObject *o, + const ndarray_config *c, + bool convert, + cleanup_list *cleanup) noexcept; + +// Describe a local ndarray object using a DLPack capsule +NB_CORE ndarray_handle *ndarray_create(void *value, size_t ndim, + const size_t *shape, PyObject *owner, + const int64_t *strides, + dlpack::dtype dtype, bool ro, + int device, int device_id, + char order); + +/// Increase the reference count of the given ndarray object; returns a pointer +/// to the underlying DLTensor +NB_CORE dlpack::dltensor *ndarray_inc_ref(ndarray_handle *) noexcept; + +/// Decrease the reference count of the given ndarray object +NB_CORE void ndarray_dec_ref(ndarray_handle *) noexcept; + +/// Wrap a ndarray_handle* into a PyCapsule +NB_CORE PyObject *ndarray_export(ndarray_handle *, int framework, + rv_policy policy, cleanup_list *cleanup) noexcept; + +/// Check if an object represents an ndarray +NB_CORE bool ndarray_check(PyObject *o) noexcept; + +// ======================================================================== + +/// Print to stdout using Python +NB_CORE void print(PyObject *file, PyObject *str, PyObject *end); + +// ======================================================================== + +typedef void (*exception_translator)(const std::exception_ptr &, void *); + +NB_CORE void register_exception_translator(exception_translator translator, + void *payload); + +NB_CORE PyObject *exception_new(PyObject *mod, const char *name, + PyObject *base); + +// 
======================================================================== + +NB_CORE bool load_i8 (PyObject *o, uint8_t flags, int8_t *out) noexcept; +NB_CORE bool load_u8 (PyObject *o, uint8_t flags, uint8_t *out) noexcept; +NB_CORE bool load_i16(PyObject *o, uint8_t flags, int16_t *out) noexcept; +NB_CORE bool load_u16(PyObject *o, uint8_t flags, uint16_t *out) noexcept; +NB_CORE bool load_i32(PyObject *o, uint8_t flags, int32_t *out) noexcept; +NB_CORE bool load_u32(PyObject *o, uint8_t flags, uint32_t *out) noexcept; +NB_CORE bool load_i64(PyObject *o, uint8_t flags, int64_t *out) noexcept; +NB_CORE bool load_u64(PyObject *o, uint8_t flags, uint64_t *out) noexcept; +NB_CORE bool load_f32(PyObject *o, uint8_t flags, float *out) noexcept; +NB_CORE bool load_f64(PyObject *o, uint8_t flags, double *out) noexcept; + +// ======================================================================== + +/// Increase the reference count of 'o', and check that the GIL is held +NB_CORE void incref_checked(PyObject *o) noexcept; + +/// Decrease the reference count of 'o', and check that the GIL is held +NB_CORE void decref_checked(PyObject *o) noexcept; + +// ======================================================================== + +NB_CORE bool leak_warnings() noexcept; +NB_CORE bool implicit_cast_warnings() noexcept; +NB_CORE void set_leak_warnings(bool value) noexcept; +NB_CORE void set_implicit_cast_warnings(bool value) noexcept; + +// ======================================================================== + +NB_CORE bool iterable_check(PyObject *o) noexcept; + +// ======================================================================== + +NB_CORE void slice_compute(PyObject *slice, Py_ssize_t size, + Py_ssize_t &start, Py_ssize_t &stop, + Py_ssize_t &step, size_t &slice_length); + +// ======================================================================== + +NB_CORE bool issubclass(PyObject *a, PyObject *b); + +// 
======================================================================== + +NB_CORE PyObject *repr_list(PyObject *o); +NB_CORE PyObject *repr_map(PyObject *o); + +NB_CORE bool is_alive() noexcept; + +#if NB_TYPE_GET_SLOT_IMPL +NB_CORE void *type_get_slot(PyTypeObject *t, int slot_id); +#endif + +NB_CORE PyObject *dict_get_item_ref_or_fail(PyObject *d, PyObject *k); + +NAMESPACE_END(detail) + +using detail::raise; +using detail::raise_type_error; +using detail::raise_python_error; + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_misc.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_misc.h new file mode 100644 index 0000000..b29d6b6 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_misc.h @@ -0,0 +1,118 @@ +/* + nanobind/nb_misc.h: Miscellaneous bits (GIL, etc.) + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) + +struct gil_scoped_acquire { +public: + NB_NONCOPYABLE(gil_scoped_acquire) + gil_scoped_acquire() noexcept : state(PyGILState_Ensure()) { } + ~gil_scoped_acquire() { PyGILState_Release(state); } + +private: + const PyGILState_STATE state; +}; + +struct gil_scoped_release { +public: + NB_NONCOPYABLE(gil_scoped_release) + gil_scoped_release() noexcept : state(PyEval_SaveThread()) { } + ~gil_scoped_release() { PyEval_RestoreThread(state); } + +private: + PyThreadState *state; +}; + +struct ft_mutex { +public: + NB_NONCOPYABLE(ft_mutex) + ft_mutex() = default; + +#if !defined(NB_FREE_THREADED) + void lock() { } + void unlock() { } +#else + void lock() { PyMutex_Lock(&mutex); } + void unlock() { PyMutex_Unlock(&mutex); } +private: + PyMutex mutex { 0 }; +#endif +}; + +struct ft_lock_guard { +public: + NB_NONCOPYABLE(ft_lock_guard) + ft_lock_guard(ft_mutex &m) : m(m) { m.lock(); } + ~ft_lock_guard() { m.unlock(); } +private: + ft_mutex &m; +}; + + +struct ft_object_guard { +public: + NB_NONCOPYABLE(ft_object_guard) +#if !defined(NB_FREE_THREADED) + ft_object_guard(handle) { } +#else + ft_object_guard(handle h) { PyCriticalSection_Begin(&cs, h.ptr()); } + ~ft_object_guard() { PyCriticalSection_End(&cs); } +private: + PyCriticalSection cs; +#endif +}; + +struct ft_object2_guard { +public: + NB_NONCOPYABLE(ft_object2_guard) +#if !defined(NB_FREE_THREADED) + ft_object2_guard(handle, handle) { } +#else + ft_object2_guard(handle h1, handle h2) { PyCriticalSection2_Begin(&cs, h1.ptr(), h2.ptr()); } + ~ft_object2_guard() { PyCriticalSection2_End(&cs); } +private: + PyCriticalSection2 cs; +#endif +}; + +inline bool leak_warnings() noexcept { + return detail::leak_warnings(); +} + +inline bool implicit_cast_warnings() noexcept { + return detail::implicit_cast_warnings(); +} + +inline void set_leak_warnings(bool value) noexcept { + detail::set_leak_warnings(value); +} + +inline void set_implicit_cast_warnings(bool value) noexcept { + 
detail::set_implicit_cast_warnings(value); +} + +inline dict globals() { + PyObject *p = PyEval_GetGlobals(); + if (!p) + raise("nanobind::globals(): no frame is currently executing!"); + return borrow(p); +} + +inline Py_hash_t hash(handle h) { + Py_hash_t rv = PyObject_Hash(h.ptr()); + if (rv == -1 && PyErr_Occurred()) + nanobind::raise_python_error(); + return rv; +} + +inline bool is_alive() noexcept { + return detail::is_alive(); +} + +NAMESPACE_END(NB_NAMESPACE) diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_python.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_python.h new file mode 100644 index 0000000..356500c --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_python.h @@ -0,0 +1,61 @@ +/* + nanobind/nb_python.h: Include CPython headers while temporarily disabling + certain warnings. Also, disable dangerous preprocessor definitions. + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode + +#if defined(_MSC_VER) +# pragma warning(push) +# if defined(_DEBUG) && !defined(Py_DEBUG) +# define NB_DEBUG_MARKER +# undef _DEBUG +# endif +#endif + +#include +#include +#include +#include + +/* Python #defines overrides on all sorts of core functions, which + tends to weak havok in C++ codebases that expect these to work + like regular functions (potentially with several overloads) */ +#if defined(isalnum) +# undef isalnum +# undef isalpha +# undef islower +# undef isspace +# undef isupper +# undef tolower +# undef toupper +#endif + +#if defined(copysign) +# undef copysign +#endif + +#if defined(setter) +# undef setter +#endif + +#if defined(getter) +# undef getter +#endif + +#if defined(_MSC_VER) +# if defined(NB_DEBUG_MARKER) +# define _DEBUG +# undef NB_DEBUG_MARKER +# endif +# pragma warning(pop) +#endif + +#if PY_VERSION_HEX < 0x03080000 +# error The nanobind library requires Python 3.8 (or newer) +#endif diff --git a/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_traits.h b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_traits.h new file mode 100644 index 0000000..4480c86 --- /dev/null +++ b/RemoteInput/Thirdparty/nanobind/include/nanobind/nb_traits.h @@ -0,0 +1,220 @@ +/* + nanobind/nb_traits.h: type traits for metaprogramming in nanobind + + Copyright (c) 2022 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +NAMESPACE_BEGIN(NB_NAMESPACE) +using ssize_t = std::make_signed_t; + +NAMESPACE_BEGIN(detail) + +struct void_type { }; + +template struct index_1; +template struct index_n; + +template <> struct index_1<> { constexpr static size_t value = 0; }; +template <> struct index_n<> { constexpr static size_t value = 0; }; + +template struct index_1 { + constexpr static size_t value_rec = index_1::value; + constexpr static size_t value = B ? 
0 : (value_rec + 1); +}; + +template struct index_n { + constexpr static size_t value_rec = index_n::value; + constexpr static size_t value = + (value_rec < sizeof...(Bs) || !B) ? (value_rec + 1) : 0; +}; + +template constexpr size_t index_1_v = index_1::value; +template constexpr size_t index_n_v = index_n::value; + +/// Helper template to strip away type modifiers +template struct intrinsic_type { using type = T; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template struct intrinsic_type { using type = typename intrinsic_type::type; }; +template using intrinsic_t = typename intrinsic_type::type; + +// More relaxed pointer test +template +constexpr bool is_pointer_v = std::is_pointer_v>; + +template +using forwarded_type = std::conditional_t, + std::remove_reference_t &, + std::remove_reference_t &&>; + +/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically +/// used for forwarding a container's elements. 
+template NB_INLINE forwarded_type forward_like_(U &&u) { + return (forwarded_type) u; +} + +template +constexpr bool is_std_char_v = + std::is_same_v +#if defined(NB_HAS_U8STRING) + || std::is_same_v /* std::u8string */ +#endif + || std::is_same_v || + std::is_same_v || std::is_same_v; + +template using enable_if_t = std::enable_if_t; + +/// Check if a function is a lambda function +template +constexpr bool is_lambda_v = !std::is_function_v && !std::is_pointer_v && + !std::is_member_pointer_v; + +/// Inspect the signature of a method call +template struct analyze_method { }; +template +struct analyze_method { + using func = Ret(Args...); + static constexpr size_t argc = sizeof...(Args); +}; + +template +struct analyze_method { + using func = Ret(Args...); + static constexpr size_t argc = sizeof...(Args); +}; + +template +struct analyze_method { + using func = Ret(Args...); + static constexpr size_t argc = sizeof...(Args); +}; + +template +struct analyze_method { + using func = Ret(Args...); + static constexpr size_t argc = sizeof...(Args); +}; + +template +struct strip_function_object { + using type = typename analyze_method::func; +}; + +// Extracts the function signature from a function, function pointer or lambda. +template > +using function_signature_t = std::conditional_t< + std::is_function_v, F, + typename std::conditional_t< + std::is_pointer_v || std::is_member_pointer_v, + std::remove_pointer, + strip_function_object>::type>; + +template +using forward_t = std::conditional_t, T, T &&>; + +template inline constexpr bool false_v = false; + +template struct overload_cast_impl { + template + constexpr auto operator()(Return (*pf)(Args...)) const noexcept + -> decltype(pf) { return pf; } + + template + constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept + -> decltype(pmf) { return pmf; } + + template + constexpr auto operator()(Return (Class::*pmf)(Args...) 
const, std::true_type) const noexcept + -> decltype(pmf) { return pmf; } +}; + +/// Detector pattern +template typename Op, typename Arg> +struct detector : std::false_type { }; + +template