From 9ff1ad40445b713ad3cd401af704c8ff7887fc78 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 4 May 2023 10:57:31 -0400 Subject: [PATCH 001/107] Add boost as submodule --- .gitmodules | 3 +++ CMakeLists.txt | 4 ++-- libraries/CMakeLists.txt | 4 ++++ libraries/boost | 1 + libraries/libfc/CMakeLists.txt | 2 +- 5 files changed, 11 insertions(+), 3 deletions(-) create mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..f6cea8d706 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/boostorg/boost diff --git a/CMakeLists.txt b/CMakeLists.txt index 049183b252..42067d3e91 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,11 +101,11 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set(Boost_USE_MULTITHREADED ON) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) # Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up # the pthread dependency through fc. -find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) +#find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) if( APPLE AND UNIX ) # Apple Specific Options Here diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 4b041dd047..8e79b46c59 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( BOOST_INCLUDE_LIBRARIES iostreams date_time system program_options chrono test ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) +#set( Boost_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/boost ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..b6928ae5c9 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index ac86842034..148197be5f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -91,7 +91,7 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost 1.66 REQUIRED COMPONENTS +find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework From 1cfbf0a76971efe63c420e212574cba6b98d327c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 8 May 2023 10:13:39 -0400 Subject: [PATCH 002/107] Leap builds with boost 1.82 as submodule. 
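PATCH 001 above swaps the system-wide `find_package(Boost ...)` lookup for a vendored checkout: the Boost superproject becomes a submodule at `libraries/boost`, `BOOST_INCLUDE_LIBRARIES` selects which libraries get configured, and `add_subdirectory( boost EXCLUDE_FROM_ALL )` exposes the usual `Boost::*` imported targets to the rest of the tree. C++ sources keep including Boost headers exactly as before; what changes is only which Boost they resolve to. As a purely illustrative sanity check (not part of the patch), a translation unit can assert that the vendored tree is at least the 1.82 series named in the subject just above, since `BOOST_VERSION` encodes 1.82.0 as 108200 (major*100000 + minor*100 + patch):

```cpp
// Illustrative sketch, not from the patch: compile-time check that the Boost
// found on the include path is the vendored 1.82 submodule (or newer).
#include <boost/version.hpp>   // defines BOOST_VERSION
#include <iostream>

static_assert(BOOST_VERSION >= 108200, "expected Boost >= 1.82 from libraries/boost");

int main() {
   std::cout << "Using Boost " << BOOST_VERSION / 100000 << '.'   // major
             << BOOST_VERSION / 100 % 1000 << '.'                 // minor
             << BOOST_VERSION % 100 << '\n';                      // patch level
}
```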
--- CMakeLists.txt | 4 ++++ libraries/CMakeLists.txt | 4 ---- libraries/appbase | 2 +- libraries/chain/CMakeLists.txt | 9 +++++++++ libraries/chainbase | 2 +- libraries/libfc/CMakeLists.txt | 11 +++++------ plugins/chain_plugin/CMakeLists.txt | 4 ++-- programs/cleos/CMakeLists.txt | 2 +- programs/nodeos/CMakeLists.txt | 3 ++- tests/CMakeLists.txt | 4 ++-- 10 files changed, 27 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 42067d3e91..3910a3381d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -103,6 +103,10 @@ endif() set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) + +#set( BOOST_INCLUDE_LIBRARIES headers iostreams date_time system program_options chrono test interprocess multi_index lexical_cast asio thread serialization multiprecision beast unit_test_framework ) +add_subdirectory( libraries/boost ) + # Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up # the pthread dependency through fc. #find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 8e79b46c59..4b041dd047 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,10 +5,6 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") -set( BOOST_INCLUDE_LIBRARIES iostreams date_time system program_options chrono test ) -add_subdirectory( boost EXCLUDE_FROM_ALL ) -#set( Boost_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/boost ) - add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/appbase b/libraries/appbase index c7ce7c2024..e785cc75ae 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit c7ce7c202497d772f8bbaf34a3ced0df136ec9fd +Subproject commit e785cc75aebda9a048f657ce4244014b40139fea diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 7796205658..6484e46f96 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -129,8 +129,17 @@ add_library( eosio_chain ${HEADERS} ) +## Boost::accumulators depends on Boost::numeric_ublas, which is still missing cmake support (see +## https://github.com/boostorg/cmake/issues/39). 
Until this is fixed, manually add Boost::numeric_ublas +## as an interface library +## ---------------------------------------------------------------------------------------------------- +add_library(boost_numeric_ublas INTERFACE) +add_library(Boost::numeric_ublas ALIAS boost_numeric_ublas) + target_link_libraries( eosio_chain PUBLIC bn256 fc chainbase eosio_rapidjson Logging IR WAST WASM softfloat builtins ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE} + Boost::signals2 Boost::hana Boost::property_tree Boost::multi_index Boost::asio Boost::lockfree + Boost::assign Boost::accumulators ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" diff --git a/libraries/chainbase b/libraries/chainbase index c1d30da95c..7b3badc5ec 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit c1d30da95c9f5e2e80d32732d3063671ff23b123 +Subproject commit 7b3badc5ecf5d1c6d41d9932811a1df994bbed51 diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 148197be5f..683cdb31b8 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -91,11 +91,9 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost REQUIRED COMPONENTS - date_time - chrono - unit_test_framework - iostreams) +if(NOT boost_headers_SOURCE_DIR) + find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework iostreams) +endif() find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) @@ -130,7 +128,8 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Threads::Threads +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index + Boost::multiprecision Boost::beast Boost::asio Boost::thread Boost::unit_test_framework Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) # Critically, this ensures that OpenSSL 1.1 & 3.0 both have a variant of BN_zero() with void return value. 
But it also allows access diff --git a/plugins/chain_plugin/CMakeLists.txt b/plugins/chain_plugin/CMakeLists.txt index 0648d20fb4..ae21541990 100644 --- a/plugins/chain_plugin/CMakeLists.txt +++ b/plugins/chain_plugin/CMakeLists.txt @@ -11,7 +11,7 @@ if(EOSIO_ENABLE_DEVELOPER_OPTIONS) target_compile_definitions(chain_plugin PUBLIC EOSIO_DEVELOPER) endif() -target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin ) +target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin Boost::bimap ) target_include_directories( chain_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include" "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include" "${CMAKE_CURRENT_SOURCE_DIR}/../resource_monitor_plugin/include") -add_subdirectory( test ) \ No newline at end of file +add_subdirectory( test ) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index bf184cf927..71b9d6c866 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -13,7 +13,7 @@ set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME}) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} - PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} Boost::process Boost::dll ) if (CURL_FOUND) target_sources(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE do_http_post_libcurl.cpp) diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 23ac7269c2..493c9e3a43 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -40,7 +40,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} prometheus_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin - PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} + Boost::dll ) include(additionalPlugins) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0f868a8f6b..8fbaa2b002 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -104,9 +104,9 @@ add_subdirectory( performance_tests ) find_package(Threads) add_executable(ship_client ship_client.cpp) -target_link_libraries(ship_client abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_client abieos Boost::program_options Boost::system Boost::algorithm Boost::asio Boost::beast Threads::Threads) add_executable(ship_streamer ship_streamer.cpp) -target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Boost::asio Boost::beast Threads::Threads) add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_test PROPERTY LABELS nonparallelizable_tests) From d22f340ba96e666459aa158c98b984e768b034e5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 8 May 2023 10:46:11 -0400 Subject: [PATCH 003/107] Remove 
outdated comments. --- CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3910a3381d..f44aba6cc3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,13 +104,8 @@ endif() set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -#set( BOOST_INCLUDE_LIBRARIES headers iostreams date_time system program_options chrono test interprocess multi_index lexical_cast asio thread serialization multiprecision beast unit_test_framework ) add_subdirectory( libraries/boost ) -# Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up -# the pthread dependency through fc. -#find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) From 0ae0289621f0f5a528aabdcdf739fd91a5ec8651 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 10 May 2023 10:26:33 -0400 Subject: [PATCH 004/107] Update submodules branches to tip --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index e785cc75ae..b9472c58c8 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit e785cc75aebda9a048f657ce4244014b40139fea +Subproject commit b9472c58c8d32b8e816cb4a8480d5b454f1bdbf4 diff --git a/libraries/chainbase b/libraries/chainbase index 7b3badc5ec..bf078f3ff6 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 7b3badc5ecf5d1c6d41d9932811a1df994bbed51 +Subproject commit bf078f3ff6559fffe93b8195df702729ae4ba4f5 From fcfd72f80f2278dc0d70d6250ca221f050d9de09 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 11 May 2023 14:47:09 -0400 Subject: [PATCH 005/107] force c++17 on files that use LLVM --- libraries/chain/CMakeLists.txt | 5 ++++ .../runtimes/eos-vm-oc/LLVMEmitIR.h | 18 +++++++++++++++ .../runtimes/eos-vm-oc/LLVMJIT.cpp | 23 ++++++++++--------- .../webassembly/runtimes/eos-vm-oc/LLVMJIT.h | 7 ------ 4 files changed, 35 insertions(+), 18 deletions(-) create mode 100644 libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 1f011d5144..6a41319865 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -34,11 +34,16 @@ if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) webassembly/runtimes/eos-vm-oc.cpp webassembly/runtimes/eos-vm-oc/default_real_main.cpp) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp PROPERTIES COMPILE_FLAGS "--std=gnu++17") + set_source_files_properties(webassembly/runtimes/eos-vm-oc/LLVMEmitIR.cpp PROPERTIES COMPILE_FLAGS "--std=gnu++17") + if(LLVM_VERSION VERSION_LESS 7.1 AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") enable_language(ASM-LLVMWAR) list(APPEND CHAIN_EOSVMOC_SOURCES webassembly/runtimes/eos-vm-oc/llvmWARshim.llvmwar) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/llvmWARshim.llvmwar PROPERTIES COMPILE_FLAGS "--std=gnu++17") else() list(APPEND CHAIN_EOSVMOC_SOURCES webassembly/runtimes/eos-vm-oc/llvmWARshim.cpp) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/llvmWARshim.cpp PROPERTIES COMPILE_FLAGS "--std=gnu++17") endif() llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native orcjit) diff --git 
a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h new file mode 100644 index 0000000000..2451603a77 --- /dev/null +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h @@ -0,0 +1,18 @@ +#pragma once + +#include "Inline/BasicTypes.h" +#include "IR/Module.h" + +#include "llvm/IR/Module.h" + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +namespace LLVMJIT { + bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex); + const char* getTableSymbolName(); + llvm::Module* emitModule(const IR::Module& module); +} +}}} diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp index 8b76b715af..f1eca8073d 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp @@ -15,6 +15,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND */ #include "LLVMJIT.h" +#include "LLVMEmitIR.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/RTDyldMemoryManager.h" @@ -39,6 +40,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #include "llvm/Object/ObjectFile.h" #include "llvm/Object/SymbolSize.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/DataExtractor.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/TargetSelect.h" @@ -49,10 +51,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #include "llvm/Transforms/InstCombine/InstCombine.h" #include "llvm/Transforms/Utils.h" #include - -#include -#include -#include +#include #include "llvm/Support/LEB128.h" @@ -151,7 +150,7 @@ namespace LLVMJIT std::list> stack_sizes; U8* get_next_code_ptr(uintptr_t numBytes, U32 alignment) { - FC_ASSERT(alignment <= alignof(std::max_align_t), "alignment of section exceeds max_align_t"); + WAVM_ASSERT_THROW(alignment <= alignof(std::max_align_t)); uintptr_t p = (uintptr_t)ptr; p += alignment - 1LL; p &= ~(alignment - 1LL); @@ -306,12 +305,14 @@ namespace LLVMJIT unsigned num_functions_stack_size_found = 0; for(const auto& stacksizes : jitModule->unitmemorymanager->stack_sizes) { - fc::datastream ds(reinterpret_cast(stacksizes.data()), stacksizes.size()); - while(ds.remaining()) { - uint64_t funcaddr; - fc::unsigned_int stack_size; - fc::raw::unpack(ds, funcaddr); - fc::raw::unpack(ds, stack_size); + llvm::DataExtractor ds(llvm::ArrayRef(stacksizes.data(), stacksizes.size()), true, 8); + llvm::DataExtractor::Cursor c(0); + + while(!ds.eof(c)) { + ds.getAddress(c); + WAVM_ASSERT_THROW(!!c); + const uint64_t stack_size = ds.getULEB128(c); + WAVM_ASSERT_THROW(!!c); ++num_functions_stack_size_found; if(stack_size > 16u*1024u) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h index 4d5a685c29..13e2510195 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h @@ -3,10 +3,6 @@ #include "Inline/BasicTypes.h" #include "IR/Module.h" -#pragma push_macro("N") -#undef N -#include "llvm/IR/Module.h" -#pragma pop_macro("N") #include #include @@ -19,9 +15,6 @@ struct instantiated_code { }; namespace LLVMJIT { - bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex); - const 
char* getTableSymbolName(); - llvm::Module* emitModule(const IR::Module& module); instantiated_code instantiateModule(const IR::Module& module); } }}} From 04bcbdb6eb73785849c3e7c90e30eaeb2025013a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 17 May 2023 18:00:12 -0400 Subject: [PATCH 006/107] Remove boost install from cicd docker files and pinned build --- .cicd/platforms/ubuntu20.Dockerfile | 1 - .cicd/platforms/ubuntu22.Dockerfile | 1 - scripts/pinned_build.sh | 19 +------------------ 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index baccb7c937..4296c802b9 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -6,7 +6,6 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1e5a936a4d..52ace75948 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -6,7 +6,6 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index c57257e957..36bd7ad397 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -101,27 +101,10 @@ install_llvm() { export LLVM_DIR="${LLVM_DIR}" } -install_boost() { - BOOST_DIR="$1" - - if [ ! -d "${BOOST_DIR}" ]; then - echo "Installing Boost ${BOOST_VER} @ ${BOOST_DIR}" - try wget -O "boost_${BOOST_VER//\./_}.tar.gz" "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VER}/source/boost_${BOOST_VER//\./_}.tar.gz" - try tar -xvzf "boost_${BOOST_VER//\./_}.tar.gz" -C "${DEP_DIR}" - pushdir "${BOOST_DIR}" - try ./bootstrap.sh -with-toolset=clang --prefix="${BOOST_DIR}/bin" - ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I\${CLANG_DIR}/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fPIE" linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-system --with-program_options --with-chrono --with-test -q -j "${JOBS}" install - popdir "${DEP_DIR}" - rm "boost_${BOOST_VER//\./_}.tar.gz" - fi - export BOOST_DIR="${BOOST_DIR}" -} - pushdir "${DEP_DIR}" install_clang "${DEP_DIR}/clang-${CLANG_VER}" install_llvm "${DEP_DIR}/llvm-${LLVM_VER}" -install_boost "${DEP_DIR}/boost_${BOOST_VER//\./_}" # go back to the directory where the script starts popdir "${START_DIR}" @@ -130,7 +113,7 @@ pushdir "${LEAP_DIR}" # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" -DCMAKE_PREFIX_PATH="${BOOST_DIR}/bin" "${SCRIPT_DIR}/.." +try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" "${SCRIPT_DIR}/.." 
try make -j "${JOBS}" try cpack From ee18ab5b88429cf236d4625d63503d4eadd8d3a5 Mon Sep 17 00:00:00 2001 From: 766C6164 Date: Wed, 24 May 2023 20:34:48 -0400 Subject: [PATCH 007/107] Make tester consumers built with same boost as leap --- CMakeModules/EosioTester.cmake.in | 26 +++++++++++++------------- CMakeModules/EosioTesterBuild.cmake.in | 26 +++++++++++++------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index a12004f73f..3d4151ef91 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -42,13 +42,8 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -94,12 +89,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -115,7 +116,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_INSTALL_PREFIX@ @CMAKE_INSTALL_FULL_INCLUDEDIR@ diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index aa67d25595..d82a6e84b5 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -39,13 +39,8 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -91,12 +86,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -112,7 +113,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_SOURCE_DIR@/libraries/chain/include @CMAKE_BINARY_DIR@/libraries/chain/include From 3ca72151e430a9afd9dd2481c7fdb15f936892b5 Mon Sep 17 00:00:00 2001 From: 766C6164 
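PATCH 007 above points the exported tester build files at the same submodule: `find_package(Boost ... EXACT REQUIRED ...)` is replaced by an `add_subdirectory` of `libraries/boost`, and the `${Boost_*_LIBRARY}` variables give way to `Boost::*` targets, so anything built through `add_eosio_test_executable` links the vendored `Boost::unit_test_framework` and friends. For orientation, the kind of translation unit those macros are meant to compile is an ordinary Boost.Test module; a minimal, self-contained sketch (hypothetical suite and test names, not taken from the repository):

```cpp
// Minimal Boost.Test module of the sort add_eosio_test_executable builds.
// BOOST_TEST_MODULE declares the master test suite; the program entry point
// comes from the Boost.Test framework linked in by the tester macros
// (built static, per Boost_USE_STATIC_LIBS).
#define BOOST_TEST_MODULE example_tester_suite
#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(example_suite)

BOOST_AUTO_TEST_CASE(addition_is_commutative) {
   BOOST_CHECK_EQUAL(2 + 3, 3 + 2);
}

BOOST_AUTO_TEST_SUITE_END()
```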
Date: Wed, 24 May 2023 20:39:40 -0400 Subject: [PATCH 008/107] Removed accidental leftover --- CMakeModules/EosioTester.cmake.in | 1 - CMakeModules/EosioTesterBuild.cmake.in | 1 - 2 files changed, 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 3d4151ef91..a4a668c237 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -43,7 +43,6 @@ endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) -include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index d82a6e84b5..609451efc2 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -40,7 +40,6 @@ endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) -include_directories(${boostorg_SOURCE_DIR}) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) From d43a7e720eed473fc7d3bc8cba6bcd146184f124 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:22:41 -0400 Subject: [PATCH 009/107] Fix build with boost submodule after catchup with main --- libraries/chainbase | 2 +- libraries/libfc/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chainbase b/libraries/chainbase index 07e7d20d3d..00556160c4 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 07e7d20d3d164bc56a926432f1625426bc1d29b1 +Subproject commit 00556160c4bafae6f2b19dc359f56bc56e76c11e diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 8957a5ff89..3b00430669 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -115,7 +115,7 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index Boost::dll Boost::multiprecision Boost::beast Boost::asio Boost::thread Boost::unit_test_framework Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) From 02011fb527eaa4f960f6415c639718ba3ffd2323 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:27:48 -0400 Subject: [PATCH 010/107] catchup appbase --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index ae8944308a..0a82417e0a 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit ae8944308acb526a7ced103685f29b9aafe6741e +Subproject commit 0a82417e0a9ca521190c3f761902dd4267c5576c From a990d74a2965d7a4dae7c1166299c52611f549d9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 5 Jul 2023 16:56:35 -0400 Subject: [PATCH 011/107] update chainbase --- libraries/chainbase | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 00556160c4..3fbcd9c687 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 00556160c4bafae6f2b19dc359f56bc56e76c11e +Subproject commit 3fbcd9c68758f43f1dd7aacd34f0ce98f6714da5 From f7affe87a465a41e808ecc8e4e67c5261417820a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 6 Jul 2023 11:31:27 -0400 Subject: [PATCH 012/107] Update `chainbase` and `appbase` to tip --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index 0a82417e0a..f078f79ac0 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 0a82417e0a9ca521190c3f761902dd4267c5576c +Subproject commit f078f79ac031837a312db4dc54903fe4104cf859 diff --git a/libraries/chainbase b/libraries/chainbase index 3fbcd9c687..148aac7461 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 3fbcd9c68758f43f1dd7aacd34f0ce98f6714da5 +Subproject commit 148aac7461fffbe8730ba0b55367dde6fdaa0e08 From 0ee195e398bbdac9634fa82e20832e0a27f1bbf2 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 6 Jul 2023 15:02:40 -0400 Subject: [PATCH 013/107] Add package zlib1g-dev to dockerfiles --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 4296c802b9..24d197e7f0 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,4 +12,5 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 52ace75948..440ec5dc0a 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,4 +12,5 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + zlib1g-dev \ zstd From 967e2178270a4ec6a4fd0f5bc16ffb015d61ab9b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 7 Jul 2023 13:29:26 -0400 Subject: [PATCH 014/107] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index f078f79ac0..02a08a374a 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit f078f79ac031837a312db4dc54903fe4104cf859 +Subproject commit 02a08a374a6018b9f9d067a7dfc35936d10a4c6d From 5ca2421b758cad9afdf12e9be96a3389b6e45da4 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 7 Jul 2023 15:51:14 -0400 Subject: [PATCH 015/107] Add `ubuntu-dev-tools` to .cicd Dockerfiles --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 24d197e7f0..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,5 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + ubuntu-dev-tools \ zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 440ec5dc0a..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,5 +12,6 @@ RUN apt-get update && apt-get 
upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ + ubuntu-dev-tools \ zlib1g-dev \ zstd From 11f71f8273e3d422735adc3d27e5dd37a510fcf4 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:03:58 -0400 Subject: [PATCH 016/107] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 02a08a374a..3492ca16e8 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 02a08a374a6018b9f9d067a7dfc35936d10a4c6d +Subproject commit 3492ca16e881d39ed20b0d0cdbe59156a699a10f From 244c7d3e52d7472d7a091f6774f41152dcff1438 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:15:04 -0400 Subject: [PATCH 017/107] point boost to AntelopeIO repo --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index f6cea8d706..d646d0340c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -33,4 +33,4 @@ url = https://github.com/AntelopeIO/CLI11.git [submodule "libraries/boost"] path = libraries/boost - url = https://github.com/boostorg/boost + url = https://github.com/AntelopeIO/boost From e2550cf9d9c40af5dedd570668a308796ac3a193 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:30:25 -0400 Subject: [PATCH 018/107] Update appbase to tip --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 3492ca16e8..6316189788 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 3492ca16e881d39ed20b0d0cdbe59156a699a10f +Subproject commit 63161897889248ebf8fb3bfae8cfe0936b373b6b From 8b4a134316d35720308c2763c420fbb052c9b79f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:36:03 -0400 Subject: [PATCH 019/107] remove old boost submodule --- .gitmodules | 3 --- libraries/boost | 1 - 2 files changed, 4 deletions(-) delete mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index d646d0340c..ab01b3d5c0 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,6 +31,3 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git -[submodule "libraries/boost"] - path = libraries/boost - url = https://github.com/AntelopeIO/boost diff --git a/libraries/boost b/libraries/boost deleted file mode 160000 index b6928ae5c9..0000000000 --- a/libraries/boost +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 From eae52736b9150143e6f6aa86551d9a6f95302985 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 13:40:06 -0400 Subject: [PATCH 020/107] Add boost submodule from AntelopeIO --- .gitmodules | 3 +++ libraries/boost | 1 + 2 files changed, 4 insertions(+) create mode 160000 libraries/boost diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..d646d0340c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/AntelopeIO/boost diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..25fd279875 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit 25fd27987595c4936fe83e640b625b97ea0bebb9 From 127e0465e56e9795b24120e7539227e63e75ee26 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 14:38:22 -0400 Subject: [PATCH 021/107] Update boost to tip --- 
libraries/boost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/boost b/libraries/boost index 25fd279875..41141acf3a 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit 25fd27987595c4936fe83e640b625b97ea0bebb9 +Subproject commit 41141acf3a937c357bf50cacd03269833b35049e From 1ab33ebdd8f507b15d94973bb60dbdc4dfbbe48e Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 15:06:31 -0400 Subject: [PATCH 022/107] whitespace change --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 71582a1ce4..c036a07f07 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # Leap -Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. +Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. ## Branches The `main` branch is the development branch; do not use it for production. Refer to the [release page](https://github.com/AntelopeIO/leap/releases) for current information on releases, pre-releases, and obsolete releases, as well as the corresponding tags for those releases. From a93484ee9a9cce6a232d1599d97a0362e581d3a5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 17:22:12 -0400 Subject: [PATCH 023/107] move boost `add_subdirectory()` to the libraries `CMakeLists.txt` --- CMakeLists.txt | 5 ----- README.md | 2 +- libraries/CMakeLists.txt | 4 ++++ 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 35fc09243e..d983207680 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,11 +101,6 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set( Boost_USE_MULTITHREADED ON ) -set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) - -add_subdirectory( libraries/boost ) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) diff --git a/README.md b/README.md index c036a07f07..71582a1ce4 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # Leap -Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. +Leap is a C++ implementation of the [Antelope](https://github.com/AntelopeIO) protocol. It contains blockchain node software and supporting tools for developers and node operators. ## Branches The `main` branch is the development branch; do not use it for production. Refer to the [release page](https://github.com/AntelopeIO/leap/releases) for current information on releases, pre-releases, and obsolete releases, as well as the corresponding tags for those releases. 
diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 4b041dd047..462d73801c 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( Boost_USE_MULTITHREADED ON ) +set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) +add_subdirectory( boost ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) From 14c34413416c16daeabfdcd93ed708e169e1bec6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 11 Jul 2023 18:19:05 -0400 Subject: [PATCH 024/107] Remove outdated `libboost-all-dev` references. --- README.md | 1 - package.cmake | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 71582a1ce4..9d0dfa8530 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,6 @@ sudo apt-get install -y \ build-essential \ cmake \ git \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ diff --git a/package.cmake b/package.cmake index ff3aebbd4b..c782938e54 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libboost-all-dev, libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-numpy") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 3a7c072015d9fd848ea2c9d95c9897acd40ac9bd Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Sat, 15 Jul 2023 18:43:37 -0500 Subject: [PATCH 025/107] Listen on multiple addresses for net_plugin p2p. 
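To accept several listen addresses, the diff below turns `p2p-listen-endpoint` into a `vector<string>` option with a one-element default. `boost::program_options` renders an option's default for the help text by passing it through `boost::lexical_cast<std::string>`, which has no overload for a vector of strings; supplying one is exactly the purpose of the `lexical_cast` specialization at the top of the patch. A standalone sketch of the same mechanism (only the option name and the specialization are taken from the patch, the rest is illustrative):

```cpp
#include <boost/algorithm/string/join.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
#include <iostream>
#include <string>
#include <vector>

// Same trick as the patch: teach lexical_cast to stringify a vector<string>
// so program_options can print the option's default value.
namespace boost {
   template<>
   inline std::string lexical_cast(const std::vector<std::string>& v) {
      return boost::join(v, ",");
   }
}

int main() {
   namespace bpo = boost::program_options;
   bpo::options_description cfg("net_plugin");
   cfg.add_options()
      ("p2p-listen-endpoint",
       // default_value() stringifies the default with lexical_cast<string>,
       // which is why the specialization above is needed for a vector option.
       bpo::value<std::vector<std::string>>()->default_value({"0.0.0.0:9876"}),
       "host:port to listen on; may be specified multiple times");
   std::cout << cfg << '\n';   // help text shows the default as (=0.0.0.0:9876)
}
```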
--- plugins/net_plugin/net_plugin.cpp | 118 ++++++++++++++++++++---------- 1 file changed, 79 insertions(+), 39 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 63242aa1b6..1222eb23b0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -42,6 +42,21 @@ using namespace eosio::chain::plugin_interface; +namespace boost +{ + /// @brief Overload for boost::lexical_cast to convert vector of strings to string + /// + /// Used by boost::program_options to print the default value of an std::vector option + /// + /// @param v the vector to convert + /// @return the contents of the vector as a comma-separated string + template<> + inline std::string lexical_cast(const std::vector& v) + { + return boost::join(v, ","); + } +} + namespace eosio { static auto _net_plugin = application::register_plugin(); @@ -395,8 +410,8 @@ namespace eosio { * Thread safe, only updated in plugin initialize * @{ */ - string p2p_address; - string p2p_server_address; + vector p2p_addresses; + vector p2p_server_addresses; vector allowed_peers; ///< peer keys allowed to connect std::mapthread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), + p2p_address( address), connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), @@ -2655,7 +2675,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string& p2p_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2681,7 +2701,7 @@ namespace eosio { visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); - connection_ptr new_connection = std::make_shared(std::move(socket)); + connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { connections.add(new_connection); @@ -3144,9 +3164,9 @@ namespace eosio { if (msg.time + c_time <= check_time) return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (my_impl->p2p_address < msg.p2p_address) { - fc_dlog( logger, "my_impl->p2p_address '${lhs}' < msg.p2p_address '${rhs}'", - ("lhs", my_impl->p2p_address)( "rhs", msg.p2p_address ) ); + if (p2p_address < msg.p2p_address) { + fc_dlog( logger, "p2p_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", p2p_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed return false; @@ -3828,7 +3848,7 @@ namespace eosio { // If we couldn't sign, don't send a token. 
if(hello.sig == chain::signature_type()) hello.token = sha256(); - hello.p2p_address = my_impl->p2p_address; + hello.p2p_address = p2p_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3860,8 +3880,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg ) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value()->default_value( "0.0.0.0:9876" ), "The actual host:port used to listen for incoming p2p connections.") - ( "p2p-server-address", bpo::value(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint.") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be specified multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be specified as many times as p2p-listen-endpoint") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" @@ -3882,7 +3902,7 @@ namespace eosio { ( "agent-name", bpo::value()->default_value("EOS Test Agent"), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") ( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. 
May be used multiple times.") - ( "peer-private-key", boost::program_options::value>()->composing()->multitoken(), + ( "peer-private-key", bpo::value>()->composing()->multitoken(), "Tuple of [PublicKey, WIF private key] (may specify multiple times)") ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") ( "connection-cleanup-period", bpo::value()->default_value(def_conn_retry_wait), "number of seconds to wait before cleaning up dead connections") @@ -3942,16 +3962,31 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as().length()) { - p2p_address = options.at( "p2p-listen-endpoint" ).as(); - EOS_ASSERT( p2p_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint too long, must be less than ${m}", ("m", max_p2p_address_length) ); + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty()) { + p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + std::unique(p2p_addresses.begin(), p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_ilog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for(auto& addr : p2p_addresses) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } if( options.count( "p2p-server-address" ) ) { - p2p_server_address = options.at( "p2p-server-address" ).as(); - EOS_ASSERT( p2p_server_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p_server_address too long, must be less than ${m}", ("m", max_p2p_address_length) ); + p2p_server_addresses = options.at( "p2p-server-address" ).as>(); + EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, + "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); + for( auto& addr: p2p_server_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-server-address ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } + p2p_server_addresses.resize(p2p_addresses.size()); // extend with empty entries as needed thread_pool_size = options.at( "net-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, chain::plugin_config_exception, @@ -4044,7 +4079,7 @@ namespace eosio { dispatcher = std::make_unique( my_impl->thread_pool.get_executor() ); - if( !p2p_accept_transactions && p2p_address.size() ) { + if( !p2p_accept_transactions && p2p_addresses.size() ) { fc_ilog( logger, "\n" "***********************************\n" "* p2p-accept-transactions = false *\n" @@ -4052,13 +4087,14 @@ namespace eosio { "***********************************\n" ); } - std::string listen_address = p2p_address; + std::vector listen_addresses = p2p_addresses; - if( !p2p_address.empty() ) { - auto [host, port] = fc::split_host_port(listen_address); + std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), + p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) 
{ + auto [host, port] = fc::split_host_port(p2p_address); if( !p2p_server_address.empty() ) { - p2p_address = p2p_server_address; + return p2p_server_address; } else if( host.empty() || host == "0.0.0.0" || host == "[::]") { boost::system::error_code ec; auto hostname = host_name( ec ); @@ -4068,9 +4104,10 @@ namespace eosio { "Unable to retrieve host_name. ${msg}", ("msg", ec.message())); } - p2p_address = hostname + ":" + port; + return hostname + ":" + port; } - } + return p2p_address; + }); { chain::controller& cc = chain_plug->chain(); @@ -4094,8 +4131,10 @@ namespace eosio { incoming_transaction_ack_subscription = app().get_channel().subscribe( [this](auto&& t) { transaction_ack(std::forward(t)); }); - app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(listen_address)](){ - if (address.size()) { + for(auto listen_itr = listen_addresses.begin(), p2p_iter = p2p_addresses.begin(); + listen_itr != listen_addresses.end(); + ++listen_itr, ++p2p_iter) { + app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(*listen_itr), p2p_addr = *p2p_iter](){ try { const boost::posix_time::milliseconds accept_timeout(100); @@ -4104,20 +4143,21 @@ namespace eosio { fc::create_listener( my->thread_pool.get_executor(), logger, accept_timeout, address, extra_listening_log_info, - [my = my](tcp::socket&& socket) { my->create_session(std::move(socket)); }); + [my = my, addr = p2p_addr](tcp::socket&& socket) { my->create_session(std::move(socket), addr); }); } catch (const std::exception& e) { fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}", ("addr", address)("what", e.what()) ); app().quit(); return; } - } - - my->ticker(); - my->start_monitors(); - my->update_chain_info(); - my->connections.connect_supplied_peers(); - }); + }); + app().executor().post(priority::highest, [my=shared_from_this()](){ + my->ticker(); + my->start_monitors(); + my->update_chain_info(); + my->connections.connect_supplied_peers(); + }); + } } void net_plugin::plugin_initialize( const variables_map& options ) { From d5fdd64a0d0d0b24276bbdae3696b8cc70e4a4ea Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 17 Jul 2023 11:10:51 -0500 Subject: [PATCH 026/107] Move post of timers outside listener post loop. --- plugins/net_plugin/net_plugin.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1222eb23b0..0f08cc947a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -4151,13 +4151,13 @@ namespace eosio { return; } }); - app().executor().post(priority::highest, [my=shared_from_this()](){ - my->ticker(); - my->start_monitors(); - my->update_chain_info(); - my->connections.connect_supplied_peers(); - }); } + app().executor().post(priority::highest, [my=shared_from_this()](){ + my->ticker(); + my->start_monitors(); + my->update_chain_info(); + my->connections.connect_supplied_peers(); + }); } void net_plugin::plugin_initialize( const variables_map& options ) { From 1c81166570655840918aea5e8167a1dd3f7935a0 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 17 Jul 2023 19:05:47 -0500 Subject: [PATCH 027/107] Restore running nodeos with zero p2p listen ports. 
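One detail of the endpoint handling added in PATCH 025 deserves spelling out: `std::unique` does not shrink a container, it only moves the unique elements to the front and returns an iterator to the new logical end, so the conventional way to deduplicate a sorted vector, and to make a before/after size comparison meaningful, is to erase from that iterator to `end()`. A self-contained sketch of the sort/unique/erase idiom (illustrative, not code from the plugin):

```cpp
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
   std::vector<std::string> endpoints{"0.0.0.0:9876", "127.0.0.1:9877", "0.0.0.0:9876"};

   std::sort(endpoints.begin(), endpoints.end());
   // std::unique only compacts the unique values toward the front; erasing the
   // returned tail is what actually changes size() and lets a before/after
   // comparison report how many duplicates were dropped.
   endpoints.erase(std::unique(endpoints.begin(), endpoints.end()), endpoints.end());

   assert(endpoints.size() == 2);
   return 0;
}
```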
--- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0f08cc947a..f8a4fb0596 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3962,7 +3962,7 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty()) { + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && options.at("p2p-listen-endpoint").as>()[0].length()) { p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); auto addr_count = p2p_addresses.size(); std::sort(p2p_addresses.begin(), p2p_addresses.end()); From ffcd97e6cf1aed6698e522fe1a359951249f466a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 11:16:29 -0400 Subject: [PATCH 028/107] Add boost to install --- CMakeLists.txt | 7 ++ scripts/MakeBoostDistro.py | 212 +++++++++++++++++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100755 scripts/MakeBoostDistro.py diff --git a/CMakeLists.txt b/CMakeLists.txt index d983207680..c66ee52cdf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,6 +272,13 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) +add_custom_target(boost_install ALL) +add_custom_command(TARGET boost_install + COMMAND ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist + VERBATIM) + +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/scripts/MakeBoostDistro.py b/scripts/MakeBoostDistro.py new file mode 100755 index 0000000000..e7e8f02fd9 --- /dev/null +++ b/scripts/MakeBoostDistro.py @@ -0,0 +1,212 @@ +#!/usr/bin/python3 +# + +# Prepare a boost checkout for release +# 1) Copy all the files at the root level to the dest folder ($DEST) +# 2) Copy all the "special" folders to the dest folder ($DEST) +# 3) copy all the files from $SOURCE/libs to $DEST/libs +# 4a) For each subproject, copy everything except "include" into $DEST/libs +# 4b) For each subproject, copy the contents of the "includes" folder into $DEST/boost +# +# Usage: %0 source dest + +from __future__ import print_function + +import os, sys +import shutil +import stat +import six +import datetime + +IgnoreFiles = shutil.ignore_patterns( + '[.]*', + '[.]gitattributes', + '[.]gitignore', + '[.]gitmodules', + '[.]travis[.]yml', + 'appveyor[.]yml', + 'circle[.]yml') + +def IgnoreFile(src, name): + return len(IgnoreFiles(src, [name])) > 0 + +## from +def MergeTree(src, dst, symlinks = False): + if not os.path.exists(dst): + os.makedirs(dst) + shutil.copystat(src, dst) + lst = os.listdir(src) + excl = IgnoreFiles(src, lst) + lst = [x for x in lst if x not in excl] + for item in lst: + s = os.path.join(src, item) + d = os.path.join(dst, item) + if symlinks and os.path.islink(s): + if 
os.path.lexists(d): + os.remove(d) + os.symlink(os.readlink(s), d) + try: + st = os.lstat(s) + mode = stat.S_IMODE(st.st_mode) + os.lchmod(d, mode) + except: + pass # lchmod not available + elif os.path.isdir(s): + MergeTree(s, d, symlinks) + else: + if os.path.exists(d): + print("## Overwriting file %s with %s" % (d, s)) + shutil.copy2(s, d) + + +def CopyFile (s, d, f): + if os.path.isfile(os.path.join(s,f)) and not IgnoreFile(s, f): + shutil.copy2(os.path.join(s,f), os.path.join(d,f)) + +def CopyDir (s, d, dd): + if os.path.isdir(os.path.join(s,dd)) and not IgnoreFile(s, dd): + shutil.copytree(os.path.join(s,dd), os.path.join(d,dd), symlinks=False, ignore=IgnoreFiles) + +def MergeIf(s, d, dd): +# if dd == 'detail': +# print "MergeIf %s -> %s" % (os.path.join(s, dd), os.path.join(d, dd)) + if os.path.exists(os.path.join(s, dd)): + MergeTree(os.path.join(s, dd), os.path.join(d, dd), symlinks=False) + +def CopyInclude(src, dst): + for item in os.listdir(src): + if IgnoreFile(src, item): + continue + if item == 'pending': + continue + if item == 'detail': + continue + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + MergeTree(s, d, symlinks=False) + else: + if os.path.exists(d): + print("## Overwriting file %s with %s" % (d, s)) + CopyFile(src, dst, item) + + +def CopySubProject(src, dst, headers, p): + # First, everything except the "include" directory + Source = os.path.join(src,p) + Dest = os.path.join(dst,p) + # print "CopySubProject %p" % p + os.makedirs(Dest) + for item in os.listdir(Source): + if os.path.isfile(os.path.join(Source, item)): + CopyFile(Source, Dest, item) + elif item != "include": + CopyDir(Source, Dest, item) + + #shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) + + # Now the includes + Source = os.path.join(src, "%s/include/boost" % p) + if os.path.exists(Source): + CopyInclude(Source, headers) +# MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) + MergeIf(Source, headers, 'detail') + MergeIf(Source, headers, 'pending') + + +def CopyNestedProject(src, dst, headers, p): + # First, everything except the "include" directory + Source = os.path.join(src,p[1]) + Dest = os.path.join(dst,p[1]) + os.makedirs(Dest) + for item in os.listdir(Source): + if os.path.isfile(os.path.join(Source, item)): + CopyFile(Source, Dest, item) + elif item != "include": + CopyDir(Source, Dest, item) + # shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) + + Source = os.path.join(src, "%s/include/boost" % (p[1])) + # Dest = os.path.join(headers, p) + # print "Installing headers from %s to %s" % (Source, headers) + CopyInclude(Source, headers) + # # MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) + # MergeIf(Source, headers, 'detail') + # MergeIf(Source, headers, 'pending') + +BoostHeaders = "boost" +BoostLibs = "libs" + +BoostSpecialFolders = [ "doc", "more", "status", "tools" ] + +SourceRoot = sys.argv[1] +DestRoot = sys.argv[2] + +print("Source = %s" % SourceRoot) +print("Dest = %s" % DestRoot) + +if not os.path.exists(SourceRoot): + print("## Error: %s does not exist" % SourceRoot) + exit(1) + +if os.path.exists(DestRoot): + print("The destination directory already exists. 
All good.\n") + exit(0) + #timestamp1 = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + #os.rename(DestRoot,DestRoot + "_bck_" + timestamp1) + +if not os.path.exists(DestRoot): + print("Creating destination directory %s" % DestRoot) + os.makedirs(DestRoot) + +DestHeaders = os.path.join(DestRoot, BoostHeaders) +DestLibs = os.path.join(DestRoot, BoostLibs) +os.makedirs(DestHeaders) +os.makedirs(DestLibs) + +## Step 1 +for f in os.listdir(SourceRoot): + if f != 'CMakeLists.txt': + CopyFile(SourceRoot, DestRoot, f) + +## Step 2 +for d in BoostSpecialFolders: + CopyDir(SourceRoot, DestRoot, d) + +## Step 3 +SourceLibs = os.path.join(SourceRoot, BoostLibs) +for f in os.listdir(SourceLibs): + CopyFile(SourceLibs, DestLibs, f) + +## Step 4 +BoostSubProjects = set() +for f in os.listdir(SourceLibs): + if os.path.isdir(os.path.join(SourceLibs,f)): + if os.path.isfile(os.path.join(SourceLibs,f,"meta","libraries.json")): + BoostSubProjects.add(f) + elif os.path.isdir(os.path.join(SourceLibs,f,"include")): + BoostSubProjects.add(f) + elif f == 'headers': + BoostSubProjects.add(f) + elif os.path.isfile(os.path.join(SourceLibs,f,"sublibs")): + for s in os.listdir(os.path.join(SourceLibs,f)): + if os.path.isdir(os.path.join(SourceLibs,f,s)): + if os.path.isfile(os.path.join(SourceLibs,f,s,"meta","libraries.json")): + BoostSubProjects.add((f,s)) + elif os.path.isdir(os.path.join(SourceLibs,f,s,"include")): + BoostSubProjects.add((f,s)) + +for p in BoostSubProjects: + if isinstance(p, six.string_types): + CopySubProject(SourceLibs, DestLibs, DestHeaders, p) + else: + NestedSource = os.path.join(SourceRoot,"libs",p[0]) + NestedDest = os.path.join(DestRoot,"libs",p[0]) + NestedHeaders = os.path.join(DestRoot,"boost") + if not os.path.exists(NestedDest): + os.makedirs(NestedDest) + if not os.path.exists(NestedHeaders): + os.makedirs(NestedHeaders) + for f in os.listdir(NestedSource): + CopyFile(NestedSource, NestedDest, f) + CopyNestedProject(NestedSource, NestedDest, NestedHeaders, p) From ea7131725813f2f3e2dc0d39f1608f67fe630c02 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 11:30:23 -0400 Subject: [PATCH 029/107] Update appbase --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 6316189788..fe1b3a6cd9 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 63161897889248ebf8fb3bfae8cfe0936b373b6b +Subproject commit fe1b3a6cd9b6f7529d6fb4beac0e880d136308a8 From 53228de8d2df05bd273bdf4fdf22ea8f653d8485 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 16:54:41 -0400 Subject: [PATCH 030/107] Add `python3` to command. 
--- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c66ee52cdf..9b729ab331 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -274,7 +274,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM add_custom_target(boost_install ALL) add_custom_command(TARGET boost_install - COMMAND ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist + COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) From 6d5b70b6aabf76961e4a730bd2b0bb18de326cfa Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Tue, 18 Jul 2023 17:04:05 -0400 Subject: [PATCH 031/107] update eos-vm version to make wasm globals thread safe --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index 1e9345f96a..73a88d245a 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 1e9345f96a4dcefa3a16ff51b58e2e7df739eeff +Subproject commit 73a88d245a594f5a85510ed1dee81e2ac7f535aa From 6ce21266aa9e9ef394c8cf81742616adc9a6d960 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 17:54:22 -0400 Subject: [PATCH 032/107] whitespace change --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9b729ab331..0a34b59c18 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -277,7 +277,7 @@ add_custom_command(TARGET boost_install COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 3204110e5ad11ee0e76078a5cda416a8925dc409 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:00:33 -0400 Subject: [PATCH 033/107] Add `python3-all` to `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu22.Dockerfile | 1 + CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..0ef6f4c86d 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-all \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a34b59c18..9b729ab331 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -277,7 +277,7 @@ add_custom_command(TARGET boost_install COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist VERBATIM) -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 158bd75cd03908f8396f9863cf5d6ab606b44eb9 Mon Sep 17 00:00:00 
2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:02:35 -0400 Subject: [PATCH 034/107] Add `python3-distutils` to `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu22.Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 0ef6f4c86d..fa3da263dd 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,6 +12,7 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 28a47da37001003fe5cb76e57aca1a5a731a7c0f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 18 Jul 2023 18:46:42 -0400 Subject: [PATCH 035/107] Add `python3-all` to `.cicd/platforms/ubuntu20.Dockerfile` and remove `python3-distutils` from `.cicd/platforms/ubuntu22.Dockerfile` --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..3924f7ffe5 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-all \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fa3da263dd..0ef6f4c86d 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,7 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-all \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From fe1faf8672b8ace12b86c18f98caffc4d6431b6c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 13:28:35 -0400 Subject: [PATCH 036/107] Update `CMakeModules/EosioTesterBuild.cmake.in` to support installed boost --- CMakeLists.txt | 13 ++++++++++++ CMakeModules/EosioTesterBuild.cmake.in | 28 +++++++++++++++++++++++--- libraries/CMakeLists.txt | 2 +- 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9b729ab331..7a87a9265e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,19 @@ add_custom_command(TARGET boost_install install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) +install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 331099f06a..dbb2cc85b5 100644 
--- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -33,7 +33,17 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) +else() + find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + date_time + filesystem + system + chrono + iostreams + unit_test_framework) +endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -77,8 +87,10 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ + @GMP_LIBRARY@ ) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -90,8 +102,18 @@ macro(add_eosio_test_executable test_name) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework + Boost::unit_test_framework) +else() + target_link_libraries( ${test_name} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_SYSTEM_LIBRARY} + ${Boost_CHRONO_LIBRARY} + ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams + ${Boost_DATE_TIME_LIBRARY}) +endif() + target_link_libraries( ${test_name} ${LLVM_LIBS} ${PLATFORM_SPECIFIC_LIBS} diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 462d73801c..e7ad9b144e 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -7,7 +7,7 @@ set(BN256_INSTALL_COMPONENT "dev") set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( boost ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) add_subdirectory( libfc ) add_subdirectory( builtins ) From db93c63a57cdcfd1e8f37de9e69528bbbbe0101d Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 12:39:08 -0500 Subject: [PATCH 037/107] Set p2p_address for outgoing connections as well. Outbound connections are always attributed to the first listen address. 
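For context, a minimal standalone sketch of the attribution rule described above, using simplified stand-in types rather than the actual net_plugin connection and connections_manager classes: every outbound connection is constructed with both the remote endpoint and the first configured listen address, and that address is what the outgoing handshake later advertises (in the patch itself this is the `*p2p_addresses.begin()` argument passed to `connect()` and `connect_supplied_peers()`).

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    struct connection {
       std::string peer_addr;    // remote peer we dial out to
       std::string p2p_address;  // local listen address advertised in the handshake
       connection(std::string endpoint, std::string address)
          : peer_addr(std::move(endpoint)), p2p_address(std::move(address)) {}
    };

    struct connections_manager {
       std::vector<std::string> p2p_addresses; // configured p2p-listen-endpoint values

       // Outbound connections are always attributed to the first listen address.
       connection connect(const std::string& host) const {
          assert(!p2p_addresses.empty());
          return connection(host, p2p_addresses.front());
       }
    };
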
--- .../eosio/net_plugin/auto_bp_peering.hpp | 2 +- plugins/net_plugin/net_plugin.cpp | 46 ++++++++++--------- .../tests/auto_bp_peering_unittest.cpp | 9 ++-- tests/CMakeLists.txt | 2 +- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp index a394312669..b5122f80aa 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp @@ -182,7 +182,7 @@ class bp_connection_manager { fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}", ("pending_downstream_neighbors", to_string(pending_downstream_neighbors))); - for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor]); } + for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); } pending_neighbors = std::move(pending_downstream_neighbors); finder.add_upstream_neighbors(pending_neighbors); diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f8a4fb0596..b437580a44 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -350,7 +350,7 @@ namespace eosio { private: // must call with held mutex connection_ptr find_connection_i(const string& host) const; void add_i(connection_ptr&& c); - void connect_i(const string& peer); + void connect_i(const string& peer, const string& p2p_address); void connection_monitor(const std::weak_ptr& from_connection); @@ -370,14 +370,14 @@ namespace eosio { void register_update_p2p_connection_metrics(std::function&& fun); - void connect_supplied_peers(); + void connect_supplied_peers(const string& p2p_address); void start_conn_timer(); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void stop_conn_timer(); void add(connection_ptr c); - string connect(const string& host); + string connect(const string& host, const string& p2p_address); string disconnect(const string& host); void close_all(); @@ -533,7 +533,7 @@ namespace eosio { bool in_sync() const; fc::logger& get_logger() { return logger; } - void create_session(tcp::socket&& socket, const string& p2p_address); + void create_session(tcp::socket&& socket, const string p2p_address); }; // peer_[x]log must be called from thread in connection strand @@ -765,7 +765,7 @@ namespace eosio { public: enum class connection_state { connecting, connected, closing, closed }; - explicit connection( const string& endpoint ); + explicit connection( const string& endpoint, const string& address ); /// @brief ctor /// @param socket created by boost::asio in fc::listener /// @param address identifier of listen socket which accepted this new connection @@ -808,6 +808,7 @@ namespace eosio { std::atomic conn_state{connection_state::connecting}; + string p2p_address; // address string used in handshake const string peer_addr; enum connection_types : char { both, @@ -828,7 +829,6 @@ namespace eosio { queued_buffer buffer_queue; - string p2p_address; // address string used in handshake fc::sha256 conn_node_id; string short_conn_node_id; string log_p2p_address; @@ -1146,8 +1146,9 @@ namespace eosio { //--------------------------------------------------------------------------- - connection::connection( const string& endpoint ) - : peer_addr( endpoint ), + 
connection::connection( const string& endpoint, const string& address ) + : p2p_address( address ), + peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), log_p2p_address( endpoint ), @@ -1161,16 +1162,17 @@ namespace eosio { } connection::connection(tcp::socket&& s, const string& address) - : peer_addr(), + : p2p_address( address), + peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), - p2p_address( address), connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), last_handshake_sent() { - fc_dlog( logger, "new connection object created" ); + update_endpoints(); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", p2p_address) ); } // called from connection strand @@ -1244,7 +1246,6 @@ namespace eosio { bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); - update_endpoints(); boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; socket->set_option( nodelay, ec ); @@ -2675,7 +2676,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket, const string& p2p_address) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string p2p_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2700,7 +2701,7 @@ namespace eosio { (auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); - +fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr", p2p_address)); connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { @@ -3848,6 +3849,7 @@ namespace eosio { // If we couldn't sign, don't send a token. 
if(hello.sig == chain::signature_type()) hello.token = sha256(); + peer_dlog( this, "populated handshake with address ${addr}", ("addr", p2p_address)); hello.p2p_address = p2p_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only @@ -4156,7 +4158,7 @@ namespace eosio { my->ticker(); my->start_monitors(); my->update_chain_info(); - my->connections.connect_supplied_peers(); + my->connections.connect_supplied_peers(*my->p2p_addresses.begin()); // attribute every outbound connection to the first listen port }); } @@ -4193,7 +4195,7 @@ namespace eosio { /// RPC API string net_plugin::connect( const string& host ) { - return my->connections.connect( host ); + return my->connections.connect( host, *my->p2p_addresses.begin() ); } /// RPC API @@ -4267,10 +4269,10 @@ namespace eosio { update_p2p_connection_metrics = std::move(fun); } - void connections_manager::connect_supplied_peers() { + void connections_manager::connect_supplied_peers(const string& p2p_address) { std::lock_guard g(connections_mtx); for (const auto& peer : supplied_peers) { - connect_i(peer); + connect_i(peer, p2p_address); } } @@ -4280,12 +4282,12 @@ namespace eosio { } // called by API - string connections_manager::connect( const string& host ) { + string connections_manager::connect( const string& host, const string& p2p_address ) { std::lock_guard g( connections_mtx ); if( find_connection_i( host ) ) return "already connected"; - connect_i( host ); + connect_i( host, p2p_address ); supplied_peers.insert(host); return "added connection"; } @@ -4342,8 +4344,8 @@ namespace eosio { } // call with connections_mtx - void connections_manager::connect_i( const string& host ) { - connection_ptr c = std::make_shared( host ); + void connections_manager::connect_i( const string& host, const string& p2p_address ) { + connection_ptr c = std::make_shared( host, p2p_address ); fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) ); diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp index 93ac898a5b..6aa7fbebd6 100644 --- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp +++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp @@ -18,7 +18,7 @@ struct mock_connections_manager { uint32_t max_client_count = 0; std::vector connections; - std::function connect; + std::function connect; std::function disconnect; uint32_t get_max_client_count() const { return max_client_count; } @@ -36,6 +36,7 @@ struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager p2p_addresses{"0.0.0.0:9876"}; bool in_sync() { return is_in_sync; } @@ -165,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_on_pending_schedule) { std::vector connected_hosts; - plugin.connections.connect = [&connected_hosts](std::string host) { connected_hosts.push_back(host); }; + plugin.connections.connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); }; // make sure nothing happens when it is not in_sync plugin.is_in_sync = false; @@ -209,7 +210,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule1) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string 
host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; @@ -245,7 +246,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule2) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0dbeb89358..be1df6a7dd 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -263,7 +263,7 @@ set_property(TEST nodeos_repeat_transaction_lr_test PROPERTY LABELS long_running add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST auto_bp_peering_test PROPERTY LABELS long_running_tests) add_test(NAME gelf_test COMMAND tests/gelf_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) From eb0282eb969394fa916519e935136b4f55bc035c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 13:50:36 -0400 Subject: [PATCH 038/107] Update `EosioTester.cmake.in` --- CMakeModules/EosioTester.cmake.in | 29 +++++++++++++++++++++++--- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index f4b78322b7..163c4eee9c 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -36,7 +36,17 @@ else ( APPLE ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) +else() + find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + date_time + filesystem + system + chrono + iostreams + unit_test_framework) +endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -80,8 +90,10 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ + @GMP_LIBRARY@) +IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) + target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -93,7 +105,18 @@ macro(add_eosio_test_executable test_name) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework + Boost::unit_test_framework) +else() + target_link_libraries( ${test_name} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_SYSTEM_LIBRARY} + ${Boost_CHRONO_LIBRARY} + ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams + ${Boost_DATE_TIME_LIBRARY}) +endif() + + target_link_libraries( ${test_name} ${LLVM_LIBS} diff 
--git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index dbb2cc85b5..fb43776048 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -87,7 +87,7 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@ ) + @GMP_LIBRARY@) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) target_link_libraries( ${test_name} From b4bc63e67e48a0b218756a648fa2e3be70e554d5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 14:19:22 -0400 Subject: [PATCH 039/107] remove boost version specification --- CMakeModules/EosioTester.cmake.in | 2 +- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 163c4eee9c..941422cb58 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,7 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + find_package(Boost REQUIRED COMPONENTS  date_time  filesystem  system diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index fb43776048..617319a200 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,7 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS + find_package(Boost REQUIRED COMPONENTS  date_time  filesystem  system From 91b96a0b35c10059c1470af6b18daa3b30b0e998 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 14:58:16 -0400 Subject: [PATCH 040/107] Use boost submodule organization always. 
--- CMakeLists.txt | 38 +++++++++++++------------- CMakeModules/EosioTester.cmake.in | 25 ++--------------- CMakeModules/EosioTesterBuild.cmake.in | 24 ++-------------- 3 files changed, 25 insertions(+), 62 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a87a9265e..0b44f388b3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,25 +272,25 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -add_custom_target(boost_install ALL) -add_custom_command(TARGET boost_install - COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist - VERBATIM) - -install(DIRECTORY "${CMAKE_BINARY_DIR}/boost-dist/boost" DESTINATION include COMPONENT dev EXCLUDE_FROM_ALL) - -install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) -install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) +#add_custom_target(boost_install ALL) +#add_custom_command(TARGET boost_install +# COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist +# VERBATIM) + +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) + +#install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) +#install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 941422cb58..a708e27164 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,13 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost 
${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -90,10 +84,8 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@) + @GMP_LIBRARY@ -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -105,18 +97,7 @@ IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework) -else() - target_link_libraries( ${test_name} - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} - "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY}) -endif() - - target_link_libraries( ${test_name} + Boost::unit_test_framework ${LLVM_LIBS} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 617319a200..599ca2d58d 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,13 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - find_package(Boost REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) @@ -87,10 +81,8 @@ macro(add_eosio_test_executable test_name) ${libbuiltins} ${libsecp256k1} ${libbn256} - @GMP_LIBRARY@) + @GMP_LIBRARY@ -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - target_link_libraries( ${test_name} Boost::date_time Boost::filesystem Boost::system @@ -102,18 +94,8 @@ IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) Boost::signals2 Boost::iostreams "-lz" # Needed by Boost iostreams - Boost::unit_test_framework) -else() - target_link_libraries( ${test_name} - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} - "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY}) -endif() + Boost::unit_test_framework - target_link_libraries( ${test_name} ${LLVM_LIBS} ${PLATFORM_SPECIFIC_LIBS} From 4d71d2aa0792420e9046331f62a0295081a2824b Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 14:02:03 -0500 Subject: [PATCH 041/107] Rename connection's p2p_address to listen_address. Erase undefined elements from p2p_addresses vector after std::unique(). Add assert before std::transform of two vectors. Const correctness and whitespace cleanup. 
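A standalone sketch of the two container fixes named above, using plain standard-library code rather than the plugin itself (the endpoint strings and the merge rule in the lambda are illustrative only): `std::unique` merely shifts duplicates past the iterator it returns, so the following `erase()` is what actually shrinks the vector, and `std::transform` over two ranges reads one element of the second range per element of the first, which is why the sizes are asserted equal beforehand.

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
       std::vector<std::string> p2p_addresses        = {"0.0.0.0:9876", "0.0.0.0:9877", "0.0.0.0:9876"};
       std::vector<std::string> p2p_server_addresses = {"ext1.example:9876", "ext2.example:9877"};

       // De-duplicate the listen endpoints: sort, unique, then erase the tail.
       std::sort(p2p_addresses.begin(), p2p_addresses.end());
       auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end());
       p2p_addresses.erase(last, p2p_addresses.end());

       // Pair each listen endpoint with its advertised server address; the two
       // vectors must be the same length before std::transform walks them together.
       assert(p2p_addresses.size() == p2p_server_addresses.size());
       std::transform(p2p_addresses.begin(), p2p_addresses.end(),
                      p2p_server_addresses.begin(), p2p_addresses.begin(),
                      [](const std::string& listen, const std::string& server) {
                         return server.empty() ? listen : server; // simplified merge rule
                      });
       return 0;
    }
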
--- plugins/net_plugin/net_plugin.cpp | 39 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b437580a44..4995feeb96 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -533,7 +533,7 @@ namespace eosio { bool in_sync() const; fc::logger& get_logger() { return logger; } - void create_session(tcp::socket&& socket, const string p2p_address); + void create_session(tcp::socket&& socket, const string listen_address); }; // peer_[x]log must be called from thread in connection strand @@ -808,7 +808,7 @@ namespace eosio { std::atomic conn_state{connection_state::connecting}; - string p2p_address; // address string used in handshake + string listen_address; // address sent to peer in handshake const string peer_addr; enum connection_types : char { both, @@ -1147,7 +1147,7 @@ namespace eosio { //--------------------------------------------------------------------------- connection::connection( const string& endpoint, const string& address ) - : p2p_address( address ), + : listen_address( address ), peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), @@ -1162,7 +1162,7 @@ namespace eosio { } connection::connection(tcp::socket&& s, const string& address) - : p2p_address( address), + : listen_address( address), peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), @@ -1172,7 +1172,7 @@ namespace eosio { last_handshake_sent() { update_endpoints(); - fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", p2p_address) ); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) ); } // called from connection strand @@ -2676,7 +2676,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket, const string p2p_address) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2701,8 +2701,8 @@ namespace eosio { (auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); -fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr", p2p_address)); - connection_ptr new_connection = std::make_shared(std::move(socket), p2p_address); + + connection_ptr new_connection = std::make_shared(std::move(socket), listen_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { connections.add(new_connection); @@ -3165,9 +3165,9 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr if (msg.time + c_time <= check_time) return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (p2p_address < msg.p2p_address) { - fc_dlog( logger, "p2p_address '${lhs}' < msg.p2p_address '${rhs}'", - ("lhs", p2p_address)( "rhs", msg.p2p_address ) ); + if (listen_address < msg.p2p_address) { + fc_dlog( logger, "listen_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", 
listen_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed return false; @@ -3849,8 +3849,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr // If we couldn't sign, don't send a token. if(hello.sig == chain::signature_type()) hello.token = sha256(); - peer_dlog( this, "populated handshake with address ${addr}", ("addr", p2p_address)); - hello.p2p_address = p2p_address; + hello.p2p_address = listen_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3964,15 +3963,16 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && options.at("p2p-listen-endpoint").as>()[0].length()) { + if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && !options.at("p2p-listen-endpoint").as>()[0].empty()) { p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); auto addr_count = p2p_addresses.size(); std::sort(p2p_addresses.begin(), p2p_addresses.end()); - std::unique(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { - fc_ilog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); } - for(auto& addr : p2p_addresses) { + for( const auto& addr : p2p_addresses ) { EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, "p2p-listen-endpoint ${a} too long, must be less than ${m}", ("a", addr)("m", max_p2p_address_length) ); @@ -3982,7 +3982,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr p2p_server_addresses = options.at( "p2p-server-address" ).as>(); EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); - for( auto& addr: p2p_server_addresses ) { + for( const auto& addr: p2p_server_addresses ) { EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, "p2p-server-address ${a} too long, must be less than ${m}", ("a", addr)("m", max_p2p_address_length) ); @@ -4091,6 +4091,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr std::vector listen_addresses = p2p_addresses; + EOS_ASSERT( p2p_addresses.size() == p2p_server_addresses.size(), chain::plugin_config_exception, "" ); std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) { auto [host, port] = fc::split_host_port(p2p_address); @@ -4109,7 +4110,7 @@ fc_dlog(logger, "Instantiating connection with listener address ${addr}", ("addr return hostname + ":" + port; } return p2p_address; - }); + }); { chain::controller& cc = 
chain_plug->chain(); From 17f6d07175e2a5fda4f560447f97d08c1e86faf2 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Wed, 19 Jul 2023 15:14:27 -0400 Subject: [PATCH 042/107] update to latest eos-vm --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index 73a88d245a..a5aadacab5 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 73a88d245a594f5a85510ed1dee81e2ac7f535aa +Subproject commit a5aadacab5c0695a9f8c71890b9aed25e3c881ea From 0910fdc1147e9af473d8b9ab9a66ee2622a1e5ef Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 15:17:56 -0400 Subject: [PATCH 043/107] Fix names in `.in` files. --- CMakeModules/EosioTester.cmake.in | 2 +- CMakeModules/EosioTesterBuild.cmake.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index a708e27164..fe531a72be 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -39,7 +39,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) + add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 599ca2d58d..649fd4e9e4 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -36,7 +36,7 @@ set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) else() - add_subdirectory( ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) + add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) endif() find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) From 0a177a99359a1d27506542528a2128d7910a6235 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 15:44:45 -0400 Subject: [PATCH 044/107] Add missing packages in debian install. 
--- CMakeLists.txt | 18 ------------------ package.cmake | 2 +- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0b44f388b3..32c223a6d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,26 +272,8 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -#add_custom_target(boost_install ALL) -#add_custom_command(TARGET boost_install -# COMMAND python3 ${CMAKE_SOURCE_DIR}/scripts/MakeBoostDistro.py ${CMAKE_SOURCE_DIR}/libraries/boost ${CMAKE_BINARY_DIR}/boost-dist -# VERBATIM) - install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_date_time COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_filesystem COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_system COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_chrono COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_multi_index COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_multiprecision COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_interprocess COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_asio COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_signals2 COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_iostreams COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_unit_test_framework COMPONENT dev EXCLUDE_FROM_ALL) -#install(TARGETS boost_headers COMPONENT dev EXCLUDE_FROM_ALL) - add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" diff --git a/package.cmake b/package.cmake index c782938e54..930acf4456 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-all, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 877799a998bc450a7e1d7f98ac712cdf73267f60 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 17:24:15 -0400 Subject: [PATCH 045/107] Cleanup changes and remove `.git` from boost install. 
--- CMakeLists.txt | 6 +- CMakeModules/EosioTester.cmake.in | 1 + CMakeModules/EosioTesterBuild.cmake.in | 1 + scripts/MakeBoostDistro.py | 212 ------------------------- 4 files changed, 7 insertions(+), 213 deletions(-) delete mode 100755 scripts/MakeBoostDistro.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 32c223a6d4..b0b2366ab0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -272,7 +272,11 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) -install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL) +# Add the boost submodule we used to build to our install package, so headers can be found for libtester +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" + DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost + PATTERN ".git" EXCLUDE + COMPONENT dev EXCLUDE_FROM_ALL) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index fe531a72be..0fa2ef5469 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -35,6 +35,7 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 649fd4e9e4..9a7774c97e 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -32,6 +32,7 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) diff --git a/scripts/MakeBoostDistro.py b/scripts/MakeBoostDistro.py deleted file mode 100755 index e7e8f02fd9..0000000000 --- a/scripts/MakeBoostDistro.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python3 -# - -# Prepare a boost checkout for release -# 1) Copy all the files at the root level to the dest folder ($DEST) -# 2) Copy all the "special" folders to the dest folder ($DEST) -# 3) copy all the files from $SOURCE/libs to $DEST/libs -# 4a) For each subproject, copy everything except "include" into $DEST/libs -# 4b) For each subproject, copy the contents of the "includes" folder into $DEST/boost -# -# Usage: %0 source dest - -from __future__ import print_function - -import os, sys -import shutil -import stat -import six -import datetime - -IgnoreFiles = shutil.ignore_patterns( - '[.]*', - '[.]gitattributes', - '[.]gitignore', - '[.]gitmodules', - '[.]travis[.]yml', - 'appveyor[.]yml', - 'circle[.]yml') - -def IgnoreFile(src, name): - return len(IgnoreFiles(src, [name])) > 0 - -## from -def MergeTree(src, dst, symlinks = False): - if not os.path.exists(dst): - 
os.makedirs(dst) - shutil.copystat(src, dst) - lst = os.listdir(src) - excl = IgnoreFiles(src, lst) - lst = [x for x in lst if x not in excl] - for item in lst: - s = os.path.join(src, item) - d = os.path.join(dst, item) - if symlinks and os.path.islink(s): - if os.path.lexists(d): - os.remove(d) - os.symlink(os.readlink(s), d) - try: - st = os.lstat(s) - mode = stat.S_IMODE(st.st_mode) - os.lchmod(d, mode) - except: - pass # lchmod not available - elif os.path.isdir(s): - MergeTree(s, d, symlinks) - else: - if os.path.exists(d): - print("## Overwriting file %s with %s" % (d, s)) - shutil.copy2(s, d) - - -def CopyFile (s, d, f): - if os.path.isfile(os.path.join(s,f)) and not IgnoreFile(s, f): - shutil.copy2(os.path.join(s,f), os.path.join(d,f)) - -def CopyDir (s, d, dd): - if os.path.isdir(os.path.join(s,dd)) and not IgnoreFile(s, dd): - shutil.copytree(os.path.join(s,dd), os.path.join(d,dd), symlinks=False, ignore=IgnoreFiles) - -def MergeIf(s, d, dd): -# if dd == 'detail': -# print "MergeIf %s -> %s" % (os.path.join(s, dd), os.path.join(d, dd)) - if os.path.exists(os.path.join(s, dd)): - MergeTree(os.path.join(s, dd), os.path.join(d, dd), symlinks=False) - -def CopyInclude(src, dst): - for item in os.listdir(src): - if IgnoreFile(src, item): - continue - if item == 'pending': - continue - if item == 'detail': - continue - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - MergeTree(s, d, symlinks=False) - else: - if os.path.exists(d): - print("## Overwriting file %s with %s" % (d, s)) - CopyFile(src, dst, item) - - -def CopySubProject(src, dst, headers, p): - # First, everything except the "include" directory - Source = os.path.join(src,p) - Dest = os.path.join(dst,p) - # print "CopySubProject %p" % p - os.makedirs(Dest) - for item in os.listdir(Source): - if os.path.isfile(os.path.join(Source, item)): - CopyFile(Source, Dest, item) - elif item != "include": - CopyDir(Source, Dest, item) - - #shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) - - # Now the includes - Source = os.path.join(src, "%s/include/boost" % p) - if os.path.exists(Source): - CopyInclude(Source, headers) -# MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) - MergeIf(Source, headers, 'detail') - MergeIf(Source, headers, 'pending') - - -def CopyNestedProject(src, dst, headers, p): - # First, everything except the "include" directory - Source = os.path.join(src,p[1]) - Dest = os.path.join(dst,p[1]) - os.makedirs(Dest) - for item in os.listdir(Source): - if os.path.isfile(os.path.join(Source, item)): - CopyFile(Source, Dest, item) - elif item != "include": - CopyDir(Source, Dest, item) - # shutil.copytree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', "include")) - - Source = os.path.join(src, "%s/include/boost" % (p[1])) - # Dest = os.path.join(headers, p) - # print "Installing headers from %s to %s" % (Source, headers) - CopyInclude(Source, headers) - # # MergeTree(Source, Dest, symlinks=False, ignore=shutil.ignore_patterns('\.*', 'detail', 'pending')) - # MergeIf(Source, headers, 'detail') - # MergeIf(Source, headers, 'pending') - -BoostHeaders = "boost" -BoostLibs = "libs" - -BoostSpecialFolders = [ "doc", "more", "status", "tools" ] - -SourceRoot = sys.argv[1] -DestRoot = sys.argv[2] - -print("Source = %s" % SourceRoot) -print("Dest = %s" % DestRoot) - -if not os.path.exists(SourceRoot): - print("## Error: %s does not exist" % SourceRoot) - exit(1) - -if 
os.path.exists(DestRoot): - print("The destination directory already exists. All good.\n") - exit(0) - #timestamp1 = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") - #os.rename(DestRoot,DestRoot + "_bck_" + timestamp1) - -if not os.path.exists(DestRoot): - print("Creating destination directory %s" % DestRoot) - os.makedirs(DestRoot) - -DestHeaders = os.path.join(DestRoot, BoostHeaders) -DestLibs = os.path.join(DestRoot, BoostLibs) -os.makedirs(DestHeaders) -os.makedirs(DestLibs) - -## Step 1 -for f in os.listdir(SourceRoot): - if f != 'CMakeLists.txt': - CopyFile(SourceRoot, DestRoot, f) - -## Step 2 -for d in BoostSpecialFolders: - CopyDir(SourceRoot, DestRoot, d) - -## Step 3 -SourceLibs = os.path.join(SourceRoot, BoostLibs) -for f in os.listdir(SourceLibs): - CopyFile(SourceLibs, DestLibs, f) - -## Step 4 -BoostSubProjects = set() -for f in os.listdir(SourceLibs): - if os.path.isdir(os.path.join(SourceLibs,f)): - if os.path.isfile(os.path.join(SourceLibs,f,"meta","libraries.json")): - BoostSubProjects.add(f) - elif os.path.isdir(os.path.join(SourceLibs,f,"include")): - BoostSubProjects.add(f) - elif f == 'headers': - BoostSubProjects.add(f) - elif os.path.isfile(os.path.join(SourceLibs,f,"sublibs")): - for s in os.listdir(os.path.join(SourceLibs,f)): - if os.path.isdir(os.path.join(SourceLibs,f,s)): - if os.path.isfile(os.path.join(SourceLibs,f,s,"meta","libraries.json")): - BoostSubProjects.add((f,s)) - elif os.path.isdir(os.path.join(SourceLibs,f,s,"include")): - BoostSubProjects.add((f,s)) - -for p in BoostSubProjects: - if isinstance(p, six.string_types): - CopySubProject(SourceLibs, DestLibs, DestHeaders, p) - else: - NestedSource = os.path.join(SourceRoot,"libs",p[0]) - NestedDest = os.path.join(DestRoot,"libs",p[0]) - NestedHeaders = os.path.join(DestRoot,"boost") - if not os.path.exists(NestedDest): - os.makedirs(NestedDest) - if not os.path.exists(NestedHeaders): - os.makedirs(NestedHeaders) - for f in os.listdir(NestedSource): - CopyFile(NestedSource, NestedDest, f) - CopyNestedProject(NestedSource, NestedDest, NestedHeaders, p) From ea6793c7e6ca777b14d9d18dac95713bdddd3a49 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Wed, 19 Jul 2023 18:10:01 -0400 Subject: [PATCH 046/107] update eos-vm --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index a5aadacab5..eb7961543a 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit a5aadacab5c0695a9f8c71890b9aed25e3c881ea +Subproject commit eb7961543a6a1cf16fe66da824dc326e5daaecd8 From 4483aedd305e62999c807b79f882fb737e32b649 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 19 Jul 2023 19:28:00 -0400 Subject: [PATCH 047/107] Move `COMPONENT` before `PATTERN` in `install` command. 
--- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b0b2366ab0..02fe5ce94c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -275,8 +275,8 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM # Add the boost submodule we used to build to our install package, so headers can be found for libtester install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost - PATTERN ".git" EXCLUDE - COMPONENT dev EXCLUDE_FROM_ALL) + COMPONENT dev EXCLUDE_FROM_ALL + PATTERN ".git" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 3c10138c25d6797337e3039d1ad3d695c600c5f6 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 19 Jul 2023 19:50:56 -0500 Subject: [PATCH 048/107] Add p2p multiple listen port test and new fields to connections API --- .../include/eosio/net_plugin/net_plugin.hpp | 13 +++- plugins/net_plugin/net_plugin.cpp | 3 + tests/CMakeLists.txt | 4 +- tests/p2p_multiple_listen_test.py | 75 +++++++++++++++++++ 4 files changed, 90 insertions(+), 5 deletions(-) create mode 100755 tests/p2p_multiple_listen_test.py diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 8eafaba2e5..5d5d12ef40 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -9,9 +9,14 @@ namespace eosio { struct connection_status { string peer; - bool connecting = false; - bool syncing = false; - bool is_bp_peer = false; + string remote_ip; + string remote_port; + bool connecting = false; + bool syncing = false; + bool is_bp_peer = false; + bool is_socket_open = false; + bool is_blocks_only = false; + bool is_transactions_only = false; handshake_message last_handshake; }; @@ -49,4 +54,4 @@ namespace eosio { } -FC_REFLECT( eosio::connection_status, (peer)(connecting)(syncing)(is_bp_peer)(last_handshake) ) +FC_REFLECT( eosio::connection_status, (peer)(remote_ip)(remote_port)(connecting)(syncing)(is_bp_peer)(is_socket_open)(is_blocks_only)(is_transactions_only)(last_handshake) ) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4995feeb96..936fa409c1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1234,9 +1234,12 @@ namespace eosio { connection_status connection::get_status()const { connection_status stat; stat.peer = peer_addr; + stat.remote_ip = log_remote_endpoint_ip; + stat.remote_port = log_remote_endpoint_port; stat.connecting = state() == connection_state::connecting; stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; + stat.is_socket_open = socket_is_open(); fc::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index be1df6a7dd..d3ecc359f4 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -49,6 +49,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py 
COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -183,7 +184,8 @@ set_property(TEST nested_container_multi_index_test PROPERTY LABELS nonparalleli add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) - +add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) # needs iproute-tc or iproute2 depending on platform #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py new file mode 100755 index 0000000000..f21d07d46a --- /dev/null +++ b/tests/p2p_multiple_listen_test.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 + +import signal + +from TestHarness import Cluster, TestHelper, Utils, WalletMgr + +############################################################### +# p2p_multiple_listen_test +# +# Test nodeos ability to listen on multiple ports for p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"-p","-n","-d","--keep-logs" + ,"--dump-error-details","-v" + ,"--leave-running","--unshared"}) +pnodes=args.p +delay=args.d +debug=args.v +total_nodes=4 +dumpErrorDetails=args.dump_error_details + +Utils.Debug=debug +testSuccessful=False + +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) +walletMgr=WalletMgr(True) + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + + Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') + + Print("Stand up cluster") + specificArgs = { + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address localhost:9779 --plugin eosio::net_api_plugin', + '2': '--agent-name node-02 --p2p-peer-address localhost:9779 --plugin eosio::net_api_plugin', + } + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='ring', delay=delay, + specificExtraNodeosArgs=specificArgs) is False: + errorExit("Failed to stand up eos cluster.") + + cluster.waitOnClusterSync(blockAdvancing=5) + cluster.biosNode.kill(signal.SIGTERM) + cluster.getNode(1).kill(signal.SIGTERM) + cluster.getNode(3).kill(signal.SIGTERM) + cluster.waitOnClusterSync(blockAdvancing=5) + connections = cluster.nodes[0].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-02', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-02' + assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9878', 'Connected node is not listening on port 9878' + assert 
open_socket_count == 1, 'Node 0 is expected to have only one open socket' + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-00' + assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9779', 'Connected node is not listening on port 9779' + assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From ae0d542b5f7eab7fb08642b98b9b103336a0a816 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 16:45:38 -0400 Subject: [PATCH 049/107] Use `python3-distutils` instead of `python3-all` --- .cicd/platforms/ubuntu20.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 3924f7ffe5..6d27a13fae 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 82ba8aa055c01e0446b089b8e224d1b22b25e726 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:30:45 -0400 Subject: [PATCH 050/107] Install `python3-distutils` instread of `python3-all` in our containers. --- .cicd/platforms/ubuntu22.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 0ef6f4c86d..1d86365382 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-all \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From a3ce6d565941cf4ab7f823fb0c20e0fbd8ef055f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:32:26 -0400 Subject: [PATCH 051/107] Update readme --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d0dfa8530..e979c6d509 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,9 @@ sudo apt-get install -y \ libgmp-dev \ libssl-dev \ llvm-11-dev \ - python3-numpy + python3-numpy \ + ubuntu-dev-tools \ + zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: ```bash From 4b7abd0b284b717a9b838ed223102026f09fd612 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:38:44 -0400 Subject: [PATCH 052/107] Remove unneeded `find_package` in `libfc` --- libraries/libfc/CMakeLists.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index 3b00430669..742501ca9f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -78,10 +78,6 @@ if(APPLE) add_library(zstd INTERFACE) endif() -if(NOT boost_headers_SOURCE_DIR) - find_package(Boost REQUIRED COMPONENTS date_time chrono unit_test_framework iostreams) -endif() - find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) if(NOT GMP_LIBRARY MATCHES 
${CMAKE_SHARED_LIBRARY_SUFFIX}) From 344c4ff328d22fae16d6d95431e3d3b033fc0536 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:40:18 -0400 Subject: [PATCH 053/107] use `python3-distutils` instead of `python3-all` in `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index 930acf4456..b2000e1ed5 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-all, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 07c9402cc7f51961dd421749c0dafc14b2476caf Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 17:41:54 -0400 Subject: [PATCH 054/107] Remove unused `BOOST_VER` variable from `scripts/pinned_build.sh` --- scripts/pinned_build.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index f29f1a410e..ebf37d29d7 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -30,7 +30,6 @@ DEP_DIR="$(realpath "$1")" LEAP_DIR="$2" JOBS="$3" CLANG_VER=11.0.1 -BOOST_VER=1.82.0 LLVM_VER=11.0.1 SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; START_DIR="$(pwd)" From 8d09101fcaaac51ce8dae33c0650028e3fabd612 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 20 Jul 2023 17:00:50 -0500 Subject: [PATCH 055/107] GH-1417 Better track trx idle time and block time tracking. Also add producer to Block time report. 
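The diff below changes block_time_tracker::report to take the block number and producer directly and moves the idle-time bookkeeping next to the code that actually handles incoming blocks and produced blocks. As a rough, self-contained illustration of this style of per-block accounting (all names and types here are invented for the sketch; the plugin itself uses fc::time_point, fc::microseconds and fc_dlog, as the patch shows):

   #include <chrono>
   #include <cstdint>
   #include <iostream>
   #include <string>

   // Illustrative only: hypothetical, simplified stand-in for block_time_tracker.
   struct simple_time_tracker {
      using clock = std::chrono::steady_clock;
      clock::duration   idle{}, trx_success{}, trx_fail{};
      uint32_t          success_num = 0, fail_num = 0;
      clock::time_point cleared = clock::now();

      void add_idle_time(clock::duration d)    { idle += d; }
      void add_success_time(clock::duration d) { trx_success += d; ++success_num; }
      void add_fail_time(clock::duration d)    { trx_fail += d; ++fail_num; }

      // keyed by block number and producer, mirroring the new report(block_num, producer)
      void report(uint32_t block_num, const std::string& producer) const {
         auto us = [](clock::duration d) {
            return std::chrono::duration_cast<std::chrono::microseconds>(d).count();
         };
         std::cout << "Block #" << block_num << ' ' << producer
                   << " trx idle: " << us(idle) << "us out of " << us(clock::now() - cleared) << "us"
                   << ", success: " << success_num << ", " << us(trx_success) << "us"
                   << ", fail: "    << fail_num    << ", " << us(trx_fail)    << "us\n";
      }
      void clear() { *this = simple_time_tracker{}; }
   };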
--- plugins/producer_plugin/producer_plugin.cpp | 32 ++++++++++++++------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 71ca488274..4c56d481d9 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -278,12 +278,11 @@ struct block_time_tracker { } } - void report( const fc::time_point& idle_trx_time, uint32_t block_num ) { + void report(uint32_t block_num, account_name producer) { if( _log.is_enabled( fc::log_level::debug ) ) { auto now = fc::time_point::now(); - add_idle_time( now - idle_trx_time ); - fc_dlog( _log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", - ("n", block_num) + fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", + ("n", block_num)("p", producer) ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) ("fn", trx_fail_num)("f", trx_fail_time) ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) @@ -558,11 +557,18 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); + std::optional> block_info; if( chain.is_building_block() ) { - _time_tracker.report( _idle_trx_time, chain.pending_block_num() ); + block_info = std::make_tuple(chain.pending_block_num(), chain.pending_block_producer()); } _unapplied_transactions.add_aborted( chain.abort_block() ); _subjective_billing.abort_block(); + + if (block_info) { + auto[block_num, block_producer] = *block_info; + _time_tracker.report(block_num, block_producer); + } + _time_tracker.clear(); _idle_trx_time = fc::time_point::now(); } @@ -586,6 +592,8 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); + _time_tracker.add_idle_time(now - _idle_trx_time); + EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id) ); @@ -2028,7 +2036,6 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { _account_fails.report_and_clear(hbs->block_num); - _time_tracker.clear(); if( !remove_expired_trxs( preprocess_deadline ) ) return start_block_result::exhausted; @@ -2624,8 +2631,6 @@ void producer_plugin_impl::schedule_production_loop() { auto result = start_block(); - _idle_trx_time = fc::time_point::now(); - if (result == start_block_result::failed) { elog("Failed to start a pending block, will try again later"); _timer.expires_from_now( boost::posix_time::microseconds( config::block_interval_us / 10 )); @@ -2661,6 +2666,8 @@ void producer_plugin_impl::schedule_production_loop() { } else { fc_dlog(_log, "Speculative Block Created"); } + + _idle_trx_time = fc::time_point::now(); } void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { @@ -2770,6 +2777,8 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); @@ -2811,8 +2820,6 @@ void producer_plugin_impl::produce_block() { 
block_state_ptr new_bs = chain.head_block_state(); - _time_tracker.report(_idle_trx_time, new_bs->block_num); - br.total_time += fc::time_point::now() - start; ++_metrics.blocks_produced.value; @@ -2825,6 +2832,11 @@ void producer_plugin_impl::produce_block() { ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num()) ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time) ("confs", new_bs->header.confirmed)); + + _time_tracker.report(new_bs->block_num, new_bs->block->producer); + _time_tracker.clear(); + + _idle_trx_time = fc::time_point::now(); } void producer_plugin::received_block(uint32_t block_num) { From 27a8f53fc8b75da9fe030ef9e2a7c35c1f0f96ae Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 18:37:23 -0400 Subject: [PATCH 056/107] Don't need the `IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost)` since the two `EosioTester` files are for install or build tree. --- CMakeModules/EosioTester.cmake.in | 7 ++----- CMakeModules/EosioTesterBuild.cmake.in | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 0fa2ef5469..8b1135bd40 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -37,11 +37,8 @@ endif ( APPLE ) set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -else() - add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -endif() + +add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 9a7774c97e..6beb37467b 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -34,11 +34,8 @@ endif ( APPLE ) set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -IF(EXISTS @CMAKE_SOURCE_DIR@/libraries/boost) - add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -else() - add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) -endif() + +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) From b2b0af01ad07eb5b5c9513f09c25825c2b47a640 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 18:41:12 -0400 Subject: [PATCH 057/107] Removed `python3-distutils` from docker images. 
--- .cicd/platforms/ubuntu20.Dockerfile | 1 - .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 6d27a13fae..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1d86365382..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From a4f939990ee55bd7f190f5b4c654f0fb23f0aa2f Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 20 Jul 2023 18:12:15 -0500 Subject: [PATCH 058/107] Expand description of p2p-server-address option and normalize language. --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 936fa409c1..9504c555b2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -765,11 +765,11 @@ namespace eosio { public: enum class connection_state { connecting, connected, closing, closed }; - explicit connection( const string& endpoint, const string& address ); + explicit connection( const string& endpoint, const string& listen_address ); /// @brief ctor /// @param socket created by boost::asio in fc::listener /// @param address identifier of listen socket which accepted this new connection - explicit connection( tcp::socket&& socket, const string& address ); + explicit connection( tcp::socket&& socket, const string& listen_address ); ~connection() = default; connection( const connection& ) = delete; @@ -1146,8 +1146,8 @@ namespace eosio { //--------------------------------------------------------------------------- - connection::connection( const string& endpoint, const string& address ) - : listen_address( address ), + connection::connection( const string& endpoint, const string& listen_address ) + : listen_address( listen_address ), peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), @@ -1161,8 +1161,8 @@ namespace eosio { fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) ); } - connection::connection(tcp::socket&& s, const string& address) - : listen_address( address), + connection::connection(tcp::socket&& s, const string& listen_address) + : listen_address( listen_address ), peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), @@ -3884,8 +3884,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg ) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be specified multiple times.") - ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. 
May be specified as many times as p2p-listen-endpoint") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" From 0de5ba1e705362ae0ac6ae01200548614530c657 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 20 Jul 2023 20:16:40 -0400 Subject: [PATCH 059/107] Revert last commit. --- .cicd/platforms/ubuntu20.Dockerfile | 1 + .cicd/platforms/ubuntu22.Dockerfile | 1 + 2 files changed, 2 insertions(+) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..6d27a13fae 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..1d86365382 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ + python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 066e7853f1e03234d096b383e758ab394f25eff4 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Thu, 20 Jul 2023 21:02:56 -0400 Subject: [PATCH 060/107] update eos-vm --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index eb7961543a..65568f5e5e 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit eb7961543a6a1cf16fe66da824dc326e5daaecd8 +Subproject commit 65568f5e5ee0d79aeb2a382a58d0596ae28d0434 From ffc80c53bc83bc1a165e46664f688792eff47c1d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 07:46:32 -0500 Subject: [PATCH 061/107] GH-1416 Change default max-transaction-cpu-usage for integration/performance tests to 475ms --- tests/TestHarness/launcher.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 665ed4fc13..bc662a4612 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -200,8 +200,8 @@ def comma_separated(string): cfg.add_argument('--enable-gelf-logging', action='store_true', help='enable gelf logging appender in logging configuration file', default=False) cfg.add_argument('--gelf-endpoint', help='hostname:port or ip:port of GELF endpoint', default='128.0.0.1:12201') cfg.add_argument('--template', help='the startup script template', default='testnet.template') - cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=200000) - cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=150000) + 
cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=None) + cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=None) cfg.add_argument('--logging-level', type=fc_log_level, help='Provide the "level" value to use in the logging.json file') cfg.add_argument('--logging-level-map', type=json.loads, help='JSON string of a logging level dictionary to use in the logging.json file for specific nodes, matching based on node number. Ex: {"bios":"off","00":"info"}') cfg.add_argument('--is-nodeos-v2', action='store_true', help='Toggles old nodeos compatibility', default=False) @@ -359,9 +359,9 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, - 'max_transaction_cpu_usage': self.args.max_transaction_cpu_usage, + 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, 'max_transaction_lifetime': 3600, 'deferred_trx_expiration_window': 600, @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage - genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage + genesis['initial_configuration']['max_block_cpu_usage'] = 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage + genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From feb9b29fe72ed001239bece41c66d1ad4d413704 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 09:09:16 -0400 Subject: [PATCH 062/107] Switch to using boost from `boostorg` instead of `AntelopeIO` --- .gitmodules | 2 +- libraries/boost | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index d646d0340c..022c13dfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -33,4 +33,4 @@ url = https://github.com/AntelopeIO/CLI11.git [submodule "libraries/boost"] path = libraries/boost - url = https://github.com/AntelopeIO/boost + url = https://github.com/boostorg/boost.git diff --git a/libraries/boost b/libraries/boost index 41141acf3a..c8b2c632fd 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit 41141acf3a937c357bf50cacd03269833b35049e +Subproject commit c8b2c632fdea9560de4fecbbff202dcd05910c6c From 89ff1f49e174b35dd4698a18827eb7c9973ff336 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 09:15:17 -0400 Subject: [PATCH 063/107] Move boost to `boost-1.82.0` tag --- libraries/boost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/boost b/libraries/boost index c8b2c632fd..b6928ae5c9 160000 --- a/libraries/boost +++ b/libraries/boost @@ -1 +1 @@ -Subproject commit c8b2c632fdea9560de4fecbbff202dcd05910c6c +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 From 
17f2b2ae923bac6778a48ced7423f74964acd6ad Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 08:25:58 -0500 Subject: [PATCH 064/107] GH-1416 Change default max-block-cpu-usage for integration/performance tests to 500ms --- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index bc662a4612..1f420d5fa1 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -359,7 +359,7 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, @@ -375,7 +375,7 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = 200000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage + genesis['initial_configuration']['max_block_cpu_usage'] = 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage return genesis From 9ff0583b33259b965bd11f3bc228cd2a30e7c814 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 21 Jul 2023 09:51:56 -0500 Subject: [PATCH 065/107] Ubuntu documents apt-get install or dpkg -i as acceptable paths for .deb installs. Use apt-get instead of apt. 
--- .github/workflows/build.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 286d3de481..037bff97c4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -91,8 +91,8 @@ jobs: cpack - name: Install dev package run: | - apt update && apt upgrade -y - apt install -y ./build/leap_*.deb ./build/leap-dev*.deb + apt-get update + apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | python3 -c "from TestHarness import Cluster" @@ -255,7 +255,8 @@ jobs: token: ${{github.token}} - name: Install cdt Packages run: | - apt install -y ./*.deb + apt-get update + apt-get install -y ./*.deb rm ./*.deb # Reference Contracts From 4c7fb728cf644eb4a84cfe1221d9cd6f69c60754 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 11:16:58 -0400 Subject: [PATCH 066/107] Change checkout action to `submodules: recursive` --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 286d3de481..be76e51c35 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -79,7 +79,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - submodules: true + submodules: recursive - name: Download builddir uses: actions/download-artifact@v3 with: From 3b2227ec9dbea7209bf63d7ff2f0a15b7422c1d8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 10:39:47 -0500 Subject: [PATCH 067/107] GH-1435 Make sure app_thread of test is always joined to avoid terminate --- tests/test_read_only_trx.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index db36eea1e9..e8f2574fd9 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -111,6 +111,10 @@ void test_trxs_common(std::vector& specific_args, bool test_disable plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); app->exec(); } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; auto[prod_plug, chain_plug] = plugin_fut.get(); From 0e364bee8bae2c61f79a19b086f568bf2ae5b394 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 12:23:40 -0400 Subject: [PATCH 068/107] Exclude `testwave` (and other unnecessary files) from boost install --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 02fe5ce94c..e1dc1367fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git" EXCLUDE) + PATTERN ".git|example|bench|testwave" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From be358ce3d5b0f1f222e3063e32e27c7dcd24e8b8 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 21 Jul 2023 11:32:24 -0500 Subject: [PATCH 069/107] Add upgrade step back in. Factor out update and upgrade into own step in libtester-tests job to simplify. 
--- .github/workflows/build.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 037bff97c4..8275dea347 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -91,7 +91,7 @@ jobs: cpack - name: Install dev package run: | - apt-get update + apt-get update && apt-get upgrade -y apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | @@ -200,6 +200,11 @@ jobs: runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: + - name: Update Package Index & Upgrade Packages + run: | + apt-get update + apt-get upgrade -y + # LEAP - if: ${{ matrix.test != 'deb-install' }} name: Clone leap @@ -236,7 +241,6 @@ jobs: - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | - apt-get update export DEBIAN_FRONTEND='noninteractive' export TZ='Etc/UTC' apt-get install -y ./*.deb @@ -255,7 +259,6 @@ jobs: token: ${{github.token}} - name: Install cdt Packages run: | - apt-get update apt-get install -y ./*.deb rm ./*.deb From 60ef3a9153f0f4f6a160c7d319cb88c7c99d43f6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 13:03:56 -0400 Subject: [PATCH 070/107] try again to not add `testwave` to the deb install. --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e1dc1367fb..78ae406d1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,7 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git|example|bench|testwave" EXCLUDE) + PATTERN "(\\.git|example|bench|testwave)" EXCLUDE) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 0323a206c04f620d9322501c48cded571c988a2b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 13:28:35 -0400 Subject: [PATCH 071/107] Again remove `python3-distutils` from docker files --- .cicd/platforms/ubuntu20.Dockerfile | 1 - .cicd/platforms/ubuntu22.Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index 6d27a13fae..c60c53f5bb 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1d86365382..fd943f7043 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \ libssl-dev \ llvm-11-dev \ ninja-build \ - python3-distutils \ python3-numpy \ ubuntu-dev-tools \ zlib1g-dev \ From 6eb19aadb93371791b6c6adb971c03ff85b3cac8 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:19:56 -0400 Subject: [PATCH 072/107] Another attempt at filtering testwave --- CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 78ae406d1b..f4d30e3223 100644 --- a/CMakeLists.txt +++ 
b/CMakeLists.txt @@ -276,7 +276,11 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN "(\\.git|example|bench|testwave)" EXCLUDE) + PATTERN "\\.git" EXCLUDE + PATTERN "/example" EXCLUDE + PATTERN "/bench" EXCLUDE + PATTERN "/testvawe" EXCLUDE + ) add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" From 6c2040a289fe1b985bc5c3bfbbae56c0318abd5b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:24:19 -0400 Subject: [PATCH 073/107] Some more exclude patterns to make the dev install smaller --- CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index f4d30e3223..b6bc2e1069 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,13 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "\\.git" EXCLUDE PATTERN "/example" EXCLUDE PATTERN "/bench" EXCLUDE + PATTERN "math/test" EXCLUDE + PATTERN "json/test" EXCLUDE + PATTERN "graph/test" EXCLUDE + PATTERN "gil/test" EXCLUDE + PATTERN "geometry/test" EXCLUDE + PATTERN "beast/test" EXCLUDE + PATTERN "/doc" EXCLUDE PATTERN "/testvawe" EXCLUDE ) From 5780dfef1ad155704f120636acdeea5bf68c6a7d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 14:44:03 -0400 Subject: [PATCH 074/107] Cleanup exclude patterns in CMakeLists.txt. --- CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b6bc2e1069..01d5c15013 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,17 +276,17 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN "\\.git" EXCLUDE - PATTERN "/example" EXCLUDE - PATTERN "/bench" EXCLUDE + PATTERN ".git" EXCLUDE + PATTERN "example" EXCLUDE + PATTERN "bench" EXCLUDE + PATTERN "doc" EXCLUDE + PATTERN "testvawe" EXCLUDE PATTERN "math/test" EXCLUDE PATTERN "json/test" EXCLUDE PATTERN "graph/test" EXCLUDE PATTERN "gil/test" EXCLUDE PATTERN "geometry/test" EXCLUDE PATTERN "beast/test" EXCLUDE - PATTERN "/doc" EXCLUDE - PATTERN "/testvawe" EXCLUDE ) add_custom_target(dev-install From bf6518b1cb6c33d7bf16a6c9e5f5fdac17d490a0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 15:05:02 -0500 Subject: [PATCH 075/107] GH-1435 Capture exception on app thread to avoid terminate call --- .../chain_plugin/test/test_trx_retry_db.cpp | 14 ++++++---- plugins/producer_plugin/test/test_options.cpp | 26 +++++++++++-------- .../producer_plugin/test/test_trx_full.cpp | 22 +++++++++------- tests/test_read_only_trx.cpp | 24 ++++++++++------- tests/test_snapshot_scheduler.cpp | 22 +++++++++------- 5 files changed, 64 insertions(+), 44 deletions(-) diff --git a/plugins/chain_plugin/test/test_trx_retry_db.cpp b/plugins/chain_plugin/test/test_trx_retry_db.cpp index 8c7a3925c9..cfad3ed512 100644 --- a/plugins/chain_plugin/test/test_trx_retry_db.cpp +++ b/plugins/chain_plugin/test/test_trx_retry_db.cpp @@ -224,11 +224,15 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { std::promise plugin_promise; std::future plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - std::vector argv = {"test"}; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - 
plugin_promise.set_value(app->find_plugin()); - app->exec(); + try { + std::vector argv = {"test"}; + app->initialize(argv.size(), (char**)&argv[0]); + app->startup(); + plugin_promise.set_value(app->find_plugin()); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); (void)plugin_fut.get(); // wait for app to be started diff --git a/plugins/producer_plugin/test/test_options.cpp b/plugins/producer_plugin/test/test_options.cpp index 23cdea785d..3fe429b6a9 100644 --- a/plugins/producer_plugin/test/test_options.cpp +++ b/plugins/producer_plugin/test/test_options.cpp @@ -30,17 +30,21 @@ BOOST_AUTO_TEST_CASE(state_dir) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", - "--data-dir", temp_dir_str.c_str(), - "--state-dir", custom_state_dir_str.c_str(), - "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", + "--data-dir", temp_dir_str.c_str(), + "--state-dir", custom_state_dir_str.c_str(), + "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index 34ddcc6ea9..129b135114 100644 --- a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -108,15 +108,19 @@ BOOST_AUTO_TEST_CASE(producer) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index e8f2574fd9..c55e3ae0aa 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -87,7 +87,7 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { test_configs_common(specific_args, app_init_status::succeeded); } -void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = 
false) { +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { try { fc::scoped_exit> on_exit = []() { chain::wasm_interface_collection::test_disable_tierup = false; }; @@ -102,14 +102,18 @@ void test_trxs_common(std::vector& specific_args, bool test_disable std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); - app->initialize( argv.size(), (char**) &argv[0] ); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); fc::scoped_exit> on_except = [&](){ if (app_thread.joinable()) @@ -176,7 +180,7 @@ void test_trxs_common(std::vector& specific_args, bool test_disable BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); BOOST_CHECK( trx_match.load() ); // trace should match the transaction -} +} FC_LOG_AND_RETHROW() } // test read-only trxs on main thread (no --read-only-threads) BOOST_AUTO_TEST_CASE(no_read_only_threads) { diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index bb8f6d8742..84c4410d5d 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -61,15 +61,19 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread([&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), - "-p", "eosio", "-e"}; - app->initialize(argv.size(), (char**) &argv[0]); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()}); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), + "-p", "eosio", "-e"}; + app->initialize(argv.size(), (char**) &argv[0]); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); }); auto [prod_plug, chain_plug] = plugin_fut.get(); From bc35fb5a1c2b68346ece0ab63d503f38574c236f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 16:28:37 -0500 Subject: [PATCH 076/107] GH-1435 Only activate protocol features and process trxs when building a block. 
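The helper rewritten below must run while the controller has a pending block, so it now re-posts its work until controller::is_building_block() reports true instead of assuming the first post lands inside a block. A minimal, standard-library-only sketch of that retry pattern (the function name and signature are invented for illustration; the real code posts the lambda to appbase's executor with priority::high and exec_queue::read_write, as the diff shows):

   #include <atomic>
   #include <chrono>
   #include <functional>
   #include <thread>

   // Hypothetical helper illustrating the retry pattern. In the patch the lambda is
   // posted to appbase's executor and runs on the application thread, hence the atomic
   // flag; this sketch simply calls it inline. `work` is expected to check its
   // precondition (e.g. "a block is being built") and return false until it holds.
   inline bool post_until_done(const std::function<bool()>& work,
                               int max_tries, std::chrono::milliseconds delay) {
      std::atomic<bool> done{false};
      for (int tries = 0; tries < max_tries && !done; ++tries) {
         if (work())
            done = true;
         else
            std::this_thread::sleep_for(delay);   // roughly one block interval in the test
      }
      return done;
   }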
--- unittests/test_utils.hpp | 125 +++++++++++++++++++++++++-------------- 1 file changed, 79 insertions(+), 46 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index d14815580e..e24d0759cb 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -4,11 +4,15 @@ #include #include #include +#include #include #include #include +#include + #include +#include #include #include #include @@ -60,7 +64,7 @@ auto make_bios_ro_trx(eosio::chain::controller& control) { // Push an input transaction to controller and return trx trace // If account is eosio then signs with the default private key -auto push_input_trx(eosio::chain::controller& control, account_name account, signed_transaction& trx) { +auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, signed_transaction& trx) { trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)}; trx.set_reference_block( control.head_block_id() ); if (account == config::system_account_name) { @@ -70,13 +74,42 @@ auto push_input_trx(eosio::chain::controller& control, account_name account, sig trx.sign(testing::tester::get_private_key(account, "active"), control.get_chain_id()); } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - auto fut = transaction_metadata::start_recover_keys( ptrx, control.get_thread_pool(), control.get_chain_id(), fc::microseconds::maximum(), transaction_metadata::trx_type::input ); - auto r = control.push_transaction( fut.get(), fc::time_point::maximum(), fc::microseconds::maximum(), 0, false, 0 ); - return r; + + std::promise trx_promise; + std::future trx_future = trx_promise.get_future(); + + app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, &trx_promise]() { + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::input, // trx_type + true, // return_failure_traces + [&trx_promise](const next_function_variant& result) { + if( std::holds_alternative( result ) ) { + try { + std::get(result)->dynamic_rethrow_exception(); + } catch(...) { + trx_promise.set_exception(std::current_exception()); + } + } else if ( std::get( result )->except ) { + try { + std::get(result)->except->dynamic_rethrow_exception(); + } catch(...) 
{ + trx_promise.set_exception(std::current_exception()); + } + } else { + trx_promise.set_value(std::get(result)); + } + }); + }); + + if (trx_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) + throw std::runtime_error("failed to execute trx: " + ptrx->get_transaction().actions.at(0).name.to_string() + " to account: " + account.to_string()); + + return trx_future.get(); } // Push setcode trx to controller and return trx trace -auto set_code(eosio::chain::controller& control, account_name account, const vector& wasm) { +auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, const vector& wasm) { signed_transaction trx; trx.actions.emplace_back(std::vector{{account, config::active_name}}, chain::setcode{ @@ -85,56 +118,56 @@ auto set_code(eosio::chain::controller& control, account_name account, const vec .vmversion = 0, .code = bytes(wasm.begin(), wasm.end()) }); - return push_input_trx(control, account, trx); + return push_input_trx(app, control, account, trx); } void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::promise feature_promise; - std::future feature_future = feature_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_promise](){ - const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); - auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); - BOOST_CHECK( preactivate_feature_digest ); - chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); - std::vector pfs{ - builtin_protocol_feature_t::only_link_to_existing_permission, - builtin_protocol_feature_t::replace_deferred, - builtin_protocol_feature_t::no_duplicate_deferred_id, - builtin_protocol_feature_t::fix_linkauth_restriction, - builtin_protocol_feature_t::disallow_empty_producer_schedule, - builtin_protocol_feature_t::restrict_action_to_self, - builtin_protocol_feature_t::only_bill_first_authorizer, - builtin_protocol_feature_t::forward_setcode, - builtin_protocol_feature_t::get_sender, - builtin_protocol_feature_t::ram_restrictions, - builtin_protocol_feature_t::webauthn_key, - builtin_protocol_feature_t::wtmsig_block_signatures }; - for (const auto t : pfs) { - auto feature_digest = pfm.get_builtin_digest(t); - BOOST_CHECK( feature_digest ); - chain_plug->chain().preactivate_feature( *feature_digest, false ); - } - feature_promise.set_value(); - }); + std::atomic feature_set = false; + // has to execute when pending block is not null + for (int tries = 0; tries < 100; ++tries) { + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_set](){ + try { + if (!chain_plug->chain().is_building_block() || feature_set) + return; + const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + BOOST_CHECK( preactivate_feature_digest ); + chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); + std::vector pfs{ + builtin_protocol_feature_t::only_link_to_existing_permission, + builtin_protocol_feature_t::replace_deferred, + builtin_protocol_feature_t::no_duplicate_deferred_id, + builtin_protocol_feature_t::fix_linkauth_restriction, + builtin_protocol_feature_t::disallow_empty_producer_schedule, + builtin_protocol_feature_t::restrict_action_to_self, + 
builtin_protocol_feature_t::only_bill_first_authorizer, + builtin_protocol_feature_t::forward_setcode, + builtin_protocol_feature_t::get_sender, + builtin_protocol_feature_t::ram_restrictions, + builtin_protocol_feature_t::webauthn_key, + builtin_protocol_feature_t::wtmsig_block_signatures }; + for (const auto t : pfs) { + auto feature_digest = pfm.get_builtin_digest(t); + BOOST_CHECK( feature_digest ); + chain_plug->chain().preactivate_feature( *feature_digest, false ); + } + feature_set = true; + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"exception setting protocol features"); + }); + if (feature_set) + break; + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } // Wait for next block std::this_thread::sleep_for( std::chrono::milliseconds(config::block_interval_ms) ); - if (feature_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to preactivate features"); - - std::promise setcode_promise; - std::future setcode_future = setcode_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &setcode_promise](){ - auto r = set_code(chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); - BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); - setcode_promise.set_value(); - }); - - if (setcode_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to setcode"); + auto r = set_code(app, chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); + BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); } From b3c9eba384f8311dd2bf5517c673322b6a5b49f4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 21 Jul 2023 16:56:02 -0500 Subject: [PATCH 077/107] GH-1416 Only update provided genesis.json if explicitly provided with values --- tests/TestHarness/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 1f420d5fa1..9fca7c85c4 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage - genesis['initial_configuration']['max_transaction_cpu_usage'] = 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage + if self.args.max_block_cpu_usage is not None: genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage + if self.args.max_transaction_cpu_usage is not None: genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): From 2f55f154ca812fae5a2a2c697e014962c97ce620 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 21 Jul 2023 17:26:28 -0500 Subject: [PATCH 078/107] Revise net_plugin p2p-listen-endpoint parsing for readability. Exercise multiple listen endpoints, with overrides, in p2p test. 
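The parsing change below pulls the p2p-listen-endpoint values into a local vector, skips normalization when the first value is empty, removes duplicates with sort/unique, and validates each address length. A free-standing sketch of that normalization (hypothetical function, shown only to make the control flow explicit; the plugin performs these steps inline and reports violations through EOS_ASSERT):

   #include <algorithm>
   #include <cstddef>
   #include <stdexcept>
   #include <string>
   #include <vector>

   // Hypothetical free function mirroring the inline logic in the net plugin.
   inline std::vector<std::string>
   normalize_listen_endpoints(std::vector<std::string> addrs, std::size_t max_len) {
      if (addrs.empty() || addrs.front().empty())
         return {};                      // explicitly empty value: nothing to normalize
      std::sort(addrs.begin(), addrs.end());                             // sort + unique
      addrs.erase(std::unique(addrs.begin(), addrs.end()), addrs.end()); // drops duplicates
      for (const auto& a : addrs)
         if (a.size() > max_len)         // the plugin enforces this with EOS_ASSERT
            throw std::invalid_argument("p2p-listen-endpoint too long: " + a);
      return addrs;
   }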
--- plugins/net_plugin/net_plugin.cpp | 29 ++++++++++++++----------- tests/p2p_multiple_listen_test.py | 36 +++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9504c555b2..1328e1195a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3966,19 +3966,22 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && !options.at("p2p-listen-endpoint").as>().empty() && !options.at("p2p-listen-endpoint").as>()[0].empty()) { - p2p_addresses = options.at( "p2p-listen-endpoint" ).as>(); - auto addr_count = p2p_addresses.size(); - std::sort(p2p_addresses.begin(), p2p_addresses.end()); - auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); - p2p_addresses.erase(last, p2p_addresses.end()); - if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { - fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); - } - for( const auto& addr : p2p_addresses ) { - EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint ${a} too long, must be less than ${m}", - ("a", addr)("m", max_p2p_address_length) ); + if( options.count( "p2p-listen-endpoint" )) { + auto p2ps = options.at("p2p-listen-endpoint").as>(); + if (!p2ps.front().empty()) { + p2p_addresses = p2ps; + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for( const auto& addr : p2p_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } } if( options.count( "p2p-server-address" ) ) { diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py index f21d07d46a..1b2948c7ec 100755 --- a/tests/p2p_multiple_listen_test.py +++ b/tests/p2p_multiple_listen_test.py @@ -20,7 +20,7 @@ pnodes=args.p delay=args.d debug=args.v -total_nodes=4 +total_nodes=5 dumpErrorDetails=args.dump_error_details Utils.Debug=debug @@ -38,35 +38,53 @@ Print("Stand up cluster") specificArgs = { - '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address localhost:9779 --plugin eosio::net_api_plugin', + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9876 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address ext-ip0:20000 --p2p-server-address ext-ip1:20001 --plugin eosio::net_api_plugin', '2': '--agent-name node-02 --p2p-peer-address localhost:9779 --plugin eosio::net_api_plugin', + '4': '--agent-name node-04 --p2p-peer-address localhost:9876 --plugin eosio::net_api_plugin', } - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='ring', delay=delay, + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='line', delay=delay, specificExtraNodeosArgs=specificArgs) is False: errorExit("Failed to stand up eos cluster.") - + + # Be sure all nodes start out connected cluster.waitOnClusterSync(blockAdvancing=5) + # Shut down 
bios node, which is connected to all other nodes in all topologies cluster.biosNode.kill(signal.SIGTERM) + # Shut down second node, interrupting the default connections between it and nodes 0 and 3 cluster.getNode(1).kill(signal.SIGTERM) + # Shut down the fourth node, interrupting the default connections between it and nodes 3 and 5 cluster.getNode(3).kill(signal.SIGTERM) + # Be sure all remaining nodes continue to sync via the two listen ports on node 0 cluster.waitOnClusterSync(blockAdvancing=5) connections = cluster.nodes[0].processUrllibRequest('net', 'connections') open_socket_count = 0 for conn in connections['payload']: if conn['is_socket_open']: open_socket_count += 1 - assert conn['last_handshake']['agent'] == 'node-02', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-02' - assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9878', 'Connected node is not listening on port 9878' - assert open_socket_count == 1, 'Node 0 is expected to have only one open socket' + if conn['last_handshake']['agent'] == 'node-02': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9878', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9878" + elif conn['last_handshake']['agent'] == 'node-04': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9880', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9880" + assert open_socket_count == 2, 'Node 0 is expected to have only two open sockets' + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') open_socket_count = 0 for conn in connections['payload']: if conn['is_socket_open']: open_socket_count += 1 - assert conn['last_handshake']['agent'] == 'node-00', f'Connected node identifed as "{conn["last_handshake"]["agent"]}" instead of node-00' - assert conn['last_handshake']['p2p_address'][:14] == 'localhost:9779', 'Connected node is not listening on port 9779' + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip0:20000', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip0:20000" assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + connections = cluster.nodes[4].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip1:20001', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]} 'instead of ext-ip1:20001" + assert open_socket_count == 1, 'Node 4 is expected to have only one open socket' + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) From 8537bd3e718c8285e7aa6d75ca0598963983a2a3 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 19:06:34 -0400 Subject: [PATCH 079/107] Update exclude patterns in install() for dev package --- CMakeLists.txt | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 
01d5c15013..c7a561ba9a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,17 +276,17 @@ install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CM install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost COMPONENT dev EXCLUDE_FROM_ALL - PATTERN ".git" EXCLUDE - PATTERN "example" EXCLUDE - PATTERN "bench" EXCLUDE - PATTERN "doc" EXCLUDE - PATTERN "testvawe" EXCLUDE - PATTERN "math/test" EXCLUDE - PATTERN "json/test" EXCLUDE - PATTERN "graph/test" EXCLUDE - PATTERN "gil/test" EXCLUDE - PATTERN "geometry/test" EXCLUDE - PATTERN "beast/test" EXCLUDE + PATTERN ".git/*" EXCLUDE + PATTERN "example/*" EXCLUDE + PATTERN "bench/*" EXCLUDE + PATTERN "doc/*" EXCLUDE + PATTERN "testwave/*" EXCLUDE + PATTERN "math/test/*" EXCLUDE + PATTERN "json/test/*" EXCLUDE + PATTERN "graph/test/*" EXCLUDE + PATTERN "gil/test/*" EXCLUDE + PATTERN "geometry/test/*" EXCLUDE + PATTERN "beast/test/*" EXCLUDE ) add_custom_target(dev-install From 2d6aff43d1995b2828fa28ae8021207364665771 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 21 Jul 2023 19:10:58 -0400 Subject: [PATCH 080/107] Use `file` as a dependency instead of `ubuntu-dev-tools` --- .cicd/platforms/ubuntu20.Dockerfile | 2 +- .cicd/platforms/ubuntu22.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index c60c53f5bb..e9c3a1d4b6 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -12,6 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index fd943f7043..57d49fe026 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -12,6 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \ llvm-11-dev \ ninja-build \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev \ zstd From 4a4875ecba277668e81f3397541d48cc1a38aeef Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 21 Jul 2023 21:32:18 -0500 Subject: [PATCH 081/107] Add test to verify nodeos can run with p2p disabled. 
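The new test starts nodeos with an empty --p2p-listen-endpoint and then expects a TCP connect to the default p2p port to be refused. A rough C++ equivalent of that probe, shown only to illustrate the check (port_is_closed is a hypothetical helper; the actual test below uses Python's socket.connect_ex):

    #include <arpa/inet.h>
    #include <cerrno>
    #include <cstdint>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Returns true when nothing is accepting TCP connections on 127.0.0.1:port,
    // i.e. connect() fails with ECONNREFUSED.
    bool port_is_closed(std::uint16_t port) {
       int fd = ::socket(AF_INET, SOCK_STREAM, 0);
       if (fd < 0)
          return false;
       sockaddr_in addr{};
       addr.sin_family = AF_INET;
       addr.sin_port   = htons(port);
       ::inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
       const bool refused = ::connect(fd, reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)) == -1
                            && errno == ECONNREFUSED;
       ::close(fd);
       return refused;
    }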
--- tests/CMakeLists.txt | 3 ++ tests/p2p_no_listen_test.py | 76 +++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100755 tests/p2p_no_listen_test.py diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d57718db4f..6359798055 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -50,6 +50,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BIN configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -186,6 +187,8 @@ set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME p2p_no_listen_test COMMAND tests/p2p_no_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_no_listen_test PROPERTY LABELS nonparallelizable_tests) # needs iproute-tc or iproute2 depending on platform #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/p2p_no_listen_test.py b/tests/p2p_no_listen_test.py new file mode 100755 index 0000000000..76b3c76886 --- /dev/null +++ b/tests/p2p_no_listen_test.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +import errno +import pathlib +import shutil +import signal +import socket +import time + +from TestHarness import Node, TestHelper, Utils + +############################################################### +# p2p_no_listen_test +# +# Test nodeos disabling p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"--keep-logs","-v","--leave-running","--unshared"}) +debug=args.v + +Utils.Debug=debug +testSuccessful=False + +try: + TestHelper.printSystemInfo("BEGIN") + + cmd = [ + Utils.EosServerPath, + '-e', + '-p', + 'eosio', + '--p2p-listen-endpoint', + '', + '--plugin', + 'eosio::chain_api_plugin', + '--config-dir', + Utils.ConfigDir, + '--data-dir', + Utils.DataDir, + '--http-server-address', + 'localhost:8888' + ] + node = Node('localhost', '8888', '00', data_dir=pathlib.Path(Utils.DataDir), + config_dir=pathlib.Path(Utils.ConfigDir), cmd=cmd) + + time.sleep(1) + if not node.verifyAlive(): + raise RuntimeError + time.sleep(10) + node.waitForBlock(5) + + s = socket.socket() + err = s.connect_ex(('localhost',9876)) + assert err == errno.ECONNREFUSED, 'Connection to port 9876 must be refused' + + testSuccessful=True +finally: + Utils.ShuttingDown=True + + if not args.leave_running: + 
node.kill(signal.SIGTERM) + + if not (args.leave_running or args.keep_logs or not testSuccessful): + shutil.rmtree(Utils.DataPath, ignore_errors=True) + + if testSuccessful: + Utils.Print("Test succeeded.") + else: + Utils.Print("Test failed.") + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From 3b45daafc1f5043f7dce0a3df34605890d9aa130 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 23 Jul 2023 18:18:46 -0400 Subject: [PATCH 082/107] on cmake 3.22+ compress .deb packages via zstd --- package.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/package.cmake b/package.cmake index ff3aebbd4b..c61b380898 100644 --- a/package.cmake +++ b/package.cmake @@ -46,6 +46,9 @@ set(CPACK_PACKAGE_HOMEPAGE_URL "https://github.com/AntelopeIO/leap") set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_BASE_PACKAGE_SECTION "utils") +if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) + set(CPACK_DEBIAN_COMPRESSION_TYPE "zstd") +endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") From 60eabad15e9a980b515c040baf107f85edf1f83b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 08:46:59 -0400 Subject: [PATCH 083/107] Update package list in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e979c6d509..1ffaa053e1 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ sudo apt-get install -y \ libssl-dev \ llvm-11-dev \ python3-numpy \ - ubuntu-dev-tools \ + file \ zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: From 6a51290961f7656e17b0c9d768f60a5a4c230eae Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 08:50:15 -0400 Subject: [PATCH 084/107] Update package list in `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index b2000e1ed5..63989e7535 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From b114f3bf7d9d431b879aa4513f4f289ca643732c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 08:50:00 -0500 Subject: [PATCH 085/107] GH-1435 Let scope_exit always do the app_thread join. 
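Moving the promise/future setup and the plugin checks into a single scope lets the scoped-exit guard perform the join on every exit path. A minimal sketch of the same RAII join idea, assuming only the standard library (join_guard is an illustrative stand-in, not fc::scoped_exit itself):

    #include <thread>

    // Joins the wrapped thread on every exit path: normal return, early return,
    // or an exception thrown by a later check in the same scope.
    class join_guard {
    public:
       explicit join_guard(std::thread& t) : t_(t) {}
       join_guard(const join_guard&)            = delete;
       join_guard& operator=(const join_guard&) = delete;
       ~join_guard() {
          if (t_.joinable())
             t_.join();
       }
    private:
       std::thread& t_;
    };

With this shape, an exception raised by any BOOST_CHECK inside the scope still unwinds through the guard, so the app thread can never be left joinable when the scope ends.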
--- tests/test_read_only_trx.cpp | 133 ++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 66 deletions(-) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index c55e3ae0aa..8bb3a83a9d 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -98,31 +98,6 @@ void test_trxs_common(std::vector& specific_args, bool test_disable appbase::scoped_app app; auto temp_dir_str = temp.path().string(); producer_plugin::set_test_mode(true); - - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - try { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert(argv.end(), specific_args.begin(), specific_args.end()); - app->initialize(argv.size(), (char**)&argv[0]); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); - app->exec(); - return; - } FC_LOG_AND_DROP() - BOOST_CHECK(!"app threw exception see logged error"); - } ); - fc::scoped_exit> on_except = [&](){ - if (app_thread.joinable()) - app_thread.join(); - }; - - auto[prod_plug, chain_plug] = plugin_fut.get(); - - activate_protocol_features_set_bios_contract(app, chain_plug); std::atomic next_calls = 0; std::atomic num_get_account_calls = 0; @@ -131,50 +106,76 @@ void test_trxs_common(std::vector& specific_args, bool test_disable std::atomic trx_match = true; const size_t num_pushes = 4242; - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); 
+ + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); } - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms );; - } - - app->quit(); - app_thread.join(); - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it BOOST_CHECK_EQUAL( num_pushes, num_posts ); BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); From 2f24964035d4015dd52c577baaec0070b2826c17 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 09:59:32 -0400 Subject: [PATCH 086/107] Add `ubuntu-dev-tools` back. 
otherwise install fails on ubuntu20 with: `debconf: falling back to frontend: Readline` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index 63989e7535..b2000e1ed5 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 4977b08add7c6155de7971487787dc405f5dcbd6 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 10:03:15 -0400 Subject: [PATCH 087/107] Simplify exclude patterns. --- CMakeLists.txt | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c7a561ba9a..d5ca041374 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -280,13 +280,7 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "example/*" EXCLUDE PATTERN "bench/*" EXCLUDE PATTERN "doc/*" EXCLUDE - PATTERN "testwave/*" EXCLUDE - PATTERN "math/test/*" EXCLUDE - PATTERN "json/test/*" EXCLUDE - PATTERN "graph/test/*" EXCLUDE - PATTERN "gil/test/*" EXCLUDE - PATTERN "geometry/test/*" EXCLUDE - PATTERN "beast/test/*" EXCLUDE + PATTERN "libs/*/test" EXCLUDE ) add_custom_target(dev-install From ee7829dd9f9d3584118f1c6295190704995fd944 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:04:33 -0500 Subject: [PATCH 088/107] GH-1435 Protect against accessing destroyed types --- unittests/test_utils.hpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index e24d0759cb..5bf3b74675 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -75,29 +75,29 @@ auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - std::promise trx_promise; - std::future trx_future = trx_promise.get_future(); + std::shared_ptr> trx_promise = std::make_shared>(); + std::future trx_future = trx_promise->get_future(); - app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, &trx_promise]() { + app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() { app->get_method()(ptrx, false, // api_trx transaction_metadata::trx_type::input, // trx_type true, // return_failure_traces - [&trx_promise](const next_function_variant& result) { + [trx_promise](const next_function_variant& result) { if( std::holds_alternative( result ) ) { try { std::get(result)->dynamic_rethrow_exception(); } catch(...) 
{ - trx_promise.set_exception(std::current_exception()); + trx_promise->set_exception(std::current_exception()); } } else if ( std::get( result )->except ) { try { std::get(result)->except->dynamic_rethrow_exception(); } catch(...) { - trx_promise.set_exception(std::current_exception()); + trx_promise->set_exception(std::current_exception()); } } else { - trx_promise.set_value(std::get(result)); + trx_promise->set_value(std::get(result)); } }); }); @@ -124,12 +124,12 @@ auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, accou void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::atomic feature_set = false; + std::shared_ptr> feature_set = std::make_shared>(false); // has to execute when pending block is not null for (int tries = 0; tries < 100; ++tries) { - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_set](){ + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){ try { - if (!chain_plug->chain().is_building_block() || feature_set) + if (!chain_plug->chain().is_building_block() || *feature_set) return; const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); @@ -153,12 +153,12 @@ void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chai BOOST_CHECK( feature_digest ); chain_plug->chain().preactivate_feature( *feature_digest, false ); } - feature_set = true; + *feature_set = true; return; } FC_LOG_AND_DROP() BOOST_CHECK(!"exception setting protocol features"); }); - if (feature_set) + if (*feature_set) break; std::this_thread::sleep_for(std::chrono::milliseconds(50)); } From a3c571d14dee13bb1032ad9c822b1083af9e6205 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:19:24 -0500 Subject: [PATCH 089/107] GH-1435 Use auto --- unittests/test_utils.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index 5bf3b74675..974fee3927 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -75,7 +75,7 @@ auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - std::shared_ptr> trx_promise = std::make_shared>(); + auto trx_promise = std::make_shared>(); std::future trx_future = trx_promise->get_future(); app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() { @@ -124,7 +124,7 @@ auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, accou void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::shared_ptr> feature_set = std::make_shared>(false); + auto feature_set = std::make_shared>(false); // has to execute when pending block is not null for (int tries = 0; tries < 100; ++tries) { app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){ From 632e728d77604c0c643c29b36a4b0b24c310d151 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 09:19:40 -0500 Subject: [PATCH 090/107] GH-1435 Re-indent --- tests/test_read_only_trx.cpp | 184 ++++++++++++++++++----------------- 1 file changed, 93 insertions(+), 91 deletions(-) diff --git a/tests/test_read_only_trx.cpp 
b/tests/test_read_only_trx.cpp index 8bb3a83a9d..49134a54a7 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -87,101 +87,103 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { test_configs_common(specific_args, app_init_status::succeeded); } -void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { try { - fc::scoped_exit> on_exit = []() { - chain::wasm_interface_collection::test_disable_tierup = false; - }; - chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; - - using namespace std::chrono_literals; - fc::temp_directory temp; - appbase::scoped_app app; - auto temp_dir_str = temp.path().string(); - producer_plugin::set_test_mode(true); - - std::atomic next_calls = 0; - std::atomic num_get_account_calls = 0; - std::atomic num_posts = 0; - std::atomic trace_with_except = 0; - std::atomic trx_match = true; - const size_t num_pushes = 4242; - - { - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - try { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert(argv.end(), specific_args.begin(), specific_args.end()); - app->initialize(argv.size(), (char**)&argv[0]); - app->find_plugin()->chain(); - app->startup(); - plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); - app->exec(); - return; - } FC_LOG_AND_DROP() - BOOST_CHECK(!"app threw exception see logged error"); - } ); - fc::scoped_exit> on_except = [&](){ - if (app_thread.joinable()) - app_thread.join(); +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { + try { + fc::scoped_exit> on_exit = []() { + chain::wasm_interface_collection::test_disable_tierup = false; }; - - auto[prod_plug, chain_plug] = plugin_fut.get(); - - activate_protocol_features_set_bios_contract(app, chain_plug); - - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = i % 3 == 0 ? 
make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; + chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; + + using namespace std::chrono_literals; + fc::temp_directory temp; + appbase::scoped_app app; + auto temp_dir_str = temp.path().string(); + producer_plugin::set_test_mode(true); + + std::atomic next_calls = 0; + std::atomic num_get_account_calls = 0; + std::atomic num_posts = 0; + std::atomic trace_with_except = 0; + std::atomic trx_match = true; + const size_t num_pushes = 4242; + + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); + + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? 
make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); } - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms ); - } - - app->quit(); - } - - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it - BOOST_CHECK_EQUAL( num_pushes, num_posts ); - BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); - BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); - BOOST_CHECK( trx_match.load() ); // trace should match the transaction -} FC_LOG_AND_RETHROW() } + BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it + BOOST_CHECK_EQUAL( num_pushes, num_posts ); + BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); + BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); + BOOST_CHECK( trx_match.load() ); // trace should 
match the transaction + } FC_LOG_AND_RETHROW() +} // test read-only trxs on main thread (no --read-only-threads) BOOST_AUTO_TEST_CASE(no_read_only_threads) { From b784e141d6846c5da817c93b5a7f54fea14e0803 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:46:12 -0400 Subject: [PATCH 091/107] set DEBIAN_FRONTEND & TZ globally during libtester job --- .github/workflows/build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8275dea347..7fa8aa236a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -199,6 +199,9 @@ jobs: test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + env: + DEBIAN_FRONTEND: noninteractive + TZ: Etc/UTC steps: - name: Update Package Index & Upgrade Packages run: | @@ -241,8 +244,6 @@ jobs: - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | - export DEBIAN_FRONTEND='noninteractive' - export TZ='Etc/UTC' apt-get install -y ./*.deb rm ./*.deb From 8332a802f0a0fd6528e4cf0903afc2754773b3cc Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 10:57:46 -0400 Subject: [PATCH 092/107] Exclude test directories from `boost/tools` --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index d5ca041374..058a270774 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -281,6 +281,7 @@ install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" PATTERN "bench/*" EXCLUDE PATTERN "doc/*" EXCLUDE PATTERN "libs/*/test" EXCLUDE + PATTERN "tools/*/test" EXCLUDE ) add_custom_target(dev-install From a8b17a2b2ceb9ea43a0cd80f51b29f7cfb32b6f9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 11:06:05 -0400 Subject: [PATCH 093/107] Remove again `ubuntu-dev-tools` from package.cmake --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index b2000e1ed5..63989e7535 100644 --- a/package.cmake +++ b/package.cmake @@ -61,7 +61,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, ubuntu-dev-tools, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 144f99f57be68bcf39a9d6459437974f681d7495 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 13:36:59 -0500 Subject: [PATCH 094/107] GH-1417 Refactor block_time_tracker to use RAII for simplify use. 
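The refactor below replaces manual add_success_time/add_fail_time bookkeeping with an object whose destructor records the elapsed time, so a missed call can no longer skew the per-block report. A compressed sketch of that pattern, using illustrative names rather than the plugin's own types:

    #include <chrono>

    // The destructor books elapsed time into a bucket chosen by the caller; the
    // default bucket is "fail", so a forgotten mark_success() never hides a failure.
    struct scoped_timer {
       using clock = std::chrono::steady_clock;

       scoped_timer(std::chrono::microseconds& success_acc, std::chrono::microseconds& fail_acc)
          : success_(success_acc), fail_(fail_acc), start_(clock::now()) {}

       void mark_success() { success_result_ = true; }

       ~scoped_timer() {
          auto dur = std::chrono::duration_cast<std::chrono::microseconds>(clock::now() - start_);
          (success_result_ ? success_ : fail_) += dur;
       }

    private:
       std::chrono::microseconds& success_;
       std::chrono::microseconds& fail_;
       clock::time_point          start_;
       bool                       success_result_ = false;
    };

Defaulting to the failure bucket mirrors the tracker below: a transaction is only counted as successful when the call site explicitly says so.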
--- plugins/producer_plugin/producer_plugin.cpp | 165 ++++++++++++-------- 1 file changed, 100 insertions(+), 65 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 4c56d481d9..78e3b6a970 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -253,29 +253,58 @@ class account_failures { struct block_time_tracker { - void add_idle_time( const fc::microseconds& idle ) { - block_idle_time += idle; - } + struct trx_time_tracker { + enum class trx_status { success, fail, other }; - void add_fail_time( const fc::microseconds& fail_time, bool is_transient ) { - if( is_transient ) { - // transient time includes both success and fail time - transient_trx_time += fail_time; - ++transient_trx_num; - } else { - trx_fail_time += fail_time; - ++trx_fail_num; + trx_time_tracker(block_time_tracker& btt, bool transient) + : _block_time_tracker(btt), _is_transient(transient) {} + + trx_time_tracker(trx_time_tracker&&) = default; + + trx_time_tracker() = delete; + trx_time_tracker(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(trx_time_tracker&&) = delete; + + void trx_success() { _trx_status = trx_status::success; } + + // Neither success for fail, will be reported as other + void cancel() { _trx_status = trx_status::other; } + + // updates block_time_tracker + ~trx_time_tracker() { + switch (_trx_status) { + case trx_status::success: + _block_time_tracker.add_success_time(_is_transient); + break; + case trx_status::fail: + _block_time_tracker.add_fail_time(_is_transient); + break; + case trx_status::other: + break; // just reset timer which happens below + } + _block_time_tracker.start_idle_time(); } + + private: + block_time_tracker& _block_time_tracker; + trx_status _trx_status = trx_status::fail; + bool _is_transient; + }; + + trx_time_tracker start_trx(bool is_transient, fc::time_point now = fc::time_point::now()) { + last_trx_time_point = now; + return {*this, is_transient}; } - void add_success_time( const fc::microseconds& time, bool is_transient ) { - if( is_transient ) { - transient_trx_time += time; - ++transient_trx_num; - } else { - trx_success_time += time; - ++trx_success_num; - } + void start_idle_time(fc::time_point now = fc::time_point::now()) { + last_trx_time_point = now; + } + + fc::microseconds add_idle_time(fc::time_point now = fc::time_point::now()) { + auto dur = now - last_trx_time_point; + block_idle_time += dur; + return dur; } void report(uint32_t block_num, account_name producer) { @@ -283,19 +312,44 @@ struct block_time_tracker { auto now = fc::time_point::now(); fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", ("n", block_num)("p", producer) - ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) + ("i", block_idle_time)("t", now - clear_time_point)("sn", trx_success_num)("s", trx_success_time) ("fn", trx_fail_num)("f", trx_fail_time) ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); + ("o", (now - clear_time_point) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); } } void clear() { block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = 
fc::microseconds{}; trx_fail_num = trx_success_num = transient_trx_num = 0; - clear_time = fc::time_point::now(); + clear_time_point = last_trx_time_point = fc::time_point::now(); + } + + private: + void add_success_time(bool is_transient) { + auto now = fc::time_point::now(); + if( is_transient ) { + transient_trx_time += now - last_trx_time_point; + ++transient_trx_num; + } else { + trx_success_time += now - last_trx_time_point; + ++trx_success_num; + } } + void add_fail_time(bool is_transient) { + auto now = fc::time_point::now(); + if( is_transient ) { + // transient time includes both success and fail time + transient_trx_time += now - last_trx_time_point; + ++transient_trx_num; + } else { + trx_fail_time += now - last_trx_time_point; + ++trx_fail_num; + } + } + + private: fc::microseconds block_idle_time; uint32_t trx_success_num = 0; uint32_t trx_fail_num = 0; @@ -303,7 +357,8 @@ struct block_time_tracker { fc::microseconds trx_success_time; fc::microseconds trx_fail_time; fc::microseconds transient_trx_time; - fc::time_point clear_time{fc::time_point::now()}; + fc::time_point last_trx_time_point{fc::time_point::now()}; + fc::time_point clear_time_point{fc::time_point::now()}; }; } // anonymous namespace @@ -379,7 +434,6 @@ class producer_plugin_impl : public std::enable_shared_from_this _protocol_features_to_activate; bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block @@ -569,7 +623,6 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id, const block_state_ptr& bsp) { @@ -585,14 +638,14 @@ class producer_plugin_impl : public std::enable_shared_from_thiscalculate_id(); auto blk_num = block->block_num(); - auto now = fc::time_point::now(); if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); - _time_tracker.add_idle_time(now - _idle_trx_time); + _time_tracker.add_idle_time(now); EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id) ); @@ -725,28 +778,23 @@ class producer_plugin_impl : public std::enable_shared_from_this_idle_trx_time; - self->_time_tracker.add_idle_time( idle_time ); + auto idle_time = self->_time_tracker.add_idle_time(start); + auto trx_tracker = self->_time_tracker.start_trx(is_transient, start); fc_tlog( _log, "Time since last trx: ${t}us", ("t", idle_time) ); auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { - self->_time_tracker.add_idle_time( start - self->_idle_trx_time ); self->log_trx_results( trx, nullptr, ex, 0, start, is_transient ); next( std::move(ex) ); - self->_idle_trx_time = fc::time_point::now(); - auto dur = self->_idle_trx_time - start; - self->_time_tracker.add_fail_time(dur, is_transient); }; try { auto result = future.get(); - if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, next) ) { + if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, trx_tracker, next) ) { if( self->in_producing_mode() ) { self->schedule_maybe_produce_block( true ); } else { self->restart_speculative_block(); } } - self->_idle_trx_time = fc::time_point::now(); } CATCH_AND_CALL(exception_handler); } ); } @@ -756,6 +804,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& next) { bool exhausted = false; chain::controller& chain = 
chain_plug->chain(); @@ -783,12 +832,16 @@ class producer_plugin_impl : public std::enable_shared_from_thisis_transient()); return push_result{.failed = true}; } @@ -2329,14 +2381,6 @@ producer_plugin_impl::handle_push_result( const transaction_metadata_ptr& trx, auto end = fc::time_point::now(); push_result pr; if( trace->except ) { - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. - if ( chain.is_write_window() ) { - auto dur = end - start; - _time_tracker.add_fail_time(dur, trx->is_transient()); - } if( exception_is_exhausted( *trace->except ) ) { if( in_producing_mode() ) { fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", @@ -2376,14 +2420,6 @@ producer_plugin_impl::handle_push_result( const transaction_metadata_ptr& trx, } else { fc_tlog( _log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", end - start)); - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. - if ( chain.is_write_window() ) { - auto dur = end - start; - _time_tracker.add_success_time(dur, trx->is_transient()); - } log_trx_results( trx, trace, start ); // if producing then trx is in objective cpu account billing if (!disable_subjective_enforcement && _pending_block_mode != pending_block_mode::producing) { @@ -2522,13 +2558,13 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p try { auto start = fc::time_point::now(); + auto trx_tracker = _time_tracker.start_trx(false, start); // delayed transaction cannot be transient fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); auto trace = chain.push_scheduled_transaction(trx_id, deadline, max_trx_time, 0, false); auto end = fc::time_point::now(); if (trace->except) { - _time_tracker.add_fail_time(end - start, false); // delayed transaction cannot be transient if (exception_is_exhausted(*trace->except)) { if( block_is_exhausted() ) { exhausted = true; @@ -2548,7 +2584,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p num_failed++; } } else { - _time_tracker.add_success_time(end - start, false); // delayed transaction cannot be transient + trx_tracker.trx_success(); fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) @@ -2667,7 +2703,7 @@ void producer_plugin_impl::schedule_production_loop() { fc_dlog(_log, "Speculative Block Created"); } - _idle_trx_time = fc::time_point::now(); + _time_tracker.start_idle_time(); } void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { @@ -2777,7 +2813,7 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); @@ -2835,8 +2871,6 @@ void producer_plugin_impl::produce_block() { _time_tracker.report(new_bs->block_num, new_bs->block->producer); _time_tracker.clear(); - - _idle_trx_time = fc::time_point::now(); } void 
producer_plugin::received_block(uint32_t block_num) { @@ -2883,9 +2917,10 @@ void producer_plugin_impl::start_write_window() { app().executor().set_to_write_window(); chain.set_to_write_window(); chain.unset_db_read_only_mode(); - _idle_trx_time = _ro_window_deadline = fc::time_point::now(); + auto now = fc::time_point::now(); + _time_tracker.start_idle_time(now); - _ro_window_deadline += _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline + _ro_window_deadline = now + _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count()); _ro_timer.expires_from_now( expire_time ); _ro_timer.async_wait( app().executor().wrap( // stay on app thread @@ -2905,7 +2940,7 @@ void producer_plugin_impl::switch_to_read_window() { EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); EOS_ASSERT( _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty" ); - _time_tracker.add_idle_time( fc::time_point::now() - _idle_trx_time ); + _time_tracker.add_idle_time(); // we are in write window, so no read-only trx threads are processing transactions. if ( app().executor().read_only_queue().empty() ) { // no read-only tasks to process. stay in write window @@ -3024,10 +3059,10 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t chain.unset_db_read_only_mode(); }); + std::optional trx_tracker; if ( chain.is_write_window() ) { chain.set_db_read_only_mode(); - auto idle_time = fc::time_point::now() - _idle_trx_time; - _time_tracker.add_idle_time( idle_time ); + trx_tracker.emplace(_time_tracker.start_trx(true, start)); } // use read-window/write-window deadline if there are read/write windows, otherwise use block_deadline if only the app thead @@ -3044,8 +3079,8 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t _ro_exhausted_trx_queue.push_front( {std::move(trx), std::move(next)} ); } - if ( chain.is_write_window() ) { - _idle_trx_time = fc::time_point::now(); + if ( chain.is_write_window() && !pr.failed ) { + trx_tracker->trx_success(); } } catch ( const guard_exception& e ) { chain_plugin::handle_guard_exception(e); From f9e6188892e9e9b5b5a41c380378a4f117f296f9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 24 Jul 2023 14:38:08 -0400 Subject: [PATCH 095/107] remove `file` from `package.cmake` --- package.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.cmake b/package.cmake index bc567d3776..72e88c6c56 100644 --- a/package.cmake +++ b/package.cmake @@ -64,7 +64,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. 
llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, file, zlib1g-dev") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) From 56124be461dbfa9a63c046de2b09fc62dab220ee Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 14:14:27 -0500 Subject: [PATCH 096/107] GH-1417 Guard against double accounting --- plugins/producer_plugin/producer_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 78e3b6a970..5f51858ec3 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -281,9 +281,9 @@ struct block_time_tracker { _block_time_tracker.add_fail_time(_is_transient); break; case trx_status::other: - break; // just reset timer which happens below + _block_time_tracker.start_idle_time(); // just reset timer, will be in other category + break; } - _block_time_tracker.start_idle_time(); } private: @@ -304,6 +304,7 @@ struct block_time_tracker { fc::microseconds add_idle_time(fc::time_point now = fc::time_point::now()) { auto dur = now - last_trx_time_point; block_idle_time += dur; + last_trx_time_point = now; // guard against calling add_idle_time() twice in a row. return dur; } @@ -335,6 +336,7 @@ struct block_time_tracker { trx_success_time += now - last_trx_time_point; ++trx_success_num; } + last_trx_time_point = now; } void add_fail_time(bool is_transient) { @@ -347,6 +349,7 @@ struct block_time_tracker { trx_fail_time += now - last_trx_time_point; ++trx_fail_num; } + last_trx_time_point = now; } private: From 0499a9739c8409917ac2160dba98b307a13ce031 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 24 Jul 2023 16:06:08 -0400 Subject: [PATCH 097/107] update to asset-artifact-download-action@v3 --- .github/workflows/build.yaml | 3 +-- .github/workflows/ph_backward_compatibility.yaml | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 7fa8aa236a..60e2f7fc19 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -249,7 +249,7 @@ jobs: # CDT - name: Download cdt - uses: AntelopeIO/asset-artifact-download-action@v2 + uses: AntelopeIO/asset-artifact-download-action@v3 with: owner: AntelopeIO repo: cdt @@ -257,7 +257,6 @@ jobs: target: '${{needs.v.outputs.cdt-target}}' prereleases: ${{fromJSON(needs.v.outputs.cdt-prerelease)}} artifact-name: cdt_ubuntu_package_amd64 - token: ${{github.token}} - name: Install cdt Packages run: | apt-get install -y ./*.deb diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index e166c92eff..26037cbfee 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -42,13 +42,12 @@ jobs: run: | zstdcat build.tar.zst | tar x - name: Download Prev Leap Version - uses: AntelopeIO/asset-artifact-download-action@v2 + uses: 
AntelopeIO/asset-artifact-download-action@v3 with: owner: AntelopeIO repo: leap file: '(leap).*${{matrix.platform}}.04.*(x86_64|amd64).deb' target: '${{matrix.release}}' - token: ${{github.token}} - name: Install leap & replace binaries for PH use run: | apt-get update From c9f0ff7cfa05aa49f809b27485ad7e44359a2761 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 15:15:29 -0500 Subject: [PATCH 098/107] GH-1417 Add back in counting of trx in start_block missed in the refactor --- plugins/producer_plugin/producer_plugin.cpp | 23 ++++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 5f51858ec3..110fd90667 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -395,6 +395,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& next ); push_result handle_push_result( const transaction_metadata_ptr& trx, const next_function& next, @@ -840,11 +841,8 @@ class producer_plugin_impl : public std::enable_shared_from_this& next ) { auto start = fc::time_point::now(); @@ -2367,7 +2366,12 @@ producer_plugin_impl::push_transaction( const fc::time_point& block_deadline, auto trace = chain.push_transaction( trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill ); - return handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + auto pr = handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + + if (!pr.failed) { + trx_tracker.trx_success(); + } + return pr; } producer_plugin_impl::push_result @@ -2452,7 +2456,8 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin ++num_processed; try { - push_result pr = push_transaction( deadline, itr->trx_meta, false, itr->return_failure_trace, itr->next ); + auto trx_tracker = _time_tracker.start_trx(itr->trx_meta->is_transient()); + push_result pr = push_transaction( deadline, itr->trx_meta, false, itr->return_failure_trace, trx_tracker, itr->next ); exhausted = pr.block_exhausted; if( exhausted ) { @@ -2534,7 +2539,8 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p auto trx_meta = itr->trx_meta; bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next ); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next ); exhausted = pr.block_exhausted; if( pr.trx_exhausted ) { @@ -2631,7 +2637,8 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline auto trx_meta = itr->trx_meta; bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next ); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next ); exhausted = pr.block_exhausted; if( pr.trx_exhausted ) { From 1cc213bc9fc84622f676900fd16a8ad71432e8f6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 24 Jul 2023 17:45:49 -0500 Subject: 
[PATCH 099/107] GH-1417 Explicitly track other time. Add pause/unpause for when in read window. --- plugins/producer_plugin/producer_plugin.cpp | 90 ++++++++++++++------- 1 file changed, 61 insertions(+), 29 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 110fd90667..18d9721dcb 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -254,7 +254,7 @@ class account_failures { struct block_time_tracker { struct trx_time_tracker { - enum class trx_status { success, fail, other }; + enum class time_status { success, fail, other }; trx_time_tracker(block_time_tracker& btt, bool transient) : _block_time_tracker(btt), _is_transient(transient) {} @@ -266,90 +266,117 @@ struct block_time_tracker { trx_time_tracker& operator=(const trx_time_tracker&) = delete; trx_time_tracker& operator=(trx_time_tracker&&) = delete; - void trx_success() { _trx_status = trx_status::success; } + void trx_success() { _time_status = time_status::success; } // Neither success for fail, will be reported as other - void cancel() { _trx_status = trx_status::other; } + void cancel() { _time_status = time_status::other; } // updates block_time_tracker ~trx_time_tracker() { - switch (_trx_status) { - case trx_status::success: + switch (_time_status) { + case time_status::success: _block_time_tracker.add_success_time(_is_transient); break; - case trx_status::fail: + case time_status::fail: _block_time_tracker.add_fail_time(_is_transient); break; - case trx_status::other: - _block_time_tracker.start_idle_time(); // just reset timer, will be in other category + case time_status::other: + _block_time_tracker.add_other_time(); break; } } private: block_time_tracker& _block_time_tracker; - trx_status _trx_status = trx_status::fail; + time_status _time_status = time_status::fail; bool _is_transient; }; trx_time_tracker start_trx(bool is_transient, fc::time_point now = fc::time_point::now()) { - last_trx_time_point = now; + assert(!paused); + add_other_time(now); + last_time_point = now; return {*this, is_transient}; } - void start_idle_time(fc::time_point now = fc::time_point::now()) { - last_trx_time_point = now; + void add_other_time(fc::time_point now = fc::time_point::now()) { + assert(!paused); + other_time += now - last_time_point; + last_time_point = now; } fc::microseconds add_idle_time(fc::time_point now = fc::time_point::now()) { - auto dur = now - last_trx_time_point; + assert(!paused); + auto dur = now - last_time_point; block_idle_time += dur; - last_trx_time_point = now; // guard against calling add_idle_time() twice in a row. + last_time_point = now; // guard against calling add_idle_time() twice in a row. 
return dur; } + // assumes idle time before pause + void pause(fc::time_point now = fc::time_point::now()) { + assert(!paused); + add_idle_time(now); + paused = true; + } + + // assumes last call was to pause + void unpause(fc::time_point now = fc::time_point::now()) { + assert(paused); + paused = false; + auto pause_time = now - last_time_point; + clear_time_point += pause_time; + last_time_point = now; + } + void report(uint32_t block_num, account_name producer) { + using namespace std::string_literals; + assert(!paused); if( _log.is_enabled( fc::log_level::debug ) ) { auto now = fc::time_point::now(); - fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", + auto diff = now - clear_time_point - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time - other_time; + fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${ttn}, ${tt}us, other: ${o}us${rest}", ("n", block_num)("p", producer) ("i", block_idle_time)("t", now - clear_time_point)("sn", trx_success_num)("s", trx_success_time) ("fn", trx_fail_num)("f", trx_fail_time) - ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time_point) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); + ("ttn", transient_trx_num)("tt", transient_trx_time) + ("o", other_time)("rest", diff.count() > 5 ? ", diff: "s + std::to_string(diff.count()) + "us"s : ""s ) ); } } void clear() { - block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = fc::microseconds{}; + assert(!paused); + block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = other_time = fc::microseconds{}; trx_fail_num = trx_success_num = transient_trx_num = 0; - clear_time_point = last_trx_time_point = fc::time_point::now(); + clear_time_point = last_time_point = fc::time_point::now(); } private: void add_success_time(bool is_transient) { + assert(!paused); auto now = fc::time_point::now(); if( is_transient ) { - transient_trx_time += now - last_trx_time_point; + transient_trx_time += now - last_time_point; ++transient_trx_num; } else { - trx_success_time += now - last_trx_time_point; + trx_success_time += now - last_time_point; ++trx_success_num; } - last_trx_time_point = now; + last_time_point = now; } void add_fail_time(bool is_transient) { + assert(!paused); auto now = fc::time_point::now(); if( is_transient ) { // transient time includes both success and fail time - transient_trx_time += now - last_trx_time_point; + transient_trx_time += now - last_time_point; ++transient_trx_num; } else { - trx_fail_time += now - last_trx_time_point; + trx_fail_time += now - last_time_point; ++trx_fail_num; } - last_trx_time_point = now; + last_time_point = now; } private: @@ -360,8 +387,10 @@ struct block_time_tracker { fc::microseconds trx_success_time; fc::microseconds trx_fail_time; fc::microseconds transient_trx_time; - fc::time_point last_trx_time_point{fc::time_point::now()}; + fc::microseconds other_time; + fc::time_point last_time_point{fc::time_point::now()}; fc::time_point clear_time_point{fc::time_point::now()}; + bool paused = false; }; } // anonymous namespace @@ -621,6 +650,7 @@ class producer_plugin_impl : public std::enable_shared_from_this_time_tracker.pause(); // start_write_window assumes time_tracker is paused my->start_write_window(); } @@ -2713,7 +2744,7 @@ void 
producer_plugin_impl::schedule_production_loop() { fc_dlog(_log, "Speculative Block Created"); } - _time_tracker.start_idle_time(); + _time_tracker.add_other_time(); } void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { @@ -2879,6 +2910,7 @@ void producer_plugin_impl::produce_block() { ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time) ("confs", new_bs->header.confirmed)); + _time_tracker.add_other_time(); _time_tracker.report(new_bs->block_num, new_bs->block->producer); _time_tracker.clear(); } @@ -2928,7 +2960,7 @@ void producer_plugin_impl::start_write_window() { chain.set_to_write_window(); chain.unset_db_read_only_mode(); auto now = fc::time_point::now(); - _time_tracker.start_idle_time(now); + _time_tracker.unpause(now); _ro_window_deadline = now + _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count()); @@ -2950,7 +2982,7 @@ void producer_plugin_impl::switch_to_read_window() { EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); EOS_ASSERT( _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty" ); - _time_tracker.add_idle_time(); + _time_tracker.pause(); // we are in write window, so no read-only trx threads are processing transactions. if ( app().executor().read_only_queue().empty() ) { // no read-only tasks to process. stay in write window From 31f38898e752e30ccad0e8aaeacef2ccd94566a8 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 24 Jul 2023 19:40:15 -0500 Subject: [PATCH 100/107] Extend auto bp peering test to include p2p-server-address args. Add network topology diagrams to p2p_multiple_listen_test. 
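The pause()/unpause() pair added to block_time_tracker in PATCH 099 above brackets the read-only window: pause() folds the elapsed time into idle and stops the clock, and unpause() shifts the accounting window forward by the paused span, so read-window time lands in none of the idle, success, fail, transient, or other buckets. What follows is a minimal stand-alone sketch of that bookkeeping, using std::chrono in place of fc::time_point and inventing the names time_tracker_sketch and accounted_window(); it is an illustration under those assumptions, not the producer_plugin code itself.

// Simplified sketch (not the plugin's code) of the pause/unpause bookkeeping in PATCH 099:
// time spent while paused is excluded from the accounted window by advancing the window start.
#include <cassert>
#include <chrono>
#include <iostream>
#include <thread>

struct time_tracker_sketch {
   using clock = std::chrono::steady_clock;

   clock::duration   idle{};                        // accumulated idle time
   clock::time_point window_start = clock::now();   // plays the role of clear_time_point
   clock::time_point last         = clock::now();   // plays the role of last_time_point
   bool paused = false;

   void add_idle(clock::time_point now = clock::now()) {
      assert(!paused);
      idle += now - last;
      last = now;                                   // guard against double counting
   }

   void pause(clock::time_point now = clock::now()) {
      add_idle(now);                                // idle time is assumed to precede a pause
      paused = true;
   }

   void unpause(clock::time_point now = clock::now()) {
      assert(paused);
      paused = false;
      window_start += now - last;                   // drop the paused span from the window total
      last = now;
   }

   clock::duration accounted_window(clock::time_point now = clock::now()) const {
      return now - window_start;                    // the total that report() divides into buckets
   }
};

int main() {
   time_tracker_sketch t;
   std::this_thread::sleep_for(std::chrono::milliseconds(5));    // write-window idle time
   t.pause();                                                    // entering the read window
   std::this_thread::sleep_for(std::chrono::milliseconds(20));   // excluded from the window
   t.unpause();                                                  // back in the write window
   auto us = std::chrono::duration_cast<std::chrono::microseconds>(t.accounted_window());
   std::cout << "accounted window: " << us.count() << "us\n";    // roughly 5000us, not 25000us
}

With the 5 ms/20 ms sleeps above, the reported window is roughly 5 ms: the paused 20 ms never enters the total, which is why switch_to_read_window() calls pause() and start_write_window() calls unpause() in the patch.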
--- tests/auto_bp_peering_test.py | 22 ++++++++++++++-------- tests/p2p_multiple_listen_test.py | 18 ++++++++++++++---- 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/tests/auto_bp_peering_test.py b/tests/auto_bp_peering_test.py index 912ab10e3c..a55bdd8807 100755 --- a/tests/auto_bp_peering_test.py +++ b/tests/auto_bp_peering_test.py @@ -1,10 +1,8 @@ #!/usr/bin/env python3 -import re -import signal -import time +import socket -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, ReturnType +from TestHarness import Cluster, TestHelper, Utils, WalletMgr ############################################################### # auto_bp_peering_test @@ -35,7 +33,7 @@ dumpErrorDetails = args.dump_error_details keepLogs = args.keep_logs -# Setup cluster and it's wallet manager +# Setup cluster and its wallet manager walletMgr = WalletMgr(True) cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) @@ -47,12 +45,17 @@ for nodeId in range(0, producerNodes): producer_name = "defproducer" + chr(ord('a') + nodeId) port = cluster.p2pBasePort + nodeId - hostname = "localhost:" + str(port) + if producer_name == 'defproducerf': + hostname = 'ext-ip0:9999' + elif producer_name == 'defproducerk': + hostname = socket.gethostname() + ':9886' + else: + hostname = "localhost:" + str(port) peer_names[hostname] = producer_name auto_bp_peer_args += (" --p2p-auto-bp-peer " + producer_name + "," + hostname) -def neigbors_in_schedule(name, schedule): +def neighbors_in_schedule(name, schedule): index = schedule.index(name) result = [] num = len(schedule) @@ -71,6 +74,9 @@ def neigbors_in_schedule(name, schedule): for nodeId in range(0, producerNodes): specificNodeosArgs[nodeId] = auto_bp_peer_args + specificNodeosArgs[5] = specificNodeosArgs[5] + ' --p2p-server-address ext-ip0:9999' + specificNodeosArgs[10] = specificNodeosArgs[10] + ' --p2p-server-address ""' + TestHelper.printSystemInfo("BEGIN") cluster.launch( prodCount=producerCountInEachNode, @@ -113,7 +119,7 @@ def neigbors_in_schedule(name, schedule): peers = peers.sort() name = "defproducer" + chr(ord('a') + nodeId) - expected_peers = neigbors_in_schedule(name, scheduled_producers) + expected_peers = neighbors_in_schedule(name, scheduled_producers) if peers != expected_peers: Utils.Print("ERROR: expect {} has connections to {}, got connections to {}".format( name, expected_peers, peers)) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py index 1b2948c7ec..62f1534c63 100755 --- a/tests/p2p_multiple_listen_test.py +++ b/tests/p2p_multiple_listen_test.py @@ -46,15 +46,25 @@ specificExtraNodeosArgs=specificArgs) is False: errorExit("Failed to stand up eos cluster.") - # Be sure all nodes start out connected + # Be sure all nodes start out connected (bios node omitted from diagram for brevity) + # node00 node01 node02 node03 node04 + # localhost:9876 -> localhost:9877 -> localhost:9878 -> localhost:9879 -> localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ cluster.waitOnClusterSync(blockAdvancing=5) # Shut down bios node, which is connected to all other nodes in all topologies cluster.biosNode.kill(signal.SIGTERM) - # Shut down second node, interrupting the default connections between it and nodes 0 and 3 + # Shut down second node, interrupting the default connections between it and nodes 00 and 02 
cluster.getNode(1).kill(signal.SIGTERM) - # Shut down the fourth node, interrupting the default connections between it and nodes 3 and 5 + # Shut down the fourth node, interrupting the default connections between it and nodes 02 and 04 cluster.getNode(3).kill(signal.SIGTERM) - # Be sure all remaining nodes continue to sync via the two listen ports on node 0 + # Be sure all remaining nodes continue to sync via the two listen ports on node 00 + # node00 node01 node02 node03 node04 + # localhost:9876 offline localhost:9878 offline localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ cluster.waitOnClusterSync(blockAdvancing=5) connections = cluster.nodes[0].processUrllibRequest('net', 'connections') open_socket_count = 0 From c669aea5d66b7eb43f966b02945ef687d919b6d5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 25 Jul 2023 09:11:08 -0500 Subject: [PATCH 101/107] GH-1417 Fix spelling in comment --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 18d9721dcb..1f0a12667c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -268,7 +268,7 @@ struct block_time_tracker { void trx_success() { _time_status = time_status::success; } - // Neither success for fail, will be reported as other + // Neither success nor fail, will be reported as other void cancel() { _time_status = time_status::other; } // updates block_time_tracker From 506747a41a4c8d5d0ab6c2ce9fc3c4d650c335ce Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 25 Jul 2023 10:59:47 -0500 Subject: [PATCH 102/107] GH-1446 Do not print version and non-default args when nodeos explicitly asked for help,version,full-version,print-default-config --- programs/nodeos/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 6c99a2fdd4..6e2feeba91 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -173,6 +173,7 @@ int main(int argc, char** argv) if(!app->initialize(argc, argv, initialize_logging)) { const auto& opts = app->get_options(); if( opts.count("help") || opts.count("version") || opts.count("full-version") || opts.count("print-default-config") ) { + on_exit.cancel(); return SUCCESS; } return INITIALIZE_FAIL; From 553a09d905f1e9d00fe8b614ceb9cae248d3ad16 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:30:00 -0400 Subject: [PATCH 103/107] don't build leap-dev .deb package by default --- .github/workflows/build_base.yaml | 2 +- CMakeLists.txt | 2 ++ package.cmake | 7 ++++--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index 5d47ba37cc..b1771fbfbf 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -77,7 +77,7 @@ jobs: run: | # https://github.com/actions/runner/issues/2033 chown -R $(id -u):$(id -g) $PWD - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DENABLE_LEAP_DEV_DEB=On -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - name: Upload builddir diff --git a/CMakeLists.txt b/CMakeLists.txt index 
058a270774..479cd1ea81 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -293,5 +293,7 @@ add_custom_target(dev-install include(doxygen) +option(ENABLE_LEAP_DEV_DEB "Enable building the leap-dev .deb package" OFF) + include(package.cmake) include(CPack) diff --git a/package.cmake b/package.cmake index 72e88c6c56..dd1c1b8e57 100644 --- a/package.cmake +++ b/package.cmake @@ -53,9 +53,10 @@ endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") -#only consider "base" and "dev" components for per-component packages -get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS) -list(REMOVE_ITEM CPACK_COMPONENTS_ALL "Unspecified") +set(CPACK_COMPONENTS_ALL "base") +if(ENABLE_LEAP_DEV_DEB) + list(APPEND CPACK_COMPONENTS_ALL "dev") +endif() #enable per component packages for .deb; ensure main package is just "leap", not "leap-base", and make the dev package have "leap-dev" at the front not the back set(CPACK_DEB_COMPONENT_INSTALL ON) From cf2b9049adbc9a889a9646a6a0926b4955043d38 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 25 Jul 2023 16:00:44 -0400 Subject: [PATCH 104/107] Update `appbase` and `chainbase` to tip of `main` --- libraries/appbase | 2 +- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/appbase b/libraries/appbase index fe1b3a6cd9..b75b31e14f 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit fe1b3a6cd9b6f7529d6fb4beac0e880d136308a8 +Subproject commit b75b31e14f966fa3de6246e120dcba36c6ce5264 diff --git a/libraries/chainbase b/libraries/chainbase index 148aac7461..bffb7ebde6 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 148aac7461fffbe8730ba0b55367dde6fdaa0e08 +Subproject commit bffb7ebde635be15d406d74d6fef46f4c744d441 From 012eda1d81ec0fa921a13c3fc0551bec9f91981f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 26 Jul 2023 09:52:21 -0500 Subject: [PATCH 105/107] GH-1417 Remove unneeded line --- plugins/producer_plugin/producer_plugin.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b7661e3bdd..dd0b944b0a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -274,7 +274,6 @@ struct block_time_tracker { trx_time_tracker start_trx(bool is_transient, fc::time_point now = fc::time_point::now()) { assert(!paused); add_other_time(now); - last_time_point = now; return {*this, is_transient}; } From 7ec90bf63f0807b4b0345a36fe59c887436ebbe7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 26 Jul 2023 10:38:04 -0500 Subject: [PATCH 106/107] GH-1418 Ignore error of remote_endpoint --- .../include/eosio/http_plugin/beast_http_listener.hpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp b/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp index 8a0e878954..9cadbffcd7 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp @@ -110,7 +110,9 @@ class beast_http_listener : public std::enable_shared_from_thisplugin_state_->logger, "closing connection"); } else { // Create the session object and run it - std::string remote_endpoint = boost::lexical_cast(self->socket_.remote_endpoint()); + boost::system::error_code re_ec; + auto re = 
self->socket_.remote_endpoint(re_ec); + std::string remote_endpoint = re_ec ? "unknown" : boost::lexical_cast(re); std::make_shared( std::move(self->socket_), self->plugin_state_, From a150f0b415effab70c9e731b22beb83e11c721ec Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Wed, 26 Jul 2023 17:45:26 -0400 Subject: [PATCH 107/107] Bump eos-vm to the merged commit for making wasm globals threaded safe --- libraries/eos-vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/eos-vm b/libraries/eos-vm index 65568f5e5e..4d5415fcf5 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 65568f5e5ee0d79aeb2a382a58d0596ae28d0434 +Subproject commit 4d5415fcf56d2d47f9c32b779f39361c871581ad
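The http_plugin change in PATCH 106 above replaces the throwing remote_endpoint() call with the error_code overload, so a peer that has already disconnected by the time the listener asks for its address is reported as "unknown" instead of raising. Below is a minimal stand-alone sketch of that pattern, assuming Boost.Asio and a helper name of our own (endpoint_to_string); it is not the beast_http_listener code itself.

// Sketch of the non-throwing remote_endpoint pattern from PATCH 106 (assumed helper name).
#include <boost/asio.hpp>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <string>

using boost::asio::ip::tcp;

std::string endpoint_to_string(const tcp::socket& s) {
   boost::system::error_code ec;
   auto ep = s.remote_endpoint(ec);                 // error_code overload: sets ec, never throws
   return ec ? "unknown" : boost::lexical_cast<std::string>(ep);
}

int main() {
   boost::asio::io_context ctx;
   tcp::socket never_connected{ctx};
   // An unconnected (or already closed) socket exercises the fallback branch.
   std::cout << "remote endpoint: " << endpoint_to_string(never_connected) << "\n";
}

Running this prints "remote endpoint: unknown" because the socket was never connected; a connected socket would yield its address:port string via the endpoint's stream operator, exactly as the session logging expects.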