diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile index baccb7c937..e9c3a1d4b6 100644 --- a/.cicd/platforms/ubuntu20.Dockerfile +++ b/.cicd/platforms/ubuntu20.Dockerfile @@ -6,11 +6,12 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ llvm-11-dev \ ninja-build \ python3-numpy \ + file \ + zlib1g-dev \ zstd diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile index 1e5a936a4d..57d49fe026 100644 --- a/.cicd/platforms/ubuntu22.Dockerfile +++ b/.cicd/platforms/ubuntu22.Dockerfile @@ -6,11 +6,12 @@ RUN apt-get update && apt-get upgrade -y && \ cmake \ git \ jq \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ llvm-11-dev \ ninja-build \ python3-numpy \ + file \ + zlib1g-dev \ zstd diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 35e42c98e6..b38c102e0c 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -92,7 +92,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - submodules: true + submodules: recursive - name: Download builddir uses: actions/download-artifact@v3 with: @@ -104,8 +104,8 @@ jobs: cpack - name: Install dev package run: | - apt update && apt upgrade -y - apt install -y ./build/leap_*.deb ./build/leap-dev*.deb + apt-get update && apt-get upgrade -y + apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | python3 -c "from TestHarness import Cluster" @@ -212,7 +212,15 @@ jobs: test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.platforms.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + env: + DEBIAN_FRONTEND: noninteractive + TZ: Etc/UTC steps: + - name: Update Package Index & Upgrade Packages + run: | + apt-get update + 
apt-get upgrade -y + # LEAP - if: ${{ matrix.test != 'deb-install' }} name: Clone leap @@ -249,15 +257,12 @@ jobs: - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | - apt-get update - export DEBIAN_FRONTEND='noninteractive' - export TZ='Etc/UTC' apt-get install -y ./*.deb rm ./*.deb # CDT - name: Download cdt - uses: AntelopeIO/asset-artifact-download-action@v2 + uses: AntelopeIO/asset-artifact-download-action@v3 with: owner: AntelopeIO repo: cdt @@ -265,10 +270,9 @@ jobs: target: '${{needs.v.outputs.cdt-target}}' prereleases: ${{fromJSON(needs.v.outputs.cdt-prerelease)}} artifact-name: cdt_ubuntu_package_amd64 - token: ${{github.token}} - name: Install cdt Packages run: | - apt install -y ./*.deb + apt-get install -y ./*.deb rm ./*.deb # Reference Contracts diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml index ae38bd3c03..51dd5d2167 100644 --- a/.github/workflows/build_base.yaml +++ b/.github/workflows/build_base.yaml @@ -38,7 +38,7 @@ jobs: run: | # https://github.com/actions/runner/issues/2033 chown -R $(id -u):$(id -g) $PWD - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DENABLE_LEAP_DEV_DEB=On -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - name: Upload builddir diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 3f5190009a..0abe6b6400 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -53,13 +53,12 @@ jobs: run: | zstdcat build.tar.zst | tar x - name: Download Prev Leap Version - uses: AntelopeIO/asset-artifact-download-action@v2 + uses: AntelopeIO/asset-artifact-download-action@v3 with: owner: AntelopeIO repo: leap file: '(leap).*${{matrix.platform}}.04.*(x86_64|amd64).deb' target: '${{matrix.release}}' - 
token: ${{github.token}} - name: Install leap & replace binaries for PH use run: | apt-get update diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..022c13dfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule "libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/boostorg/boost.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 9afc74d3b1..479cd1ea81 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,12 +101,6 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set(Boost_USE_MULTITHREADED ON) -set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -# Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up -# the pthread dependency through fc. -find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) @@ -278,6 +272,18 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) +# Add the boost submodule we used to build to our install package, so headers can be found for libtester +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" + DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost + COMPONENT dev EXCLUDE_FROM_ALL + PATTERN ".git/*" EXCLUDE + PATTERN "example/*" EXCLUDE + PATTERN "bench/*" EXCLUDE + PATTERN "doc/*" EXCLUDE + PATTERN "libs/*/test" EXCLUDE + PATTERN "tools/*/test" EXCLUDE + ) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build 
"${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" @@ -287,5 +293,7 @@ add_custom_target(dev-install include(doxygen) +option(ENABLE_LEAP_DEV_DEB "Enable building the leap-dev .deb package" OFF) + include(package.cmake) include(CPack) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index f98f8e2d81..8b1135bd40 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -35,14 +35,10 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + +add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -88,12 +84,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -109,7 +111,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_INSTALL_PREFIX@ @CMAKE_INSTALL_FULL_INCLUDEDIR@ diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index dccec72e50..6beb37467b 100644 --- 
a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -32,14 +32,10 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -85,12 +81,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -106,7 +108,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_SOURCE_DIR@/libraries/chain/include @CMAKE_BINARY_DIR@/libraries/chain/include diff --git a/README.md b/README.md index 71582a1ce4..1ffaa053e1 100644 --- a/README.md +++ b/README.md @@ -132,12 +132,13 @@ sudo apt-get install -y \ build-essential \ cmake \ git \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ llvm-11-dev \ - python3-numpy + python3-numpy \ + file \ + zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: ```bash diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 
4b041dd047..e7ad9b144e 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( Boost_USE_MULTITHREADED ON ) +set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/appbase b/libraries/appbase index 2da170ea8c..b75b31e14f 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 2da170ea8c39442c7d1374c3403e80d60338b34d +Subproject commit b75b31e14f966fa3de6246e120dcba36c6ce5264 diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..b6928ae5c9 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 5fd6e1f572..242132824c 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -34,11 +34,16 @@ if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) webassembly/runtimes/eos-vm-oc.cpp webassembly/runtimes/eos-vm-oc/default_real_main.cpp) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp PROPERTIES COMPILE_FLAGS "--std=gnu++17") + set_source_files_properties(webassembly/runtimes/eos-vm-oc/LLVMEmitIR.cpp PROPERTIES COMPILE_FLAGS "--std=gnu++17") + if(LLVM_VERSION VERSION_LESS 7.1 AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") enable_language(ASM-LLVMWAR) list(APPEND CHAIN_EOSVMOC_SOURCES webassembly/runtimes/eos-vm-oc/llvmWARshim.llvmwar) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/llvmWARshim.llvmwar PROPERTIES COMPILE_FLAGS "--std=gnu++17") else() list(APPEND CHAIN_EOSVMOC_SOURCES webassembly/runtimes/eos-vm-oc/llvmWARshim.cpp) + set_source_files_properties(webassembly/runtimes/eos-vm-oc/llvmWARshim.cpp PROPERTIES COMPILE_FLAGS 
"--std=gnu++17") endif() llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native orcjit) @@ -131,8 +136,17 @@ add_library( eosio_chain ${HEADERS} ) +## Boost::accumulators depends on Boost::numeric_ublas, which is still missing cmake support (see +## https://github.com/boostorg/cmake/issues/39). Until this is fixed, manually add Boost::numeric_ublas +## as an interface library +## ---------------------------------------------------------------------------------------------------- +add_library(boost_numeric_ublas INTERFACE) +add_library(Boost::numeric_ublas ALIAS boost_numeric_ublas) + target_link_libraries( eosio_chain PUBLIC bn256 fc chainbase eosio_rapidjson Logging IR WAST WASM softfloat builtins ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE} + Boost::signals2 Boost::hana Boost::property_tree Boost::multi_index Boost::asio Boost::lockfree + Boost::assign Boost::accumulators ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h new file mode 100644 index 0000000000..2451603a77 --- /dev/null +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMEmitIR.h @@ -0,0 +1,18 @@ +#pragma once + +#include "Inline/BasicTypes.h" +#include "IR/Module.h" + +#include "llvm/IR/Module.h" + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +namespace LLVMJIT { + bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex); + const char* getTableSymbolName(); + llvm::Module* emitModule(const IR::Module& module); +} +}}} diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp index 8b76b715af..f1eca8073d 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp +++ 
b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.cpp @@ -15,6 +15,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND */ #include "LLVMJIT.h" +#include "LLVMEmitIR.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/RTDyldMemoryManager.h" @@ -39,6 +40,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #include "llvm/Object/ObjectFile.h" #include "llvm/Object/SymbolSize.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/DataExtractor.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/TargetSelect.h" @@ -49,10 +51,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #include "llvm/Transforms/InstCombine/InstCombine.h" #include "llvm/Transforms/Utils.h" #include - -#include -#include -#include +#include #include "llvm/Support/LEB128.h" @@ -151,7 +150,7 @@ namespace LLVMJIT std::list> stack_sizes; U8* get_next_code_ptr(uintptr_t numBytes, U32 alignment) { - FC_ASSERT(alignment <= alignof(std::max_align_t), "alignment of section exceeds max_align_t"); + WAVM_ASSERT_THROW(alignment <= alignof(std::max_align_t)); uintptr_t p = (uintptr_t)ptr; p += alignment - 1LL; p &= ~(alignment - 1LL); @@ -306,12 +305,14 @@ namespace LLVMJIT unsigned num_functions_stack_size_found = 0; for(const auto& stacksizes : jitModule->unitmemorymanager->stack_sizes) { - fc::datastream ds(reinterpret_cast(stacksizes.data()), stacksizes.size()); - while(ds.remaining()) { - uint64_t funcaddr; - fc::unsigned_int stack_size; - fc::raw::unpack(ds, funcaddr); - fc::raw::unpack(ds, stack_size); + llvm::DataExtractor ds(llvm::ArrayRef(stacksizes.data(), stacksizes.size()), true, 8); + llvm::DataExtractor::Cursor c(0); + + while(!ds.eof(c)) { + ds.getAddress(c); + WAVM_ASSERT_THROW(!!c); + const uint64_t stack_size = ds.getULEB128(c); + WAVM_ASSERT_THROW(!!c); ++num_functions_stack_size_found; if(stack_size > 
16u*1024u) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h index 4d5a685c29..13e2510195 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/LLVMJIT.h @@ -3,10 +3,6 @@ #include "Inline/BasicTypes.h" #include "IR/Module.h" -#pragma push_macro("N") -#undef N -#include "llvm/IR/Module.h" -#pragma pop_macro("N") #include #include @@ -19,9 +15,6 @@ struct instantiated_code { }; namespace LLVMJIT { - bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex); - const char* getTableSymbolName(); - llvm::Module* emitModule(const IR::Module& module); instantiated_code instantiateModule(const IR::Module& module); } }}} diff --git a/libraries/chainbase b/libraries/chainbase index 0cc3c62aa6..bffb7ebde6 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 0cc3c62aa641ea89e4f89e61eb2662fd4da92684 +Subproject commit bffb7ebde635be15d406d74d6fef46f4c744d441 diff --git a/libraries/eos-vm b/libraries/eos-vm index 1e9345f96a..4d5415fcf5 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 1e9345f96a4dcefa3a16ff51b58e2e7df739eeff +Subproject commit 4d5415fcf56d2d47f9c32b779f39361c871581ad diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index e0dca99888..742501ca9f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -78,12 +78,6 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost 1.66 REQUIRED COMPONENTS - date_time - chrono - unit_test_framework - iostreams) - find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) if(NOT GMP_LIBRARY MATCHES ${CMAKE_SHARED_LIBRARY_SUFFIX}) @@ -117,7 +111,8 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC 
Boost::date_time Boost::chrono Boost::iostreams Threads::Threads +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index Boost::dll + Boost::multiprecision Boost::beast Boost::asio Boost::thread Boost::unit_test_framework Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) # Critically, this ensures that OpenSSL 1.1 & 3.0 both have a variant of BN_zero() with void return value. But it also allows access diff --git a/package.cmake b/package.cmake index ff3aebbd4b..dd1c1b8e57 100644 --- a/package.cmake +++ b/package.cmake @@ -46,13 +46,17 @@ set(CPACK_PACKAGE_HOMEPAGE_URL "https://github.com/AntelopeIO/leap") set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_BASE_PACKAGE_SECTION "utils") +if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) + set(CPACK_DEBIAN_COMPRESSION_TYPE "zstd") +endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") -#only consider "base" and "dev" components for per-component packages -get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS) -list(REMOVE_ITEM CPACK_COMPONENTS_ALL "Unspecified") +set(CPACK_COMPONENTS_ALL "base") +if(ENABLE_LEAP_DEV_DEB) + list(APPEND CPACK_COMPONENTS_ALL "dev") +endif() #enable per component packages for .deb; ensure main package is just "leap", not "leap-base", and make the dev package have "leap-dev" at the front not the back set(CPACK_DEB_COMPONENT_INSTALL ON) @@ -61,7 +65,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. 
llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libboost-all-dev, libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) diff --git a/plugins/chain_plugin/CMakeLists.txt b/plugins/chain_plugin/CMakeLists.txt index 0648d20fb4..ae21541990 100644 --- a/plugins/chain_plugin/CMakeLists.txt +++ b/plugins/chain_plugin/CMakeLists.txt @@ -11,7 +11,7 @@ if(EOSIO_ENABLE_DEVELOPER_OPTIONS) target_compile_definitions(chain_plugin PUBLIC EOSIO_DEVELOPER) endif() -target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin ) +target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin Boost::bimap ) target_include_directories( chain_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include" "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include" "${CMAKE_CURRENT_SOURCE_DIR}/../resource_monitor_plugin/include") -add_subdirectory( test ) \ No newline at end of file +add_subdirectory( test ) diff --git a/plugins/chain_plugin/test/test_trx_retry_db.cpp b/plugins/chain_plugin/test/test_trx_retry_db.cpp index 8c7a3925c9..cfad3ed512 100644 --- a/plugins/chain_plugin/test/test_trx_retry_db.cpp +++ b/plugins/chain_plugin/test/test_trx_retry_db.cpp @@ -224,11 +224,15 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { std::promise plugin_promise; std::future plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - std::vector argv = {"test"}; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - 
plugin_promise.set_value(app->find_plugin()); - app->exec(); + try { + std::vector argv = {"test"}; + app->initialize(argv.size(), (char**)&argv[0]); + app->startup(); + plugin_promise.set_value(app->find_plugin()); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); (void)plugin_fut.get(); // wait for app to be started diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp index a394312669..b5122f80aa 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp @@ -182,7 +182,7 @@ class bp_connection_manager { fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}", ("pending_downstream_neighbors", to_string(pending_downstream_neighbors))); - for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor]); } + for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); } pending_neighbors = std::move(pending_downstream_neighbors); finder.add_upstream_neighbors(pending_neighbors); diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 8eafaba2e5..5d5d12ef40 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -9,9 +9,14 @@ namespace eosio { struct connection_status { string peer; - bool connecting = false; - bool syncing = false; - bool is_bp_peer = false; + string remote_ip; + string remote_port; + bool connecting = false; + bool syncing = false; + bool is_bp_peer = false; + bool is_socket_open = false; + bool is_blocks_only = false; + bool is_transactions_only = false; handshake_message 
last_handshake; }; @@ -49,4 +54,4 @@ namespace eosio { } -FC_REFLECT( eosio::connection_status, (peer)(connecting)(syncing)(is_bp_peer)(last_handshake) ) +FC_REFLECT( eosio::connection_status, (peer)(remote_ip)(remote_port)(connecting)(syncing)(is_bp_peer)(is_socket_open)(is_blocks_only)(is_transactions_only)(last_handshake) ) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 63242aa1b6..1328e1195a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -42,6 +42,21 @@ using namespace eosio::chain::plugin_interface; +namespace boost +{ + /// @brief Overload for boost::lexical_cast to convert vector of strings to string + /// + /// Used by boost::program_options to print the default value of an std::vector option + /// + /// @param v the vector to convert + /// @return the contents of the vector as a comma-separated string + template<> + inline std::string lexical_cast(const std::vector& v) + { + return boost::join(v, ","); + } +} + namespace eosio { static auto _net_plugin = application::register_plugin(); @@ -335,7 +350,7 @@ namespace eosio { private: // must call with held mutex connection_ptr find_connection_i(const string& host) const; void add_i(connection_ptr&& c); - void connect_i(const string& peer); + void connect_i(const string& peer, const string& p2p_address); void connection_monitor(const std::weak_ptr& from_connection); @@ -355,14 +370,14 @@ namespace eosio { void register_update_p2p_connection_metrics(std::function&& fun); - void connect_supplied_peers(); + void connect_supplied_peers(const string& p2p_address); void start_conn_timer(); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void stop_conn_timer(); void add(connection_ptr c); - string connect(const string& host); + string connect(const string& host, const string& p2p_address); string disconnect(const string& host); void close_all(); @@ -395,8 +410,8 @@ namespace eosio { * 
Thread safe, only updated in plugin initialize * @{ */ - string p2p_address; - string p2p_server_address; + vector p2p_addresses; + vector p2p_server_addresses; vector allowed_peers; ///< peer keys allowed to connect std::map conn_state{connection_state::connecting}; + string listen_address; // address sent to peer in handshake const string peer_addr; enum connection_types : char { both, @@ -1004,7 +1023,7 @@ namespace eosio { return mvo; } - bool incoming() const { return peer_address().empty(); } // thread safe becuase of peer_address + bool incoming() const { return peer_address().empty(); } // thread safe because of peer_address bool incoming_and_handshake_received() const { if (!incoming()) return false; fc::lock_guard g_conn( conn_mtx ); @@ -1127,8 +1146,9 @@ namespace eosio { //--------------------------------------------------------------------------- - connection::connection( const string& endpoint ) - : peer_addr( endpoint ), + connection::connection( const string& endpoint, const string& listen_address ) + : listen_address( listen_address ), + peer_addr( endpoint ), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ), log_p2p_address( endpoint ), @@ -1141,8 +1161,9 @@ namespace eosio { fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) ); } - connection::connection(tcp::socket&& s) - : peer_addr(), + connection::connection(tcp::socket&& s, const string& listen_address) + : listen_address( listen_address ), + peer_addr(), strand( my_impl->thread_pool.get_executor() ), socket( new tcp::socket( std::move(s) ) ), connection_id( ++my_impl->current_connection_id ), @@ -1150,7 +1171,8 @@ namespace eosio { last_handshake_recv(), last_handshake_sent() { - fc_dlog( logger, "new connection object created" ); + update_endpoints(); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", 
log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) ); } // called from connection strand @@ -1212,9 +1234,12 @@ namespace eosio { connection_status connection::get_status()const { connection_status stat; stat.peer = peer_addr; + stat.remote_ip = log_remote_endpoint_ip; + stat.remote_port = log_remote_endpoint_port; stat.connecting = state() == connection_state::connecting; stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; + stat.is_socket_open = socket_is_open(); fc::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; @@ -1224,7 +1249,6 @@ namespace eosio { bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); - update_endpoints(); boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; socket->set_option( nodelay, ec ); @@ -2655,7 +2679,7 @@ namespace eosio { } - void net_plugin_impl::create_session(tcp::socket&& socket) { + void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; @@ -2681,7 +2705,7 @@ namespace eosio { visitors < connections.get_max_client_count())) { fc_ilog(logger, "Accepted new connection: " + paddr_str); - connection_ptr new_connection = std::make_shared(std::move(socket)); + connection_ptr new_connection = std::make_shared(std::move(socket), listen_address); new_connection->strand.post([new_connection, this]() { if (new_connection->start_session()) { connections.add(new_connection); @@ -3144,9 +3168,9 @@ namespace eosio { if (msg.time + c_time <= check_time) return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (my_impl->p2p_address < msg.p2p_address) { - fc_dlog( logger, "my_impl->p2p_address '${lhs}' < msg.p2p_address '${rhs}'", - ("lhs", my_impl->p2p_address)( "rhs", msg.p2p_address ) ); + if (listen_address < 
msg.p2p_address) { + fc_dlog( logger, "listen_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", listen_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed return false; @@ -3828,7 +3852,7 @@ namespace eosio { // If we couldn't sign, don't send a token. if(hello.sig == chain::signature_type()) hello.token = sha256(); - hello.p2p_address = my_impl->p2p_address; + hello.p2p_address = listen_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3860,8 +3884,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg ) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value()->default_value( "0.0.0.0:9876" ), "The actual host:port used to listen for incoming p2p connections.") - ( "p2p-server-address", bpo::value(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint.") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. 
Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" @@ -3882,7 +3906,7 @@ namespace eosio { ( "agent-name", bpo::value()->default_value("EOS Test Agent"), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") ( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. May be used multiple times.") - ( "peer-private-key", boost::program_options::value>()->composing()->multitoken(), + ( "peer-private-key", bpo::value>()->composing()->multitoken(), "Tuple of [PublicKey, WIF private key] (may specify multiple times)") ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") ( "connection-cleanup-period", bpo::value()->default_value(def_conn_retry_wait), "number of seconds to wait before cleaning up dead connections") @@ -3942,16 +3966,35 @@ namespace eosio { std::chrono::seconds( options.at("connection-cleanup-period").as() ), options.at("max-clients").as() ); - if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as().length()) { - p2p_address = options.at( "p2p-listen-endpoint" ).as(); - EOS_ASSERT( p2p_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint too long, must be less than ${m}", ("m", max_p2p_address_length) ); + if( options.count( "p2p-listen-endpoint" )) { + auto p2ps = options.at("p2p-listen-endpoint").as>(); + if (!p2ps.front().empty()) { + p2p_addresses = p2ps; + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), 
p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for( const auto& addr : p2p_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } + } } if( options.count( "p2p-server-address" ) ) { - p2p_server_address = options.at( "p2p-server-address" ).as(); - EOS_ASSERT( p2p_server_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p_server_address too long, must be less than ${m}", ("m", max_p2p_address_length) ); + p2p_server_addresses = options.at( "p2p-server-address" ).as>(); + EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, + "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); + for( const auto& addr: p2p_server_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-server-address ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } + p2p_server_addresses.resize(p2p_addresses.size()); // extend with empty entries as needed thread_pool_size = options.at( "net-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, chain::plugin_config_exception, @@ -4044,7 +4087,7 @@ namespace eosio { dispatcher = std::make_unique( my_impl->thread_pool.get_executor() ); - if( !p2p_accept_transactions && p2p_address.size() ) { + if( !p2p_accept_transactions && p2p_addresses.size() ) { fc_ilog( logger, "\n" "***********************************\n" "* p2p-accept-transactions = false *\n" @@ -4052,13 +4095,15 @@ namespace eosio { "***********************************\n" ); } - std::string listen_address = p2p_address; + std::vector 
listen_addresses = p2p_addresses; - if( !p2p_address.empty() ) { - auto [host, port] = fc::split_host_port(listen_address); + EOS_ASSERT( p2p_addresses.size() == p2p_server_addresses.size(), chain::plugin_config_exception, "" ); + std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), + p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) { + auto [host, port] = fc::split_host_port(p2p_address); if( !p2p_server_address.empty() ) { - p2p_address = p2p_server_address; + return p2p_server_address; } else if( host.empty() || host == "0.0.0.0" || host == "[::]") { boost::system::error_code ec; auto hostname = host_name( ec ); @@ -4068,9 +4113,10 @@ namespace eosio { "Unable to retrieve host_name. ${msg}", ("msg", ec.message())); } - p2p_address = hostname + ":" + port; + return hostname + ":" + port; } - } + return p2p_address; + }); { chain::controller& cc = chain_plug->chain(); @@ -4094,8 +4140,10 @@ namespace eosio { incoming_transaction_ack_subscription = app().get_channel().subscribe( [this](auto&& t) { transaction_ack(std::forward(t)); }); - app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(listen_address)](){ - if (address.size()) { + for(auto listen_itr = listen_addresses.begin(), p2p_iter = p2p_addresses.begin(); + listen_itr != listen_addresses.end(); + ++listen_itr, ++p2p_iter) { + app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(*listen_itr), p2p_addr = *p2p_iter](){ try { const boost::posix_time::milliseconds accept_timeout(100); @@ -4104,19 +4152,20 @@ namespace eosio { fc::create_listener( my->thread_pool.get_executor(), logger, accept_timeout, address, extra_listening_log_info, - [my = my](tcp::socket&& socket) { my->create_session(std::move(socket)); }); + [my = my, addr = p2p_addr](tcp::socket&& socket) { my->create_session(std::move(socket), addr); }); } catch (const std::exception& e) { fc_elog( logger, 
"net_plugin::plugin_startup failed to listen on ${addr}, ${what}", ("addr", address)("what", e.what()) ); app().quit(); return; } - } - + }); + } + app().executor().post(priority::highest, [my=shared_from_this()](){ my->ticker(); my->start_monitors(); my->update_chain_info(); - my->connections.connect_supplied_peers(); + my->connections.connect_supplied_peers(*my->p2p_addresses.begin()); // attribute every outbound connection to the first listen port }); } @@ -4153,7 +4202,7 @@ namespace eosio { /// RPC API string net_plugin::connect( const string& host ) { - return my->connections.connect( host ); + return my->connections.connect( host, *my->p2p_addresses.begin() ); } /// RPC API @@ -4227,10 +4276,10 @@ namespace eosio { update_p2p_connection_metrics = std::move(fun); } - void connections_manager::connect_supplied_peers() { + void connections_manager::connect_supplied_peers(const string& p2p_address) { std::lock_guard g(connections_mtx); for (const auto& peer : supplied_peers) { - connect_i(peer); + connect_i(peer, p2p_address); } } @@ -4240,12 +4289,12 @@ namespace eosio { } // called by API - string connections_manager::connect( const string& host ) { + string connections_manager::connect( const string& host, const string& p2p_address ) { std::lock_guard g( connections_mtx ); if( find_connection_i( host ) ) return "already connected"; - connect_i( host ); + connect_i( host, p2p_address ); supplied_peers.insert(host); return "added connection"; } @@ -4302,8 +4351,8 @@ namespace eosio { } // call with connections_mtx - void connections_manager::connect_i( const string& host ) { - connection_ptr c = std::make_shared( host ); + void connections_manager::connect_i( const string& host, const string& p2p_address ) { + connection_ptr c = std::make_shared( host, p2p_address ); fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", 
c->connection_id) ); diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp index 93ac898a5b..6aa7fbebd6 100644 --- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp +++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp @@ -18,7 +18,7 @@ struct mock_connections_manager { uint32_t max_client_count = 0; std::vector connections; - std::function connect; + std::function connect; std::function disconnect; uint32_t get_max_client_count() const { return max_client_count; } @@ -36,6 +36,7 @@ struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager p2p_addresses{"0.0.0.0:9876"}; bool in_sync() { return is_in_sync; } @@ -165,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_on_pending_schedule) { std::vector connected_hosts; - plugin.connections.connect = [&connected_hosts](std::string host) { connected_hosts.push_back(host); }; + plugin.connections.connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); }; // make sure nothing happens when it is not in_sync plugin.is_in_sync = false; @@ -209,7 +210,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule1) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; @@ -245,7 +246,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule2) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connections.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; plugin.connections.disconnect = 
[&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 4ad6fd9730..dd0b944b0a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -232,47 +232,134 @@ class account_failures { struct block_time_tracker { - void add_idle_time(const fc::microseconds& idle) { block_idle_time += idle; } + struct trx_time_tracker { + enum class time_status { success, fail, other }; - void add_fail_time(const fc::microseconds& fail_time, bool is_transient) { - if (is_transient) { - // transient time includes both success and fail time - transient_trx_time += fail_time; - ++transient_trx_num; - } else { - trx_fail_time += fail_time; - ++trx_fail_num; + trx_time_tracker(block_time_tracker& btt, bool transient) + : _block_time_tracker(btt), _is_transient(transient) {} + + trx_time_tracker(trx_time_tracker&&) = default; + + trx_time_tracker() = delete; + trx_time_tracker(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(trx_time_tracker&&) = delete; + + void trx_success() { _time_status = time_status::success; } + + // Neither success nor fail, will be reported as other + void cancel() { _time_status = time_status::other; } + + // updates block_time_tracker + ~trx_time_tracker() { + switch (_time_status) { + case time_status::success: + _block_time_tracker.add_success_time(_is_transient); + break; + case time_status::fail: + _block_time_tracker.add_fail_time(_is_transient); + break; + case time_status::other: + _block_time_tracker.add_other_time(); + break; + } } + + private: + block_time_tracker& _block_time_tracker; + time_status _time_status = time_status::fail; + bool _is_transient; + }; + + trx_time_tracker start_trx(bool is_transient, fc::time_point now = fc::time_point::now()) { + assert(!paused); + 
add_other_time(now); + return {*this, is_transient}; } - void add_success_time(const fc::microseconds& time, bool is_transient) { - if (is_transient) { - transient_trx_time += time; - ++transient_trx_num; - } else { - trx_success_time += time; - ++trx_success_num; - } + void add_other_time(fc::time_point now = fc::time_point::now()) { + assert(!paused); + other_time += now - last_time_point; + last_time_point = now; } - void report(const fc::time_point& idle_trx_time, uint32_t block_num) { - if (_log.is_enabled(fc::log_level::debug)) { + fc::microseconds add_idle_time(fc::time_point now = fc::time_point::now()) { + assert(!paused); + auto dur = now - last_time_point; + block_idle_time += dur; + last_time_point = now; // guard against calling add_idle_time() twice in a row. + return dur; + } + + // assumes idle time before pause + void pause(fc::time_point now = fc::time_point::now()) { + assert(!paused); + add_idle_time(now); + paused = true; + } + + // assumes last call was to pause + void unpause(fc::time_point now = fc::time_point::now()) { + assert(paused); + paused = false; + auto pause_time = now - last_time_point; + clear_time_point += pause_time; + last_time_point = now; + } + + void report(uint32_t block_num, account_name producer) { + using namespace std::string_literals; + assert(!paused); + if( _log.is_enabled( fc::log_level::debug ) ) { auto now = fc::time_point::now(); - add_idle_time(now - idle_trx_time); - fc_dlog(_log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " - "transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", - ("n", block_num)("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) - ("fn", trx_fail_num)("f", trx_fail_time)("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time)); + auto diff = now - clear_time_point - 
block_idle_time - trx_success_time - trx_fail_time - transient_trx_time - other_time; + fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " + "transient: ${ttn}, ${tt}us, other: ${o}us${rest}", + ("n", block_num)("p", producer) + ("i", block_idle_time)("t", now - clear_time_point)("sn", trx_success_num)("s", trx_success_time) + ("fn", trx_fail_num)("f", trx_fail_time) + ("ttn", transient_trx_num)("tt", transient_trx_time) + ("o", other_time)("rest", diff.count() > 5 ? ", diff: "s + std::to_string(diff.count()) + "us"s : ""s ) ); } } void clear() { - block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = fc::microseconds{}; + assert(!paused); + block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = other_time = fc::microseconds{}; trx_fail_num = trx_success_num = transient_trx_num = 0; - clear_time = fc::time_point::now(); + clear_time_point = last_time_point = fc::time_point::now(); } + private: + void add_success_time(bool is_transient) { + assert(!paused); + auto now = fc::time_point::now(); + if( is_transient ) { + // transient time includes both success and fail time + transient_trx_time += now - last_time_point; + ++transient_trx_num; + } else { + trx_success_time += now - last_time_point; + ++trx_success_num; + } + last_time_point = now; + } + + void add_fail_time(bool is_transient) { + assert(!paused); + auto now = fc::time_point::now(); + if( is_transient ) { + // transient time includes both success and fail time + transient_trx_time += now - last_time_point; + ++transient_trx_num; + } else { + trx_fail_time += now - last_time_point; + ++trx_fail_num; + } + last_time_point = now; + } + + private: fc::microseconds block_idle_time; uint32_t trx_success_num = 0; uint32_t trx_fail_num = 0; @@ -280,7 +367,10 @@ struct block_time_tracker { fc::microseconds trx_success_time; fc::microseconds trx_fail_time; fc::microseconds transient_trx_time; - fc::time_point 
clear_time{fc::time_point::now()}; + fc::microseconds other_time; + fc::time_point last_time_point{fc::time_point::now()}; + fc::time_point clear_time_point{fc::time_point::now()}; + bool paused = false; }; } // anonymous namespace @@ -313,6 +403,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& next); push_result handle_push_result(const transaction_metadata_ptr& trx, const next_function& next, @@ -584,11 +675,18 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); - if (chain.is_building_block()) { - _time_tracker.report(_idle_trx_time, chain.pending_block_num()); + std::optional> block_info; + if( chain.is_building_block() ) { + block_info = std::make_tuple(chain.pending_block_num(), chain.pending_block_producer()); } - _unapplied_transactions.add_aborted(chain.abort_block()); - _idle_trx_time = fc::time_point::now(); + _unapplied_transactions.add_aborted( chain.abort_block() ); + _time_tracker.add_other_time(); + + if (block_info) { + auto[block_num, block_producer] = *block_info; + _time_tracker.report(block_num, block_producer); + } + _time_tracker.clear(); } bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { @@ -601,13 +699,15 @@ class producer_plugin_impl : public std::enable_shared_from_thiscalculate_id(); auto blk_num = block->block_num(); - auto now = fc::time_point::now(); if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); + _time_tracker.add_idle_time(now); + EOS_ASSERT(block->timestamp < (now + fc::seconds(7)), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id)); /* de-dupe here... 
no point in aborting block if we already know the block */ @@ -748,30 +848,25 @@ class producer_plugin_impl : public std::enable_shared_from_this_idle_trx_time; - self->_time_tracker.add_idle_time(idle_time); + auto start = fc::time_point::now(); + auto idle_time = self->_time_tracker.add_idle_time(start); + auto trx_tracker = self->_time_tracker.start_trx(is_transient, start); fc_tlog(_log, "Time since last trx: ${t}us", ("t", idle_time)); auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { - self->_time_tracker.add_idle_time(start - self->_idle_trx_time); self->log_trx_results(trx, nullptr, ex, 0, start, is_transient); next(std::move(ex)); - self->_idle_trx_time = fc::time_point::now(); - auto dur = self->_idle_trx_time - start; - self->_time_tracker.add_fail_time(dur, is_transient); }; try { auto result = future.get(); - if (!self->process_incoming_transaction_async(result, api_trx, return_failure_traces, next)) { + if (!self->process_incoming_transaction_async(result, api_trx, return_failure_traces, trx_tracker, next)) { if (self->in_producing_mode()) { self->schedule_maybe_produce_block(true); } else { self->restart_speculative_block(); } } - self->_idle_trx_time = fc::time_point::now(); } CATCH_AND_CALL(exception_handler); }); @@ -782,6 +877,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& next) { bool exhausted = false; chain::controller& chain = chain_plug->chain(); @@ -807,11 +903,12 @@ class producer_plugin_impl : public std::enable_shared_from_thisblock_num, subjective_bill); - _time_tracker.clear(); if (!remove_expired_trxs(preprocess_deadline)) return start_block_result::exhausted; @@ -2188,6 +2285,7 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f const transaction_metadata_ptr& trx, bool api_trx, bool return_failure_trace, + block_time_tracker::trx_time_tracker& trx_tracker, const next_function& next) { auto start = fc::time_point::now(); 
EOS_ASSERT(!trx->is_read_only(), producer_exception, "Unexpected read-only trx"); @@ -2211,7 +2309,6 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f log_trx_results(trx, except_ptr); next(except_ptr); } - _time_tracker.add_fail_time(fc::time_point::now() - start, trx->is_transient()); return push_result{.failed = true}; } @@ -2235,8 +2332,12 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f auto trace = chain.push_transaction(trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill); - return handle_push_result( - trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + auto pr = handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + + if (!pr.failed) { + trx_tracker.trx_success(); + } + return pr; } producer_plugin_impl::push_result @@ -2254,17 +2355,9 @@ producer_plugin_impl::handle_push_result(const transaction_metadata_ptr& chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); push_result pr; - if (trace->except) { - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. - if (chain.is_write_window()) { - auto dur = end - start; - _time_tracker.add_fail_time(dur, trx->is_transient()); - } - if (exception_is_exhausted(*trace->except)) { - if (in_producing_mode()) { + if( trace->except ) { + if( exception_is_exhausted( *trace->except ) ) { + if( in_producing_mode() ) { fc_dlog(trx->is_transient() ? 
_transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx->id())); @@ -2303,14 +2396,6 @@ producer_plugin_impl::handle_push_result(const transaction_metadata_ptr& } else { fc_tlog(_log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. - if (chain.is_write_window()) { - auto dur = end - start; - _time_tracker.add_success_time(dur, trx->is_transient()); - } log_trx_results(trx, trace, start); // if producing then trx is in objective cpu account billing if (!disable_subjective_enforcement && !in_producing_mode()) { @@ -2340,7 +2425,8 @@ bool producer_plugin_impl::process_unapplied_trxs(const fc::time_point& deadline ++num_processed; try { - push_result pr = push_transaction(deadline, itr->trx_meta, false, itr->return_failure_trace, itr->next); + auto trx_tracker = _time_tracker.start_trx(itr->trx_meta->is_transient()); + push_result pr = push_transaction(deadline, itr->trx_meta, false, itr->return_failure_trace, trx_tracker, itr->next); exhausted = pr.block_exhausted; if (exhausted) { @@ -2423,7 +2509,8 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po auto trx_meta = itr->trx_meta; bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next); exhausted = 
pr.block_exhausted; if (pr.trx_exhausted) { @@ -2451,6 +2538,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po try { auto start = fc::time_point::now(); + auto trx_tracker = _time_tracker.start_trx(false, start); // delayed transaction cannot be transient fc::microseconds max_trx_time = fc::milliseconds(_max_transaction_time_ms.load()); if (max_trx_time.count() < 0) max_trx_time = fc::microseconds::maximum(); @@ -2458,7 +2546,6 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po auto trace = chain.push_scheduled_transaction(trx_id, deadline, max_trx_time, 0, false); auto end = fc::time_point::now(); if (trace->except) { - _time_tracker.add_fail_time(end - start, false); // delayed transaction cannot be transient if (exception_is_exhausted(*trace->except)) { if (block_is_exhausted()) { exhausted = true; @@ -2478,7 +2565,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po num_failed++; } } else { - _time_tracker.add_success_time(end - start, false); // delayed transaction cannot be transient + trx_tracker.trx_success(); fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) @@ -2522,7 +2609,8 @@ bool producer_plugin_impl::process_incoming_trxs(const fc::time_point& deadline, auto trx_meta = itr->trx_meta; bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next); exhausted = pr.block_exhausted; if (pr.trx_exhausted) { @@ -2564,8 +2652,6 @@ void 
producer_plugin_impl::schedule_production_loop() { auto result = start_block(); - _idle_trx_time = fc::time_point::now(); - if (result == start_block_result::failed) { elog("Failed to start a pending block, will try again later"); _timer.expires_from_now(boost::posix_time::microseconds(config::block_interval_us / 10)); @@ -2603,6 +2689,8 @@ void producer_plugin_impl::schedule_production_loop() { } else { fc_dlog(_log, "Speculative Block Created"); } + + _time_tracker.add_other_time(); } void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { @@ -2704,6 +2792,8 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, @@ -2747,8 +2837,6 @@ void producer_plugin_impl::produce_block() { block_state_ptr new_bs = chain.head_block_state(); - _time_tracker.report(_idle_trx_time, new_bs->block_num); - br.total_time += fc::time_point::now() - start; if (_update_produced_block_metrics) { @@ -2769,6 +2857,10 @@ void producer_plugin_impl::produce_block() { ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp) ("count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage) ("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); + + _time_tracker.add_other_time(); + _time_tracker.report(new_bs->block_num, new_bs->block->producer); + _time_tracker.clear(); } void producer_plugin::received_block(uint32_t block_num) { @@ -2815,9 +2907,10 @@ void producer_plugin_impl::start_write_window() { app().executor().set_to_write_window(); chain.set_to_write_window(); chain.unset_db_read_only_mode(); - _idle_trx_time = _ro_window_deadline = fc::time_point::now(); + auto now = fc::time_point::now(); + _time_tracker.unpause(now); - _ro_window_deadline += _ro_write_window_time_us; // not allowed on block producers, so 
no need to limit to block deadline + _ro_window_deadline = now + _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count()); _ro_timer.expires_from_now(expire_time); _ro_timer.async_wait(app().executor().wrap( // stay on app thread @@ -2837,7 +2930,7 @@ void producer_plugin_impl::switch_to_read_window() { EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty"); - _time_tracker.add_idle_time(fc::time_point::now() - _idle_trx_time); + _time_tracker.pause(); // we are in write window, so no read-only trx threads are processing transactions. if (app().executor().read_only_queue().empty()) { // no read-only tasks to process. stay in write window @@ -2953,10 +3046,10 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t chain.unset_db_read_only_mode(); }); - if (chain.is_write_window()) { + std::optional trx_tracker; + if ( chain.is_write_window() ) { chain.set_db_read_only_mode(); - auto idle_time = fc::time_point::now() - _idle_trx_time; - _time_tracker.add_idle_time(idle_time); + trx_tracker.emplace(_time_tracker.start_trx(true, start)); } // use read-window/write-window deadline if there are read/write windows, otherwise use block_deadline if only the app thead @@ -2978,8 +3071,8 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t _ro_exhausted_trx_queue.push_front({std::move(trx), std::move(next)}); } - if (chain.is_write_window()) { - _idle_trx_time = fc::time_point::now(); + if ( chain.is_write_window() && !pr.failed ) { + trx_tracker->trx_success(); } } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); diff --git a/plugins/producer_plugin/test/test_options.cpp 
b/plugins/producer_plugin/test/test_options.cpp index 23cdea785d..3fe429b6a9 100644 --- a/plugins/producer_plugin/test/test_options.cpp +++ b/plugins/producer_plugin/test/test_options.cpp @@ -30,17 +30,21 @@ BOOST_AUTO_TEST_CASE(state_dir) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", - "--data-dir", temp_dir_str.c_str(), - "--state-dir", custom_state_dir_str.c_str(), - "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", + "--data-dir", temp_dir_str.c_str(), + "--state-dir", custom_state_dir_str.c_str(), + "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index 34ddcc6ea9..129b135114 100644 --- a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -108,15 +108,19 @@ BOOST_AUTO_TEST_CASE(producer) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; - 
app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index bf184cf927..71b9d6c866 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -13,7 +13,7 @@ set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME}) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} - PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} Boost::process Boost::dll ) if (CURL_FOUND) target_sources(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE do_http_post_libcurl.cpp) diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 23ac7269c2..493c9e3a43 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -40,7 +40,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} prometheus_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin - 
PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} + Boost::dll ) include(additionalPlugins) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 6c99a2fdd4..6e2feeba91 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -173,6 +173,7 @@ int main(int argc, char** argv) if(!app->initialize(argc, argv, initialize_logging)) { const auto& opts = app->get_options(); if( opts.count("help") || opts.count("version") || opts.count("full-version") || opts.count("print-default-config") ) { + on_exit.cancel(); return SUCCESS; } return INITIALIZE_FAIL; diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index 0616e92aaf..ebf37d29d7 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -30,7 +30,6 @@ DEP_DIR="$(realpath "$1")" LEAP_DIR="$2" JOBS="$3" CLANG_VER=11.0.1 -BOOST_VER=1.82.0 LLVM_VER=11.0.1 SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; START_DIR="$(pwd)" @@ -101,27 +100,10 @@ install_llvm() { export LLVM_DIR="${LLVM_DIR}" } -install_boost() { - BOOST_DIR="$1" - - if [ ! 
-d "${BOOST_DIR}" ]; then - echo "Installing Boost ${BOOST_VER} @ ${BOOST_DIR}" - try wget -O "boost_${BOOST_VER//\./_}.tar.gz" "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VER}/source/boost_${BOOST_VER//\./_}.tar.gz" - try tar -xvzf "boost_${BOOST_VER//\./_}.tar.gz" -C "${DEP_DIR}" - pushdir "${BOOST_DIR}" - try ./bootstrap.sh -with-toolset=clang --prefix="${BOOST_DIR}/bin" - ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I\${CLANG_DIR}/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fPIE" linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-system --with-program_options --with-chrono --with-test -q -j "${JOBS}" install - popdir "${DEP_DIR}" - rm "boost_${BOOST_VER//\./_}.tar.gz" - fi - export BOOST_DIR="${BOOST_DIR}" -} - pushdir "${DEP_DIR}" install_clang "${DEP_DIR}/clang-${CLANG_VER}" install_llvm "${DEP_DIR}/llvm-${LLVM_VER}" -install_boost "${DEP_DIR}/boost_${BOOST_VER//\./_}" # go back to the directory where the script starts popdir "${START_DIR}" @@ -130,7 +112,7 @@ pushdir "${LEAP_DIR}" # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" -DCMAKE_PREFIX_PATH="${BOOST_DIR}/bin" "${SCRIPT_DIR}/.." +try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" "${SCRIPT_DIR}/.." 
try make -j "${JOBS}" try cpack diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index a90031dec8..c9cf6bbc12 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -49,6 +49,8 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -104,9 +106,9 @@ add_subdirectory( performance_tests ) find_package(Threads) add_executable(ship_client ship_client.cpp) -target_link_libraries(ship_client abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_client abieos Boost::program_options Boost::system Boost::algorithm Boost::asio Boost::beast Threads::Threads) add_executable(ship_streamer ship_streamer.cpp) -target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Boost::asio Boost::beast Threads::Threads) add_test(NAME cluster_launcher COMMAND tests/cluster_launcher.py 
-v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST cluster_launcher PROPERTY LABELS nonparallelizable_tests) @@ -183,7 +185,10 @@ set_property(TEST nested_container_multi_index_test PROPERTY LABELS nonparalleli add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) - +add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME p2p_no_listen_test COMMAND tests/p2p_no_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_no_listen_test PROPERTY LABELS nonparallelizable_tests) # needs iproute-tc or iproute2 depending on platform #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -263,7 +268,7 @@ set_property(TEST nodeos_repeat_transaction_lr_test PROPERTY LABELS long_running add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST auto_bp_peering_test PROPERTY LABELS long_running_tests) add_test(NAME gelf_test COMMAND tests/gelf_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 665ed4fc13..9fca7c85c4 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -200,8 +200,8 @@ def 
comma_separated(string): cfg.add_argument('--enable-gelf-logging', action='store_true', help='enable gelf logging appender in logging configuration file', default=False) cfg.add_argument('--gelf-endpoint', help='hostname:port or ip:port of GELF endpoint', default='128.0.0.1:12201') cfg.add_argument('--template', help='the startup script template', default='testnet.template') - cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=200000) - cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=150000) + cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=None) + cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=None) cfg.add_argument('--logging-level', type=fc_log_level, help='Provide the "level" value to use in the logging.json file') cfg.add_argument('--logging-level-map', type=json.loads, help='JSON string of a logging level dictionary to use in the logging.json file for specific nodes, matching based on node number. 
Ex: {"bios":"off","00":"info"}') cfg.add_argument('--is-nodeos-v2', action='store_true', help='Toggles old nodeos compatibility', default=False) @@ -359,9 +359,9 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, - 'max_transaction_cpu_usage': self.args.max_transaction_cpu_usage, + 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, 'max_transaction_lifetime': 3600, 'deferred_trx_expiration_window': 600, @@ -375,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage - genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage + if self.args.max_block_cpu_usage is not None: genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage + if self.args.max_transaction_cpu_usage is not None: genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): diff --git a/tests/auto_bp_peering_test.py b/tests/auto_bp_peering_test.py index 912ab10e3c..a55bdd8807 100755 --- a/tests/auto_bp_peering_test.py +++ b/tests/auto_bp_peering_test.py @@ -1,10 +1,8 @@ #!/usr/bin/env python3 -import re -import signal -import time +import socket -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, ReturnType +from TestHarness import Cluster, TestHelper, Utils, WalletMgr ############################################################### # 
auto_bp_peering_test @@ -35,7 +33,7 @@ dumpErrorDetails = args.dump_error_details keepLogs = args.keep_logs -# Setup cluster and it's wallet manager +# Setup cluster and its wallet manager walletMgr = WalletMgr(True) cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) @@ -47,12 +45,17 @@ for nodeId in range(0, producerNodes): producer_name = "defproducer" + chr(ord('a') + nodeId) port = cluster.p2pBasePort + nodeId - hostname = "localhost:" + str(port) + if producer_name == 'defproducerf': + hostname = 'ext-ip0:9999' + elif producer_name == 'defproducerk': + hostname = socket.gethostname() + ':9886' + else: + hostname = "localhost:" + str(port) peer_names[hostname] = producer_name auto_bp_peer_args += (" --p2p-auto-bp-peer " + producer_name + "," + hostname) -def neigbors_in_schedule(name, schedule): +def neighbors_in_schedule(name, schedule): index = schedule.index(name) result = [] num = len(schedule) @@ -71,6 +74,9 @@ def neigbors_in_schedule(name, schedule): for nodeId in range(0, producerNodes): specificNodeosArgs[nodeId] = auto_bp_peer_args + specificNodeosArgs[5] = specificNodeosArgs[5] + ' --p2p-server-address ext-ip0:9999' + specificNodeosArgs[10] = specificNodeosArgs[10] + ' --p2p-server-address ""' + TestHelper.printSystemInfo("BEGIN") cluster.launch( prodCount=producerCountInEachNode, @@ -113,7 +119,7 @@ def neigbors_in_schedule(name, schedule): peers = peers.sort() name = "defproducer" + chr(ord('a') + nodeId) - expected_peers = neigbors_in_schedule(name, scheduled_producers) + expected_peers = neighbors_in_schedule(name, scheduled_producers) if peers != expected_peers: Utils.Print("ERROR: expect {} has connections to {}, got connections to {}".format( name, expected_peers, peers)) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py new file mode 100755 index 0000000000..62f1534c63 --- /dev/null +++ b/tests/p2p_multiple_listen_test.py @@ -0,0 
+1,103 @@ +#!/usr/bin/env python3 + +import signal + +from TestHarness import Cluster, TestHelper, Utils, WalletMgr + +############################################################### +# p2p_multiple_listen_test +# +# Test nodeos ability to listen on multiple ports for p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"-p","-n","-d","--keep-logs" + ,"--dump-error-details","-v" + ,"--leave-running","--unshared"}) +pnodes=args.p +delay=args.d +debug=args.v +total_nodes=5 +dumpErrorDetails=args.dump_error_details + +Utils.Debug=debug +testSuccessful=False + +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) +walletMgr=WalletMgr(True) + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + + Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') + + Print("Stand up cluster") + specificArgs = { + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9876 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address ext-ip0:20000 --p2p-server-address ext-ip1:20001 --plugin eosio::net_api_plugin', + '2': '--agent-name node-02 --p2p-peer-address localhost:9779 --plugin eosio::net_api_plugin', + '4': '--agent-name node-04 --p2p-peer-address localhost:9876 --plugin eosio::net_api_plugin', + } + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='line', delay=delay, + specificExtraNodeosArgs=specificArgs) is False: + errorExit("Failed to stand up eos cluster.") + + # Be sure all nodes start out connected (bios node omitted from diagram for brevity) + # node00 node01 node02 node03 node04 + # localhost:9876 -> localhost:9877 -> localhost:9878 -> localhost:9879 -> localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ + 
cluster.waitOnClusterSync(blockAdvancing=5) + # Shut down bios node, which is connected to all other nodes in all topologies + cluster.biosNode.kill(signal.SIGTERM) + # Shut down second node, interrupting the default connections between it and nodes 00 and 02 + cluster.getNode(1).kill(signal.SIGTERM) + # Shut down the fourth node, interrupting the default connections between it and nodes 02 and 04 + cluster.getNode(3).kill(signal.SIGTERM) + # Be sure all remaining nodes continue to sync via the two listen ports on node 00 + # node00 node01 node02 node03 node04 + # localhost:9876 offline localhost:9878 offline localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ + cluster.waitOnClusterSync(blockAdvancing=5) + connections = cluster.nodes[0].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + if conn['last_handshake']['agent'] == 'node-02': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9878', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9878" + elif conn['last_handshake']['agent'] == 'node-04': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9880', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9880" + assert open_socket_count == 2, 'Node 0 is expected to have only two open sockets' + + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip0:20000', f"Connected 
node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip0:20000" + assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + + connections = cluster.nodes[4].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identified as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip1:20001', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip1:20001" + assert open_socket_count == 1, 'Node 4 is expected to have only one open socket' + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/p2p_no_listen_test.py b/tests/p2p_no_listen_test.py new file mode 100755 index 0000000000..76b3c76886 --- /dev/null +++ b/tests/p2p_no_listen_test.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +import errno +import pathlib +import shutil +import signal +import socket +import time + +from TestHarness import Node, TestHelper, Utils + +############################################################### +# p2p_no_listen_test +# +# Test nodeos disabling p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"--keep-logs","-v","--leave-running","--unshared"}) +debug=args.v + +Utils.Debug=debug +testSuccessful=False + +try: + TestHelper.printSystemInfo("BEGIN") + + cmd = [ + Utils.EosServerPath, + '-e', + '-p', + 'eosio', + '--p2p-listen-endpoint', + '', + '--plugin', + 'eosio::chain_api_plugin', + '--config-dir', + Utils.ConfigDir, + '--data-dir', + Utils.DataDir, + '--http-server-address', + 
'localhost:8888' + ] + node = Node('localhost', '8888', '00', data_dir=pathlib.Path(Utils.DataDir), + config_dir=pathlib.Path(Utils.ConfigDir), cmd=cmd) + + time.sleep(1) + if not node.verifyAlive(): + raise RuntimeError + time.sleep(10) + node.waitForBlock(5) + + s = socket.socket() + err = s.connect_ex(('localhost',9876)) + assert err == errno.ECONNREFUSED, 'Connection to port 9876 must be refused' + + testSuccessful=True +finally: + Utils.ShuttingDown=True + + if not args.leave_running: + node.kill(signal.SIGTERM) + + if not (args.leave_running or args.keep_logs or not testSuccessful): + shutil.rmtree(Utils.DataPath, ignore_errors=True) + + if testSuccessful: + Utils.Print("Test succeeded.") + else: + Utils.Print("Test failed.") + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index db36eea1e9..49134a54a7 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -88,90 +88,101 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { } void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { - fc::scoped_exit> on_exit = []() { - chain::wasm_interface_collection::test_disable_tierup = false; - }; - chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; - - using namespace std::chrono_literals; - fc::temp_directory temp; - appbase::scoped_app app; - auto temp_dir_str = temp.path().string(); - producer_plugin::set_test_mode(true); - - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); - app->initialize( argv.size(), (char**) &argv[0] ); - app->find_plugin()->chain(); - app->startup(); - 
plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); - } ); - - auto[prod_plug, chain_plug] = plugin_fut.get(); - - activate_protocol_features_set_bios_contract(app, chain_plug); - - std::atomic next_calls = 0; - std::atomic num_get_account_calls = 0; - std::atomic num_posts = 0; - std::atomic trace_with_except = 0; - std::atomic trx_match = true; - const size_t num_pushes = 4242; - - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; - } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - 
chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); - } - - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms );; - } - - app->quit(); - app_thread.join(); - - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it - BOOST_CHECK_EQUAL( num_pushes, num_posts ); - BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); - BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); - BOOST_CHECK( trx_match.load() ); // trace should match the transaction + try { + fc::scoped_exit> on_exit = []() { + chain::wasm_interface_collection::test_disable_tierup = false; + }; + chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; + + using namespace std::chrono_literals; + fc::temp_directory temp; + appbase::scoped_app app; + auto temp_dir_str = temp.path().string(); + producer_plugin::set_test_mode(true); + + std::atomic next_calls = 0; + std::atomic num_get_account_calls = 0; + std::atomic num_posts = 0; + std::atomic trace_with_except = 0; + std::atomic trx_match = true; + const size_t num_pushes = 4242; + + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + 
plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); + + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; + } + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { + 
chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); + } + + BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it + BOOST_CHECK_EQUAL( num_pushes, num_posts ); + BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); + BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); + BOOST_CHECK( trx_match.load() ); // trace should match the transaction + } FC_LOG_AND_RETHROW() } // test read-only trxs on main thread (no --read-only-threads) diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index bb8f6d8742..84c4410d5d 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -61,15 +61,19 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread([&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), - "-p", "eosio", "-e"}; - app->initialize(argv.size(), (char**) &argv[0]); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()}); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), + "-p", "eosio", "-e"}; + app->initialize(argv.size(), (char**) &argv[0]); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), 
app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); }); auto [prod_plug, chain_plug] = plugin_fut.get(); diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp index d14815580e..974fee3927 100644 --- a/unittests/test_utils.hpp +++ b/unittests/test_utils.hpp @@ -4,11 +4,15 @@ #include #include #include +#include #include #include #include +#include + #include +#include #include #include #include @@ -60,7 +64,7 @@ auto make_bios_ro_trx(eosio::chain::controller& control) { // Push an input transaction to controller and return trx trace // If account is eosio then signs with the default private key -auto push_input_trx(eosio::chain::controller& control, account_name account, signed_transaction& trx) { +auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, signed_transaction& trx) { trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)}; trx.set_reference_block( control.head_block_id() ); if (account == config::system_account_name) { @@ -70,13 +74,42 @@ auto push_input_trx(eosio::chain::controller& control, account_name account, sig trx.sign(testing::tester::get_private_key(account, "active"), control.get_chain_id()); } auto ptrx = std::make_shared( trx, packed_transaction::compression_type::zlib ); - auto fut = transaction_metadata::start_recover_keys( ptrx, control.get_thread_pool(), control.get_chain_id(), fc::microseconds::maximum(), transaction_metadata::trx_type::input ); - auto r = control.push_transaction( fut.get(), fc::time_point::maximum(), fc::microseconds::maximum(), 0, false, 0 ); - return r; + + auto trx_promise = std::make_shared>(); + std::future trx_future = trx_promise->get_future(); + + app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() { + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::input, // trx_type + true, // 
return_failure_traces + [trx_promise](const next_function_variant& result) { + if( std::holds_alternative( result ) ) { + try { + std::get(result)->dynamic_rethrow_exception(); + } catch(...) { + trx_promise->set_exception(std::current_exception()); + } + } else if ( std::get( result )->except ) { + try { + std::get(result)->except->dynamic_rethrow_exception(); + } catch(...) { + trx_promise->set_exception(std::current_exception()); + } + } else { + trx_promise->set_value(std::get(result)); + } + }); + }); + + if (trx_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) + throw std::runtime_error("failed to execute trx: " + ptrx->get_transaction().actions.at(0).name.to_string() + " to account: " + account.to_string()); + + return trx_future.get(); } // Push setcode trx to controller and return trx trace -auto set_code(eosio::chain::controller& control, account_name account, const vector& wasm) { +auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, const vector& wasm) { signed_transaction trx; trx.actions.emplace_back(std::vector{{account, config::active_name}}, chain::setcode{ @@ -85,56 +118,56 @@ auto set_code(eosio::chain::controller& control, account_name account, const vec .vmversion = 0, .code = bytes(wasm.begin(), wasm.end()) }); - return push_input_trx(control, account, trx); + return push_input_trx(app, control, account, trx); } void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) { using namespace appbase; - std::promise feature_promise; - std::future feature_future = feature_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &feature_promise](){ - const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); - auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); - BOOST_CHECK( preactivate_feature_digest ); - 
chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); - std::vector pfs{ - builtin_protocol_feature_t::only_link_to_existing_permission, - builtin_protocol_feature_t::replace_deferred, - builtin_protocol_feature_t::no_duplicate_deferred_id, - builtin_protocol_feature_t::fix_linkauth_restriction, - builtin_protocol_feature_t::disallow_empty_producer_schedule, - builtin_protocol_feature_t::restrict_action_to_self, - builtin_protocol_feature_t::only_bill_first_authorizer, - builtin_protocol_feature_t::forward_setcode, - builtin_protocol_feature_t::get_sender, - builtin_protocol_feature_t::ram_restrictions, - builtin_protocol_feature_t::webauthn_key, - builtin_protocol_feature_t::wtmsig_block_signatures }; - for (const auto t : pfs) { - auto feature_digest = pfm.get_builtin_digest(t); - BOOST_CHECK( feature_digest ); - chain_plug->chain().preactivate_feature( *feature_digest, false ); - } - feature_promise.set_value(); - }); + auto feature_set = std::make_shared>(false); + // has to execute when pending block is not null + for (int tries = 0; tries < 100; ++tries) { + app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){ + try { + if (!chain_plug->chain().is_building_block() || *feature_set) + return; + const auto& pfm = chain_plug->chain().get_protocol_feature_manager(); + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + BOOST_CHECK( preactivate_feature_digest ); + chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false ); + std::vector pfs{ + builtin_protocol_feature_t::only_link_to_existing_permission, + builtin_protocol_feature_t::replace_deferred, + builtin_protocol_feature_t::no_duplicate_deferred_id, + builtin_protocol_feature_t::fix_linkauth_restriction, + builtin_protocol_feature_t::disallow_empty_producer_schedule, + builtin_protocol_feature_t::restrict_action_to_self, + 
builtin_protocol_feature_t::only_bill_first_authorizer, + builtin_protocol_feature_t::forward_setcode, + builtin_protocol_feature_t::get_sender, + builtin_protocol_feature_t::ram_restrictions, + builtin_protocol_feature_t::webauthn_key, + builtin_protocol_feature_t::wtmsig_block_signatures }; + for (const auto t : pfs) { + auto feature_digest = pfm.get_builtin_digest(t); + BOOST_CHECK( feature_digest ); + chain_plug->chain().preactivate_feature( *feature_digest, false ); + } + *feature_set = true; + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"exception setting protocol features"); + }); + if (*feature_set) + break; + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } // Wait for next block std::this_thread::sleep_for( std::chrono::milliseconds(config::block_interval_ms) ); - if (feature_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to preactivate features"); - - std::promise setcode_promise; - std::future setcode_future = setcode_promise.get_future(); - app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, &setcode_promise](){ - auto r = set_code(chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); - BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); - setcode_promise.set_value(); - }); - - if (setcode_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout) - throw std::runtime_error("failed to setcode"); + auto r = set_code(app, chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm()); + BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed); }