From b42df934d7fc05050af96050bf5a2ab51f1f9239 Mon Sep 17 00:00:00 2001
From: 766C6164
Date: Wed, 31 May 2023 16:21:11 -0400
Subject: [PATCH 01/22] Added better handling of zero/empty params

---
 .../eosio/chain/snapshot_scheduler.hpp        | 12 +++++++-
 libraries/chain/snapshot_scheduler.cpp        | 28 ++-----------------
 .../producer_api_plugin.cpp                   |  2 +-
 .../eosio/producer_plugin/producer_plugin.hpp |  2 +-
 plugins/producer_plugin/producer_plugin.cpp   | 13 ++++++++-
 tests/test_snapshot_scheduler.cpp             | 26 ++++++++---------
 6 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
index 2aebf62c9b..f9b3b421cc 100644
--- a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
+++ b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
@@ -38,10 +38,19 @@ class snapshot_scheduler {
    struct snapshot_request_information {
       uint32_t block_spacing = 0;
       uint32_t start_block_num = 0;
-      uint32_t end_block_num = 0;
+      uint32_t end_block_num = UINT32_MAX - 1;
      std::string snapshot_description = "";
   };
 
+   // this struct is used to hold request params in api calls;
+   // it differentiates between 0 and empty values
+   struct snapshot_request_params {
+      std::optional<uint32_t> block_spacing;
+      std::optional<uint32_t> start_block_num;
+      std::optional<uint32_t> end_block_num;
+      std::optional<std::string> snapshot_description;
+   };
+
    struct snapshot_request_id_information {
       uint32_t snapshot_request_id = 0;
    };
@@ -205,6 +214,7 @@ class snapshot_scheduler {
 FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_information, (head_block_id) (head_block_num) (head_block_time) (version) (snapshot_name))
 FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_information, (block_spacing) (start_block_num) (end_block_num) (snapshot_description))
+FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_params, (block_spacing) (start_block_num) (end_block_num) (snapshot_description))
 FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_id_information, (snapshot_request_id))
 FC_REFLECT(eosio::chain::snapshot_scheduler::get_snapshot_requests_result, (snapshot_requests))
 FC_REFLECT_DERIVED(eosio::chain::snapshot_scheduler::snapshot_schedule_information, (eosio::chain::snapshot_scheduler::snapshot_request_id_information)(eosio::chain::snapshot_scheduler::snapshot_request_information), (pending_snapshots))

diff --git a/libraries/chain/snapshot_scheduler.cpp b/libraries/chain/snapshot_scheduler.cpp
index ee7a356fe0..bd92230d90 100644
--- a/libraries/chain/snapshot_scheduler.cpp
+++ b/libraries/chain/snapshot_scheduler.cpp
@@ -8,7 +8,6 @@ namespace eosio::chain {
 
 // snapshot_scheduler_listener
 void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chain) {
-   bool serialize_needed = false;
    bool snapshot_executed = false;
 
    auto execute_snapshot_with_log = [this, height, &snapshot_executed, &chain](const auto& req) {
@@ -28,18 +27,7 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai
       bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing));
       bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1);
 
-      // assume "asap" for snapshot with missed/zero start, it can have spacing
-      if(!req.start_block_num) {
-         // update start_block_num with current height only if this is recurring
-         // if non recurring, will be executed and unscheduled
-         if(req.block_spacing && height) {
-            auto& snapshot_by_id = _snapshot_requests.get<by_snapshot_id>();
-            auto it = snapshot_by_id.find(req.snapshot_request_id);
-            _snapshot_requests.modify(it, [&height](auto& p) { p.start_block_num = height - 1; });
-            serialize_needed = true;
-         }
-         execute_snapshot_with_log(req);
-      } else if(recurring_snapshot || onetime_snapshot) {
+      if(recurring_snapshot || onetime_snapshot) {
          execute_snapshot_with_log(req);
       }
 
@@ -54,9 +42,6 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai
    for(const auto& i: unschedule_snapshot_request_ids) {
       unschedule_snapshot(i);
    }
-
-   // store db to filesystem
-   if(serialize_needed) x_serialize();
 }
 
 void snapshot_scheduler::on_irreversible_block(const signed_block_ptr& lib, const chain::controller& chain) {
@@ -80,15 +65,8 @@ snapshot_scheduler::snapshot_schedule_result snapshot_scheduler::schedule_snapsh
    auto& snapshot_by_value = _snapshot_requests.get<by_snapshot_value>();
    auto existing = snapshot_by_value.find(std::make_tuple(sri.block_spacing, sri.start_block_num, sri.end_block_num));
    EOS_ASSERT(existing == snapshot_by_value.end(), chain::duplicate_snapshot_request, "Duplicate snapshot request");
-
-   if(sri.end_block_num > 0) {
-      // if "end" is specified, it should be greater then start
-      EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block number");
-      // if also block_spacing specified, check it
-      if(sri.block_spacing > 0) {
-         EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range");
-      }
-   }
+   EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block number");
+   EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range");
 
    _snapshot_requests.emplace(snapshot_schedule_information{{_snapshot_id++}, {sri.block_spacing, sri.start_block_num, sri.end_block_num, sri.snapshot_description}, {}});
    x_serialize();

diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp
index 0b825a10fd..65cbe2f58b 100644
--- a/plugins/producer_api_plugin/producer_api_plugin.cpp
+++ b/plugins/producer_api_plugin/producer_api_plugin.cpp
@@ -132,7 +132,7 @@ void producer_api_plugin::plugin_startup() {
        CALL_ASYNC(producer, snapshot, producer, create_snapshot, chain::snapshot_scheduler::snapshot_information,
                   INVOKE_R_V_ASYNC(producer, create_snapshot), 201),
        CALL_WITH_400(producer, snapshot, producer, schedule_snapshot,
-                     INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_information), 201),
+                     INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_params), 201),
        CALL_WITH_400(producer, snapshot, producer, unschedule_snapshot,
                      INVOKE_R_R(producer, unschedule_snapshot, chain::snapshot_scheduler::snapshot_request_id_information), 201),
        CALL_WITH_400(producer, producer_rw, producer, get_integrity_hash,

diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
index 84fa2a9f2b..823266d1fa 100644
--- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
+++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
@@ -98,7 +98,7 @@ class producer_plugin : public appbase::plugin<producer_plugin> {
       integrity_hash_information get_integrity_hash() const;
       void create_snapshot(next_function<chain::snapshot_scheduler::snapshot_information> next);
-      chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& schedule);
+      chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_params& srp);
       chain::snapshot_scheduler::snapshot_schedule_result unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& schedule);
       chain::snapshot_scheduler::get_snapshot_requests_result get_snapshot_requests() const;

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 81267e8c2f..b42ab50197 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1511,7 +1511,18 @@ void producer_plugin::create_snapshot(producer_plugin::next_function<chain::snap
 }
 
 chain::snapshot_scheduler::snapshot_schedule_result
-producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& sri) {
+producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_params& srp) {
+   chain::controller& chain = my->chain_plug->chain();
+   const auto head_block_num = chain.head_block_num();
+
+   // missing start/end is set to head block num, missing end to UINT32_MAX
+   chain::snapshot_scheduler::snapshot_request_information sri = {
+      .block_spacing = srp.block_spacing ? *srp.block_spacing : 0,
+      .start_block_num = srp.start_block_num ? *srp.start_block_num : head_block_num,
+      .end_block_num = srp.end_block_num ? *srp.end_block_num : UINT32_MAX - 1,
+      .snapshot_description = srp.snapshot_description ? *srp.snapshot_description : ""
+   };
+
    return my->_snapshot_scheduler.schedule_snapshot(sri);
 }

diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp
index 0b66d8865b..1844bee776 100644
--- a/tests/test_snapshot_scheduler.cpp
+++ b/tests/test_snapshot_scheduler.cpp
@@ -8,13 +8,14 @@
 using namespace eosio;
 using namespace eosio::chain;
 
 using snapshot_request_information = snapshot_scheduler::snapshot_request_information;
+using snapshot_request_params = snapshot_scheduler::snapshot_request_params;
 using snapshot_request_id_information = snapshot_scheduler::snapshot_request_id_information;
 
 BOOST_AUTO_TEST_SUITE(producer_snapshot_scheduler_tests)
 
 BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
    fc::logger log;
-   producer_plugin scheduler;
+   snapshot_scheduler scheduler;
 
    {
       // add/remove test
@@ -30,19 +31,14 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
          return e.to_detail_string().find("Duplicate snapshot request") != std::string::npos;
       });
 
-      snapshot_request_id_information sri_delete_1 = {.snapshot_request_id = 0};
-      scheduler.unschedule_snapshot(sri_delete_1);
-
+      scheduler.unschedule_snapshot(0);
       BOOST_CHECK_EQUAL(1, scheduler.get_snapshot_requests().snapshot_requests.size());
 
-      snapshot_request_id_information sri_delete_none = {.snapshot_request_id = 2};
-      BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(sri_delete_none), snapshot_request_not_found, [](const fc::assert_exception& e) {
+      BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(0), snapshot_request_not_found, [](const fc::assert_exception& e) {
         return e.to_detail_string().find("Snapshot request not found") != std::string::npos;
      });
 
-      snapshot_request_id_information sri_delete_2 = {.snapshot_request_id = 1};
-      scheduler.unschedule_snapshot(sri_delete_2);
-
+      scheduler.unschedule_snapshot(1);
       BOOST_CHECK_EQUAL(0, scheduler.get_snapshot_requests().snapshot_requests.size());
 
       snapshot_request_information sri_large_spacing = {.block_spacing = 1000, .start_block_num = 5000, .end_block_num = 5010};
@@ -104,11 +100,11 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
          }
      });
 
-      snapshot_request_information sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"};
-      snapshot_request_information sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that will never happen"};
-      snapshot_request_information sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"};
-      snapshot_request_information sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"};
-      snapshot_request_information sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"};
+      snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"};
+      snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that will never happen"};
+      snapshot_request_params sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"};
+      snapshot_request_params sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"};
+      snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"};
 
       pp->schedule_snapshot(sri1);
       pp->schedule_snapshot(sri2);
       pp->schedule_snapshot(sri3);
@@ -142,7 +138,7 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
         db.set_path(temp / "snapshots");
         db >> ssi;
         BOOST_CHECK_EQUAL(3, ssi.size());
-        BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, sri1.block_spacing);
+        BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, *sri1.block_spacing);
      } catch(...) {
         throw;
      }

From 3382c4953052cfeaba00f6b2f4e7387d24493094 Mon Sep 17 00:00:00 2001
From: 766C6164
Date: Wed, 31 May 2023 16:42:36 -0400
Subject: [PATCH 02/22] fix

---
 plugins/producer_plugin/producer_plugin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index b42ab50197..b4020f5c72 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1518,7 +1518,7 @@ producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_req
    // missing start/end is set to head block num, missing end to UINT32_MAX
    chain::snapshot_scheduler::snapshot_request_information sri = {
       .block_spacing = srp.block_spacing ? *srp.block_spacing : 0,
-      .start_block_num = srp.start_block_num ? *srp.start_block_num : head_block_num,
+      .start_block_num = srp.start_block_num ? *srp.start_block_num : head_block_num + 1,
       .end_block_num = srp.end_block_num ? *srp.end_block_num : UINT32_MAX - 1,
       .snapshot_description = srp.snapshot_description ? *srp.snapshot_description : ""
    };

From 618915c30f26cd49e8aa90fba221dde20dfc17df Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Wed, 31 May 2023 18:34:02 -0400
Subject: [PATCH 03/22] allow ROtrx threads unlimited time to start

---
 plugins/producer_plugin/producer_plugin.cpp | 33 +++++++++++++--------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index d4bd85fbb1..896ec39aec 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1266,26 +1266,33 @@ void producer_plugin::plugin_startup()
    }
 
    if ( my->_ro_thread_pool_size > 0 ) {
-      std::atomic<uint32_t> num_threads_started = 0;
+      std::atomic<uint32_t> threads_remaining = my->_ro_thread_pool_size;
+      std::exception_ptr ep;
+      std::mutex ep_mutex;
+      std::promise<void> done_promise;
+
       my->_ro_thread_pool.start( my->_ro_thread_pool_size,
                                  []( const fc::exception& e ) {
                                     fc_elog( _log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string()) );
                                     app().quit();
                                  },
                                  [&]() {
-                                    chain.init_thread_local_data();
-                                    ++num_threads_started;
-                                 });
+                                    try {
+                                       chain.init_thread_local_data();
+                                    }
+                                    catch(...) {
+                                       std::lock_guard l(ep_mutex);
+                                       ep = std::current_exception();
+                                    }
 
-      // This will be changed with std::latch or std::atomic<>::wait
-      // when C++20 is used.
-      auto time_slept_ms = 0;
-      constexpr auto max_time_slept_ms = 1000;
-      while ( num_threads_started.load() < my->_ro_thread_pool_size && time_slept_ms < max_time_slept_ms ) {
-         std::this_thread::sleep_for( 1ms );
-         ++time_slept_ms;
-      }
-      EOS_ASSERT(num_threads_started.load() == my->_ro_thread_pool_size, producer_exception,
-                 "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms",
-                 ("n", num_threads_started.load())("t", time_slept_ms));
+                                    if(threads_remaining.fetch_sub(1u) == 1u) {
+                                       if(ep)
+                                          done_promise.set_exception(ep);
+                                       else
+                                          done_promise.set_value();
+                                    }
+                                 });
+      done_promise.get_future().wait();
 
       my->start_write_window();
    }

From 3902a9e0e1af64665203bb698b4aeda3d1d477e1 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Thu, 1 Jun 2023 23:07:01 -0400
Subject: [PATCH 04/22] move blocking start&init to named_thread_pool's start()

---
 .../include/eosio/chain/thread_utils.hpp    | 60 +++++++++++++++----
 plugins/producer_plugin/producer_plugin.cpp | 21 +------
 2 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp
index 9dfd988d0c..3cf4d8f223 100644
--- a/libraries/chain/include/eosio/chain/thread_utils.hpp
+++ b/libraries/chain/include/eosio/chain/thread_utils.hpp
@@ -37,9 +37,13 @@ namespace eosio { namespace chain {
 
      /// Spawn threads, can be re-started after stop().
      /// Assumes start()/stop() called from the same thread or externally protected.
+     /// Blocks until all threads are created and completed their init function, or an exception is thrown
+     /// during thread startup or an init function. Exceptions thrown during these stages are rethrown from start()
+     /// but some threads might still have been started. Calling stop() after such a failure is safe.
      /// @param num_threads is number of threads spawned
      /// @param on_except is the function to call if io_context throws an exception, is called from thread pool thread.
-     ///                  if an empty function then logs and rethrows exception on thread which will terminate.
+     ///                  if an empty function then logs and rethrows exception on thread which will terminate. Not called
+     ///                  for exceptions during the init function (such exceptions are rethrown from start())
      /// @param init is an optional function to call at startup to initialize any data.
      /// @throw assert_exception if already started and not stopped.
      void start( size_t num_threads, on_except_t on_except, init_t init = {} ) {
@@ -47,9 +51,17 @@ namespace eosio { namespace chain {
         _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) );
         _ioc.restart();
         _thread_pool.reserve( num_threads );
+
+        std::promise<void> start_complete;
+        std::atomic<size_t> threads_remaining = num_threads;
+        std::exception_ptr pending_exception;
+        std::mutex pending_exception_mutex;
+
        for( size_t i = 0; i < num_threads; ++i ) {
-           _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init ) );
+           _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init, std::ref(start_complete),
+                                                   std::ref(threads_remaining), std::ref(pending_exception), std::ref(pending_exception_mutex) ) );
        }
+        start_complete.get_future().get();
     }
 
     /// destroy work guard, stop io_context, join thread_pool
@@ -63,16 +75,42 @@ namespace eosio { namespace chain {
     }
 
  private:
-     void run_thread( size_t i, const on_except_t& on_except, const init_t& init ) {
-        std::string tn = boost::core::demangle(typeid(this).name());
-        auto offset = tn.rfind("::");
-        if (offset != std::string::npos)
-           tn.erase(0, offset+2);
-        tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i );
+     void run_thread( size_t i, const on_except_t& on_except, const init_t& init, std::promise<void>& start_complete,
+                      std::atomic<size_t>& threads_remaining, std::exception_ptr& pending_exception, std::mutex& pending_exception_mutex ) {
+
+        std::string tn;
+
+        auto decrement_remaining = [&]() {
+           if( !--threads_remaining ) {
+              if( pending_exception )
+                 start_complete.set_exception( pending_exception );
+              else
+                 start_complete.set_value();
+           }
+        };
+
+        try {
+           try {
+              tn = boost::core::demangle(typeid(this).name());
+              auto offset = tn.rfind("::");
+              if (offset != std::string::npos)
+                 tn.erase(0, offset+2);
+              tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i );
+              fc::set_os_thread_name( tn );
+              if ( init )
+                 init();
+           } FC_LOG_AND_RETHROW()
+        }
+        catch( ... ) {
+           std::lock_guard l( pending_exception_mutex );
+           pending_exception = std::current_exception();
+           decrement_remaining();
+           return;
+        }
+
+        decrement_remaining();
+
        try {
-           fc::set_os_thread_name( tn );
-           if ( init )
-              init();
           _ioc.run();
        } catch( const fc::exception& e ) {
          if( on_except ) {

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 896ec39aec..d9121e11de 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1266,33 +1266,14 @@ void producer_plugin::plugin_startup()
    }
 
    if ( my->_ro_thread_pool_size > 0 ) {
-      std::atomic<uint32_t> threads_remaining = my->_ro_thread_pool_size;
-      std::exception_ptr ep;
-      std::mutex ep_mutex;
-      std::promise<void> done_promise;
-
       my->_ro_thread_pool.start( my->_ro_thread_pool_size,
                                  []( const fc::exception& e ) {
                                     fc_elog( _log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string()) );
                                     app().quit();
                                  },
                                  [&]() {
-                                    try {
-                                       chain.init_thread_local_data();
-                                    }
-                                    catch(...) {
-                                       std::lock_guard l(ep_mutex);
-                                       ep = std::current_exception();
-                                    }
-
-                                    if(threads_remaining.fetch_sub(1u) == 1u) {
-                                       if(ep)
-                                          done_promise.set_exception(ep);
-                                       else
-                                          done_promise.set_value();
-                                    }
+                                    chain.init_thread_local_data();
                                  });
-      done_promise.get_future().wait();
 
       my->start_write_window();
    }

From be76e1085da7b935fa955fc133e767535f9ae249 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 2 Jun 2023 13:21:37 -0400
Subject: [PATCH 05/22] catch and handle exceptions from std::thread's ctor

---
 .../chain/include/eosio/chain/thread_utils.hpp | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp
index 3cf4d8f223..3a4f0f1d15 100644
--- a/libraries/chain/include/eosio/chain/thread_utils.hpp
+++ b/libraries/chain/include/eosio/chain/thread_utils.hpp
@@ -57,9 +57,17 @@ namespace eosio { namespace chain {
        std::exception_ptr pending_exception;
        std::mutex pending_exception_mutex;
 
-        for( size_t i = 0; i < num_threads; ++i ) {
-           _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init, std::ref(start_complete),
-                                                   std::ref(threads_remaining), std::ref(pending_exception), std::ref(pending_exception_mutex) ) );
+        try {
+           for( size_t i = 0; i < num_threads; ++i ) {
+              _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init, std::ref(start_complete),
+                                                      std::ref(threads_remaining), std::ref(pending_exception), std::ref(pending_exception_mutex) ) );
+           }
+        }
+        catch( ... ) {
+           /// only an exception from std::thread's ctor should end up here. shut down all threads to ensure no
+           /// potential access to the promise, atomic, etc above performed after throwing out of start
+           stop();
+           throw;
        }
        start_complete.get_future().get();
     }

From 2389bc1cd7cbad434afade13d63c19018d1b1d7b Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Fri, 2 Jun 2023 12:59:49 -0500
Subject: [PATCH 06/22] Rename to include 'type' for the endpoint api, to
 differentiate it from upcoming additions, including specifying actual api
 endpoints for use.
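Before the test-harness renames below, the startup-synchronization pattern that patches 03-05 converge on in named_thread_pool::start() can be reduced to the following self-contained sketch. The names here (start_workers, worker, and so on) are hypothetical stand-ins for illustration, not the actual Leap API: the caller blocks on a promise, every worker runs its init function under try/catch, and the last worker to finish delivers either success or the first captured exception.

#include <atomic>
#include <exception>
#include <functional>
#include <future>
#include <mutex>
#include <thread>
#include <vector>

// Sketch of the pattern only; assumes num_threads > 0 and that the caller joins `pool` later.
inline void start_workers(std::size_t num_threads, const std::function<void()>& init,
                          std::vector<std::thread>& pool) {
   std::promise<void>       start_complete;            // caller blocks on this below
   std::atomic<std::size_t> threads_remaining = num_threads;
   std::exception_ptr       pending_exception;         // first failing init wins
   std::mutex               pending_exception_mutex;

   auto worker = [&]() {
      try {
         if (init)
            init();                                    // e.g. per-thread chain state setup
      } catch (...) {
         std::lock_guard g(pending_exception_mutex);
         if (!pending_exception)
            pending_exception = std::current_exception();
      }
      // The last thread through here reports the overall outcome exactly once;
      // the atomic decrement orders this read of pending_exception after all writes.
      if (--threads_remaining == 0) {
         if (pending_exception)
            start_complete.set_exception(pending_exception);
         else
            start_complete.set_value();
      }
      // ... a real pool thread would now enter its io_context::run() loop ...
   };

   for (std::size_t i = 0; i < num_threads; ++i)
      pool.emplace_back(worker);      // patch 05 additionally guards this loop with try/catch

   start_complete.get_future().get(); // blocks until every init ran; rethrows the first failure
}

Using the promise for both the success and the failure path is what lets patch 04 delete the old sleep-and-poll loop: start() waits exactly as long as the slowest init takes, and an init failure (or, with patch 05, a std::thread constructor failure) surfaces as an exception in the caller rather than as a timeout assert.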
---
 tests/TestHarness/Cluster.py                  |  2 +-
 .../launch_transaction_generators.py          | 17 +++++++++--------
 tests/performance_tests/CMakeLists.txt        |  2 +-
 tests/performance_tests/README.md             | 14 +++++++-------
 tests/performance_tests/performance_test.py   |  8 ++++----
 .../performance_test_basic.py                 | 18 +++++++++---------
 6 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py
index 32d06f4f66..bde3fb7d73 100644
--- a/tests/TestHarness/Cluster.py
+++ b/tests/TestHarness/Cluster.py
@@ -1562,7 +1562,7 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a
         self.preExistingFirstTrxFiles = glob.glob(f"{Utils.DataDir}/first_trx_*.txt")
         connectionPairList = [f"{self.host}:{self.getNodeP2pPort(nodeId)}"]
-        tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList, endpointApi="p2p")
+        tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList, endpointApiType="p2p")
         self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id,
                                                             contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)),
                                                             privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir,

diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py
index 3f10e35f92..cfeb7b3c26 100644
--- a/tests/TestHarness/launch_transaction_generators.py
+++ b/tests/TestHarness/launch_transaction_generators.py
@@ -16,7 +16,7 @@
 class TpsTrxGensConfig:
 
-    def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list, endpointApi: str):
+    def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list, endpointApiType: str):
         self.targetTps: int = targetTps
         self.tpsLimitPerGenerator: int = tpsLimitPerGenerator
         self.connectionPairList = connectionPairList
@@ -27,7 +27,7 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList
         self.modTps = self.targetTps % self.numGenerators
         self.cleanlyDivisible = self.modTps == 0
         self.incrementPoint = self.numGenerators + 1 - self.modTps
-        self.endpointApi = endpointApi
+        self.endpointApiType = endpointApiType
 
         self.targetTpsPerGenList = []
         curTps = self.initialTpsPerGenerator
@@ -68,7 +68,7 @@ def launch(self, waitToComplete=True):
                 '--trx-gen-duration', f'{self.trxGenDurationSec}',
                 '--target-tps', f'{targetTps}',
                 '--log-dir', f'{self.logDir}',
-                '--peer-endpoint-type', f'{self.tpsTrxGensConfig.endpointApi}',
+                '--peer-endpoint-type', f'{self.tpsTrxGensConfig.endpointApiType}',
                 '--peer-endpoint', f'{connectionPair[0]}',
                 '--port', f'{connectionPair[1]}']
             if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None:
@@ -106,10 +106,11 @@ def parseArgs():
     parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use")
     parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containing authAcctName to activePrivateKey pairs.")
     parser.add_argument("connection_pair_list", type=str, help="Comma separated list of endpoint:port combinations to send transactions to", default="localhost:9876")
-    parser.add_argument("endpoint_api", type=str, help="Endpoint API mode (\"p2p\", \"http\"). \
-                        In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \
-                        In \"http\" mode transactions will be directed to the http endpoint on an api node.",
-                        choices=["p2p", "http"], default="p2p")
+    parser.add_argument("endpoint_api_type", type=str, help="Endpoint API mode (\"p2p\", \"http\"). \
+                        In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \
+                        In \"http\" mode transactions will be directed to the http endpoint on an api node.",
+                        choices=["p2p", "http"], default="p2p")
+
     args = parser.parse_args()
     return args
@@ -123,7 +124,7 @@ def main():
                                                        privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir,
                                                        abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths,
                                                        tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator,
-                                                                                         connectionPairList=connectionPairList, endpointApi=args.endpoint_api))
+                                                                                         connectionPairList=connectionPairList, endpointApiType=args.endpoint_api_type))
 
     exit_codes = trxGenLauncher.launch()

diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt
index 0f6d5e740d..d18eccf824 100644
--- a/tests/performance_tests/CMakeLists.txt
+++ b/tests/performance_tests/CMakeLists.txt
@@ -17,7 +17,7 @@ add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_te
 add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})

diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md
index 961d3e9e79..d75e6ee28b 100644
--- a/tests/performance_tests/README.md
+++ b/tests/performance_tests/README.md
@@ -459,7 +459,7 @@ Advanced Configuration Options:
 ```
 usage: performance_test.py testBpOpMode overrideBasicTestConfig
        [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared]
-       [--endpoint-api {p2p,http}]
+       [--endpoint-api-type {p2p,http}]
        [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES]
        [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR]
        [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE]
@@ -508,7 +508,7 @@ Test Helper Arguments:
 Performance Test Basic Base:
   Performance Test Basic base configuration items.
 
-  --endpoint-api {p2p,http}
+  --endpoint-api-type {p2p,http}
                         Endpoint API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node.
   --producer-nodes PRODUCER_NODES
                         Producing nodes count
@@ -604,7 +604,7 @@ The following scripts are typically used by the Performance Harness main script
 
 usage: performance_test_basic.py [-h] [-d D] [--dump-error-details] [-v]
                                  [--leave-running] [--unshared]
-                                 [--endpoint-api {p2p,http}]
+                                 [--endpoint-api-type {p2p,http}]
                                  [--producer-nodes PRODUCER_NODES]
                                  [--validation-nodes VALIDATION_NODES]
                                  [--api-nodes API_NODES]
@@ -662,7 +662,7 @@ Test Helper Arguments:
 Performance Test Basic Base:
   Performance Test Basic base configuration items.
 
-  --endpoint-api {p2p,http}
+  --endpoint-api-type {p2p,http}
                         Endpoint API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node.
                         (default: p2p)
   --producer-nodes PRODUCER_NODES
@@ -1729,7 +1729,7 @@ Finally, the full detail test report for each of the determined max TPS throughp
     "calcChainThreads": "lmax",
     "calcNetThreads": "lmax",
     "userTrxDataFile": null,
-    "endpointApi": "p2p",
+    "endpointApiType": "p2p",
     "opModeCmd": "testBpOpMode",
     "logDirBase": "performance_test",
     "logDirTimestamp": "2023-05-17_21-28-39",
@@ -1758,7 +1758,7 @@ The Performance Test Basic generates, by default, a report that details results
 ``` json
 {
-  "targetApiEndpoint": "p2p",
+  "targetApiEndpointType": "p2p",
   "Result": {
     "testStart": "2023-05-17T23:05:38.835496",
     "testEnd": "2023-05-17T23:07:01.937623",
@@ -2393,7 +2393,7 @@ The Performance Test Basic generates, by default, a report that details results
     "expectedTransactionsSent": 140010,
     "printMissingTransactions": false,
     "userTrxDataFile": null,
-    "endpointApi": "p2p",
+    "endpointApiType": "p2p",
     "logDirBase": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test",
     "logDirTimestamp": "2023-05-17_23-05-38",
     "logDirTimestampedOptSuffix": "-14001",

diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py
index e845e128c2..7a7696b815 100755
--- a/tests/performance_tests/performance_test.py
+++ b/tests/performance_tests/performance_test.py
@@ -48,7 +48,7 @@ class PtConfig:
     calcChainThreads: str="none"
     calcNetThreads: str="none"
     userTrxDataFile: Path=None
-    endpointApi: str="p2p"
+    endpointApiType: str="p2p"
     opModeCmd: str=""
 
     def __post_init__(self):
@@ -113,7 +113,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf
             scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling)
             ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator,
                                                        numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport,
-                                                       quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi)
+                                                       quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApiType=self.ptConfig.endpointApiType)
 
             myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test")
             myTest.runTest()
@@ -155,7 +155,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe
             scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling)
             ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator,
                                                        numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport,
-                                                       quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi)
+                                                       quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApiType=self.ptConfig.endpointApiType)
 
             myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test")
             myTest.runTest()
@@ -546,7 +546,7 @@ def main():
                         calcChainThreads=args.calc_chain_threads, calcNetThreads=args.calc_net_threads,
                         userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
-                        endpointApi=args.endpoint_api,
+                        endpointApiType=args.endpoint_api_type,
                         opModeCmd=args.op_mode_sub_cmd)
 
     myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig)

diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py
index f750d7af43..22b40c0a6c 100755
--- a/tests/performance_tests/performance_test_basic.py
+++ b/tests/performance_tests/performance_test_basic.py
@@ -165,7 +165,7 @@ class PtbConfig:
     expectedTransactionsSent: int = field(default_factory=int, init=False)
     printMissingTransactions: bool=False
     userTrxDataFile: Path=None
-    endpointApi: str="p2p"
+    endpointApiType: str="p2p"
 
     def __post_init__(self):
         self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps
@@ -387,10 +387,10 @@ def runTpsTest(self) -> PtbTpsTestResult:
         self.connectionPairList = []
 
         def configureConnections():
-            if(self.ptbConfig.endpointApi == "http"):
+            if(self.ptbConfig.endpointApiType == "http"):
                 for apiNodeId in self.clusterConfig._apiNodeIds:
                     self.connectionPairList.append(f"{self.cluster.getNode(apiNodeId).host}:{self.cluster.getNode(apiNodeId).port}")
-            else: # endpointApi == p2p
+            else: # endpointApiType == p2p
                 for producerId in self.clusterConfig._producerNodeIds:
                     self.connectionPairList.append(f"{self.cluster.getNode(producerId).host}:{self.cluster.getNodeP2pPort(producerId)}")
@@ -439,7 +439,7 @@ def configureConnections():
             self.cluster.biosNode.kill(signal.SIGTERM)
 
         self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal)
-        tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList, endpointApi=self.ptbConfig.endpointApi)
+        tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList, endpointApiType=self.ptbConfig.endpointApiType)
 
         self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id,
                                                                     contractOwnerAccount=self.clusterConfig.specifiedContract.account.name,
                                                                     accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)),
@@ -484,9 +484,9 @@ def captureLowLevelArtifacts(self):
             print(f"Failed to move '{self.cluster.nodeosLogPath}' to '{self.varLogsDirPath}': {type(e)}: {e}")
 
     def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, nodeosVers: str,
-                     targetApiEndpoint: str, testResult: PerfTestBasicResult) -> dict:
+                     targetApiEndpointType: str, testResult: PerfTestBasicResult) -> dict:
         report = {}
-        report['targetApiEndpoint'] = targetApiEndpoint
+        report['targetApiEndpointType'] = targetApiEndpointType
         report['Result'] = asdict(testResult)
         report['Analysis'] = {}
         report['Analysis']['BlockSize'] = asdict(logAnalysis.blockSizeStats)
@@ -551,7 +551,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult):
         print(f"testRunSuccessful: {self.testResult.testRunSuccessful} testPassed: {self.testResult.testPassed} tpsExpectationMet: {self.testResult.tpsExpectMet} trxExpectationMet: {self.testResult.trxExpectMet}")
 
         self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, nodeosVers=self.clusterConfig.nodeosVers,
-                                        targetApiEndpoint=self.ptbConfig.endpointApi, testResult=self.testResult)
+                                        targetApiEndpointType=self.ptbConfig.endpointApiType, testResult=self.testResult)
 
         jsonReport = None
         if not self.ptbConfig.quiet or not self.ptbConfig.delReport:
@@ -653,7 +653,7 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa
         ptbBaseGrpDescription="Performance Test Basic base configuration items."
         ptbBaseParserGroup = ptbBaseParser.add_argument_group(title=None if suppressHelp else ptbBaseGrpTitle, description=None if suppressHelp else ptbBaseGrpDescription)
-        ptbBaseParserGroup.add_argument("--endpoint-api", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpointt API mode (\"p2p\", \"http\"). \
+        ptbBaseParserGroup.add_argument("--endpoint-api-type", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpoint API mode (\"p2p\", \"http\"). \
                             In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \
                             In \"http\" mode transactions will be directed to the http endpoint on an api node.",
                             choices=["p2p", "http"], default=defEndpointApiDef)
@@ -760,7 +760,7 @@ def main():
                                           delPerfLogs=args.del_perf_logs,
                                           printMissingTransactions=args.print_missing_transactions,
                                           userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
-                                          endpointApi=args.endpoint_api)
+                                          endpointApiType=args.endpoint_api_type)
 
     myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig)

From 1da6a8373bfd55fe35a27f986485dae231062e7f Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Fri, 2 Jun 2023 13:07:17 -0500
Subject: [PATCH 07/22] Erase map entry if the resulting key word search turns
 up empty. Previously an empty placeholder was left in the map, which made the
 map register as non-empty and caused unnecessary work in the generator.
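The shape of this fix, reduced to a hypothetical self-contained sketch (the names are invented for illustration, not the actual trx_generator API): indexing a map creates a default-constructed placeholder entry, so an entry whose key-word scan came up empty has to be erased again before a later empty() check on the map can be trusted.

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// One action's searchable field values (hypothetical stand-in).
using field_list = std::vector<std::string>;

// Collect, per action index, the fields containing key_word. Indexing the map
// default-constructs a placeholder entry, so erase it again if nothing matched.
inline std::map<std::size_t, field_list>
find_key_word_fields(const std::vector<field_list>& actions, const std::string& key_word) {
   std::map<std::size_t, field_list> found;
   for (std::size_t i = 0; i < actions.size(); ++i) {
      field_list& hits = found[i];                // creates an empty entry up front
      for (const auto& f : actions[i])
         if (f.find(key_word) != std::string::npos)
            hits.push_back(f);
      if (hits.empty())
         found.erase(i);                          // the fix: drop the empty placeholder
   }
   return found;                                  // found.empty() now really means "no matches"
}

Without the erase, a map that matched nothing still reported itself non-empty, so the generator kept performing per-transaction substitution work for actions that had no fields to substitute.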
---
 tests/trx_generator/trx_generator.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp
index b6d92378b5..4464ef8bdb 100644
--- a/tests/trx_generator/trx_generator.cpp
+++ b/tests/trx_generator/trx_generator.cpp
@@ -146,6 +146,9 @@ namespace eosio::testing {
       for (size_t i = 0; i < action_array.size(); ++i) {
          auto action_mvo = fc::mutable_variant_object(action_array[i]);
          locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word);
+         if(acct_gen_fields_out[i].empty()) {
+            acct_gen_fields_out.erase(i);
+         }
      }
   }

From f7ebd6c1c5863ff912667474e5bf20d208db7574 Mon Sep 17 00:00:00 2001
From: 766C6164
Date: Fri, 2 Jun 2023 15:07:58 -0400
Subject: [PATCH 08/22] Addressed feedback

---
 .../eosio/chain/snapshot_scheduler.hpp        |  4 +++-
 libraries/chain/snapshot_scheduler.cpp        | 11 +++++----
 plugins/producer_plugin/producer_plugin.cpp   |  6 ++---
 tests/test_snapshot_scheduler.cpp             | 24 +++++++++++--------
 4 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
index f9b3b421cc..fd9475d438 100644
--- a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
+++ b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp
@@ -20,6 +20,8 @@
 #include
 #include
 
+#include <limits>
+
 namespace eosio::chain {
 
 namespace bmi = boost::multi_index;
@@ -38,7 +40,7 @@ class snapshot_scheduler {
    struct snapshot_request_information {
       uint32_t block_spacing = 0;
       uint32_t start_block_num = 0;
-      uint32_t end_block_num = UINT32_MAX - 1;
+      uint32_t end_block_num = std::numeric_limits<uint32_t>::max();
      std::string snapshot_description = "";
   };

diff --git a/libraries/chain/snapshot_scheduler.cpp b/libraries/chain/snapshot_scheduler.cpp
index bd92230d90..38191222ac 100644
--- a/libraries/chain/snapshot_scheduler.cpp
+++ b/libraries/chain/snapshot_scheduler.cpp
@@ -24,17 +24,18 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai
    std::vector<uint32_t> unschedule_snapshot_request_ids;
    for(const auto& req: _snapshot_requests.get<0>()) {
       // -1 since it's called from start block
-      bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing));
-      bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1);
+      bool recurring_snapshot  = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing));
+      bool onetime_snapshot    = (!req.block_spacing) && (height == req.start_block_num + 1);
+
+      bool marked_for_deletion = ((!req.block_spacing) && (height >= req.start_block_num + 1)) ||  // if one time snapshot executed or scheduled for the past, it should be gone
+                                 (height > 0 && ((height-1) >= req.end_block_num));                // any snapshot can expire by end block num (end_block_num can be max value)
 
       if(recurring_snapshot || onetime_snapshot) {
          execute_snapshot_with_log(req);
       }
 
       // cleanup - remove expired (or invalid) request
-      if((!req.start_block_num && !req.block_spacing) ||
-         (!req.block_spacing && height >= (req.start_block_num + 1)) ||
-         (req.end_block_num > 0 && height >= (req.end_block_num + 1))) {
+      if(marked_for_deletion) {
          unschedule_snapshot_request_ids.push_back(req.snapshot_request_id);
       }
    }

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index b4020f5c72..bb58e8374a 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1515,11 +1515,11 @@ producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_req
    chain::controller& chain = my->chain_plug->chain();
    const auto head_block_num = chain.head_block_num();
 
-   // missing start/end is set to head block num, missing end to UINT32_MAX
-   chain::snapshot_scheduler::snapshot_request_information sri = {
+   // missing start/end is set to head block num, missing end to UINT32_MAX
+   chain::snapshot_scheduler::snapshot_request_information sri = {
       .block_spacing = srp.block_spacing ? *srp.block_spacing : 0,
       .start_block_num = srp.start_block_num ? *srp.start_block_num : head_block_num + 1,
-      .end_block_num = srp.end_block_num ? *srp.end_block_num : UINT32_MAX - 1,
+      .end_block_num = srp.end_block_num ? *srp.end_block_num : std::numeric_limits<uint32_t>::max(),
      .snapshot_description = srp.snapshot_description ? *srp.snapshot_description : ""
   };

diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp
index 1844bee776..7efae81ce2 100644
--- a/tests/test_snapshot_scheduler.cpp
+++ b/tests/test_snapshot_scheduler.cpp
@@ -83,42 +83,46 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
          if (!pp->get_snapshot_requests().snapshot_requests.empty()) {
             const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests;
 
-            auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num) {
+            auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0) {
               auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;});
               if (it != snapshot_requests.end()) {
                  auto& pending = it->pending_snapshots;
                  if (pending.size()==1) {
-                    BOOST_CHECK_EQUAL(block_num, pending.begin()->head_block_num);
+                    auto pbn = pending.begin()->head_block_num;
+                    BOOST_CHECK_EQUAL(block_num, spacing ? (spacing + (pbn%spacing)) : pbn);
                  }
                  return true;
               }
               return false;
            };
 
-            BOOST_REQUIRE(validate_snapshot_request(0, 9));      // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
-            BOOST_REQUIRE(validate_snapshot_request(4, 12));     // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12
+            BOOST_REQUIRE(validate_snapshot_request(0, 9, 8));   // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
+            BOOST_REQUIRE(validate_snapshot_request(4, 12, 10)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12
+            BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc
         }
      });
 
      snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"};
-     snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that will never happen"};
+     snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that wont happen in test"};
      snapshot_request_params sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"};
      snapshot_request_params sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"};
      snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"};
+     snapshot_request_params sri6 = {.block_spacing = 10, .start_block_num = 0, .snapshot_description = "Recurring every 10 blocks snapshot starting from 0"};
 
      pp->schedule_snapshot(sri1);
      pp->schedule_snapshot(sri2);
      pp->schedule_snapshot(sri3);
      pp->schedule_snapshot(sri4);
      pp->schedule_snapshot(sri5);
+     pp->schedule_snapshot(sri6);
 
-     // all five snapshot requests should be present now
-     BOOST_CHECK_EQUAL(5, pp->get_snapshot_requests().snapshot_requests.size());
+     // all six snapshot requests should be present now
+     BOOST_CHECK_EQUAL(6, pp->get_snapshot_requests().snapshot_requests.size());
 
-     empty_blocks_fut.wait_for(std::chrono::seconds(6));
+     empty_blocks_fut.wait_for(std::chrono::seconds(10));
 
      // two of the snapshots are done here, and the requests corresponding to them should be deleted
-     BOOST_CHECK_EQUAL(3, pp->get_snapshot_requests().snapshot_requests.size());
+     BOOST_CHECK_EQUAL(4, pp->get_snapshot_requests().snapshot_requests.size());
 
      // check whether no pending snapshots are present for a snapshot with id 0
      const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests;
@@ -137,7 +141,7 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
        std::vector<snapshot_scheduler::snapshot_schedule_information> ssi;
        db.set_path(temp / "snapshots");
        db >> ssi;
-       BOOST_CHECK_EQUAL(3, ssi.size());
+       BOOST_CHECK_EQUAL(4, ssi.size());
        BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, *sri1.block_spacing);
     } catch(...) {
        throw;

From 29bdea6adf51b55011618ead21e2218f4ee3383d Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Mon, 5 Jun 2023 10:14:19 -0500
Subject: [PATCH 09/22] Add read-only transaction support through user-defined
 transaction config. Add a configuration option for api-endpoint to allow
 targeting a specific http endpoint with transactions. Make the number of
 read-only threads for the api nodes configurable. Allow empty authorizations
 in a transaction action for read-only trx support.

---
 .../launch_transaction_generators.py          | 11 +++-
 tests/performance_tests/CMakeLists.txt        |  2 +
 tests/performance_tests/log_reader.py         | 17 +++++-
 .../performance_test_basic.py                 | 13 +++-
 tests/performance_tests/readOnlyTrxData.json  | 14 +++++
 tests/trx_generator/main.cpp                  |  1 +
 tests/trx_generator/trx_generator.cpp         | 11 +++-
 tests/trx_generator/trx_provider.cpp          | 35 +++++++++--
 tests/trx_generator/trx_provider.hpp          | 59 +++++++++++++++++--
 9 files changed, 142 insertions(+), 21 deletions(-)
 create mode 100644 tests/performance_tests/readOnlyTrxData.json

diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py
index cfeb7b3c26..6825ce071f 100644
--- a/tests/TestHarness/launch_transaction_generators.py
+++ b/tests/TestHarness/launch_transaction_generators.py
@@ -39,7 +39,7 @@ class TransactionGeneratorsLauncher:
 
     def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str,
-                 abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig):
+                 abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, apiEndpoint: str=None):
         self.chainId = chainId
         self.lastIrreversibleBlockId = lastIrreversibleBlockId
         self.contractOwnerAccount = contractOwnerAccount
@@ -51,6 +51,7 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco
         self.abiFile = abiFile
         self.actionsData = actionsData
         self.actionsAuths = actionsAuths
+        self.apiEndpoint = apiEndpoint
 
     def launch(self, waitToComplete=True):
         self.subprocess_ret_codes = []
@@ -75,6 +76,9 @@ def launch(self, waitToComplete=True):
                 popenStringList.extend(['--abi-file', f'{self.abiFile}',
                                         '--actions-data', f'{self.actionsData}',
                                         '--actions-auths', f'{self.actionsAuths}'])
+            if self.apiEndpoint is not None:
+                popenStringList.extend(['--api-endpoint', f'{self.apiEndpoint}'])
+
             if Utils.Debug: Print(f"Running trx_generator: {' '.join(popenStringList)}")
             self.subprocess_ret_codes.append(subprocess.Popen(popenStringList))
@@ -110,6 +114,8 @@ def parseArgs():
                         In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \
                         In \"http\" mode transactions will be directed to the http endpoint on an api node.",
                         choices=["p2p", "http"], default="p2p")
+    parser.add_argument("api_endpoint", type=str, help="The api endpoint to use to submit transactions. (Only used with http api nodes currently as p2p transactions are streamed)",
+                        default="/v1/chain/send_transaction2")
     args = parser.parse_args()
     return args
@@ -124,7 +130,8 @@ def main():
                                                        privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir,
                                                        abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths,
                                                        tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator,
-                                                                                         connectionPairList=connectionPairList, endpointApiType=args.endpoint_api_type))
+                                                                                         connectionPairList=connectionPairList, endpointApiType=args.endpoint_api_type),
+                                                       apiEndpoint=args.api_endpoint)
 
     exit_codes = trxGenLauncher.launch()

diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt
index d18eccf824..23f21a356a 100644
--- a/tests/performance_tests/CMakeLists.txt
+++ b/tests/performance_tests/CMakeLists.txt
@@ -4,6 +4,7 @@ configure_file(log_reader.py . COPYONLY)
 configure_file(genesis.json . COPYONLY)
 configure_file(cpuTrxData.json . COPYONLY)
 configure_file(ramTrxData.json . COPYONLY)
+configure_file(readOnlyTrxData.json . COPYONLY)
 configure_file(userTrxDataTransfer.json . COPYONLY)
 configure_file(userTrxDataNewAccount.json . COPYONLY)
 
@@ -22,6 +23,7 @@ add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performa
 add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_ex_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests)
 set_property(TEST performance_test_api PROPERTY LABELS long_running_tests)
 set_property(TEST performance_test_ex_cpu_trx_spec PROPERTY LABELS long_running_tests)

diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py
index e8472e950c..70bb605fc1 100644
--- a/tests/performance_tests/log_reader.py
+++ b/tests/performance_tests/log_reader.py
@@ -244,11 +244,21 @@ class sentTrx():
     acked: str = ""
     ackResponseTimeUs: int = -1
 
+@dataclass
+class sentTrxExtTrace():
+    sentTime: str = ""
+    acked: str = ""
+    ackResponseTimeUs: int = -1
+    blockNum: int = -1
+    cpuUsageUs: int = -1
+    netUsageWords: int = -1
+    blockTime: str = ""
+
 def scrapeTrxGenLog(trxSent: dict, path):
     #trxGenLogs/trx_data_output_*.txt
     selectedopen = selectedOpen(path)
     with selectedopen(path, 'rt') as f:
-        trxSent.update(dict([(x[0], sentTrx(x[1], x[2], x[3])) for x in (line.rstrip('\n').split(',') for line in f)]))
+        trxSent.update(dict([(x[0], sentTrx(x[1], x[2], x[3]) if len(x) == 4 else sentTrxExtTrace(x[1], x[2], x[3], x[4], x[5], x[6], x[7])) for x in (line.rstrip('\n').split(',') for line in f)]))
 
 def scrapeTrxGenTrxSentDataLogs(trxSent: dict, trxGenLogDirPath, quiet):
     filesScraped = []
@@ -261,7 +271,10 @@ def scrapeTrxGenTrxSentDataLogs(trxSent: dict, trxGenLogDirPath, quiet):
 def populateTrxSentAndAcked(trxSent: dict, trxDict: dict, notFound):
     for sentTrxId in trxSent.keys():
-        if sentTrxId in trxDict.keys():
+        if (isinstance(trxSent[sentTrxId], sentTrxExtTrace)):
+            trxDict[sentTrxId] = trxData(blockNum=trxSent[sentTrxId].blockNum, cpuUsageUs=trxSent[sentTrxId].cpuUsageUs, netUsageUs=trxSent[sentTrxId].netUsageWords, blockTime=trxSent[sentTrxId].blockTime, acknowledged=trxSent[sentTrxId].acked, ackRespTimeUs=trxSent[sentTrxId].ackResponseTimeUs)
+            trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime
+        elif sentTrxId in trxDict.keys():
             trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime
             trxDict[sentTrxId].acknowledged = trxSent[sentTrxId].acked
             trxDict[sentTrxId].ackRespTimeUs = trxSent[sentTrxId].ackResponseTimeUs

diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py
index 22b40c0a6c..80aa554682 100755
--- a/tests/performance_tests/performance_test_basic.py
+++ b/tests/performance_tests/performance_test_basic.py
@@ -112,6 +112,7 @@ class SpecifiedContract:
     _validationNodeIds: list = field(default_factory=list)
     _apiNodeIds: list = field(default_factory=list)
     nonProdsEosVmOcEnable: bool = False
+    apiNodesReadOnlyThreadCount: int = 0
 
     def __post_init__(self):
         self._totalNodes = self.producerNodeCount + self.validationNodeCount + self.apiNodeCount
@@ -137,6 +138,8 @@ def configureValidationNodes():
         def configureApiNodes():
             apiNodeSpecificNodeosStr = ""
             apiNodeSpecificNodeosStr += "--plugin eosio::chain_api_plugin "
+            apiNodeSpecificNodeosStr += "--plugin eosio::net_api_plugin "
+            apiNodeSpecificNodeosStr += f"--read-only-threads {self.apiNodesReadOnlyThreadCount} "
             if apiNodeSpecificNodeosStr:
                 self.specificExtraNodeosArgs.update({f"{nodeId}" : apiNodeSpecificNodeosStr for nodeId in self._apiNodeIds})
@@ -405,6 +408,7 @@ def configureConnections():
         self.data.numNodes = self.clusterConfig._totalNodes
 
         abiFile=None
+        apiEndpoint=None
         actionsDataJson=None
         actionsAuthsJson=None
         self.accountNames=[]
@@ -415,6 +419,9 @@ def configureConnections():
             print(f"Creating accounts specified in userTrxData: {self.userTrxDataDict['initAccounts']}")
             self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts'])
             abiFile = self.userTrxDataDict['abiFile']
+            if 'apiEndpoint' in self.userTrxDataDict:
+                apiEndpoint = self.userTrxDataDict['apiEndpoint']
+                print(f'API Endpoint specified: {apiEndpoint}')
 
             actionsDataJson = json.dumps(self.userTrxDataDict['actions'])
@@ -445,7 +452,7 @@ def configureConnections():
                                                                     accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)),
                                                                     trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath,
                                                                     abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson,
-                                                                    tpsTrxGensConfig=tpsTrxGensConfig)
+                                                                    tpsTrxGensConfig=tpsTrxGensConfig, apiEndpoint=apiEndpoint)
 
         trxGenExitCodes = self.cluster.trxGenLauncher.launch()
         print(f"Transaction Generator exit codes: {trxGenExitCodes}")
@@ -640,7 +647,8 @@ def setupClusterConfig(args) -> ClusterConfig:
                          producerNodeCount=args.producer_nodes, validationNodeCount=args.validation_nodes, apiNodeCount=args.api_nodes,
                          genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs,
                          specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl,
-                         nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable)
+                         nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable,
+                         apiNodesReadOnlyThreadCount=args.api_nodes_read_only_threads)
 
 class PtbArgumentsHandler(object):
     @staticmethod
@@ -660,6 +668,7 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa
     ptbBaseParserGroup.add_argument("--producer-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Producing nodes count", default=defProdNodeCnt)
     ptbBaseParserGroup.add_argument("--validation-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Validation nodes count", default=defValidationNodeCnt)
     ptbBaseParserGroup.add_argument("--api-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes count", default=defApiNodeCnt)
+    ptbBaseParserGroup.add_argument("--api-nodes-read-only-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes read only threads count for use with read-only transactions", default=0)
     ptbBaseParserGroup.add_argument("--tps-limit-per-generator", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum amount of transactions per second a single generator can have.", default=4000)
     ptbBaseParserGroup.add_argument("--genesis", type=str, help=argparse.SUPPRESS if suppressHelp else "Path to genesis.json", default="tests/performance_tests/genesis.json")
     ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=argparse.SUPPRESS if suppressHelp else ("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, "

diff --git a/tests/performance_tests/readOnlyTrxData.json b/tests/performance_tests/readOnlyTrxData.json
new file mode 100644
index 0000000000..9c6a367b9f
--- /dev/null
+++ b/tests/performance_tests/readOnlyTrxData.json
@@ -0,0 +1,14 @@
+{
+  "initAccounts": ["payloadless"],
+  "abiFile": "unittests/test-contracts/payloadless/payloadless.abi",
+  "apiEndpoint": "/v1/chain/send_read_only_transaction",
+  "actions": [
+    {
+      "actionName": "doit",
+      "actionData": {
+      },
+      "actionAuthAcct": "payloadless",
+      "authorization": {}
+    }
+  ]
+}

diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp
index 50c6bb1c54..60c20e3f7e 100644
--- a/tests/trx_generator/main.cpp
+++ b/tests/trx_generator/main.cpp
@@ -62,6 +62,7 @@ int main(int argc, char** argv) {
       ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied
transaction action data") ("actions-data", bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") + ("api-endpoint", bpo::value(&provider_config._api_endpoint), "The api endpoint to direct transactions to. Defaults to: '/v1/chain/send_transaction2'") ("peer-endpoint-type", bpo::value(&provider_config._peer_endpoint_type)->default_value("p2p"), "Identify the peer endpoint api type to determine how to send transactions. Allowable 'p2p' and 'http'. Default: 'p2p'") ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&provider_config._port)->default_value(9876), "set the peer endpoint port to send transactions to") diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 4464ef8bdb..dda297422f 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -213,10 +213,15 @@ namespace eosio::testing { } EOS_RETHROW_EXCEPTIONS(chain::transaction_type_exception, "Fail to parse unpacked action data JSON") - chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); - chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + std::vector auth = {}; + if (action_mvo["authorization"].get_object().find("actor") != action_mvo["authorization"].get_object().end() && + action_mvo["authorization"].get_object().find("permission") != action_mvo["authorization"].get_object().end()) { + chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); + chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + auth.push_back({auth_actor, auth_perm}); + } - return chain::action({{auth_actor, auth_perm}}, _config._contract_owner_account, action_name, std::move(packed_action_data)); + return chain::action(auth, _config._contract_owner_account, action_name, std::move(packed_action_data)); }); return actions; diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index c75d048dc8..c48b2934bf 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -71,8 +71,11 @@ namespace eosio::testing { } } + bool http_connection::needs_response_trace_info() { + return _config._api_endpoint == "/v1/chain/send_read_only_transaction"; + } + void http_connection::send_transaction(const chain::packed_transaction& trx) { - const std::string target = "/v1/chain/send_transaction2"s; const int http_version = 11; const std::string content_type = "application/json"s; @@ -84,16 +87,30 @@ namespace eosio::testing { http_client_async::http_request_params params{_connection_thread_pool.get_executor(), _config._peer_endpoint, _config._port, - target, + _config._api_endpoint, http_version, content_type}; http_client_async::async_http_request( params, std::move(msg_body), - [this, trx_id = trx.id()]( - boost::beast::error_code ec, boost::beast::http::response response) { + [this, trx_id = trx.id()](boost::beast::error_code ec, + boost::beast::http::response response) { ++this->_acknowledged; trx_acknowledged(trx_id, 
fc::time_point::now()); - if (response.result() != boost::beast::http::status::accepted) { + + if (this->needs_response_trace_info() && response.result() == boost::beast::http::status::ok) { + try { + fc::variant resp_json = fc::json::from_string(response.body()); + record_trx_info(trx_id, resp_json["processed"]["block_num"].as_uint64(), + resp_json["processed"]["receipt"]["cpu_usage_us"].as_uint64(), + resp_json["processed"]["receipt"]["net_usage_words"].as_uint64(), + resp_json["processed"]["block_time"].as_string()); + } + EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from string: ${string}", + ("string", response.body())); + } + + if (!(response.result() == boost::beast::http::status::accepted || + response.result() == boost::beast::http::status::ok)) { elog("async_http_request Failed with response http status code: ${status}", ("status", response.result_int())); } @@ -136,7 +153,13 @@ namespace eosio::testing { ack_round_trip_us = acked - data._timestamp; } out << std::string(data._trx_id) << "," << data._timestamp.to_iso_string() << "," << acked_str << "," - << ack_round_trip_us.count() << "\n"; + << ack_round_trip_us.count(); + + ackedTrxTraceInfo info = _peer_connection->get_acked_trx_trace_info(data._trx_id); + if (info._valid) { + out << std::string(",") << info._block_num << "," << info._cpu_usage_us << "," << info._net_usage_words << "," << info._block_time; + } + out << std::string("\n"); } out.close(); } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 86c9415619..260d12f03e 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -28,10 +28,29 @@ namespace eosio::testing { std::string _peer_endpoint_type = "p2p"; std::string _peer_endpoint = "127.0.0.1"; unsigned short _port = 9876; + // Api endpoint not truly used for p2p connections as transactions are streamed directly to p2p endpoint + std::string _api_endpoint = "/v1/chain/send_transaction2"; std::string to_string() const { std::ostringstream ss; - ss << "endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint << " port: " << _port; + ss << "Provider base config endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint + << " port: " << _port << " api endpoint: " << _api_endpoint; + return std::move(ss).str(); + } + }; + + struct ackedTrxTraceInfo { + bool _valid = false; + unsigned int _block_num = 0; + unsigned int _cpu_usage_us = 0; + unsigned int _net_usage_words = 0; + std::string _block_time = ""; + + std::string to_string() const { + std::ostringstream ss; + ss << "Acked Transaction Trace Info " + << "valid: " << _valid << " block num: " << _block_num << " cpu usage us: " << _cpu_usage_us + << " net usage words: " << _net_usage_words << " block time: " << _block_time; return std::move(ss).str(); } }; @@ -59,24 +78,25 @@ namespace eosio::testing { _connection_thread_pool.stop(); }; - fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& _trx_id) { + fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id) { fc::time_point time_acked; std::lock_guard lock(_trx_ack_map_lock); - auto search = _trxs_ack_time_map.find(_trx_id); + auto search = _trxs_ack_time_map.find(trx_id); if (search != _trxs_ack_time_map.end()) { time_acked = search->second; } else { - elog("get_trx_ack_time - Transaction acknowledge time not found for transaction with id: ${id}", ("id", _trx_id)); + elog("get_trx_ack_time - Transaction acknowledge 
time not found for transaction with id: ${id}", ("id", trx_id)); time_acked = fc::time_point::min(); } return time_acked; } + virtual ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; - void trx_acknowledged(const eosio::chain::transaction_id_type _trx_id, const fc::time_point ack_time) { std::lock_guard lock(_trx_ack_map_lock); - _trxs_ack_time_map[_trx_id] = ack_time; + void trx_acknowledged(const eosio::chain::transaction_id_type trx_id, const fc::time_point ack_time) { std::lock_guard lock(_trx_ack_map_lock); + _trxs_ack_time_map[trx_id] = ack_time; } private: @@ -85,6 +105,9 @@ }; struct http_connection : public provider_connection { + std::mutex _trx_info_map_lock; + std::map<eosio::chain::transaction_id_type, ackedTrxTraceInfo> _acked_trx_trace_info_map; + std::atomic<uint64_t> _acknowledged{0}; std::atomic<uint64_t> _sent{0}; @@ -92,10 +115,30 @@ : provider_connection(provider_config) {} void send_transaction(const chain::packed_transaction& trx) final; + void record_trx_info(eosio::chain::transaction_id_type trx_id, unsigned int block_num, unsigned int cpu_usage_us, + unsigned int net_usage_words, const std::string& block_time) { + std::lock_guard lock(_trx_info_map_lock); + _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); + } + + ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + ackedTrxTraceInfo info; + std::lock_guard lock(_trx_info_map_lock); + auto search = _acked_trx_trace_info_map.find(trx_id); + if (search != _acked_trx_trace_info_map.end()) { + info = search->second; + } else { + elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: " + "${id}", + ("id", trx_id)); + } + return info; + } private: void connect() override final; void disconnect() override final; + bool needs_response_trace_info(); }; struct p2p_connection : public provider_connection { @@ -107,6 +150,10 @@ void send_transaction(const chain::packed_transaction& trx) final; + ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + return ackedTrxTraceInfo(); + } + private: void connect() override final; void disconnect() override final; From b803af646a1a3fa1fbbc716c2c0cd17db7f162f2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 11:28:04 -0500 Subject: [PATCH 10/22] Forgot to add test to group.
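For reference, the read-only support in the preceding patch extends the generator's per-transaction log line from four comma-separated fields to eight when an acknowledged trace is available. A minimal parsing sketch, mirroring the sentTrx/sentTrxExtTrace handling in log_reader.py above (the class and field names here are illustrative, not part of the patch):

```python
from dataclasses import dataclass

@dataclass
class SentTrx:
    sentTime: str = ""
    acked: str = ""
    ackResponseTimeUs: int = -1

@dataclass
class SentTrxExtTrace(SentTrx):
    blockNum: int = -1
    cpuUsageUs: int = -1
    netUsageWords: int = -1
    blockTime: str = ""

def parse_trx_log_line(line: str):
    # trx_id,sentTime,acked,ackResponseTimeUs[,blockNum,cpuUsageUs,netUsageWords,blockTime]
    fields = line.rstrip("\n").split(",")
    if len(fields) == 4:
        return fields[0], SentTrx(fields[1], fields[2], int(fields[3]))
    return fields[0], SentTrxExtTrace(fields[1], fields[2], int(fields[3]),
                                      int(fields[4]), int(fields[5]),
                                      int(fields[6]), fields[7])
```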
--- tests/performance_tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 23f21a356a..f6dec111e9 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -33,5 +33,6 @@ set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS no set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ex_read_only_trxs PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) From a41b2ff2f034fa55e6b59871a38089c22a9c5b9a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 11:29:52 -0500 Subject: [PATCH 11/22] Add performance load test with read-only trxs Add apiEndpoint and userTrxData to test report for reference. Simplify createReport argument list. --- tests/performance_tests/CMakeLists.txt | 2 ++ .../performance_test_basic.py | 19 +++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index f6dec111e9..9a231da308 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -16,6 +16,7 @@ endif() add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_ro COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 
--validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -26,6 +27,7 @@ add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_t add_test(NAME performance_test_basic_ex_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests) set_property(TEST performance_test_api PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_ro PROPERTY LABELS long_running_tests) set_property(TEST performance_test_ex_cpu_trx_spec PROPERTY LABELS long_running_tests) set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index d26aa67c63..cae49d9fd0 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -408,7 +408,6 @@ def configureConnections(): self.data.numNodes = self.clusterConfig._totalNodes abiFile=None - apiEndpoint=None actionsDataJson=None actionsAuthsJson=None self.accountNames=[] @@ -420,8 +419,8 @@ def configureConnections(): self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] if 'apiEndpoint' in self.userTrxDataDict: - apiEndpoint = self.userTrxDataDict['apiEndpoint'] - print(f'API Endpoint specified: {apiEndpoint}') + self.apiEndpoint = self.userTrxDataDict['apiEndpoint'] + print(f'API Endpoint specified: {self.apiEndpoint}') actionsDataJson = json.dumps(self.userTrxDataDict['actions']) @@ -452,7 +451,7 @@ def configureConnections(): accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, - tpsTrxGensConfig=tpsTrxGensConfig, apiEndpoint=apiEndpoint) + tpsTrxGensConfig=tpsTrxGensConfig, apiEndpoint=self.apiEndpoint) trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -490,10 +489,10 @@ def captureLowLevelArtifacts(self): except Exception as e: print(f"Failed to move '{self.cluster.nodeosLogPath}' to '{self.varLogsDirPath}': {type(e)}: {e}") - def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, nodeosVers: str, - targetApiEndpointType: 
str, testResult: PerfTestBasicResult) -> dict: + def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict: report = {} - report['targetApiEndpointType'] = targetApiEndpointType + report['targetApiEndpointType'] = self.ptbConfig.endpointApiType + report['targetApiEndpoint'] = self.apiEndpoint if self.apiEndpoint is not None else '/v1/chain/send_transaction2' if self.ptbConfig.endpointApiType == "http" else "NA for P2P" report['Result'] = asdict(testResult) report['Analysis'] = {} report['Analysis']['BlockSize'] = asdict(logAnalysis.blockSizeStats) @@ -526,8 +525,9 @@ def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_r report['Analysis']['DroppedBlocks'][formattedNodeNum] = self.data.droppedBlocks[formattedNodeNum] report['Analysis']['DroppedBlocksCount'][formattedNodeNum] = len(self.data.droppedBlocks[formattedNodeNum]) report['args'] = argsDict + report['args']['userTrxData'] = self.userTrxDataDict if self.ptbConfig.userTrxDataFile is not None else "NOT CONFIGURED" report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = nodeosVers + report['nodeosVersion'] = self.clusterConfig.nodeosVers return report def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): @@ -557,8 +557,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): print(f"testRunSuccessful: {self.testResult.testRunSuccessful} testPassed: {self.testResult.testPassed} tpsExpectationMet: {self.testResult.tpsExpectMet} trxExpectationMet: {self.testResult.trxExpectMet}") - self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, nodeosVers=self.clusterConfig.nodeosVers, - targetApiEndpointType=self.ptbConfig.endpointApiType, testResult=self.testResult) + self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, testResult=self.testResult) jsonReport = None if not self.ptbConfig.quiet or not self.ptbConfig.delReport: From f7f487d75b8e354bebe1d439b31bb1b7668bb5ce Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 12:53:37 -0500 Subject: [PATCH 12/22] Refactor endpointApiType and apiEndpoint a little. 
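After this refactor the endpoint type rides on the launcher rather than on TpsTrxGensConfig. A minimal sketch of the resulting call shape; the import path and all argument values below are illustrative placeholders, not taken from a real run:

```python
from pathlib import Path
from TestHarness.launch_transaction_generators import (
    TpsTrxGensConfig, TransactionGeneratorsLauncher)

# TpsTrxGensConfig no longer carries the endpoint type.
tpsTrxGensConfig = TpsTrxGensConfig(targetTps=20, tpsLimitPerGenerator=10,
                                    connectionPairList=["127.0.0.1:8888"])
trxGenLauncher = TransactionGeneratorsLauncher(
    chainId=1, lastIrreversibleBlockId=1,            # placeholders
    contractOwnerAccount="payloadless", accts="payloadless",
    privateKeys="5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3",
    trxGenDurationSec=5, logDir="trxGenLogs",
    abiFile=Path("payloadless.abi"), actionsData="[]", actionsAuths="{}",
    tpsTrxGensConfig=tpsTrxGensConfig,
    endpointApiType="http",                               # transport selection
    apiEndpoint="/v1/chain/send_read_only_transaction")   # HTTP target path
exit_codes = trxGenLauncher.launch()
```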
--- tests/TestHarness/launch_transaction_generators.py | 12 ++++++------ tests/performance_tests/performance_test_basic.py | 14 +++++++++----- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 6825ce071f..f5790f8906 100644 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -16,7 +16,7 @@ class TpsTrxGensConfig: - def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list, endpointApiType: str): + def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list): self.targetTps: int = targetTps self.tpsLimitPerGenerator: int = tpsLimitPerGenerator self.connectionPairList = connectionPairList @@ -27,7 +27,6 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList self.modTps = self.targetTps % self.numGenerators self.cleanlyDivisible = self.modTps == 0 self.incrementPoint = self.numGenerators + 1 - self.modTps - self.endpointApiType = endpointApiType self.targetTpsPerGenList = [] curTps = self.initialTpsPerGenerator @@ -39,7 +38,7 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str, - abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, apiEndpoint: str=None): + abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, endpointApiType: str, apiEndpoint: str=None): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -51,6 +50,7 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.abiFile = abiFile self.actionsData = actionsData self.actionsAuths = actionsAuths + self.endpointApiType = endpointApiType self.apiEndpoint = apiEndpoint def launch(self, waitToComplete=True): @@ -69,7 +69,7 @@ def launch(self, waitToComplete=True): '--trx-gen-duration', f'{self.trxGenDurationSec}', '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', - '--peer-endpoint-type', f'{self.tpsTrxGensConfig.endpointApiType}', + '--peer-endpoint-type', f'{self.endpointApiType}', '--peer-endpoint', f'{connectionPair[0]}', '--port', f'{connectionPair[1]}'] if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None: @@ -130,8 +130,8 @@ def main(): privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator, - connectionPairList=connectionPairList, endpointApiType=args.endpoint_api_type), - apiEndpoint=args.api_endpoint) + connectionPairList=connectionPairList), + endpointApiType=args.endpoint_api_type, apiEndpoint=args.api_endpoint) exit_codes = trxGenLauncher.launch() diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index cae49d9fd0..c19d6a80d1 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -169,9 +169,13 @@ class PtbConfig: printMissingTransactions: bool=False userTrxDataFile: 
Path=None endpointApiType: str="p2p" + apiEndpoint: str=None + def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps + if (self.endpointApiType == "http"): + self.apiEndpoint="/v1/chain/send_transaction2" @dataclass class LoggingConfig: @@ -419,8 +423,8 @@ def configureConnections(): self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] if 'apiEndpoint' in self.userTrxDataDict: - self.apiEndpoint = self.userTrxDataDict['apiEndpoint'] - print(f'API Endpoint specified: {self.apiEndpoint}') + self.ptbConfig.apiEndpoint = self.userTrxDataDict['apiEndpoint'] + print(f'API Endpoint specified: {self.ptbConfig.apiEndpoint}') actionsDataJson = json.dumps(self.userTrxDataDict['actions']) @@ -445,13 +449,13 @@ def configureConnections(): self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList, endpointApiType=self.ptbConfig.endpointApiType) + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList) self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, - tpsTrxGensConfig=tpsTrxGensConfig, apiEndpoint=self.apiEndpoint) + tpsTrxGensConfig=tpsTrxGensConfig, endpointApiType=self.ptbConfig.endpointApiType, apiEndpoint=self.ptbConfig.apiEndpoint) trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -492,7 +496,7 @@ def captureLowLevelArtifacts(self): def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict: report = {} report['targetApiEndpointType'] = self.ptbConfig.endpointApiType - report['targetApiEndpoint'] = self.apiEndpoint if self.apiEndpoint is not None else '/v1/chain/send_transaction2' if self.ptbConfig.endpointApiType == "http" else "NA for P2P" + report['targetApiEndpoint'] = self.ptbConfig.apiEndpoint if self.ptbConfig.apiEndpoint is not None else "NA for P2P" report['Result'] = asdict(testResult) report['Analysis'] = {} report['Analysis']['BlockSize'] = asdict(logAnalysis.blockSizeStats) From ed5333b71660a6bcbe23019346cacce7f2ba753a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 13:14:23 -0500 Subject: [PATCH 13/22] Fix endpointApiType which moved from TpsTrxGensConfig onto the TransactionGeneratorsLauncher itself. 
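For context, the TPS split that stays behind on TpsTrxGensConfig spreads targetTps across generators and bumps the trailing generators by one TPS each to absorb any remainder. A standalone sketch of that arithmetic, reconstructed from the fields shown above (initialTpsPerGenerator, modTps, incrementPoint), so treat the exact loop as an assumption:

```python
import math

def split_target_tps(target_tps: int, tps_limit_per_generator: int) -> list:
    """Spread target_tps over the fewest generators that respect the cap."""
    num_generators = math.ceil(target_tps / tps_limit_per_generator)
    initial = math.floor(target_tps / num_generators)
    mod = target_tps % num_generators
    increment_point = num_generators + 1 - mod  # first generator bumped by +1
    per_gen, cur = [], initial
    for n in range(1, num_generators + 1):
        if mod != 0 and n == increment_point:
            cur += 1
        per_gen.append(cur)
    return per_gen

# Matches the report data later in this series: 13001 TPS over 4 generators.
assert split_target_tps(13001, 4000) == [3250, 3250, 3250, 3251]
```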
--- tests/TestHarness/Cluster.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index bde3fb7d73..cb34371531 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1562,11 +1562,12 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a self.preExistingFirstTrxFiles = glob.glob(f"{Utils.DataDir}/first_trx_*.txt") connectionPairList = [f"{self.host}:{self.getNodeP2pPort(nodeId)}"] - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList, endpointApiType="p2p") + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList) self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, - abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig) + abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig, + endpointApiType="p2p") Utils.Print("Launch txn generators and start generating/sending transactions") self.trxGenLauncher.launch(waitToComplete=waitToComplete) From edd768beed8598e5beb1b872ab7033f5ca32788e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 14:52:01 -0500 Subject: [PATCH 14/22] Make configuration of validation and api nodes conditional. --- tests/performance_tests/performance_test_basic.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index c19d6a80d1..900cc1af72 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -143,8 +143,10 @@ def configureApiNodes(): if apiNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : apiNodeSpecificNodeosStr for nodeId in self._apiNodeIds}) - configureValidationNodes() - configureApiNodes() + if self.validationNodeCount > 0: + configureValidationNodes() + if self.apiNodeCount > 0: + configureApiNodes() assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" if self.nodeosVers == "v2": From 265ae7f2363129b1556521ae51a9392e90c6dd6d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 14:52:19 -0500 Subject: [PATCH 15/22] Update documentation.
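The updated README examples below record a targetApiEndpoint in each report. A small sketch of how that value is resolved, reconstructed from the harness changes in the preceding patches (the function name is illustrative): p2p runs report "NA for P2P", http runs default to /v1/chain/send_transaction2, and a userTrxData file may override the endpoint, e.g. for read-only transactions.

```python
def resolve_target_api_endpoint(endpoint_api_type: str, user_trx_data: dict) -> str:
    """Mirror of the endpoint selection recorded in the test report."""
    if endpoint_api_type != "http":
        return "NA for P2P"
    # A userTrxData file may redirect traffic, e.g. to the read-only endpoint.
    return user_trx_data.get("apiEndpoint", "/v1/chain/send_transaction2")

assert resolve_target_api_endpoint("p2p", {}) == "NA for P2P"
assert resolve_target_api_endpoint("http", {}) == "/v1/chain/send_transaction2"
assert (resolve_target_api_endpoint(
    "http", {"apiEndpoint": "/v1/chain/send_read_only_transaction"})
    == "/v1/chain/send_read_only_transaction")
```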
--- tests/performance_tests/README.md | 384 +++++++++++++++--------- 1 file changed, 197 insertions(+), 187 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index d75e6ee28b..3687e0649e 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -461,6 +461,7 @@ usage: performance_test.py testBpOpMode overrideBasicTestConfig [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] [--endpoint-api-type {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] [--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT] @@ -516,6 +517,8 @@ Performance Test Basic Base: Validation nodes count --api-nodes API_NODES API nodes count + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. --genesis GENESIS Path to genesis.json @@ -608,6 +611,7 @@ The following scripts are typically used by the Performance Harness main script [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] @@ -671,6 +675,8 @@ Performance Test Basic Base: Validation nodes count (default: 1) --api-nodes API_NODES API nodes count (default: 0) + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions (default: 0) --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. (default: 4000) --genesis GENESIS Path to genesis.json (default: tests/performance_tests/genesis.json) @@ -804,6 +810,12 @@ Transaction Generator command line options.: actions auths description string to use, containing authAcctName to activePrivateKey pairs. + --api-endpoint arg The api endpoint to direct transactions to. + Defaults to: '/v1/chain/send_transaction2' + --peer-endpoint-type arg (=p2p) Identify the peer endpoint api type to + determine how to send transactions. + Allowable 'p2p' and 'http'.
Default: + 'p2p' --peer-endpoint arg (=127.0.0.1) set the peer endpoint to send transactions to --port arg (=9876) set the peer endpoint port to send @@ -822,7 +834,7 @@ The Performance Harness generates a report to summarize results of test scenario Command used to run test and generate report: ``` bash -.build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax +./build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax ``` ### Report Breakdown @@ -843,11 +855,11 @@ Next, a high level summary of the search scenario target and results is included "19001": "FAIL", "16001": "FAIL", "14501": "FAIL", - "13501": "PASS", - "14001": "PASS" + "13501": "FAIL", + "13001": "PASS" }, "LongRunningSearchScenariosSummary": { - "14001": "PASS" + "13001": "PASS" }, ``` @@ -863,20 +875,20 @@ Next, a summary of the search scenario conducted and respective results is inclu "searchFloor": 1, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:57:41.801991", - "testEnd": "2023-05-17T22:58:57.941356", - "testDuration": "0:01:16.139365", + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, "targetTPS": 12501, - "resultAvgTps": 12530.375, + "resultAvgTps": 12523.6875, "expectedTxns": 125010, "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-57-41-12501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } } ``` @@ -912,15 +924,15 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "perfTestsBegin": "2023-05-17T21:28:39.926423", - "perfTestsFinish": "2023-05-17T23:07:02.076216", - "perfTestsDuration": "1:38:22.149793", + "perfTestsBegin": "2023-06-05T17:59:49.175441", + "perfTestsFinish": "2023-06-05T19:23:03.723738", + "perfTestsDuration": "1:23:14.548297", "operationalMode": "Block Producer Operational Mode", - "InitialMaxTpsAchieved": 14001, - "LongRunningMaxTpsAchieved": 14001, - "tpsTestStart": "2023-05-17T22:54:38.770858", - "tpsTestFinish": "2023-05-17T23:07:02.076202", - "tpsTestDuration": "0:12:23.305344", + "InitialMaxTpsAchieved": 13001, + "LongRunningMaxTpsAchieved": 13001, + "tpsTestStart": "2023-06-05T19:10:32.123231", + "tpsTestFinish": "2023-06-05T19:23:03.723722", + "tpsTestDuration": "0:12:31.600491", "InitialSearchScenariosSummary": { "50000": "FAIL", "25001": "FAIL", @@ -928,11 +940,11 @@ Finally, the full detail test report for each of the determined max TPS throughp "19001": "FAIL", "16001": "FAIL", "14501": "FAIL", - "13501": "PASS", - "14001": "PASS" + "13501": "FAIL", + "13001": "PASS" }, "LongRunningSearchScenariosSummary": { - "14001": "PASS" + "13001": "PASS" }, "InitialSearchResults": { "0": { @@ -941,20 +953,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 50000, "basicTestResult": { - "testStart": "2023-05-17T22:54:38.770895", - "testEnd": "2023-05-17T22:56:13.025658", - "testDuration": "0:01:34.254763", + 
"testStart": "2023-06-05T19:10:32.123282", + "testEnd": "2023-06-05T19:12:12.746349", + "testDuration": "0:01:40.623067", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 50000, - "resultAvgTps": 12108.542857142857, + "resultAvgTps": 14015.564102564103, "expectedTxns": 500000, - "resultTxns": 242869, - "testAnalysisBlockCnt": 36, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-54-38-50000" + "resultTxns": 309515, + "testAnalysisBlockCnt": 40, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-10-32-50000" } }, "1": { @@ -963,20 +975,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 49500, "basicTestResult": { - "testStart": "2023-05-17T22:56:13.130264", - "testEnd": "2023-05-17T22:57:41.712682", - "testDuration": "0:01:28.582418", + "testStart": "2023-06-05T19:12:12.749120", + "testEnd": "2023-06-05T19:13:42.524984", + "testDuration": "0:01:29.775864", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 25001, - "resultAvgTps": 14207.161290322581, + "resultAvgTps": 13971.5, "expectedTxns": 250010, - "resultTxns": 249688, - "testAnalysisBlockCnt": 32, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-56-13-25001" + "resultTxns": 249981, + "testAnalysisBlockCnt": 33, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-12-12-25001" } }, "2": { @@ -985,20 +997,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:57:41.801991", - "testEnd": "2023-05-17T22:58:57.941356", - "testDuration": "0:01:16.139365", + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, "targetTPS": 12501, - "resultAvgTps": 12530.375, + "resultAvgTps": 12523.6875, "expectedTxns": 125010, "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-57-41-12501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } }, "3": { @@ -1007,20 +1019,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:58:58.035578", - "testEnd": "2023-05-17T23:00:21.801656", - "testDuration": "0:01:23.766078", + "testStart": "2023-06-05T19:15:00.444109", + "testEnd": "2023-06-05T19:16:25.749654", + "testDuration": "0:01:25.305545", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 19001, - "resultAvgTps": 14720.045454545454, + "resultAvgTps": 14858.095238095239, "expectedTxns": 190010, - "resultTxns": 190008, - "testAnalysisBlockCnt": 23, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-58-58-19001" + "resultTxns": 189891, + "testAnalysisBlockCnt": 22, + "logsDir": 
"performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-15-00-19001" } }, "4": { @@ -1029,20 +1041,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 18501, "basicTestResult": { - "testStart": "2023-05-17T23:00:21.902609", - "testEnd": "2023-05-17T23:01:42.674652", - "testDuration": "0:01:20.772043", + "testStart": "2023-06-05T19:16:25.751860", + "testEnd": "2023-06-05T19:17:48.336896", + "testDuration": "0:01:22.585036", "testPassed": false, - "testRunSuccessful": true, + "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, - "trxExpectMet": true, + "trxExpectMet": false, "targetTPS": 16001, - "resultAvgTps": 13972.578947368422, + "resultAvgTps": 14846.0, "expectedTxns": 160010, - "resultTxns": 160010, - "testAnalysisBlockCnt": 20, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-00-21-16001" + "resultTxns": 159988, + "testAnalysisBlockCnt": 19, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-16-25-16001" } }, "5": { @@ -1051,64 +1063,64 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 15501, "basicTestResult": { - "testStart": "2023-05-17T23:01:42.780751", - "testEnd": "2023-05-17T23:03:02.321649", - "testDuration": "0:01:19.540898", + "testStart": "2023-06-05T19:17:48.339990", + "testEnd": "2023-06-05T19:19:07.843311", + "testDuration": "0:01:19.503321", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 14501, - "resultAvgTps": 13710.176470588236, + "resultAvgTps": 13829.588235294117, "expectedTxns": 145010, - "resultTxns": 144729, + "resultTxns": 144964, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-01-42-14501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-17-48-14501" } }, "6": { - "success": true, + "success": false, "searchTarget": 13501, "searchFloor": 13001, "searchCeiling": 14001, "basicTestResult": { - "testStart": "2023-05-17T23:03:02.417778", - "testEnd": "2023-05-17T23:04:20.138769", - "testDuration": "0:01:17.720991", - "testPassed": true, - "testRunSuccessful": true, + "testStart": "2023-06-05T19:19:07.845657", + "testEnd": "2023-06-05T19:20:27.815030", + "testDuration": "0:01:19.969373", + "testPassed": false, + "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": true, - "trxExpectMet": true, + "trxExpectMet": false, "targetTPS": 13501, - "resultAvgTps": 13508.4375, + "resultAvgTps": 13470.375, "expectedTxns": 135010, - "resultTxns": 135010, + "resultTxns": 135000, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-03-02-13501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-19-07-13501" } }, "7": { "success": true, - "searchTarget": 14001, - "searchFloor": 14001, - "searchCeiling": 14001, + "searchTarget": 13001, + "searchFloor": 13001, + "searchCeiling": 13001, "basicTestResult": { - "testStart": "2023-05-17T23:04:20.234990", - "testEnd": "2023-05-17T23:05:38.702787", - "testDuration": "0:01:18.467797", + "testStart": "2023-06-05T19:20:27.817483", + "testEnd": "2023-06-05T19:21:44.846130", + "testDuration": "0:01:17.028647", "testPassed": true, 
"testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13935.3125, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13032.5625, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-04-20-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-20-27-13001" } } }, @@ -1131,24 +1143,24 @@ Finally, the full detail test report for each of the determined max TPS throughp "LongRunningSearchResults": { "0": { "success": true, - "searchTarget": 14001, + "searchTarget": 13001, "searchFloor": 1, - "searchCeiling": 14001, + "searchCeiling": 13001, "basicTestResult": { - "testStart": "2023-05-17T23:05:38.835496", - "testEnd": "2023-05-17T23:07:01.937623", - "testDuration": "0:01:23.102127", + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13977.4375, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" } } }, @@ -1169,49 +1181,44 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "ProducerThreadAnalysis": { - "recommendedThreadCount": 3, + "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 13001, - "3": 13501, - "4": 11501 + "2": 12001, + "3": 12001 }, - "analysisStart": "2023-05-17T21:28:39.947138", - "analysisFinish": "2023-05-17T22:01:33.079889" + "analysisStart": "2023-06-05T17:59:49.197967", + "analysisFinish": "2023-06-05T18:18:33.449126" }, "ChainThreadAnalysis": { - "recommendedThreadCount": 2, + "recommendedThreadCount": 3, "threadToMaxTpsDict": { - "2": 13501, - "3": 13001 + "2": 4001, + "3": 13001, + "4": 5501 }, - "analysisStart": "2023-05-17T22:01:33.080513", - "analysisFinish": "2023-05-17T22:23:35.604304" + "analysisStart": "2023-06-05T18:18:33.449689", + "analysisFinish": "2023-06-05T18:48:02.262053" }, "NetThreadAnalysis": { - "recommendedThreadCount": 5, + "recommendedThreadCount": 4, "threadToMaxTpsDict": { - "4": 12001, - "5": 14001, - "6": 10001 + "4": 14501, + "5": 13501 }, - "analysisStart": "2023-05-17T22:23:35.605115", - "analysisFinish": "2023-05-17T22:54:38.770570" + "analysisStart": "2023-06-05T18:48:02.262594", + "analysisFinish": "2023-06-05T19:10:32.123003" }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, "producerNodeCount": 1, "validationNodeCount": 1, "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -1495,8 +1502,11 @@ Finally, the 
full detail test report for each of the determined max TPS throughp "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -1564,16 +1574,13 @@ Finally, the full detail test report for each of the determined max TPS throughp "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -1690,6 +1697,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -1698,7 +1706,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "nodeosVers": "v4", "specificExtraNodeosArgs": { "1": "--plugin eosio::trace_api_plugin ", - "2": "--plugin eosio::chain_api_plugin " + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, "_pNodes": 1, @@ -1712,6 +1720,7 @@ Finally, the full detail test report for each of the determined max TPS throughp 2 ], "nonProdsEosVmOcEnable": false, + "apiNodesReadOnlyThreadCount": 0, "testDurationSec": 10, "finalDurationSec": 30, "delPerfLogs": false, @@ -1732,10 +1741,10 @@ Finally, the full detail test report for each of the determined max TPS throughp "endpointApiType": "p2p", "opModeCmd": "testBpOpMode", "logDirBase": "performance_test", - "logDirTimestamp": "2023-05-17_21-28-39", - "logDirPath": "performance_test/2023-05-17_21-28-39", - "ptbLogsDirPath": "performance_test/2023-05-17_21-28-39/testRunLogs", - "pluginThreadOptLogsDirPath": "performance_test/2023-05-17_21-28-39/pluginThreadOptRunLogs" + "logDirTimestamp": "2023-06-05_17-59-49", + "logDirPath": "performance_test/2023-06-05_17-59-49", + "ptbLogsDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs", + "pluginThreadOptLogsDirPath": "performance_test/2023-06-05_17-59-49/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -1759,96 +1768,97 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "targetApiEndpointType": "p2p", + "targetApiEndpoint": "NA for P2P", "Result": { - "testStart": "2023-05-17T23:05:38.835496", - "testEnd": "2023-05-17T23:07:01.937623", - "testDuration": 
"0:01:23.102127", + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13977.4375, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" }, "Analysis": { "BlockSize": { - "min": 153909, - "max": 192200, - "avg": 173840.70588235295, - "sigma": 9231.908863633565, + "min": 153503, + "max": 169275, + "avg": 162269.76470588235, + "sigma": 3152.279353278714, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { - "firstBlockNum": 112, - "lastBlockNum": 152, - "totalBlocks": 41, - "testStartBlockNum": 112, - "testEndBlockNum": 152, + "firstBlockNum": 110, + "lastBlockNum": 140, + "totalBlocks": 31, + "testStartBlockNum": 110, + "testEndBlockNum": 140, "setupBlocksCnt": 0, "tearDownBlocksCnt": 0, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 19, + "trailingEmptyBlocksCnt": 9, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 13234, - "max": 14812, - "avg": 13977.4375, - "sigma": 398.5379481225721, + "min": 12775, + "max": 13285, + "avg": 13027.0, + "sigma": 92.70854868888844, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 14001, + "configTps": 13001, "configTestDuration": 10, "tpsPerGenerator": [ - 3500, - 3500, - 3500, - 3501 + 3250, + 3250, + 3250, + 3251 ], "generatorCount": 4 }, "TrxCPU": { - "min": 7.0, - "max": 3649.0, - "avg": 26.156724519677166, - "sigma": 21.41749466859243, - "samples": 140010 + "min": 8.0, + "max": 1180.0, + "avg": 25.89257749403892, + "sigma": 12.604252354938811, + "samples": 130010 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5820000171661377, - "avg": 0.2768004499855284, - "sigma": 0.1456695649820771, - "samples": 140010, + "max": 0.5399999618530273, + "avg": 0.2522121298066488, + "sigma": 0.14457374598663084, + "samples": 130010, "units": "seconds" }, "TrxNet": { "min": 24.0, "max": 25.0, - "avg": 24.85718162988358, - "sigma": 0.3498875294629824, - "samples": 140010 + "avg": 24.846196446427196, + "sigma": 0.3607603366241642, + "samples": 130010 }, "TrxAckResponseTime": { "min": -1.0, "max": -1.0, "avg": -1.0, "sigma": 0.0, - "samples": 140010, + "samples": 130010, "measurementApplicable": "NOT APPLICABLE", "units": "microseconds" }, - "ExpectedTransactions": 140010, + "ExpectedTransactions": 130010, "DroppedTransactions": 0, - "ProductionWindowsTotal": 0, - "ProductionWindowsAverageSize": 0, + "ProductionWindowsTotal": 2, + "ProductionWindowsAverageSize": 12.0, "ProductionWindowsMissed": 0, "ForkedBlocks": { "00": [], @@ -1869,19 +1879,15 @@ The Performance Test Basic generates, by default, a report that details results }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, 
"producerNodeCount": 1, "validationNodeCount": 1, "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -2165,8 +2171,11 @@ The Performance Test Basic generates, by default, a report that details results "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -2234,16 +2243,13 @@ The Performance Test Basic generates, by default, a report that details results "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -2360,6 +2366,7 @@ The Performance Test Basic generates, by default, a report that details results "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -2368,7 +2375,7 @@ The Performance Test Basic generates, by default, a report that details results "nodeosVers": "v4", "specificExtraNodeosArgs": { "1": "--plugin eosio::trace_api_plugin ", - "2": "--plugin eosio::chain_api_plugin " + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, "_pNodes": 1, @@ -2382,22 +2389,25 @@ The Performance Test Basic generates, by default, a report that details results 2 ], "nonProdsEosVmOcEnable": false, - "targetTps": 14001, + "apiNodesReadOnlyThreadCount": 0, + "targetTps": 13001, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "performance_test/2023-05-17_21-28-39/testRunLogs", + "logDirRoot": "performance_test/2023-06-05_17-59-49/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 140010, + "expectedTransactionsSent": 130010, "printMissingTransactions": false, "userTrxDataFile": null, "endpointApiType": "p2p", - "logDirBase": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test", - "logDirTimestamp": "2023-05-17_23-05-38", - "logDirTimestampedOptSuffix": "-14001", - "logDirPath": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "apiEndpoint": null, + "logDirBase": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test", + "logDirTimestamp": "2023-06-05_19-21-44", + 
"logDirTimestampedOptSuffix": "-13001", + "logDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001", + "userTrxData": "NOT CONFIGURED" }, "env": { "system": "Linux", From f9838a118b4018c23a0b113543aa17ef0e116412 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 5 Jun 2023 15:25:32 -0500 Subject: [PATCH 16/22] Address peer review comments. --- tests/trx_generator/trx_provider.cpp | 6 +++--- tests/trx_generator/trx_provider.hpp | 22 ++++++++++------------ 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index c48b2934bf..d5455a567a 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -155,11 +155,11 @@ namespace eosio::testing { out << std::string(data._trx_id) << "," << data._timestamp.to_iso_string() << "," << acked_str << "," << ack_round_trip_us.count(); - ackedTrxTraceInfo info = _peer_connection->get_acked_trx_trace_info(data._trx_id); + acked_trx_trace_info info = _peer_connection->get_acked_trx_trace_info(data._trx_id); if (info._valid) { - out << std::string(",") << info._block_num << "," << info._cpu_usage_us << "," << info._net_usage_words << "," << info._block_time; + out << "," << info._block_num << "," << info._cpu_usage_us << "," << info._net_usage_words << "," << info._block_time; } - out << std::string("\n"); + out << "\n"; } out.close(); } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 260d12f03e..47cde4fa8b 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -35,11 +35,11 @@ namespace eosio::testing { std::ostringstream ss; ss << "Provider base config endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint << " port: " << _port << " api endpoint: " << _api_endpoint; - return std::move(ss).str(); + return ss.str(); } }; - struct ackedTrxTraceInfo { + struct acked_trx_trace_info { bool _valid = false; unsigned int _block_num = 0; unsigned int _cpu_usage_us = 0; @@ -91,7 +91,7 @@ namespace eosio::testing { return time_acked; } - virtual ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; + virtual acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; void trx_acknowledged(const eosio::chain::transaction_id_type trx_id, const fc::time_point ack_time) { @@ -106,7 +106,7 @@ namespace eosio::testing { struct http_connection : public provider_connection { std::mutex _trx_info_map_lock; - std::map _acked_trx_trace_info_map; + std::map _acked_trx_trace_info_map; std::atomic _acknowledged{0}; std::atomic _sent{0}; @@ -121,16 +121,14 @@ namespace eosio::testing { _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); } - ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { - ackedTrxTraceInfo info; + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override { + acked_trx_trace_info info; std::lock_guard lock(_trx_info_map_lock); auto search = _acked_trx_trace_info_map.find(trx_id); if (search != _acked_trx_trace_info_map.end()) { info = search->second; } else { - elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: " - "${id}", - ("id", 
+            elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: ""${id}", ("id", trx_id));
          }
          return info;
       }
@@ -150,8 +148,8 @@ namespace eosio::testing {

       void send_transaction(const chain::packed_transaction& trx) final;

-      ackedTrxTraceInfo get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) {
-         return ackedTrxTraceInfo();
-      }
+      acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override {
+         return {};
+      }

    private:
@@ -217,7 +215,7 @@ namespace eosio::testing {
       std::string to_string() const {
          std::ostringstream ss;
          ss << "Trx Tps Tester Config: duration: " << _gen_duration_seconds << " target tps: " << _target_tps;
-         return std::move(ss).str();
+         return ss.str();
       };
    };

From 566e12fa8d7976e96311006bc5b88933c095b710 Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Mon, 5 Jun 2023 16:34:53 -0500
Subject: [PATCH 17/22] Address peer review comments & move function
 implementations into cpp files. Address possibility of optional receipt not
 having data if a transaction fails.
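To make the receipt handling concrete: a send_transaction2 reply carries a
"processed" object whose "receipt" member can be absent (or not an object)
when the transaction fails, so trace info is only recorded for receipts whose
status is "executed". Below is a minimal standalone sketch of that control
flow, using std::optional as a stand-in for the fc::variant presence checks;
the type names and values here are illustrative only, not the generator's
real ones.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-ins for the parsed reply: a failed transaction may
// carry no receipt at all, so the receipt is modeled as optional.
struct receipt_t   { std::string status; uint32_t cpu_usage_us = 0; uint32_t net_usage_words = 0; };
struct processed_t { uint32_t block_num = 0; std::string block_time; std::optional<receipt_t> receipt; };

// Record trace info only when a receipt exists and its status is "executed";
// every other shape is logged and skipped by the real handler in the diff.
bool should_record(const processed_t& p) {
   return p.receipt.has_value() && p.receipt->status == "executed";
}

int main() {
   processed_t executed{100, "2023-06-05T19:21:45.000", receipt_t{"executed", 25, 3}};
   processed_t failed{101, "2023-06-05T19:21:45.500", std::nullopt}; // no receipt on failure
   std::cout << should_record(executed) << ' ' << should_record(failed) << '\n'; // prints: 1 0
}
```

The real handler additionally logs each rejected shape, as the diff below shows.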
resp_json["processed"]["receipt"]["net_usage_words"].as_uint64(), - resp_json["processed"]["block_time"].as_string()); + if (resp_json.is_object() && resp_json.get_object().contains("processed")) { + const auto& processed = resp_json["processed"]; + const auto& block_num = processed["block_num"].as_uint64(); + const auto& transaction_id = processed["id"].as_string(); + const auto& block_time = processed["block_time"].as_string(); + std::string status = "failed"; + int64_t net = -1; + int64_t cpu = -1; + if (processed.get_object().contains("receipt")) { + const auto& receipt = processed["receipt"]; + if (receipt.is_object()) { + status = receipt["status"].as_string(); + net = receipt["net_usage_words"].as_int64() * 8; + cpu = receipt["cpu_usage_us"].as_int64(); + } + if (status == "executed") { + record_trx_info(trx_id, block_num, cpu, net, block_time); + } else { + elog("async_http_request Transaction receipt status not executed: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, no receipt: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, transaction not processed: ${string}", + ("string", response.body())); + } } EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", response.body())); @@ -117,6 +177,23 @@ namespace eosio::testing { }); ++_sent; } + void http_connection::record_trx_info(eosio::chain::transaction_id_type trx_id, unsigned int block_num, unsigned int cpu_usage_us, + unsigned int net_usage_words, const std::string& block_time) { + std::lock_guard lock(_trx_info_map_lock); + _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); + } + + acked_trx_trace_info http_connection::get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + acked_trx_trace_info info; + std::lock_guard lock(_trx_info_map_lock); + auto search = _acked_trx_trace_info_map.find(trx_id); + if (search != _acked_trx_trace_info_map.end()) { + info = search->second; + } else { + elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: ${id}", ("id", trx_id)); + } + return info; + } trx_provider::trx_provider(const provider_base_config& provider_config) { if (provider_config._peer_endpoint_type == "http") { diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 47cde4fa8b..2e00167e71 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -67,38 +67,14 @@ namespace eosio::testing { virtual ~provider_connection() = default; - void init_and_connect() { - _connection_thread_pool.start( - 1, [](const fc::exception& e) { elog("provider_connection exception ${e}", ("e", e)); }); - connect(); - }; - - void cleanup_and_disconnect() { - disconnect(); - _connection_thread_pool.stop(); - }; - - fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id) { - fc::time_point time_acked; - std::lock_guard lock(_trx_ack_map_lock); - auto search = _trxs_ack_time_map.find(trx_id); - if (search != _trxs_ack_time_map.end()) { - time_acked = search->second; - } else { - elog("get_trx_ack_time - Transaction acknowledge time not found for transaction with id: ${id}", ("id", trx_id)); - time_acked = fc::time_point::min(); - } - return time_acked; - } + void init_and_connect(); + void cleanup_and_disconnect(); + fc::time_point get_trx_ack_time(const 
eosio::chain::transaction_id_type& trx_id); + void trx_acknowledged(const eosio::chain::transaction_id_type trx_id, const fc::time_point ack_time); virtual acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; - void trx_acknowledged(const eosio::chain::transaction_id_type trx_id, const fc::time_point ack_time) { - std::lock_guard lock(_trx_ack_map_lock); - _trxs_ack_time_map[trx_id] = ack_time; - } - private: virtual void connect() = 0; virtual void disconnect() = 0; @@ -116,22 +92,8 @@ namespace eosio::testing { void send_transaction(const chain::packed_transaction& trx) final; void record_trx_info(eosio::chain::transaction_id_type trx_id, unsigned int block_num, unsigned int cpu_usage_us, - unsigned int net_usage_words, const std::string& block_time) { - std::lock_guard lock(_trx_info_map_lock); - _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); - } - - acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override { - acked_trx_trace_info info; - std::lock_guard lock(_trx_info_map_lock); - auto search = _acked_trx_trace_info_map.find(trx_id); - if (search != _acked_trx_trace_info_map.end()) { - info = search->second; - } else { - elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: ""${id}", ("id", trx_id)); - } - return info; - } + unsigned int net_usage_words, const std::string& block_time); + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: void connect() override final; @@ -148,9 +110,7 @@ namespace eosio::testing { void send_transaction(const chain::packed_transaction& trx) final; - acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override { - return {}; - } + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: void connect() override final; From 8f865a93ef80e598a6bb62386d9ef400d99ee43c Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 5 Jun 2023 18:07:01 -0500 Subject: [PATCH 18/22] Avoid trying to kill a node twice to prevent orphaned processes. --- tests/TestHarness/Node.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 5bfd4417d6..8ae9da0633 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -272,7 +272,7 @@ def kill(self, killSignal): if self.popenProc is not None: self.popenProc.send_signal(killSignal) self.popenProc.wait() - else: + elif self.pid is not None: os.kill(self.pid, killSignal) # wait for kill validation @@ -286,6 +286,8 @@ def myFunc(): if not Utils.waitForBool(myFunc): Utils.Print("ERROR: Failed to validate node shutdown.") return False + else: + if Utils.Debug: Utils.Print(f"Called kill on node {self.nodeId} but it has already exited.") except OSError as ex: Utils.Print("ERROR: Failed to kill node (%s)." 
% (self.cmd), ex) return False @@ -377,6 +379,7 @@ def relaunch(self, chainArg=None, newChain=False, skipGenesis=True, timeout=Util if chainArg: cmdArr.extend(shlex.split(chainArg)) self.popenProc=self.launchCmd(cmdArr, self.data_dir, launch_time=datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + self.pid=self.popenProc.pid def isNodeAlive(): """wait for node to be responsive.""" From 776040ac411924088fd311ff33a07946f6a1d5a9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Jun 2023 09:33:38 -0500 Subject: [PATCH 19/22] Another round of peer review comments addressed. --- tests/trx_generator/trx_provider.cpp | 18 ++++++++++-------- tests/trx_generator/trx_provider.hpp | 18 +++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index 892fd73094..c9b5b8fc85 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -59,8 +59,8 @@ namespace eosio::testing { return time_acked; } - void provider_connection::trx_acknowledged(const eosio::chain::transaction_id_type trx_id, - const fc::time_point ack_time) { + void provider_connection::trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, + const fc::time_point& ack_time) { std::lock_guard lock(_trx_ack_map_lock); _trxs_ack_time_map[trx_id] = ack_time; } @@ -141,14 +141,14 @@ namespace eosio::testing { const auto& transaction_id = processed["id"].as_string(); const auto& block_time = processed["block_time"].as_string(); std::string status = "failed"; - int64_t net = -1; - int64_t cpu = -1; + uint32_t net = 0; + uint32_t cpu = 0; if (processed.get_object().contains("receipt")) { const auto& receipt = processed["receipt"]; if (receipt.is_object()) { status = receipt["status"].as_string(); - net = receipt["net_usage_words"].as_int64() * 8; - cpu = receipt["cpu_usage_us"].as_int64(); + net = receipt["net_usage_words"].as_uint64() * 8; + cpu = receipt["cpu_usage_us"].as_uint64(); } if (status == "executed") { record_trx_info(trx_id, block_num, cpu, net, block_time); @@ -177,8 +177,10 @@ namespace eosio::testing { }); ++_sent; } - void http_connection::record_trx_info(eosio::chain::transaction_id_type trx_id, unsigned int block_num, unsigned int cpu_usage_us, - unsigned int net_usage_words, const std::string& block_time) { + + void http_connection::record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, + uint32_t cpu_usage_us, uint32_t net_usage_words, + const std::string& block_time) { std::lock_guard lock(_trx_info_map_lock); _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 2e00167e71..8f8dd9200d 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -40,18 +40,18 @@ namespace eosio::testing { }; struct acked_trx_trace_info { - bool _valid = false; - unsigned int _block_num = 0; - unsigned int _cpu_usage_us = 0; - unsigned int _net_usage_words = 0; - std::string _block_time = ""; + bool _valid = false; + uint32_t _block_num = 0; + uint32_t _cpu_usage_us = 0; + uint32_t _net_usage_words = 0; + std::string _block_time = ""; std::string to_string() const { std::ostringstream ss; ss << "Acked Transaction Trace Info " << "valid: " << _valid << " block num: " << _block_num << " cpu usage us: " << _cpu_usage_us << " net usage words: " << _net_usage_words << " block time: " << 
_block_time; - return std::move(ss).str(); + return ss.str(); } }; @@ -70,7 +70,7 @@ namespace eosio::testing { void init_and_connect(); void cleanup_and_disconnect(); fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id); - void trx_acknowledged(const eosio::chain::transaction_id_type trx_id, const fc::time_point ack_time); + void trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, const fc::time_point& ack_time); virtual acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; @@ -91,8 +91,8 @@ namespace eosio::testing { : provider_connection(provider_config) {} void send_transaction(const chain::packed_transaction& trx) final; - void record_trx_info(eosio::chain::transaction_id_type trx_id, unsigned int block_num, unsigned int cpu_usage_us, - unsigned int net_usage_words, const std::string& block_time); + void record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, uint32_t cpu_usage_us, + uint32_t net_usage_words, const std::string& block_time); acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: From 620964759cbe70aa10bddcbc973cdbff2515f81c Mon Sep 17 00:00:00 2001 From: 766C6164 Date: Tue, 6 Jun 2023 12:26:17 -0400 Subject: [PATCH 20/22] Desensitize test in case of auto determining start block --- tests/test_snapshot_scheduler.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index 7efae81ce2..e58ee99d3e 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -89,7 +89,10 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { auto& pending = it->pending_snapshots; if (pending.size()==1) { auto pbn = pending.begin()->head_block_num; - BOOST_CHECK_EQUAL(block_num, spacing ? (spacing + (pbn%spacing)) : pbn); + pbn = spacing ? (spacing + (pbn%spacing)) : pbn; + // if snapshot scheduled with empty start_block_num depending on the timing + // it can be scheduled either for block_num or block_num+1 + BOOST_CHECK(block_num==pbn || ((block_num+1)==pbn)); } return true; } From a8f559cf9f3c7273db69aa3239924534f7df5fec Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Jun 2023 15:21:07 -0500 Subject: [PATCH 21/22] Address peer review comments. Rename and simplify test naming for clarity. Rename endpoint api type to simply be endpoint mode for clarity. 
--- tests/TestHarness/Cluster.py | 2 +- .../launch_transaction_generators.py | 10 +++---- tests/performance_tests/CMakeLists.txt | 30 +++++++++---------- tests/performance_tests/README.md | 16 +++++----- tests/performance_tests/performance_test.py | 8 ++--- .../performance_test_basic.py | 16 +++++----- 6 files changed, 41 insertions(+), 41 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index cb34371531..b46f0120b6 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1567,7 +1567,7 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig, - endpointApiType="p2p") + endpointMode="p2p") Utils.Print("Launch txn generators and start generating/sending transactions") self.trxGenLauncher.launch(waitToComplete=waitToComplete) diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index f5790f8906..353e361333 100644 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -38,7 +38,7 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str, - abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, endpointApiType: str, apiEndpoint: str=None): + abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, endpointMode: str, apiEndpoint: str=None): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -50,7 +50,7 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.abiFile = abiFile self.actionsData = actionsData self.actionsAuths = actionsAuths - self.endpointApiType = endpointApiType + self.endpointMode = endpointMode self.apiEndpoint = apiEndpoint def launch(self, waitToComplete=True): @@ -69,7 +69,7 @@ def launch(self, waitToComplete=True): '--trx-gen-duration', f'{self.trxGenDurationSec}', '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', - '--peer-endpoint-type', f'{self.endpointApiType}', + '--peer-endpoint-type', f'{self.endpointMode}', '--peer-endpoint', f'{connectionPair[0]}', '--port', f'{connectionPair[1]}'] if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None: @@ -110,7 +110,7 @@ def parseArgs(): parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use") parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") parser.add_argument("connection_pair_list", type=str, help="Comma separated list of endpoint:port combinations to send transactions to", default="localhost:9876") - parser.add_argument("endpoint_api_type", type=str, help="Endpoint API mode (\"p2p\", \"http\"). \ + parser.add_argument("endpoint_mode", type=str, help="Endpoint mode (\"p2p\", \"http\"). 
\ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default="p2p") @@ -131,7 +131,7 @@ def main(): abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator, connectionPairList=connectionPairList), - endpointApiType=args.endpoint_api_type, apiEndpoint=args.api_endpoint) + endpointMode=args.endpoint_mode, apiEndpoint=args.api_endpoint) exit_codes = trxGenLauncher.launch() diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 9a231da308..c770cb2dfc 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -16,25 +16,25 @@ endif() add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_ro COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_read_only_trxs COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 
--test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api-type http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 
--tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests) set_property(TEST performance_test_api PROPERTY LABELS long_running_tests) -set_property(TEST performance_test_ro PROPERTY LABELS long_running_tests) -set_property(TEST performance_test_ex_cpu_trx_spec PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_cpu_trx_spec PROPERTY LABELS long_running_tests) set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_read_only_trxs PROPERTY 
LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_read_only_trxs PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 3687e0649e..68de0e5ba9 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -459,7 +459,7 @@ Advanced Configuration Options: ``` usage: performance_test.py testBpOpMode overrideBasicTestConfig [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] - [--endpoint-api-type {p2p,http}] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] @@ -509,8 +509,8 @@ Test Helper Arguments: Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api-type {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. --producer-nodes PRODUCER_NODES Producing nodes count --validation-nodes VALIDATION_NODES @@ -607,7 +607,7 @@ The following scripts are typically used by the Performance Harness main script usage: performance_test_basic.py [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] - [--endpoint-api-type {p2p,http}] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] @@ -666,8 +666,8 @@ Test Helper Arguments: Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api-type {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. 
(default: p2p) --producer-nodes PRODUCER_NODES Producing nodes count (default: 1) @@ -1738,7 +1738,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "calcChainThreads": "lmax", "calcNetThreads": "lmax", "userTrxDataFile": null, - "endpointApiType": "p2p", + "endpointMode": "p2p", "opModeCmd": "testBpOpMode", "logDirBase": "performance_test", "logDirTimestamp": "2023-06-05_17-59-49", @@ -2401,7 +2401,7 @@ The Performance Test Basic generates, by default, a report that details results "expectedTransactionsSent": 130010, "printMissingTransactions": false, "userTrxDataFile": null, - "endpointApiType": "p2p", + "endpointMode": "p2p", "apiEndpoint": null, "logDirBase": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test", "logDirTimestamp": "2023-06-05_19-21-44", diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7a7696b815..537f1926a2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -48,7 +48,7 @@ class PtConfig: calcChainThreads: str="none" calcNetThreads: str="none" userTrxDataFile: Path=None - endpointApiType: str="p2p" + endpointMode: str="p2p" opModeCmd: str="" def __post_init__(self): @@ -113,7 +113,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, - quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApiType=self.ptConfig.endpointApiType) + quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test") myTest.runTest() @@ -155,7 +155,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, - quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApiType=self.ptConfig.endpointApiType) + quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test") myTest.runTest() @@ -546,7 +546,7 @@ def main(): calcChainThreads=args.calc_chain_threads, calcNetThreads=args.calc_net_threads, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, - endpointApiType=args.endpoint_api_type, + endpointMode=args.endpoint_mode, 
opModeCmd=args.op_mode_sub_cmd) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 900cc1af72..fd82bc6d15 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -170,13 +170,13 @@ class PtbConfig: expectedTransactionsSent: int = field(default_factory=int, init=False) printMissingTransactions: bool=False userTrxDataFile: Path=None - endpointApiType: str="p2p" + endpointMode: str="p2p" apiEndpoint: str=None def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps - if (self.endpointApiType == "http"): + if (self.endpointMode == "http"): self.apiEndpoint="/v1/chain/send_transaction2" @dataclass @@ -396,10 +396,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.connectionPairList = [] def configureConnections(): - if(self.ptbConfig.endpointApiType == "http"): + if(self.ptbConfig.endpointMode == "http"): for apiNodeId in self.clusterConfig._apiNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(apiNodeId).host}:{self.cluster.getNode(apiNodeId).port}") - else: # endpointApiType == p2p + else: # endpointMode == p2p for producerId in self.clusterConfig._producerNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(producerId).host}:{self.cluster.getNodeP2pPort(producerId)}") @@ -457,7 +457,7 @@ def configureConnections(): accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, - tpsTrxGensConfig=tpsTrxGensConfig, endpointApiType=self.ptbConfig.endpointApiType, apiEndpoint=self.ptbConfig.apiEndpoint) + tpsTrxGensConfig=tpsTrxGensConfig, endpointMode=self.ptbConfig.endpointMode, apiEndpoint=self.ptbConfig.apiEndpoint) trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -497,7 +497,7 @@ def captureLowLevelArtifacts(self): def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict: report = {} - report['targetApiEndpointType'] = self.ptbConfig.endpointApiType + report['targetApiEndpointType'] = self.ptbConfig.endpointMode report['targetApiEndpoint'] = self.ptbConfig.apiEndpoint if self.ptbConfig.apiEndpoint is not None else "NA for P2P" report['Result'] = asdict(testResult) report['Analysis'] = {} @@ -667,7 +667,7 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa ptbBaseGrpDescription="Performance Test Basic base configuration items." ptbBaseParserGroup = ptbBaseParser.add_argument_group(title=None if suppressHelp else ptbBaseGrpTitle, description=None if suppressHelp else ptbBaseGrpDescription) - ptbBaseParserGroup.add_argument("--endpoint-api-type", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpointt API mode (\"p2p\", \"http\"). \ + ptbBaseParserGroup.add_argument("--endpoint-mode", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpoint mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. 
\
                                 In \"http\" mode transactions will be directed to the http endpoint on an api node.",
                                 choices=["p2p", "http"], default=defEndpointApiDef)
@@ -775,7 +775,7 @@ def main():
                                        delPerfLogs=args.del_perf_logs,
                                        printMissingTransactions=args.print_missing_transactions,
                                        userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
-                                       endpointApiType=args.endpoint_api_type)
+                                       endpointMode=args.endpoint_mode)

     myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig)

From 27d049b3b18feabfc220584d671c7e86b58c2c58 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Wed, 7 Jun 2023 10:52:11 -0500
Subject: [PATCH 22/22] GH-1217 Fix process of state-history-log-retain-blocks
 option

---
 .../state_history_plugin.cpp             |  2 +-
 .../tests/plugin_config_test.cpp         | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp
index 16a5d56379..7d301f30df 100644
--- a/plugins/state_history_plugin/state_history_plugin.cpp
+++ b/plugins/state_history_plugin/state_history_plugin.cpp
@@ -463,7 +463,7 @@ void state_history_plugin::plugin_initialize(const variables_map& options) {
       state_history_log_config ship_log_conf;
       if (options.count("state-history-log-retain-blocks")) {
-         auto ship_log_prune_conf = ship_log_conf.emplace<state_history::prune_config>();
+         auto& ship_log_prune_conf = ship_log_conf.emplace<state_history::prune_config>();
          ship_log_prune_conf.prune_blocks = options.at("state-history-log-retain-blocks").as<uint32_t>();
          //the arbitrary limit of 1000 here is mainly so that there is enough buffer for newly applied forks to be delivered to clients
          // before getting pruned out. ideally pruning would have been smart enough to know not to prune reversible blocks
diff --git a/plugins/state_history_plugin/tests/plugin_config_test.cpp b/plugins/state_history_plugin/tests/plugin_config_test.cpp
index b369e9c9d3..48bf085757 100644
--- a/plugins/state_history_plugin/tests/plugin_config_test.cpp
+++ b/plugins/state_history_plugin/tests/plugin_config_test.cpp
@@ -20,4 +20,21 @@ BOOST_AUTO_TEST_CASE(state_history_plugin_default_tests) {
    auto* config = std::get_if<eosio::state_history::partition_config>(&plugin.trace_log()->config());
    BOOST_REQUIRE(config);
    BOOST_CHECK_EQUAL(config->max_retained_files, UINT32_MAX);
+}
+
+BOOST_AUTO_TEST_CASE(state_history_plugin_retain_blocks_tests) {
+   fc::temp_directory tmp;
+   appbase::scoped_app app;
+
+   auto tmp_path = tmp.path().string();
+   std::array args = {"test_state_history", "--trace-history", "--state-history-log-retain-blocks", "4242",
+                      "--disable-replay-opts", "--data-dir", tmp_path.c_str()};
+
+   BOOST_CHECK(app->initialize<eosio::state_history_plugin>(args.size(), const_cast<char**>(args.data())));
+   auto& plugin = app->get_plugin<eosio::state_history_plugin>();
+
+   BOOST_REQUIRE(plugin.trace_log());
+   auto* config = std::get_if<eosio::state_history::prune_config>(&plugin.trace_log()->config());
+   BOOST_REQUIRE(config);
+   BOOST_CHECK_EQUAL(config->prune_blocks, 4242);
 }
\ No newline at end of file
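A note on why the one-character change in plugin_initialize above fixes the
option processing: std::variant::emplace returns a reference to the
alternative it constructs, so binding the result with plain `auto` deduces a
value type and copies the alternative out of the variant; the later
prune_blocks assignment then updates only the copy, and the retain-blocks
setting is silently dropped. A minimal standalone illustration of the
pitfall, using a hypothetical stand-in for the plugin's real config type:

```cpp
#include <cassert>
#include <variant>

// Hypothetical stand-in for the plugin's prune config type.
struct prune_config { unsigned prune_blocks = 0; };
using log_config = std::variant<std::monostate, prune_config>;

int main() {
   log_config cfg;

   // Buggy form: `auto` deduces `prune_config` by value, so this copies the
   // freshly emplaced alternative and the assignment never reaches `cfg`.
   auto copy = cfg.emplace<prune_config>();
   copy.prune_blocks = 4242;
   assert(std::get<prune_config>(cfg).prune_blocks == 0);

   // Fixed form: `auto&` binds to the alternative stored inside `cfg`.
   auto& ref = cfg.emplace<prune_config>();
   ref.prune_blocks = 4242;
   assert(std::get<prune_config>(cfg).prune_blocks == 4242);
}
```

Both forms compile cleanly, which is how this class of bug slips through
review; the plugin_config_test case added above pins the corrected behavior.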