diff --git a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp index 2aebf62c9b..fd9475d438 100644 --- a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp +++ b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp @@ -20,6 +20,8 @@ #include #include +#include <optional> + namespace eosio::chain { namespace bmi = boost::multi_index; @@ -38,10 +40,19 @@ class snapshot_scheduler { struct snapshot_request_information { uint32_t block_spacing = 0; uint32_t start_block_num = 0; - uint32_t end_block_num = 0; + uint32_t end_block_num = std::numeric_limits<uint32_t>::max(); std::string snapshot_description = ""; }; + // this struct is used to hold request params in the api call + // it differentiates between 0 and empty values + struct snapshot_request_params { + std::optional<uint32_t> block_spacing; + std::optional<uint32_t> start_block_num; + std::optional<uint32_t> end_block_num; + std::optional<std::string> snapshot_description; + }; + struct snapshot_request_id_information { uint32_t snapshot_request_id = 0; }; @@ -205,6 +216,7 @@ class snapshot_scheduler { FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_information, (head_block_id) (head_block_num) (head_block_time) (version) (snapshot_name)) FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_information, (block_spacing) (start_block_num) (end_block_num) (snapshot_description)) +FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_params, (block_spacing) (start_block_num) (end_block_num) (snapshot_description)) FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_id_information, (snapshot_request_id)) FC_REFLECT(eosio::chain::snapshot_scheduler::get_snapshot_requests_result, (snapshot_requests)) FC_REFLECT_DERIVED(eosio::chain::snapshot_scheduler::snapshot_schedule_information, (eosio::chain::snapshot_scheduler::snapshot_request_id_information)(eosio::chain::snapshot_scheduler::snapshot_request_information), (pending_snapshots)) diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 9dfd988d0c..3a4f0f1d15 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -37,9 +37,13 @@ namespace eosio { namespace chain { /// Spawn threads, can be re-started after stop(). /// Assumes start()/stop() called from the same thread or externally protected. + /// Blocks until all threads are created and have completed their init function, or an exception is thrown + /// during thread startup or an init function. Exceptions thrown during these stages are rethrown from start(), + /// but some threads might still have been started. Calling stop() after such a failure is safe. /// @param num_threads is number of threads spawned /// @param on_except is the function to call if io_context throws an exception, is called from thread pool thread. - /// if an empty function then logs and rethrows exception on thread which will terminate. + /// if an empty function then logs and rethrows exception on thread which will terminate. Not called + /// for exceptions during the init function (such exceptions are rethrown from start()) /// @param init is an optional function to call at startup to initialize any data. /// @throw assert_exception if already started and not stopped. 
void start( size_t num_threads, on_except_t on_except, init_t init = {} ) { @@ -47,9 +51,25 @@ namespace eosio { namespace chain { _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) ); _ioc.restart(); _thread_pool.reserve( num_threads ); - for( size_t i = 0; i < num_threads; ++i ) { - _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init ) ); + + std::promise<void> start_complete; + std::atomic<size_t> threads_remaining = num_threads; + std::exception_ptr pending_exception; + std::mutex pending_exception_mutex; + + try { + for( size_t i = 0; i < num_threads; ++i ) { + _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init, std::ref(start_complete), + std::ref(threads_remaining), std::ref(pending_exception), std::ref(pending_exception_mutex) ) ); + } } + catch( ... ) { + /// only an exception from std::thread's ctor should end up here. shut down all threads to ensure no + /// potential access to the promise, atomic, etc. above is performed after throwing out of start + stop(); + throw; + } + start_complete.get_future().get(); } /// destroy work guard, stop io_context, join thread_pool @@ -63,16 +83,42 @@ namespace eosio { namespace chain { } private: - void run_thread( size_t i, const on_except_t& on_except, const init_t& init ) { - std::string tn = boost::core::demangle(typeid(this).name()); - auto offset = tn.rfind("::"); - if (offset != std::string::npos) - tn.erase(0, offset+2); - tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i ); + void run_thread( size_t i, const on_except_t& on_except, const init_t& init, std::promise<void>& start_complete, + std::atomic<size_t>& threads_remaining, std::exception_ptr& pending_exception, std::mutex& pending_exception_mutex ) { + + std::string tn; + + auto decrement_remaining = [&]() { + if( !--threads_remaining ) { + if( pending_exception ) + start_complete.set_exception( pending_exception ); + else + start_complete.set_value(); + } + }; + + try { + try { + tn = boost::core::demangle(typeid(this).name()); + auto offset = tn.rfind("::"); + if (offset != std::string::npos) + tn.erase(0, offset+2); + tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + if ( init ) + init(); + } FC_LOG_AND_RETHROW() + } + catch( ... 
) { + std::lock_guard l( pending_exception_mutex ); + pending_exception = std::current_exception(); + decrement_remaining(); + return; + } + + decrement_remaining(); + try { - fc::set_os_thread_name( tn ); - if ( init ) - init(); _ioc.run(); } catch( const fc::exception& e ) { if( on_except ) { diff --git a/libraries/chain/snapshot_scheduler.cpp b/libraries/chain/snapshot_scheduler.cpp index ee7a356fe0..38191222ac 100644 --- a/libraries/chain/snapshot_scheduler.cpp +++ b/libraries/chain/snapshot_scheduler.cpp @@ -8,7 +8,6 @@ namespace eosio::chain { // snapshot_scheduler_listener void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chain) { - bool serialize_needed = false; bool snapshot_executed = false; auto execute_snapshot_with_log = [this, height, &snapshot_executed, &chain](const auto& req) { @@ -25,28 +24,18 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai std::vector<uint32_t> unschedule_snapshot_request_ids; for(const auto& req: _snapshot_requests.get<0>()) { // -1 since it's called from start block - bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing)); - bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1); - - // assume "asap" for snapshot with missed/zero start, it can have spacing - if(!req.start_block_num) { - // update start_block_num with current height only if this is recurring - // if non recurring, will be executed and unscheduled - if(req.block_spacing && height) { - auto& snapshot_by_id = _snapshot_requests.get<by_snapshot_id>(); - auto it = snapshot_by_id.find(req.snapshot_request_id); - _snapshot_requests.modify(it, [&height](auto& p) { p.start_block_num = height - 1; }); - serialize_needed = true; - } - execute_snapshot_with_log(req); - } else if(recurring_snapshot || onetime_snapshot) { + bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing)); + bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1); + + bool marked_for_deletion = ((!req.block_spacing) && (height >= req.start_block_num + 1)) || // if a one time snapshot executed or was scheduled in the past, it should be gone + (height > 0 && ((height-1) >= req.end_block_num)); // any snapshot can expire by end block num (end_block_num can be max value) + + if(recurring_snapshot || onetime_snapshot) { execute_snapshot_with_log(req); } // cleanup - remove expired (or invalid) request - if((!req.start_block_num && !req.block_spacing) || - (!req.block_spacing && height >= (req.start_block_num + 1)) || - (req.end_block_num > 0 && height >= (req.end_block_num + 1))) { + if(marked_for_deletion) { unschedule_snapshot_request_ids.push_back(req.snapshot_request_id); } } @@ -54,9 +43,6 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai for(const auto& i: unschedule_snapshot_request_ids) { unschedule_snapshot(i); } - - // store db to filesystem - if(serialize_needed) x_serialize(); } void snapshot_scheduler::on_irreversible_block(const signed_block_ptr& lib, const chain::controller& chain) { @@ -80,15 +66,8 @@ snapshot_scheduler::snapshot_schedule_result snapshot_scheduler::schedule_snapsh auto& snapshot_by_value = _snapshot_requests.get<by_snapshot_value>(); auto existing = snapshot_by_value.find(std::make_tuple(sri.block_spacing, sri.start_block_num, sri.end_block_num)); EOS_ASSERT(existing == snapshot_by_value.end(), 
chain::duplicate_snapshot_request, "Duplicate snapshot request"); - - if(sri.end_block_num > 0) { - // if "end" is specified, it should be greater then start - EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block number"); - // if also block_spacing specified, check it - if(sri.block_spacing > 0) { - EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range"); - } - } + EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block number"); + EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range"); _snapshot_requests.emplace(snapshot_schedule_information{{_snapshot_id++}, {sri.block_spacing, sri.start_block_num, sri.end_block_num, sri.snapshot_description}, {}}); x_serialize(); diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 0b825a10fd..65cbe2f58b 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -132,7 +132,7 @@ void producer_api_plugin::plugin_startup() { CALL_ASYNC(producer, snapshot, producer, create_snapshot, chain::snapshot_scheduler::snapshot_information, INVOKE_R_V_ASYNC(producer, create_snapshot), 201), CALL_WITH_400(producer, snapshot, producer, schedule_snapshot, - INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_information), 201), + INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_params), 201), CALL_WITH_400(producer, snapshot, producer, unschedule_snapshot, INVOKE_R_R(producer, unschedule_snapshot, chain::snapshot_scheduler::snapshot_request_id_information), 201), CALL_WITH_400(producer, producer_rw, producer, get_integrity_hash, diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 84fa2a9f2b..823266d1fa 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -98,7 +98,7 @@ class producer_plugin : public appbase::plugin<producer_plugin> { integrity_hash_information get_integrity_hash() const; void create_snapshot(next_function<chain::snapshot_scheduler::snapshot_information> next); - chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& schedule); + chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_params& srp); chain::snapshot_scheduler::snapshot_schedule_result unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& schedule); chain::snapshot_scheduler::get_snapshot_requests_result get_snapshot_requests() const; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6aaa9648f8..1ac3381ac8 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1327,7 +1327,6 @@ void producer_plugin_impl::plugin_startup() { } if (_ro_thread_pool_size > 0) { - std::atomic<uint32_t> num_threads_started = 0; _ro_thread_pool.start( _ro_thread_pool_size, 
[](const fc::exception& e) { @@ -1336,21 +1335,8 @@ void producer_plugin_impl::plugin_startup() { }, [&]() { chain.init_thread_local_data(); - ++num_threads_started; }); - // This will be changed with std::latch or std::atomic<>::wait - // when C++20 is used. - auto time_slept_ms = 0; - constexpr auto max_time_slept_ms = 1000; - while (num_threads_started.load() < _ro_thread_pool_size && time_slept_ms < max_time_slept_ms) { - std::this_thread::sleep_for(1ms); - ++time_slept_ms; - } - EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, producer_exception, - "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", - ("n", num_threads_started.load())("t", time_slept_ms)); - start_write_window(); } diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index f4ebf1c95c..27e3e6379e 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -358,7 +358,7 @@ void state_history_plugin_impl::plugin_initialize(const variables_map& options) state_history_log_config ship_log_conf; if (options.count("state-history-log-retain-blocks")) { - auto ship_log_prune_conf = ship_log_conf.emplace<state_history::prune_config>(); + auto& ship_log_prune_conf = ship_log_conf.emplace<state_history::prune_config>(); ship_log_prune_conf.prune_blocks = options.at("state-history-log-retain-blocks").as<uint32_t>(); //the arbitrary limit of 1000 here is mainly so that there is enough buffer for newly applied forks to be delivered to clients // before getting pruned out. ideally pruning would have been smart enough to know not to prune reversible blocks diff --git a/plugins/state_history_plugin/tests/plugin_config_test.cpp b/plugins/state_history_plugin/tests/plugin_config_test.cpp index b369e9c9d3..48bf085757 100644 --- a/plugins/state_history_plugin/tests/plugin_config_test.cpp +++ b/plugins/state_history_plugin/tests/plugin_config_test.cpp @@ -20,4 +20,21 @@ BOOST_AUTO_TEST_CASE(state_history_plugin_default_tests) { auto* config = std::get_if<eosio::state_history::partition_config>(&plugin.trace_log()->config()); BOOST_REQUIRE(config); BOOST_CHECK_EQUAL(config->max_retained_files, UINT32_MAX); +} + +BOOST_AUTO_TEST_CASE(state_history_plugin_retain_blocks_tests) { + fc::temp_directory tmp; + appbase::scoped_app app; + + auto tmp_path = tmp.path().string(); + std::array<const char*, 7> args = {"test_state_history", "--trace-history", "--state-history-log-retain-blocks", "4242", + "--disable-replay-opts", "--data-dir", tmp_path.c_str()}; + + BOOST_CHECK(app->initialize<eosio::state_history_plugin>(args.size(), const_cast<char**>(args.data()))); + auto& plugin = app->get_plugin<eosio::state_history_plugin>(); + + BOOST_REQUIRE(plugin.trace_log()); + auto* config = std::get_if<eosio::state_history::prune_config>(&plugin.trace_log()->config()); + BOOST_REQUIRE(config); + BOOST_CHECK_EQUAL(config->prune_blocks, 4242); } \ No newline at end of file diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 32d06f4f66..b46f0120b6 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1562,11 +1562,12 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a self.preExistingFirstTrxFiles = glob.glob(f"{Utils.DataDir}/first_trx_*.txt") connectionPairList = [f"{self.host}:{self.getNodeP2pPort(nodeId)}"] - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList, endpointApi="p2p") + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList) 
self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, - abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig) + abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig, + endpointMode="p2p") Utils.Print("Launch txn generators and start generating/sending transactions") self.trxGenLauncher.launch(waitToComplete=waitToComplete) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 5bfd4417d6..8ae9da0633 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -272,7 +272,7 @@ def kill(self, killSignal): if self.popenProc is not None: self.popenProc.send_signal(killSignal) self.popenProc.wait() - else: + elif self.pid is not None: os.kill(self.pid, killSignal) # wait for kill validation @@ -286,6 +286,8 @@ def myFunc(): if not Utils.waitForBool(myFunc): Utils.Print("ERROR: Failed to validate node shutdown.") return False + else: + if Utils.Debug: Utils.Print(f"Called kill on node {self.nodeId} but it has already exited.") except OSError as ex: Utils.Print("ERROR: Failed to kill node (%s)." % (self.cmd), ex) return False @@ -377,6 +379,7 @@ def relaunch(self, chainArg=None, newChain=False, skipGenesis=True, timeout=Util if chainArg: cmdArr.extend(shlex.split(chainArg)) self.popenProc=self.launchCmd(cmdArr, self.data_dir, launch_time=datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + self.pid=self.popenProc.pid def isNodeAlive(): """wait for node to be responsive.""" diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 3f10e35f92..353e361333 100644 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -16,7 +16,7 @@ class TpsTrxGensConfig: - def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list, endpointApi: str): + def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list): self.targetTps: int = targetTps self.tpsLimitPerGenerator: int = tpsLimitPerGenerator self.connectionPairList = connectionPairList @@ -27,7 +27,6 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList self.modTps = self.targetTps % self.numGenerators self.cleanlyDivisible = self.modTps == 0 self.incrementPoint = self.numGenerators + 1 - self.modTps - self.endpointApi = endpointApi self.targetTpsPerGenList = [] curTps = self.initialTpsPerGenerator @@ -39,7 +38,7 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str, - abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig): + abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, endpointMode: str, apiEndpoint: str=None): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -51,6 +50,8 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.abiFile = abiFile self.actionsData = actionsData self.actionsAuths = actionsAuths + 
self.endpointMode = endpointMode + self.apiEndpoint = apiEndpoint def launch(self, waitToComplete=True): self.subprocess_ret_codes = [] @@ -68,13 +69,16 @@ def launch(self, waitToComplete=True): '--trx-gen-duration', f'{self.trxGenDurationSec}', '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', - '--peer-endpoint-type', f'{self.tpsTrxGensConfig.endpointApi}', + '--peer-endpoint-type', f'{self.endpointMode}', '--peer-endpoint', f'{connectionPair[0]}', '--port', f'{connectionPair[1]}'] if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None: popenStringList.extend(['--abi-file', f'{self.abiFile}', '--actions-data', f'{self.actionsData}', '--actions-auths', f'{self.actionsAuths}']) + if self.apiEndpoint is not None: + popenStringList.extend(['--api-endpoint', f'{self.apiEndpoint}']) + if Utils.Debug: Print(f"Running trx_generator: {' '.join(popenStringList)}") self.subprocess_ret_codes.append(subprocess.Popen(popenStringList)) @@ -106,10 +110,13 @@ def parseArgs(): parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use") parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containing authAcctName to activePrivateKey pairs.") parser.add_argument("connection_pair_list", type=str, help="Comma separated list of endpoint:port combinations to send transactions to", default="localhost:9876") - parser.add_argument("endpoint_api", type=str, help="Endpoint API mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default="p2p") + parser.add_argument("endpoint_mode", type=str, help="Endpoint mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default="p2p") + parser.add_argument("api_endpoint", type=str, help="The api endpoint to use to submit transactions. (Only used with http api nodes currently as p2p transactions are streamed)", default="/v1/chain/send_transaction2") + args = parser.parse_args() return args @@ -123,7 +130,8 @@ def main(): privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator, - connectionPairList=connectionPairList, endpointApi=args.endpoint_api)) + connectionPairList=connectionPairList), + endpointMode=args.endpoint_mode, apiEndpoint=args.api_endpoint) exit_codes = trxGenLauncher.launch() diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 0f6d5e740d..c770cb2dfc 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -4,6 +4,7 @@ configure_file(log_reader.py . COPYONLY) configure_file(genesis.json . COPYONLY) configure_file(cpuTrxData.json . COPYONLY) configure_file(ramTrxData.json . COPYONLY) +configure_file(readOnlyTrxData.json . COPYONLY) configure_file(userTrxDataTransfer.json . COPYONLY) configure_file(userTrxDataNewAccount.json . 
COPYONLY) @@ -15,21 +16,25 @@ endif() add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_read_only_trxs COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_new_acct_trx_spec 
COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 
--api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests) set_property(TEST performance_test_api PROPERTY LABELS long_running_tests) -set_property(TEST performance_test_ex_cpu_trx_spec PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_cpu_trx_spec PROPERTY LABELS long_running_tests) set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_read_only_trxs PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 961d3e9e79..68de0e5ba9 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -459,8 +459,9 @@ Advanced Configuration Options: ``` usage: performance_test.py testBpOpMode overrideBasicTestConfig [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] - [--endpoint-api {p2p,http}] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] [--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT] @@ -508,14 +509,16 @@ Test Helper Arguments: Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. 
--producer-nodes PRODUCER_NODES Producing nodes count --validation-nodes VALIDATION_NODES Validation nodes count --api-nodes API_NODES API nodes count + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. --genesis GENESIS Path to genesis.json @@ -604,10 +607,11 @@ The following scripts are typically used by the Performance Harness main script usage: performance_test_basic.py [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] - [--endpoint-api {p2p,http}] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] @@ -662,8 +666,8 @@ Test Helper Arguments: Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. (default: p2p) --producer-nodes PRODUCER_NODES Producing nodes count (default: 1) @@ -671,6 +675,8 @@ Performance Test Basic Base: Validation nodes count (default: 1) --api-nodes API_NODES API nodes count (default: 0) + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions (default: 0) --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. (default: 4000) --genesis GENESIS Path to genesis.json (default: tests/performance_tests/genesis.json) @@ -804,6 +810,12 @@ Transaction Generator command line options.: actions auths description string to use, containing authAcctName to activePrivateKey pairs. + --api-endpoint arg The api endpoint to direct transactions to. + Defaults to: '/v1/chain/send_transaction2' + --peer-endpoint-type arg (=p2p) Identify the peer endpoint api type to + determine how to send transactions. + Allowable 'p2p' and 'http'. 
Default: + 'p2p' --peer-endpoint arg (=127.0.0.1) set the peer endpoint to send transactions to --port arg (=9876) set the peer endpoint port to send @@ -822,7 +834,7 @@ The Performance Harness generates a report to summarize results of test scenario Command used to run test and generate report: ``` bash -.build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax +./build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax ``` ### Report Breakdown @@ -843,11 +855,11 @@ Next, a high level summary of the search scenario target and results is included "19001": "FAIL", "16001": "FAIL", "14501": "FAIL", - "13501": "PASS", - "14001": "PASS" + "13501": "FAIL", + "13001": "PASS" }, "LongRunningSearchScenariosSummary": { - "14001": "PASS" + "13001": "PASS" }, ``` @@ -863,20 +875,20 @@ Next, a summary of the search scenario conducted and respective results is inclu "searchFloor": 1, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:57:41.801991", - "testEnd": "2023-05-17T22:58:57.941356", - "testDuration": "0:01:16.139365", + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, "targetTPS": 12501, - "resultAvgTps": 12530.375, + "resultAvgTps": 12523.6875, "expectedTxns": 125010, "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-57-41-12501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } } ``` @@ -912,15 +924,15 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "perfTestsBegin": "2023-05-17T21:28:39.926423", - "perfTestsFinish": "2023-05-17T23:07:02.076216", - "perfTestsDuration": "1:38:22.149793", + "perfTestsBegin": "2023-06-05T17:59:49.175441", + "perfTestsFinish": "2023-06-05T19:23:03.723738", + "perfTestsDuration": "1:23:14.548297", "operationalMode": "Block Producer Operational Mode", - "InitialMaxTpsAchieved": 14001, - "LongRunningMaxTpsAchieved": 14001, - "tpsTestStart": "2023-05-17T22:54:38.770858", - "tpsTestFinish": "2023-05-17T23:07:02.076202", - "tpsTestDuration": "0:12:23.305344", + "InitialMaxTpsAchieved": 13001, + "LongRunningMaxTpsAchieved": 13001, + "tpsTestStart": "2023-06-05T19:10:32.123231", + "tpsTestFinish": "2023-06-05T19:23:03.723722", + "tpsTestDuration": "0:12:31.600491", "InitialSearchScenariosSummary": { "50000": "FAIL", "25001": "FAIL", @@ -928,11 +940,11 @@ Finally, the full detail test report for each of the determined max TPS throughp "19001": "FAIL", "16001": "FAIL", "14501": "FAIL", - "13501": "PASS", - "14001": "PASS" + "13501": "FAIL", + "13001": "PASS" }, "LongRunningSearchScenariosSummary": { - "14001": "PASS" + "13001": "PASS" }, "InitialSearchResults": { "0": { @@ -941,20 +953,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 50000, "basicTestResult": { - "testStart": "2023-05-17T22:54:38.770895", - "testEnd": "2023-05-17T22:56:13.025658", - "testDuration": "0:01:34.254763", + 
"testStart": "2023-06-05T19:10:32.123282", + "testEnd": "2023-06-05T19:12:12.746349", + "testDuration": "0:01:40.623067", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 50000, - "resultAvgTps": 12108.542857142857, + "resultAvgTps": 14015.564102564103, "expectedTxns": 500000, - "resultTxns": 242869, - "testAnalysisBlockCnt": 36, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-54-38-50000" + "resultTxns": 309515, + "testAnalysisBlockCnt": 40, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-10-32-50000" } }, "1": { @@ -963,20 +975,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 49500, "basicTestResult": { - "testStart": "2023-05-17T22:56:13.130264", - "testEnd": "2023-05-17T22:57:41.712682", - "testDuration": "0:01:28.582418", + "testStart": "2023-06-05T19:12:12.749120", + "testEnd": "2023-06-05T19:13:42.524984", + "testDuration": "0:01:29.775864", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 25001, - "resultAvgTps": 14207.161290322581, + "resultAvgTps": 13971.5, "expectedTxns": 250010, - "resultTxns": 249688, - "testAnalysisBlockCnt": 32, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-56-13-25001" + "resultTxns": 249981, + "testAnalysisBlockCnt": 33, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-12-12-25001" } }, "2": { @@ -985,20 +997,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:57:41.801991", - "testEnd": "2023-05-17T22:58:57.941356", - "testDuration": "0:01:16.139365", + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, "targetTPS": 12501, - "resultAvgTps": 12530.375, + "resultAvgTps": 12523.6875, "expectedTxns": 125010, "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-57-41-12501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } }, "3": { @@ -1007,20 +1019,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 24501, "basicTestResult": { - "testStart": "2023-05-17T22:58:58.035578", - "testEnd": "2023-05-17T23:00:21.801656", - "testDuration": "0:01:23.766078", + "testStart": "2023-06-05T19:15:00.444109", + "testEnd": "2023-06-05T19:16:25.749654", + "testDuration": "0:01:25.305545", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 19001, - "resultAvgTps": 14720.045454545454, + "resultAvgTps": 14858.095238095239, "expectedTxns": 190010, - "resultTxns": 190008, - "testAnalysisBlockCnt": 23, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_22-58-58-19001" + "resultTxns": 189891, + "testAnalysisBlockCnt": 22, + "logsDir": 
"performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-15-00-19001" } }, "4": { @@ -1029,20 +1041,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 18501, "basicTestResult": { - "testStart": "2023-05-17T23:00:21.902609", - "testEnd": "2023-05-17T23:01:42.674652", - "testDuration": "0:01:20.772043", + "testStart": "2023-06-05T19:16:25.751860", + "testEnd": "2023-06-05T19:17:48.336896", + "testDuration": "0:01:22.585036", "testPassed": false, - "testRunSuccessful": true, + "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, - "trxExpectMet": true, + "trxExpectMet": false, "targetTPS": 16001, - "resultAvgTps": 13972.578947368422, + "resultAvgTps": 14846.0, "expectedTxns": 160010, - "resultTxns": 160010, - "testAnalysisBlockCnt": 20, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-00-21-16001" + "resultTxns": 159988, + "testAnalysisBlockCnt": 19, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-16-25-16001" } }, "5": { @@ -1051,64 +1063,64 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 15501, "basicTestResult": { - "testStart": "2023-05-17T23:01:42.780751", - "testEnd": "2023-05-17T23:03:02.321649", - "testDuration": "0:01:19.540898", + "testStart": "2023-06-05T19:17:48.339990", + "testEnd": "2023-06-05T19:19:07.843311", + "testDuration": "0:01:19.503321", "testPassed": false, "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, "targetTPS": 14501, - "resultAvgTps": 13710.176470588236, + "resultAvgTps": 13829.588235294117, "expectedTxns": 145010, - "resultTxns": 144729, + "resultTxns": 144964, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-01-42-14501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-17-48-14501" } }, "6": { - "success": true, + "success": false, "searchTarget": 13501, "searchFloor": 13001, "searchCeiling": 14001, "basicTestResult": { - "testStart": "2023-05-17T23:03:02.417778", - "testEnd": "2023-05-17T23:04:20.138769", - "testDuration": "0:01:17.720991", - "testPassed": true, - "testRunSuccessful": true, + "testStart": "2023-06-05T19:19:07.845657", + "testEnd": "2023-06-05T19:20:27.815030", + "testDuration": "0:01:19.969373", + "testPassed": false, + "testRunSuccessful": false, "testRunCompleted": true, "tpsExpectMet": true, - "trxExpectMet": true, + "trxExpectMet": false, "targetTPS": 13501, - "resultAvgTps": 13508.4375, + "resultAvgTps": 13470.375, "expectedTxns": 135010, - "resultTxns": 135010, + "resultTxns": 135000, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-03-02-13501" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-19-07-13501" } }, "7": { "success": true, - "searchTarget": 14001, - "searchFloor": 14001, - "searchCeiling": 14001, + "searchTarget": 13001, + "searchFloor": 13001, + "searchCeiling": 13001, "basicTestResult": { - "testStart": "2023-05-17T23:04:20.234990", - "testEnd": "2023-05-17T23:05:38.702787", - "testDuration": "0:01:18.467797", + "testStart": "2023-06-05T19:20:27.817483", + "testEnd": "2023-06-05T19:21:44.846130", + "testDuration": "0:01:17.028647", "testPassed": true, 
"testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13935.3125, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13032.5625, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-04-20-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-20-27-13001" } } }, @@ -1131,24 +1143,24 @@ Finally, the full detail test report for each of the determined max TPS throughp "LongRunningSearchResults": { "0": { "success": true, - "searchTarget": 14001, + "searchTarget": 13001, "searchFloor": 1, - "searchCeiling": 14001, + "searchCeiling": 13001, "basicTestResult": { - "testStart": "2023-05-17T23:05:38.835496", - "testEnd": "2023-05-17T23:07:01.937623", - "testDuration": "0:01:23.102127", + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13977.4375, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" } } }, @@ -1169,49 +1181,44 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "ProducerThreadAnalysis": { - "recommendedThreadCount": 3, + "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 13001, - "3": 13501, - "4": 11501 + "2": 12001, + "3": 12001 }, - "analysisStart": "2023-05-17T21:28:39.947138", - "analysisFinish": "2023-05-17T22:01:33.079889" + "analysisStart": "2023-06-05T17:59:49.197967", + "analysisFinish": "2023-06-05T18:18:33.449126" }, "ChainThreadAnalysis": { - "recommendedThreadCount": 2, + "recommendedThreadCount": 3, "threadToMaxTpsDict": { - "2": 13501, - "3": 13001 + "2": 4001, + "3": 13001, + "4": 5501 }, - "analysisStart": "2023-05-17T22:01:33.080513", - "analysisFinish": "2023-05-17T22:23:35.604304" + "analysisStart": "2023-06-05T18:18:33.449689", + "analysisFinish": "2023-06-05T18:48:02.262053" }, "NetThreadAnalysis": { - "recommendedThreadCount": 5, + "recommendedThreadCount": 4, "threadToMaxTpsDict": { - "4": 12001, - "5": 14001, - "6": 10001 + "4": 14501, + "5": 13501 }, - "analysisStart": "2023-05-17T22:23:35.605115", - "analysisFinish": "2023-05-17T22:54:38.770570" + "analysisStart": "2023-06-05T18:48:02.262594", + "analysisFinish": "2023-06-05T19:10:32.123003" }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, "producerNodeCount": 1, "validationNodeCount": 1, "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -1495,8 +1502,11 @@ Finally, the 
full detail test report for each of the determined max TPS throughp "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -1564,16 +1574,13 @@ Finally, the full detail test report for each of the determined max TPS throughp "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -1690,6 +1697,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -1698,7 +1706,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "nodeosVers": "v4", "specificExtraNodeosArgs": { "1": "--plugin eosio::trace_api_plugin ", - "2": "--plugin eosio::chain_api_plugin " + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, "_pNodes": 1, @@ -1712,6 +1720,7 @@ Finally, the full detail test report for each of the determined max TPS throughp 2 ], "nonProdsEosVmOcEnable": false, + "apiNodesReadOnlyThreadCount": 0, "testDurationSec": 10, "finalDurationSec": 30, "delPerfLogs": false, @@ -1729,13 +1738,13 @@ Finally, the full detail test report for each of the determined max TPS throughp "calcChainThreads": "lmax", "calcNetThreads": "lmax", "userTrxDataFile": null, - "endpointApi": "p2p", + "endpointMode": "p2p", "opModeCmd": "testBpOpMode", "logDirBase": "performance_test", - "logDirTimestamp": "2023-05-17_21-28-39", - "logDirPath": "performance_test/2023-05-17_21-28-39", - "ptbLogsDirPath": "performance_test/2023-05-17_21-28-39/testRunLogs", - "pluginThreadOptLogsDirPath": "performance_test/2023-05-17_21-28-39/pluginThreadOptRunLogs" + "logDirTimestamp": "2023-06-05_17-59-49", + "logDirPath": "performance_test/2023-06-05_17-59-49", + "ptbLogsDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs", + "pluginThreadOptLogsDirPath": "performance_test/2023-06-05_17-59-49/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -1758,97 +1767,98 @@ The Performance Test Basic generates, by default, a report that details results ``` json { - "targetApiEndpoint": "p2p", + "targetApiEndpointType": "p2p", + "targetApiEndpoint": "NA for 
P2P", "Result": { - "testStart": "2023-05-17T23:05:38.835496", - "testEnd": "2023-05-17T23:07:01.937623", - "testDuration": "0:01:23.102127", + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", "testPassed": true, "testRunSuccessful": true, "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "targetTPS": 14001, - "resultAvgTps": 13977.4375, - "expectedTxns": 140010, - "resultTxns": 140010, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" }, "Analysis": { "BlockSize": { - "min": 153909, - "max": 192200, - "avg": 173840.70588235295, - "sigma": 9231.908863633565, + "min": 153503, + "max": 169275, + "avg": 162269.76470588235, + "sigma": 3152.279353278714, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { - "firstBlockNum": 112, - "lastBlockNum": 152, - "totalBlocks": 41, - "testStartBlockNum": 112, - "testEndBlockNum": 152, + "firstBlockNum": 110, + "lastBlockNum": 140, + "totalBlocks": 31, + "testStartBlockNum": 110, + "testEndBlockNum": 140, "setupBlocksCnt": 0, "tearDownBlocksCnt": 0, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 19, + "trailingEmptyBlocksCnt": 9, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 13234, - "max": 14812, - "avg": 13977.4375, - "sigma": 398.5379481225721, + "min": 12775, + "max": 13285, + "avg": 13027.0, + "sigma": 92.70854868888844, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 14001, + "configTps": 13001, "configTestDuration": 10, "tpsPerGenerator": [ - 3500, - 3500, - 3500, - 3501 + 3250, + 3250, + 3250, + 3251 ], "generatorCount": 4 }, "TrxCPU": { - "min": 7.0, - "max": 3649.0, - "avg": 26.156724519677166, - "sigma": 21.41749466859243, - "samples": 140010 + "min": 8.0, + "max": 1180.0, + "avg": 25.89257749403892, + "sigma": 12.604252354938811, + "samples": 130010 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5820000171661377, - "avg": 0.2768004499855284, - "sigma": 0.1456695649820771, - "samples": 140010, + "max": 0.5399999618530273, + "avg": 0.2522121298066488, + "sigma": 0.14457374598663084, + "samples": 130010, "units": "seconds" }, "TrxNet": { "min": 24.0, "max": 25.0, - "avg": 24.85718162988358, - "sigma": 0.3498875294629824, - "samples": 140010 + "avg": 24.846196446427196, + "sigma": 0.3607603366241642, + "samples": 130010 }, "TrxAckResponseTime": { "min": -1.0, "max": -1.0, "avg": -1.0, "sigma": 0.0, - "samples": 140010, + "samples": 130010, "measurementApplicable": "NOT APPLICABLE", "units": "microseconds" }, - "ExpectedTransactions": 140010, + "ExpectedTransactions": 130010, "DroppedTransactions": 0, - "ProductionWindowsTotal": 0, - "ProductionWindowsAverageSize": 0, + "ProductionWindowsTotal": 2, + "ProductionWindowsAverageSize": 12.0, "ProductionWindowsMissed": 0, "ForkedBlocks": { "00": [], @@ -1869,19 +1879,15 @@ The Performance Test Basic generates, by default, a report that details results }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, 
"delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, "producerNodeCount": 1, "validationNodeCount": 1, "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -2165,8 +2171,11 @@ The Performance Test Basic generates, by default, a report that details results "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -2234,16 +2243,13 @@ The Performance Test Basic generates, by default, a report that details results "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -2360,6 +2366,7 @@ The Performance Test Basic generates, by default, a report that details results "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -2368,7 +2375,7 @@ The Performance Test Basic generates, by default, a report that details results "nodeosVers": "v4", "specificExtraNodeosArgs": { "1": "--plugin eosio::trace_api_plugin ", - "2": "--plugin eosio::chain_api_plugin " + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, "_pNodes": 1, @@ -2382,22 +2389,25 @@ The Performance Test Basic generates, by default, a report that details results 2 ], "nonProdsEosVmOcEnable": false, - "targetTps": 14001, + "apiNodesReadOnlyThreadCount": 0, + "targetTps": 13001, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "performance_test/2023-05-17_21-28-39/testRunLogs", + "logDirRoot": "performance_test/2023-06-05_17-59-49/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 140010, + "expectedTransactionsSent": 130010, "printMissingTransactions": false, "userTrxDataFile": null, - "endpointApi": "p2p", - "logDirBase": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test", - "logDirTimestamp": "2023-05-17_23-05-38", - "logDirTimestampedOptSuffix": "-14001", - "logDirPath": "performance_test/2023-05-17_21-28-39/testRunLogs/performance_test/2023-05-17_23-05-38-14001" + "endpointMode": "p2p", + "apiEndpoint": null, + 
"logDirBase": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test", + "logDirTimestamp": "2023-06-05_19-21-44", + "logDirTimestampedOptSuffix": "-13001", + "logDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001", + "userTrxData": "NOT CONFIGURED" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index e8472e950c..70bb605fc1 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -244,11 +244,21 @@ class sentTrx(): acked: str = "" ackResponseTimeUs: int = -1 +@dataclass +class sentTrxExtTrace(): + sentTime: str = "" + acked: str = "" + ackResponseTimeUs: int = -1 + blockNum: int = -1 + cpuUsageUs: int = -1 + netUsageWords: int = -1 + blockTime: str = "" + def scrapeTrxGenLog(trxSent: dict, path): #trxGenLogs/trx_data_output_*.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - trxSent.update(dict([(x[0], sentTrx(x[1], x[2], x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) + trxSent.update(dict([(x[0], sentTrx(x[1], x[2], x[3]) if len(x) == 4 else sentTrxExtTrace(x[1], x[2], x[3], x[4], x[5], x[6], x[7])) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeTrxGenTrxSentDataLogs(trxSent: dict, trxGenLogDirPath, quiet): filesScraped = [] @@ -261,7 +271,10 @@ def scrapeTrxGenTrxSentDataLogs(trxSent: dict, trxGenLogDirPath, quiet): def populateTrxSentAndAcked(trxSent: dict, trxDict: dict, notFound): for sentTrxId in trxSent.keys(): - if sentTrxId in trxDict.keys(): + if (isinstance(trxSent[sentTrxId], sentTrxExtTrace)): + trxDict[sentTrxId] = trxData(blockNum=trxSent[sentTrxId].blockNum, cpuUsageUs=trxSent[sentTrxId].cpuUsageUs, netUsageUs=trxSent[sentTrxId].netUsageWords, blockTime=trxSent[sentTrxId].blockTime, acknowledged=trxSent[sentTrxId].acked, ackRespTimeUs=trxSent[sentTrxId].ackResponseTimeUs) + trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime + elif sentTrxId in trxDict.keys(): trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime trxDict[sentTrxId].acknowledged = trxSent[sentTrxId].acked trxDict[sentTrxId].ackRespTimeUs = trxSent[sentTrxId].ackResponseTimeUs diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index e845e128c2..537f1926a2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -48,7 +48,7 @@ class PtConfig: calcChainThreads: str="none" calcNetThreads: str="none" userTrxDataFile: Path=None - endpointApi: str="p2p" + endpointMode: str="p2p" opModeCmd: str="" def __post_init__(self): @@ -113,7 +113,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, - quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi) + quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, 
ptbConfig=ptbConfig, testNamePath="performance_test") myTest.runTest() @@ -155,7 +155,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, - quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi) + quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test") myTest.runTest() @@ -546,7 +546,7 @@ def main(): calcChainThreads=args.calc_chain_threads, calcNetThreads=args.calc_net_threads, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, - endpointApi=args.endpoint_api, + endpointMode=args.endpoint_mode, opModeCmd=args.op_mode_sub_cmd) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b3699c9014..fd82bc6d15 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -112,6 +112,7 @@ class SpecifiedContract: _validationNodeIds: list = field(default_factory=list) _apiNodeIds: list = field(default_factory=list) nonProdsEosVmOcEnable: bool = False + apiNodesReadOnlyThreadCount: int = 0 def __post_init__(self): self._totalNodes = self.producerNodeCount + self.validationNodeCount + self.apiNodeCount @@ -137,11 +138,15 @@ def configureValidationNodes(): def configureApiNodes(): apiNodeSpecificNodeosStr = "" apiNodeSpecificNodeosStr += "--plugin eosio::chain_api_plugin " + apiNodeSpecificNodeosStr += "--plugin eosio::net_api_plugin " + apiNodeSpecificNodeosStr += f"--read-only-threads {self.apiNodesReadOnlyThreadCount} " if apiNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : apiNodeSpecificNodeosStr for nodeId in self._apiNodeIds}) - configureValidationNodes() - configureApiNodes() + if self.validationNodeCount > 0: + configureValidationNodes() + if self.apiNodeCount > 0: + configureApiNodes() assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" if self.nodeosVers == "v2": @@ -165,10 +170,14 @@ class PtbConfig: expectedTransactionsSent: int = field(default_factory=int, init=False) printMissingTransactions: bool=False userTrxDataFile: Path=None - endpointApi: str="p2p" + endpointMode: str="p2p" + apiEndpoint: str=None + def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps + if (self.endpointMode == "http"): + self.apiEndpoint="/v1/chain/send_transaction2" @dataclass class LoggingConfig: @@ -387,10 +396,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.connectionPairList = [] def configureConnections(): - 
if(self.ptbConfig.endpointApi == "http"): + if(self.ptbConfig.endpointMode == "http"): for apiNodeId in self.clusterConfig._apiNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(apiNodeId).host}:{self.cluster.getNode(apiNodeId).port}") - else: # endpointApi == p2p + else: # endpointMode == p2p for producerId in self.clusterConfig._producerNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(producerId).host}:{self.cluster.getNodeP2pPort(producerId)}") @@ -415,6 +424,9 @@ def configureConnections(): print(f"Creating accounts specified in userTrxData: {self.userTrxDataDict['initAccounts']}") self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] + if 'apiEndpoint' in self.userTrxDataDict: + self.ptbConfig.apiEndpoint = self.userTrxDataDict['apiEndpoint'] + print(f'API Endpoint specified: {self.ptbConfig.apiEndpoint}') actionsDataJson = json.dumps(self.userTrxDataDict['actions']) @@ -439,13 +451,13 @@ def configureConnections(): self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList, endpointApi=self.ptbConfig.endpointApi) + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList) self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, - tpsTrxGensConfig=tpsTrxGensConfig) + tpsTrxGensConfig=tpsTrxGensConfig, endpointMode=self.ptbConfig.endpointMode, apiEndpoint=self.ptbConfig.apiEndpoint) trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -483,10 +495,10 @@ def captureLowLevelArtifacts(self): except Exception as e: print(f"Failed to move '{self.cluster.nodeosLogPath}' to '{self.varLogsDirPath}': {type(e)}: {e}") - def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, nodeosVers: str, - targetApiEndpoint: str, testResult: PerfTestBasicResult) -> dict: + def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict: report = {} - report['targetApiEndpoint'] = targetApiEndpoint + report['targetApiEndpointType'] = self.ptbConfig.endpointMode + report['targetApiEndpoint'] = self.ptbConfig.apiEndpoint if self.ptbConfig.apiEndpoint is not None else "NA for P2P" report['Result'] = asdict(testResult) report['Analysis'] = {} report['Analysis']['BlockSize'] = asdict(logAnalysis.blockSizeStats) @@ -519,8 +531,9 @@ def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_r report['Analysis']['DroppedBlocks'][formattedNodeNum] = self.data.droppedBlocks[formattedNodeNum] report['Analysis']['DroppedBlocksCount'][formattedNodeNum] = len(self.data.droppedBlocks[formattedNodeNum]) 
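The `configureConnections()` change above is the heart of the `endpointApi` → `endpointMode` rename: the chosen mode decides which nodes the transaction generators connect to. Restated as a self-contained sketch — `getNode`/`getNodeP2pPort` mirror the cluster accessors used in the diff, while the stub classes are hypothetical stand-ins for the harness's cluster objects:

```python
from dataclasses import dataclass

@dataclass
class _Node:
    # hypothetical stand-in for the harness's node handle
    host: str
    port: int
    p2p_port: int

class _Cluster:
    # hypothetical stand-in exposing the two accessors used in configureConnections()
    def __init__(self, nodes):
        self.nodes = nodes
    def getNode(self, node_id):
        return self.nodes[node_id]
    def getNodeP2pPort(self, node_id):
        return self.nodes[node_id].p2p_port

def configure_connection_pairs(endpoint_mode, cluster, api_node_ids, producer_node_ids):
    # "http": direct generators at the http endpoint of each API node;
    # "p2p":  stream directly to the p2p port of each producer node.
    pairs = []
    if endpoint_mode == "http":
        for node_id in api_node_ids:
            node = cluster.getNode(node_id)
            pairs.append(f"{node.host}:{node.port}")
    else:  # endpoint_mode == "p2p"
        for node_id in producer_node_ids:
            pairs.append(f"{cluster.getNode(node_id).host}:{cluster.getNodeP2pPort(node_id)}")
    return pairs

cluster = _Cluster({0: _Node("127.0.0.1", 8888, 9876)})
assert configure_connection_pairs("p2p", cluster, [], [0]) == ["127.0.0.1:9876"]
assert configure_connection_pairs("http", cluster, [0], []) == ["127.0.0.1:8888"]
```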
report['args'] = argsDict + report['args']['userTrxData'] = self.userTrxDataDict if self.ptbConfig.userTrxDataFile is not None else "NOT CONFIGURED" report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = nodeosVers + report['nodeosVersion'] = self.clusterConfig.nodeosVers return report def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): @@ -550,8 +563,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): print(f"testRunSuccessful: {self.testResult.testRunSuccessful} testPassed: {self.testResult.testPassed} tpsExpectationMet: {self.testResult.tpsExpectMet} trxExpectationMet: {self.testResult.trxExpectMet}") - self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, nodeosVers=self.clusterConfig.nodeosVers, - targetApiEndpoint=self.ptbConfig.endpointApi, testResult=self.testResult) + self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, testResult=self.testResult) jsonReport = None if not self.ptbConfig.quiet or not self.ptbConfig.delReport: @@ -641,7 +653,8 @@ def setupClusterConfig(args) -> ClusterConfig: producerNodeCount=args.producer_nodes, validationNodeCount=args.validation_nodes, apiNodeCount=args.api_nodes, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, - nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable) + nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable, + apiNodesReadOnlyThreadCount=args.api_nodes_read_only_threads) class PtbArgumentsHandler(object): @staticmethod @@ -654,13 +667,14 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa ptbBaseGrpDescription="Performance Test Basic base configuration items." ptbBaseParserGroup = ptbBaseParser.add_argument_group(title=None if suppressHelp else ptbBaseGrpTitle, description=None if suppressHelp else ptbBaseGrpDescription) - ptbBaseParserGroup.add_argument("--endpoint-api", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpointt API mode (\"p2p\", \"http\"). \ + ptbBaseParserGroup.add_argument("--endpoint-mode", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpoint mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. 
\ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default=defEndpointApiDef) ptbBaseParserGroup.add_argument("--producer-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Producing nodes count", default=defProdNodeCnt) ptbBaseParserGroup.add_argument("--validation-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Validation nodes count", default=defValidationNodeCnt) ptbBaseParserGroup.add_argument("--api-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes count", default=defApiNodeCnt) + ptbBaseParserGroup.add_argument("--api-nodes-read-only-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes read only threads count for use with read-only transactions", default=0) ptbBaseParserGroup.add_argument("--tps-limit-per-generator", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum amount of transactions per second a single generator can have.", default=4000) ptbBaseParserGroup.add_argument("--genesis", type=str, help=argparse.SUPPRESS if suppressHelp else "Path to genesis.json", default="tests/performance_tests/genesis.json") ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=argparse.SUPPRESS if suppressHelp else ("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " @@ -761,7 +775,7 @@ def main(): delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, - endpointApi=args.endpoint_api) + endpointMode=args.endpoint_mode) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) diff --git a/tests/performance_tests/readOnlyTrxData.json b/tests/performance_tests/readOnlyTrxData.json new file mode 100644 index 0000000000..9c6a367b9f --- /dev/null +++ b/tests/performance_tests/readOnlyTrxData.json @@ -0,0 +1,14 @@ +{ + "initAccounts": ["payloadless"], + "abiFile": "unittests/test-contracts/payloadless/payloadless.abi", + "apiEndpoint": "/v1/chain/send_read_only_transaction", + "actions": [ + { + "actionName": "doit", + "actionData": { + }, + "actionAuthAcct": "payloadless", + "authorization": {} + } + ] +} diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index 0b66d8865b..e58ee99d3e 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -8,13 +8,14 @@ using namespace eosio; using namespace eosio::chain; using snapshot_request_information = snapshot_scheduler::snapshot_request_information; +using snapshot_request_params = snapshot_scheduler::snapshot_request_params; using snapshot_request_id_information = snapshot_scheduler::snapshot_request_id_information; BOOST_AUTO_TEST_SUITE(producer_snapshot_scheduler_tests) BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { fc::logger log; - producer_plugin scheduler; + snapshot_scheduler scheduler; { // add/remove test @@ -30,19 +31,14 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { return e.to_detail_string().find("Duplicate snapshot request") != std::string::npos; }); - snapshot_request_id_information sri_delete_1 = {.snapshot_request_id = 0}; - scheduler.unschedule_snapshot(sri_delete_1); - + scheduler.unschedule_snapshot(0); BOOST_CHECK_EQUAL(1, scheduler.get_snapshot_requests().snapshot_requests.size()); - snapshot_request_id_information sri_delete_none = {.snapshot_request_id = 2}; - 
BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(sri_delete_none), snapshot_request_not_found, [](const fc::assert_exception& e) { + BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(0), snapshot_request_not_found, [](const fc::assert_exception& e) { return e.to_detail_string().find("Snapshot request not found") != std::string::npos; }); - snapshot_request_id_information sri_delete_2 = {.snapshot_request_id = 1}; - scheduler.unschedule_snapshot(sri_delete_2); - + scheduler.unschedule_snapshot(1); BOOST_CHECK_EQUAL(0, scheduler.get_snapshot_requests().snapshot_requests.size()); snapshot_request_information sri_large_spacing = {.block_spacing = 1000, .start_block_num = 5000, .end_block_num = 5010}; @@ -87,42 +83,49 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { if (!pp->get_snapshot_requests().snapshot_requests.empty()) { const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; - auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num) { + auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0) { auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;}); if (it != snapshot_requests.end()) { auto& pending = it->pending_snapshots; if (pending.size()==1) { - BOOST_CHECK_EQUAL(block_num, pending.begin()->head_block_num); + auto pbn = pending.begin()->head_block_num; + pbn = spacing ? (spacing + (pbn%spacing)) : pbn; + // if snapshot scheduled with empty start_block_num depending on the timing + // it can be scheduled either for block_num or block_num+1 + BOOST_CHECK(block_num==pbn || ((block_num+1)==pbn)); } return true; } return false; }; - BOOST_REQUIRE(validate_snapshot_request(0, 9)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires - BOOST_REQUIRE(validate_snapshot_request(4, 12)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12 + BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires + BOOST_REQUIRE(validate_snapshot_request(4, 12, 10)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12 + BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc } }); - snapshot_request_information sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"}; - snapshot_request_information sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that will never happen"}; - snapshot_request_information sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"}; - snapshot_request_information sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"}; - snapshot_request_information sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"}; + snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"}; + snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, 
.snapshot_description = "Example of recurring snapshot 2 that wont happen in test"}; + snapshot_request_params sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"}; + snapshot_request_params sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"}; + snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"}; + snapshot_request_params sri6 = {.block_spacing = 10, .start_block_num = 0, .snapshot_description = "Recurring every 10 blocks snapshot starting from 0"}; pp->schedule_snapshot(sri1); pp->schedule_snapshot(sri2); pp->schedule_snapshot(sri3); pp->schedule_snapshot(sri4); pp->schedule_snapshot(sri5); + pp->schedule_snapshot(sri6); - // all five snapshot requests should be present now - BOOST_CHECK_EQUAL(5, pp->get_snapshot_requests().snapshot_requests.size()); + // all six snapshot requests should be present now + BOOST_CHECK_EQUAL(6, pp->get_snapshot_requests().snapshot_requests.size()); - empty_blocks_fut.wait_for(std::chrono::seconds(6)); + empty_blocks_fut.wait_for(std::chrono::seconds(10)); // two of the snapshots are done here and requests, corresponding to them should be deleted - BOOST_CHECK_EQUAL(3, pp->get_snapshot_requests().snapshot_requests.size()); + BOOST_CHECK_EQUAL(4, pp->get_snapshot_requests().snapshot_requests.size()); // check whether no pending snapshots present for a snapshot with id 0 const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; @@ -141,8 +144,8 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::vector ssi; db.set_path(temp / "snapshots"); db >> ssi; - BOOST_CHECK_EQUAL(3, ssi.size()); - BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, sri1.block_spacing); + BOOST_CHECK_EQUAL(4, ssi.size()); + BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, *sri1.block_spacing); } catch(...) { throw; } diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 50c6bb1c54..60c20e3f7e 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -62,6 +62,7 @@ int main(int argc, char** argv) { ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied transaction action data") ("actions-data", bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") + ("api-endpoint", bpo::value(&provider_config._api_endpoint), "The api endpoint to direct transactions to. Defaults to: '/v1/chain/send_transaction2'") ("peer-endpoint-type", bpo::value(&provider_config._peer_endpoint_type)->default_value("p2p"), "Identify the peer endpoint api type to determine how to send transactions. Allowable 'p2p' and 'http'. 
Default: 'p2p'") ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&provider_config._port)->default_value(9876), "set the peer endpoint port to send transactions to") diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index b6d92378b5..dda297422f 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -146,6 +146,9 @@ namespace eosio::testing { for (size_t i = 0; i < action_array.size(); ++i) { auto action_mvo = fc::mutable_variant_object(action_array[i]); locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word); + if(acct_gen_fields_out[i].empty()) { + acct_gen_fields_out.erase(i); + } } } @@ -210,10 +213,15 @@ namespace eosio::testing { } EOS_RETHROW_EXCEPTIONS(chain::transaction_type_exception, "Fail to parse unpacked action data JSON") - chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); - chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + std::vector auth = {}; + if (action_mvo["authorization"].get_object().find("actor") != action_mvo["authorization"].get_object().end() && + action_mvo["authorization"].get_object().find("permission") != action_mvo["authorization"].get_object().end()) { + chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); + chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + auth.push_back({auth_actor, auth_perm}); + } - return chain::action({{auth_actor, auth_perm}}, _config._contract_owner_account, action_name, std::move(packed_action_data)); + return chain::action(auth, _config._contract_owner_account, action_name, std::move(packed_action_data)); }); return actions; diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index c75d048dc8..c9b5b8fc85 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -34,6 +34,37 @@ namespace eosio::testing { return send_buffer; } + void provider_connection::init_and_connect() { + _connection_thread_pool.start( + 1, [](const fc::exception& e) { elog("provider_connection exception ${e}", ("e", e)); }); + connect(); + }; + + void provider_connection::cleanup_and_disconnect() { + disconnect(); + _connection_thread_pool.stop(); + }; + + fc::time_point provider_connection::get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id) { + fc::time_point time_acked; + std::lock_guard lock(_trx_ack_map_lock); + auto search = _trxs_ack_time_map.find(trx_id); + if (search != _trxs_ack_time_map.end()) { + time_acked = search->second; + } else { + elog("get_trx_ack_time - Transaction acknowledge time not found for transaction with id: ${id}", + ("id", trx_id)); + time_acked = fc::time_point::min(); + } + return time_acked; + } + + void provider_connection::trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, + const fc::time_point& ack_time) { + std::lock_guard lock(_trx_ack_map_lock); + _trxs_ack_time_map[trx_id] = ack_time; + } + void p2p_connection::connect() { ilog("Attempting P2P connection to ${ip}:${port}.", ("ip", _config._peer_endpoint)("port", _config._port)); tcp::resolver r(_connection_thread_pool.get_executor()); @@ -54,6 +85,10 @@ namespace eosio::testing { trx_acknowledged(trx.id(), fc::time_point::min()); //using min to 
identify ack time as not applicable for p2p } + acked_trx_trace_info p2p_connection::get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + return {}; + } + void http_connection::connect() {} void http_connection::disconnect() { @@ -71,8 +106,11 @@ namespace eosio::testing { } } + bool http_connection::needs_response_trace_info() { + return _config._api_endpoint == "/v1/chain/send_read_only_transaction"; + } + void http_connection::send_transaction(const chain::packed_transaction& trx) { - const std::string target = "/v1/chain/send_transaction2"s; const int http_version = 11; const std::string content_type = "application/json"s; @@ -84,16 +122,55 @@ namespace eosio::testing { http_client_async::http_request_params params{_connection_thread_pool.get_executor(), _config._peer_endpoint, _config._port, - target, + _config._api_endpoint, http_version, content_type}; http_client_async::async_http_request( params, std::move(msg_body), - [this, trx_id = trx.id()]( - boost::beast::error_code ec, boost::beast::http::response response) { + [this, trx_id = trx.id()](boost::beast::error_code ec, + boost::beast::http::response response) { ++this->_acknowledged; trx_acknowledged(trx_id, fc::time_point::now()); - if (response.result() != boost::beast::http::status::accepted) { + + if (this->needs_response_trace_info() && response.result() == boost::beast::http::status::ok) { + try { + fc::variant resp_json = fc::json::from_string(response.body()); + if (resp_json.is_object() && resp_json.get_object().contains("processed")) { + const auto& processed = resp_json["processed"]; + const auto& block_num = processed["block_num"].as_uint64(); + const auto& transaction_id = processed["id"].as_string(); + const auto& block_time = processed["block_time"].as_string(); + std::string status = "failed"; + uint32_t net = 0; + uint32_t cpu = 0; + if (processed.get_object().contains("receipt")) { + const auto& receipt = processed["receipt"]; + if (receipt.is_object()) { + status = receipt["status"].as_string(); + net = receipt["net_usage_words"].as_uint64() * 8; + cpu = receipt["cpu_usage_us"].as_uint64(); + } + if (status == "executed") { + record_trx_info(trx_id, block_num, cpu, net, block_time); + } else { + elog("async_http_request Transaction receipt status not executed: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, no receipt: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, transaction not processed: ${string}", + ("string", response.body())); + } + } + EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from string: ${string}", + ("string", response.body())); + } + + if (!(response.result() == boost::beast::http::status::accepted || + response.result() == boost::beast::http::status::ok)) { elog("async_http_request Failed with response http status code: ${status}", ("status", response.result_int())); } @@ -101,6 +178,25 @@ namespace eosio::testing { ++_sent; } + void http_connection::record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, + uint32_t cpu_usage_us, uint32_t net_usage_words, + const std::string& block_time) { + std::lock_guard lock(_trx_info_map_lock); + _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); + } + + acked_trx_trace_info http_connection::get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + acked_trx_trace_info info; + 
std::lock_guard lock(_trx_info_map_lock); + auto search = _acked_trx_trace_info_map.find(trx_id); + if (search != _acked_trx_trace_info_map.end()) { + info = search->second; + } else { + elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: ${id}", ("id", trx_id)); + } + return info; + } + trx_provider::trx_provider(const provider_base_config& provider_config) { if (provider_config._peer_endpoint_type == "http") { _conn.emplace(provider_config); @@ -136,7 +232,13 @@ namespace eosio::testing { ack_round_trip_us = acked - data._timestamp; } out << std::string(data._trx_id) << "," << data._timestamp.to_iso_string() << "," << acked_str << "," - << ack_round_trip_us.count() << "\n"; + << ack_round_trip_us.count(); + + acked_trx_trace_info info = _peer_connection->get_acked_trx_trace_info(data._trx_id); + if (info._valid) { + out << "," << info._block_num << "," << info._cpu_usage_us << "," << info._net_usage_words << "," << info._block_time; + } + out << "\n"; } out.close(); } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 86c9415619..8f8dd9200d 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -28,11 +28,30 @@ namespace eosio::testing { std::string _peer_endpoint_type = "p2p"; std::string _peer_endpoint = "127.0.0.1"; unsigned short _port = 9876; + // Api endpoint not truly used for p2p connections as transactions are streamed directly to p2p endpoint + std::string _api_endpoint = "/v1/chain/send_transaction2"; std::string to_string() const { std::ostringstream ss; - ss << "endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint << " port: " << _port; - return std::move(ss).str(); + ss << "Provider base config endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint + << " port: " << _port << " api endpoint: " << _api_endpoint; + return ss.str(); + } + }; + + struct acked_trx_trace_info { + bool _valid = false; + uint32_t _block_num = 0; + uint32_t _cpu_usage_us = 0; + uint32_t _net_usage_words = 0; + std::string _block_time = ""; + + std::string to_string() const { + std::ostringstream ss; + ss << "Acked Transaction Trace Info " + << "valid: " << _valid << " block num: " << _block_num << " cpu usage us: " << _cpu_usage_us + << " net usage words: " << _net_usage_words << " block time: " << _block_time; + return ss.str(); } }; @@ -48,43 +67,23 @@ namespace eosio::testing { virtual ~provider_connection() = default; - void init_and_connect() { - _connection_thread_pool.start( - 1, [](const fc::exception& e) { elog("provider_connection exception ${e}", ("e", e)); }); - connect(); - }; - - void cleanup_and_disconnect() { - disconnect(); - _connection_thread_pool.stop(); - }; - - fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& _trx_id) { - fc::time_point time_acked; - std::lock_guard lock(_trx_ack_map_lock); - auto search = _trxs_ack_time_map.find(_trx_id); - if (search != _trxs_ack_time_map.end()) { - time_acked = search->second; - } else { - elog("get_trx_ack_time - Transaction acknowledge time not found for transaction with id: ${id}", ("id", _trx_id)); - time_acked = fc::time_point::min(); - } - return time_acked; - } + void init_and_connect(); + void cleanup_and_disconnect(); + fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id); + void trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, const fc::time_point& ack_time); + virtual 
acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; - void trx_acknowledged(const eosio::chain::transaction_id_type _trx_id, const fc::time_point ack_time) { - std::lock_guard lock(_trx_ack_map_lock); - _trxs_ack_time_map[_trx_id] = ack_time; - } - private: virtual void connect() = 0; virtual void disconnect() = 0; }; struct http_connection : public provider_connection { + std::mutex _trx_info_map_lock; + std::map _acked_trx_trace_info_map; + std::atomic _acknowledged{0}; std::atomic _sent{0}; @@ -92,10 +91,14 @@ namespace eosio::testing { : provider_connection(provider_config) {} void send_transaction(const chain::packed_transaction& trx) final; + void record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, uint32_t cpu_usage_us, + uint32_t net_usage_words, const std::string& block_time); + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: void connect() override final; void disconnect() override final; + bool needs_response_trace_info(); }; struct p2p_connection : public provider_connection { @@ -107,6 +110,8 @@ namespace eosio::testing { void send_transaction(const chain::packed_transaction& trx) final; + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; + private: void connect() override final; void disconnect() override final; @@ -170,7 +175,7 @@ namespace eosio::testing { std::string to_string() const { std::ostringstream ss; ss << "Trx Tps Tester Config: duration: " << _gen_duration_seconds << " target tps: " << _target_tps; - return std::move(ss).str(); + return ss.str(); }; };
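Two of the changes above are worth restating outside the diff. First, `http_connection::send_transaction` now inspects the JSON body returned by `/v1/chain/send_read_only_transaction` and records trace info only for receipts whose status is `"executed"`. A Python restatement of that decision logic — the field names are exactly those the C++ code reads, but the helper itself is hypothetical:

```python
import json

def extract_trace_info(response_body: str):
    # Mirrors the response handling added to http_connection::send_transaction:
    # trace info is recorded only when the node returns a processed transaction
    # whose receipt status is "executed"; every other shape is logged and skipped.
    resp = json.loads(response_body)
    if not isinstance(resp, dict) or "processed" not in resp:
        return None                      # "transaction not processed"
    processed = resp["processed"]
    receipt = processed.get("receipt")
    if not isinstance(receipt, dict):
        return None                      # "no receipt"
    if receipt.get("status") != "executed":
        return None                      # "receipt status not executed"
    return {
        "block_num": processed["block_num"],
        "cpu_usage_us": receipt["cpu_usage_us"],
        "net_usage_bytes": receipt["net_usage_words"] * 8,  # words -> bytes, as in the diff
        "block_time": processed["block_time"],
    }
```

Note that the C++ code multiplies `net_usage_words` by 8 before storing it, so the recorded value is in bytes even though the surrounding names keep the `_words` suffix.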
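Second, the captured trace fields flow into the per-transaction log lines written by `trx_provider.cpp` and read back by `log_reader.py`: four comma-separated fields per line normally, eight when a valid `acked_trx_trace_info` was captured. A condensed sketch of that parse, mirroring `scrapeTrxGenLog` and the `sentTrx`/`sentTrxExtTrace` dataclasses (the snake_case names and eager int conversion are mine, added for clarity):

```python
from dataclasses import dataclass

@dataclass
class SentTrx:
    sent_time: str
    acked: str
    ack_response_time_us: int

@dataclass
class SentTrxExtTrace(SentTrx):
    block_num: int
    cpu_usage_us: int
    net_usage_words: int
    block_time: str

def parse_trx_gen_log_line(line: str):
    # trx_id,sent,acked,rtt_us                 <- always written
    # ,block_num,cpu_us,net_words,block_time   <- appended only when trace info is valid
    fields = line.rstrip("\n").split(",")
    trx_id = fields[0]
    if len(fields) == 4:
        return trx_id, SentTrx(fields[1], fields[2], int(fields[3]))
    return trx_id, SentTrxExtTrace(fields[1], fields[2], int(fields[3]),
                                   int(fields[4]), int(fields[5]),
                                   int(fields[6]), fields[7])
```

`populateTrxSentAndAcked` then short-circuits for `sentTrxExtTrace` records, since the block number, CPU, and net usage are already known from the HTTP response and need not be recovered from node logs.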