From 5a3cd673e9ce8cd63fd1bb5df6fc522bb9cac34d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 10:04:57 -0500 Subject: [PATCH 1/9] GH-1349 Close on async_read closed socket. Always shutdown socket. --- plugins/net_plugin/net_plugin.cpp | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 963f1a8bb8..2e4952dabd 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1022,10 +1022,8 @@ namespace eosio { void connection::_close( connection* self, bool reconnect, bool shutdown ) { self->socket_open = false; boost::system::error_code ec; - if( self->socket->is_open() ) { - self->socket->shutdown( tcp::socket::shutdown_both, ec ); - self->socket->close( ec ); - } + self->socket->shutdown( tcp::socket::shutdown_both, ec ); + self->socket->close( ec ); self->socket.reset( new tcp::socket( my_impl->thread_pool.get_executor() ) ); self->flush_queues(); self->connecting = false; @@ -2489,7 +2487,18 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { // may have closed connection and cleared pending_message_buffer - if( !conn->socket_is_open() || socket != conn->socket ) return; + if (!conn->socket_is_open() && conn->socket_open) { // if socket_open then close not called + peer_dlog( conn, "async_read socket not open, closing"); + conn->close(); + return; + } + if (socket != conn->socket ) { // different socket, conn must have created a new socket, make sure previous is closed + peer_dlog( conn, "async_read diff socket closing"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } bool close_connection = false; try { From d46d7e903ad51bca5ec1584b12b1fd33525a5766 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 10:05:23 -0500 Subject: [PATCH 2/9] GH-1349 Cleanup duplicate check --- plugins/net_plugin/net_plugin.cpp | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2e4952dabd..bd93128d96 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1050,6 +1050,7 @@ namespace eosio { self->cancel_wait(); self->latest_msg_time = std::chrono::system_clock::time_point::min(); self->latest_blk_time = std::chrono::system_clock::time_point::min(); + self->org = std::chrono::nanoseconds{0}; if( reconnect && !shutdown ) { my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); @@ -2851,6 +2852,7 @@ namespace eosio { peer_lib_num = msg.last_irreversible_block_num; std::unique_lock g_conn( conn_mtx ); last_handshake_recv = msg; + auto c_time = last_handshake_sent.time; g_conn.unlock(); connecting = false; @@ -2876,14 +2878,9 @@ namespace eosio { return; } - if( peer_address().empty() ) { + if( incoming() ) { set_connection_type( msg.p2p_address ); - } - std::unique_lock g_conn( conn_mtx ); - if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { - auto c_time = last_handshake_sent.time; - g_conn.unlock(); peer_dlog( this, "checking for duplicate" ); std::shared_lock g_cnts( my_impl->connections_mtx ); for(const auto& check : my_impl->connections) { @@ -2929,9 +2926,7 @@ namespace eosio { } } } else { - peer_dlog( this, "skipping duplicate check, addr == ${pa}, id = ${ni}", - 
("pa", peer_address())( "ni", last_handshake_recv.node_id ) ); - g_conn.unlock(); + peer_dlog(this, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", peer_address())("ni", msg.node_id)); } if( msg.chain_id != my_impl->chain_id ) { From 6dd606d3dc27182aefd4cfccdc9fd05afaf16c54 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 3 Jul 2023 16:12:04 -0400 Subject: [PATCH 3/9] add `*.gdb_history` to `.gitignore` --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index d4636fd221..012739af57 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,8 @@ witness_node_data_dir *.pyc *.pyo +*.gdb_history + Testing/* build.tar.gz [Bb]uild*/* From bd7ff14a6136219ff2f9c902718d63e892dc7a83 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 3 Jul 2023 20:15:08 -0500 Subject: [PATCH 4/9] GH-1349 If socket closed, but close not call, call close --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index bd93128d96..1f051eca00 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1242,7 +1242,7 @@ namespace eosio { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket_is_open() || socket != c->socket ) { + if( !c->socket->is_open() || socket != c->socket ) { peer_ilog( c, "async write socket ${r} before callback", ("r", c->socket_is_open() ? "changed" : "closed") ); c->close(); return; @@ -2488,7 +2488,7 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { // may have closed connection and cleared pending_message_buffer - if (!conn->socket_is_open() && conn->socket_open) { // if socket_open then close not called + if (!conn->socket->is_open() && conn->socket_is_open()) { // if socket_open then close not called peer_dlog( conn, "async_read socket not open, closing"); conn->close(); return; From 59b0d08749ee864d0c431ca2f68a9147a03085a5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Jul 2023 07:42:17 -0500 Subject: [PATCH 5/9] GH-1349 close mismatched socket --- plugins/net_plugin/net_plugin.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1f051eca00..7271d393cc 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1242,11 +1242,18 @@ namespace eosio { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket->is_open() || socket != c->socket ) { - peer_ilog( c, "async write socket ${r} before callback", ("r", c->socket_is_open() ? 
"changed" : "closed") ); + if (!c->socket->is_open() && c->socket_is_open()) { // if socket_open then close not called + peer_ilog(c, "async write socket closed before callback"); c->close(); return; } + if (socket != c->socket ) { // different socket, c must have created a new socket, make sure previous is closed + peer_ilog( c, "async write socket changed before callback"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } if( ec ) { if( ec.value() != boost::asio::error::eof ) { From 633fda695caa8e8f4f498e41a84634af35e41a99 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 13:23:59 -0500 Subject: [PATCH 6/9] GH-1340 Correctly handle possibly missing prev_block --- tests/ship_streamer.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp index f95e9eb3d3..8ce32f096f 100644 --- a/tests/ship_streamer.cpp +++ b/tests/ship_streamer.cpp @@ -162,8 +162,10 @@ int main(int argc, char* argv[]) { this_block_id = result_document[1]["this_block"]["block_id"].GetString(); } std::string prev_block_id; - if( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { - prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + if( result_document[1].HasMember("prev_block") && result_document[1]["prev_block"].IsObject() ) { + if ( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { + prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + } } if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) { // verify forks were sent From 30161af688df71d2d458d79281850ca077e46ed5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 13:26:27 -0500 Subject: [PATCH 7/9] GH-1340 Add integration test that verifies ship delta data available immediately after loading a snapshot --- tests/TestHarness/Cluster.py | 2 +- tests/TestHarness/testUtils.py | 4 +- tests/ship_streamer_test.py | 85 ++++++++++++++++++++++++++++++++-- 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index fa6a290eda..c512f4dfe1 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1731,6 +1731,6 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: i firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") status = node.waitForTransactionsInBlock(firstTrxs) - if status is None: + if status is None or status is False: Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') return status diff --git a/tests/TestHarness/testUtils.py b/tests/TestHarness/testUtils.py index 9a08c68ad1..bca96d550d 100644 --- a/tests/TestHarness/testUtils.py +++ b/tests/TestHarness/testUtils.py @@ -159,11 +159,13 @@ def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): return path @staticmethod - def rmNodeDataDir(ext, rmState=True, rmBlocks=True): + def rmNodeDataDir(ext, rmState=True, rmBlocks=True, rmStateHist=True): if rmState: shutil.rmtree(Utils.getNodeDataDir(ext, "state")) if rmBlocks: shutil.rmtree(Utils.getNodeDataDir(ext, "blocks")) + if rmStateHist: + shutil.rmtree(Utils.getNodeDataDir(ext, "state-history"), ignore_errors=True) @staticmethod def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): 
diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 7d3816dfd3..9ee47c9338 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -55,6 +55,13 @@ WalletdName=Utils.EosWalletName shipTempDir=None +def getLatestSnapshot(nodeId): + snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + snapshotDirContents.sort() + return os.path.join(snapshotDir, snapshotDirContents[-1]) + try: TestHelper.printSystemInfo("BEGIN") @@ -71,7 +78,7 @@ shipNodeNum = 1 specificExtraNodeosArgs={} - specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin " + specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin --plugin eosio::producer_api_plugin " # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin " @@ -123,13 +130,18 @@ trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True) # create accounts via eosio as otherwise a bid is needed + transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with private key: {account.activePrivateKey}") - trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) - transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) # *** vote using accounts *** @@ -150,6 +162,19 @@ cluster.waitOnClusterSync(blockAdvancing=3) Print("Shutdown unneeded bios node") cluster.biosNode.kill(signal.SIGTERM) + + Print("Configure and launch txn generators") + targetTpsPerGenerator = 10 + testTrxGenDurationSec=60*60 + numTrxGenerators=2 + cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name, accounts[1].name], + acctPrivKeysList=[accounts[0].activePrivateKey,accounts[1].activePrivateKey], nodeId=prodNode1.nodeId, + tpsPerGenerator=targetTpsPerGenerator, numGenerators=numTrxGenerators, durationSec=testTrxGenDurationSec, + waitToComplete=False) + + status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode1.nodeId, numGenerators=numTrxGenerators) + assert status 
is not None and status is not False, "ERROR: Failed to spinup Transaction Generators"
+
     prodNode0.waitForProducer("defproducerc")
 
     block_range = 350
@@ -226,9 +251,61 @@
         block_num += 1
     assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}"
 
+    Print("Generate snapshot")
+    shipNode.createSnapshot()
+
     Print("Shutdown state_history_plugin nodeos")
     shipNode.kill(signal.SIGTERM)
 
+    Print("Shutdown bridge node")
+    nonProdNode.kill(signal.SIGTERM)
+
+    Print("Test starting ship from snapshot")
+    Utils.rmNodeDataDir(shipNodeNum)
+    isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum)))
+    assert isRelaunchSuccess, "relaunch from snapshot failed"
+
+    afterSnapshotBlockNum = shipNode.getBlockNum()
+
+    Print("Verify we can stream from ship after start from a snapshot with no incoming trxs")
+    start_block_num = afterSnapshotBlockNum
+    block_range = 0
+    end_block_num = start_block_num + block_range
+    cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas"
+    if Utils.Debug: Utils.Print(f"cmd: {cmd}")
+    clients = []
+    files = []
+    starts = []
+    for i in range(0, args.num_clients):
+        start = time.perf_counter()
+        outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w")
+        errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w")
+        Print(f"Start client {i}")
+        popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile)
+        starts.append(time.perf_counter())
+        clients.append((popen, cmd))
+        files.append((outFile, errFile))
+        Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}")
+
+    Print(f"Stopping all {args.num_clients} clients")
+    for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts):
+        popen.wait()
+        Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.")
+        out.close()
+        err.close()
+        outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r")
+        data = json.load(outFile)
+        block_num = start_block_num
+        for i in data:
+            # fork can cause block numbers to be repeated
+            this_block_num = i['get_blocks_result_v0']['this_block']['block_num']
+            if this_block_num < block_num:
+                block_num = this_block_num
+            assert block_num == this_block_num, f"{block_num} != {this_block_num}"
+            assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result
+            block_num += 1
+        assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}"
+
     testSuccessful = True
 finally:
     TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)

From 6c14f56ebef17017381555e5841352e2b9c8b1a1 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Thu, 6 Jul 2023 13:34:09 -0500
Subject: [PATCH 8/9] GH-1340 Make SHiP data available after a snapshot load
 even if no new blocks applied. Also do not repeatedly send empty
 get_blocks_response messages but rather start sending first available block
 data.
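
For illustration only, a rough sketch of the effect on get_blocks_request_v0
handling (simplified from the change below, not the exact plugin code;
`first_available` is just a local name introduced here for clarity):

    // start no lower than the first block SHiP can actually serve
    uint32_t first_available = plugin->get_first_available_block_num();
    to_send_block_num = std::max(req.start_block_num, first_available);
    // previously to_send_block_num started at req.start_block_num, so a request
    // below the available range produced empty get_blocks_response messages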
--- .../eosio/state_history_plugin/session.hpp | 2 +- .../state_history_plugin.cpp | 54 ++++++++++++++----- .../tests/session_test.cpp | 2 + 3 files changed, 45 insertions(+), 13 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp index 195d7da762..6b2e80f1d9 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp @@ -447,7 +447,7 @@ struct session : session_base, std::enable_shared_from_thislogger(), "replying get_blocks_request_v0 = ${req}", ("req", req)); - to_send_block_num = req.start_block_num; + to_send_block_num = std::max(req.start_block_num, plugin->get_first_available_block_num()); for (auto& cp : req.have_positions) { if (req.start_block_num <= cp.block_num) continue; diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 7d301f30df..d32842fd93 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -66,6 +66,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this trace_log; std::optional chain_state_log; + uint32_t first_available_block = 0; bool trace_debug_mode = false; std::optional applied_transaction_connection; std::optional block_start_connection; @@ -137,10 +138,17 @@ struct state_history_plugin_impl : std::enable_shared_from_this get_block_id(uint32_t block_num) { - if (trace_log) - return trace_log->get_block_id(block_num); - if (chain_state_log) - return chain_state_log->get_block_id(block_num); + std::optional id; + if( trace_log ) { + id = trace_log->get_block_id( block_num ); + if( id ) + return id; + } + if( chain_state_log ) { + id = chain_state_log->get_block_id( block_num ); + if( id ) + return id; + } try { return chain_plug->chain().get_block_id_for_num(block_num); } catch (...) 
{ @@ -166,6 +174,11 @@ struct state_history_plugin_impl : std::enable_shared_from_this void post_task_main_thread_medium(Task&& task) { app().post(priority::medium, std::forward(task)); @@ -269,15 +282,18 @@ struct state_history_plugin_impl : std::enable_shared_from_thischain(); + std::lock_guard g(mtx); + head_id = chain.head_block_id(); + lib_id = chain.last_irreversible_block_id(); + head_timestamp = chain.head_block_time(); + } + // called from main thread void on_accepted_block(const block_state_ptr& block_state) { - { - const auto& chain = chain_plug->chain(); - std::lock_guard g(mtx); - head_id = chain.head_block_id(); - lib_id = chain.last_irreversible_block_id(); - head_timestamp = chain.head_block_time(); - } + update_current(); try { store_traces(block_state); @@ -492,12 +508,26 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { void state_history_plugin::plugin_startup() { try { - auto bsp = my->chain_plug->chain().head_block_state(); + const auto& chain = my->chain_plug->chain(); + my->update_current(); + auto bsp = chain.head_block_state(); if( bsp && my->chain_state_log && my->chain_state_log->empty() ) { fc_ilog( _log, "Storing initial state on startup, this can take a considerable amount of time" ); my->store_chain_state( bsp ); fc_ilog( _log, "Done storing initial state on startup" ); } + my->first_available_block = chain.earliest_available_block_num(); + if (my->trace_log) { + auto first_trace_block = my->trace_log->block_range().first; + if( first_trace_block > 0 ) + my->first_available_block = std::min( my->first_available_block, first_trace_block ); + } + if (my->chain_state_log) { + auto first_state_block = my->chain_state_log->block_range().first; + if( first_state_block > 0 ) + my->first_available_block = std::min( my->first_available_block, first_state_block ); + } + fc_ilog(_log, "First available block for SHiP ${b}", ("b", my->first_available_block)); my->listen(); // use of executor assumes only one thread my->thread_pool.start( 1, [](const fc::exception& e) { diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index 3069ca5660..b1f13752b0 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -130,6 +130,8 @@ struct mock_state_history_plugin { eosio::state_history::block_position get_block_head() { return block_head; } eosio::state_history::block_position get_last_irreversible() { return block_head; } + uint32_t get_first_available_block_num() const { return 0; } + void add_session(std::shared_ptr s) { session_mgr.insert(std::move(s)); } From 0d7d2fd798196e025fbf5a6a7cc35a5012060b9d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 6 Jul 2023 15:58:42 -0500 Subject: [PATCH 9/9] GH-1340 Code cleanup --- tests/TestHarness/Cluster.py | 2 +- tests/ship_streamer.cpp | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index c512f4dfe1..6606224611 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1731,6 +1731,6 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: i firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") status = node.waitForTransactionsInBlock(firstTrxs) - if status is None or status is False: + if not status: Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') 
return status diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp index 8ce32f096f..94a3c40fc9 100644 --- a/tests/ship_streamer.cpp +++ b/tests/ship_streamer.cpp @@ -154,17 +154,19 @@ int main(int argc, char* argv[]) { // validate after streaming, so that invalid entry is included in the output uint32_t this_block_num = 0; if( result_document[1].HasMember("this_block") && result_document[1]["this_block"].IsObject() ) { - if( result_document[1]["this_block"].HasMember("block_num") && result_document[1]["this_block"]["block_num"].IsUint() ) { - this_block_num = result_document[1]["this_block"]["block_num"].GetUint(); + const auto& this_block = result_document[1]["this_block"]; + if( this_block.HasMember("block_num") && this_block["block_num"].IsUint() ) { + this_block_num = this_block["block_num"].GetUint(); } std::string this_block_id; - if( result_document[1]["this_block"].HasMember("block_id") && result_document[1]["this_block"]["block_id"].IsString() ) { - this_block_id = result_document[1]["this_block"]["block_id"].GetString(); + if( this_block.HasMember("block_id") && this_block["block_id"].IsString() ) { + this_block_id = this_block["block_id"].GetString(); } std::string prev_block_id; if( result_document[1].HasMember("prev_block") && result_document[1]["prev_block"].IsObject() ) { - if ( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { - prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + const auto& prev_block = result_document[1]["prev_block"]; + if ( prev_block.HasMember("block_id") && prev_block["block_id"].IsString() ) { + prev_block_id = prev_block["block_id"].GetString(); } } if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) {