Merge branch 'main' of github.com:AntelopeIO/leap into gh-672
greg7mdp committed Jun 14, 2023
2 parents 64ef9ea + 53ac7e8 commit 42e8824
Showing 3 changed files with 72 additions and 11 deletions.
@@ -491,7 +491,10 @@ struct session : session_base, std::enable_shared_from_this<session<Plugin, Sock
   return;
}

auto block_id = plugin.get_block_id(to_send_block_num);
// not just an optimization: on the accepted_block signal the block may not yet be findable by block_num in the
// forkdb, because it is not validated until after the accepted_block signal is emitted
std::optional<chain::block_id_type> block_id =
      (block_state && block_state->block_num == to_send_block_num) ? block_state->id : plugin.get_block_id(to_send_block_num);

if (block_id && position_it && (*position_it)->block_num == to_send_block_num) {
   // This branch happens when the head block of nodeos is behind the head block of connecting client.
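The comment above records the ordering subtlety that motivates this change: while the accepted_block signal is being handled, the just-accepted block is not yet in the fork database, so a lookup by block number can fail even though the block exists. A minimal Python sketch of the same fallback pattern (editor's illustration only, not part of the commit; the dict-based block_state and the get_block_id callable are stand-ins for the plugin's C++ types):

def resolve_block_id(block_state, to_send_block_num, get_block_id):
    # Prefer the id carried with the accepted_block signal; the fork database may not
    # contain the just-accepted block yet, so get_block_id() could come back empty.
    if block_state is not None and block_state["block_num"] == to_send_block_num:
        return block_state["id"]
    return get_block_id(to_send_block_num)  # may return None for the newest block

# Example: the fork-db lookup returns None during the signal, but the block_state
# delivered with the signal still yields the id.
assert resolve_block_id({"block_num": 42, "id": "abc"}, 42, lambda n: None) == "abc"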
57 changes: 55 additions & 2 deletions tests/nodeos_forked_chain_test.py
@@ -5,8 +5,10 @@
import time
import json
import signal
import os

from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys
from TestHarness.TestHelper import AppArgs

###############################################################
# nodeos_forked_chain_test
@@ -27,6 +29,8 @@
# Time is allowed to progress so that the "bridge" node can catch up and both producer nodes can come to consensus
# The block log is then checked for both producer nodes to verify that the 10 producer fork is selected and that
# both nodes are in agreement on the block log.
# This test also runs a state_history_plugin (SHiP) on node 0 and uses ship_streamer to verify all blocks are received
# across the fork.
#
###############################################################

@@ -121,9 +125,10 @@ def getMinHeadAndLib(prodNodes):
    return (headBlockNum, libNum)



appArgs = AppArgs()
extraArgs = appArgs.add(flag="--num-ship-clients", type=int, help="How many ship_streamers should be started", default=2)
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running",
"--wallet-port","--unshared"})
"--wallet-port","--unshared"}, applicationSpecificArgs=appArgs)
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
@@ -133,6 +138,7 @@ def getMinHeadAndLib(prodNodes):
dumpErrorDetails=args.dump_error_details
prodCount=args.prod_count
walletPort=args.wallet_port
num_clients=args.num_ship_clients
cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)

walletMgr=WalletMgr(True, port=walletPort)
@@ -147,6 +153,9 @@ def getMinHeadAndLib(prodNodes):
cluster.setWalletMgr(walletMgr)
Print("Stand up cluster")
specificExtraNodeosArgs={}
shipNodeNum = 0
specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts"

# producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node
specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin"

@@ -279,6 +288,31 @@ def getBlock(self, blockNum):
timestampStr=Node.getBlockAttribute(block, "timestamp", blockNum)
timestamp=datetime.strptime(timestampStr, Utils.TimeFmt)

shipNode = cluster.getNode(0)
block_range = 800
start_block_num = blockNum
end_block_num = start_block_num + block_range

shipClient = "tests/ship_streamer"
cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas"
if Utils.Debug: Utils.Print(f"cmd: {cmd}")
clients = []
files = []
shipTempDir = os.path.join(Utils.DataDir, "ship")
os.makedirs(shipTempDir, exist_ok = True)
shipClientFilePrefix = os.path.join(shipTempDir, "client")

starts = []
for i in range(0, num_clients):
    start = time.perf_counter()
    outFile = open(f"{shipClientFilePrefix}{i}.out", "w")
    errFile = open(f"{shipClientFilePrefix}{i}.err", "w")
    Print(f"Start client {i}")
    popen = Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile)
    starts.append(time.perf_counter())
    clients.append((popen, cmd))
    files.append((outFile, errFile))
    Utils.Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}")

# *** Identify what the production cycle is ***

@@ -552,6 +586,25 @@ def getBlock(self, blockNum):
Utils.errorExit("Did not find find block %s (the original divergent block) in blockProducers0, test setup is wrong. blockProducers0: %s" % (killBlockNum, ", ".join(blockProducers0)))
Print("Fork resolved and determined producer %s for block %s" % (resolvedKillBlockProducer, killBlockNum))

Print(f"Stopping all {num_clients} clients")
for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts):
popen.wait()
Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.")
out.close()
err.close()
outFile = open(f"{shipClientFilePrefix}{index}.out", "r")
data = json.load(outFile)
block_num = start_block_num
for i in data:
# fork can cause block numbers to be repeated
this_block_num = i['get_blocks_result_v0']['this_block']['block_num']
if this_block_num < block_num:
block_num = this_block_num
assert block_num == this_block_num, f"{block_num} != {this_block_num}"
assert isinstance(i['get_blocks_result_v0']['block'], str) # verify block in result
block_num += 1
assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}"

blockProducers0=[]
blockProducers1=[]

21 changes: 13 additions & 8 deletions tests/test_snapshot_scheduler.cpp
@@ -83,25 +83,30 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
if (!pp->get_snapshot_requests().snapshot_requests.empty()) {
   const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests;

   auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0) {
   auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) {
      auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;});
      if (it != snapshot_requests.end()) {
         auto& pending = it->pending_snapshots;
         if (pending.size()==1) {
            // pending snapshot block number
            auto pbn = pending.begin()->head_block_num;
            pbn = spacing ? (spacing + (pbn%spacing)) : pbn;
            // if the snapshot was scheduled with an empty start_block_num then, depending on the timing,
            // it can be scheduled either for block_num or block_num+1
            BOOST_CHECK(block_num==pbn || ((block_num+1)==pbn));

            // first pending snapshot
            auto ps_start = (spacing != 0) ? (spacing + (pbn%spacing)) : pbn;

            // this will happen only when the snapshot was scheduled with no start block specified
            auto deviation = fuzzy_start ? ps_start - it->start_block_num - spacing : 0;

            BOOST_CHECK_EQUAL(block_num, ps_start - deviation);
         }
         return true;
      }
      return false;
   };

   BOOST_REQUIRE(validate_snapshot_request(0, 9, 8));         // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
   BOOST_REQUIRE(validate_snapshot_request(4, 12, 10));       // snapshot #4 should have pending snapshot at block #12: the head block at the moment of scheduling (2) plus the spacing of 10
   BOOST_REQUIRE(validate_snapshot_request(5, 10, 10));       // snapshot #5 should have pending snapshot at block #10, #20, etc.
   BOOST_REQUIRE(validate_snapshot_request(0, 9, 8));         // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
   BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have pending snapshot at block #12: the head block at the moment of scheduling (2) plus the spacing of 10
   BOOST_REQUIRE(validate_snapshot_request(5, 10, 10));       // snapshot #5 should have pending snapshot at block #10, #20, etc.
}
});
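A worked example of the fuzzy_start arithmetic introduced above, using the figures from the snapshot #4 assertion (spacing 10, start_block_num 2 recorded at scheduling time, expected block 12). This is an editor's sketch, not part of the commit: expected_block is a stand-in for the lambda's calculation, and the one-block slip in the second case is an assumed timing scenario.

def expected_block(pbn, spacing, start_block_num, fuzzy_start):
    # pbn is the pending snapshot's head_block_num, as in the C++ lambda above
    ps_start = spacing + (pbn % spacing) if spacing else pbn
    deviation = (ps_start - start_block_num - spacing) if fuzzy_start else 0
    return ps_start - deviation

assert expected_block(pbn=12, spacing=10, start_block_num=2, fuzzy_start=True) == 12
assert expected_block(pbn=13, spacing=10, start_block_num=2, fuzzy_start=True) == 12  # one-block timing slip still resolves to 12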

