diff --git a/.github/codecov.yml b/.github/codecov.yml
index ceceb9e63654..b237c9fe6b04 100644
--- a/.github/codecov.yml
+++ b/.github/codecov.yml
@@ -6,4 +6,10 @@ coverage:
   project:
     default:
       target: 1.0
-      threshold: 2.0
\ No newline at end of file
+      threshold: 2.0
+
+comment:
+  behavior: new
+
+fixes:
+  - "/__w/polkadot-sdk/polkadot-sdk/::"
\ No newline at end of file
diff --git a/.gitlab/check-each-crate.py b/.github/scripts/check-each-crate.py
similarity index 81%
rename from .gitlab/check-each-crate.py
rename to .github/scripts/check-each-crate.py
index 9b654f8071ac..7a53e812ddfc 100755
--- a/.gitlab/check-each-crate.py
+++ b/.github/scripts/check-each-crate.py
@@ -9,6 +9,7 @@
 #
 # - `target_group`: Integer starting from 1, the group this script should execute.
 # - `groups_total`: Integer starting from 1, total number of groups.
+# - `disable_forklift`: Boolean, whether to disable forklift.
 
 import subprocess, sys
 
@@ -31,6 +32,10 @@
 target_group = int(sys.argv[1]) - 1
 groups_total = int(sys.argv[2])
+# Parse the flag explicitly: bool() on any non-empty string (even "false") would be True.
+disable_forklift = len(sys.argv) > 3 and sys.argv[3].lower() in ("true", "1")
+
+print(f"Target group: {target_group}, Total groups: {groups_total}, Disable forklift: {disable_forklift}", file=sys.stderr)
 
 if len(crates) == 0:
 	print("No crates detected!", file=sys.stderr)
@@ -55,7 +60,12 @@
 
 	print(f"Checking {crates[crate][0]}", file=sys.stderr)
 
-	res = subprocess.run(["forklift", "cargo", "check", "--locked"], cwd = crates[crate][1])
+	cmd = ["cargo", "check", "--locked"]
+
+	if not disable_forklift:
+		cmd.insert(0, "forklift")
+
+	res = subprocess.run(cmd, cwd = crates[crate][1])
 
 	if res.returncode != 0:
 		sys.exit(1)
diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py
index 63bd6a2795aa..f7dd88df4bda 100755
--- a/.github/scripts/cmd/cmd.py
+++ b/.github/scripts/cmd/cmd.py
@@ -5,6 +5,7 @@
 import json
 import argparse
 import _help
+import importlib.util
 
 _HelpAction = _help._HelpAction
 
@@ -67,130 +68,173 @@
 for arg, config in common_args.items():
     parser_ui.add_argument(arg, **config)
 
+"""
+PRDOC
+"""
+# Import generate-prdoc.py dynamically
+spec = importlib.util.spec_from_file_location("generate_prdoc", ".github/scripts/generate-prdoc.py")
+generate_prdoc = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(generate_prdoc)
+
+parser_prdoc = subparsers.add_parser('prdoc', help='Generates PR documentation')
+generate_prdoc.setup_parser(parser_prdoc)
+
+def main():
+    global args, unknown, runtimesMatrix
+    args, unknown = parser.parse_known_args()
+
+    print(f'args: {args}')
+
+    if args.command == 'bench':
+        runtime_pallets_map = {}
+        failed_benchmarks = {}
+        successful_benchmarks = {}
+
+        profile = "release"
+
+        print(f'Provided runtimes: {args.runtime}')
+        # convert to mapped dict
+        runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
+        runtimesMatrix = {x['name']: x for x in runtimesMatrix}
+        print(f'Filtered out runtimes: {runtimesMatrix}')
+
+        # loop over remaining runtimes to collect available pallets
+        for runtime in runtimesMatrix.values():
+            os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
+            print(f'-- listing pallets for benchmark for {runtime["name"]}')
+            wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
+            output = os.popen(
+                f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read()
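+            # Each line of the bencher's `--list` output is comma-separated and starts
+            # with the pallet name; only that first column is kept, de-duplicated via a set.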
+            raw_pallets = output.strip().split('\n')
+
+            all_pallets = set()
+            for pallet in raw_pallets:
+                if pallet:
+                    all_pallets.add(pallet.split(',')[0].strip())
+
+            pallets = list(all_pallets)
+            print(f'Pallets in {runtime["name"]}: {pallets}')
+            runtime_pallets_map[runtime['name']] = pallets
+
+            print('\n')
+
+        # filter out only the specified pallets from collected runtimes/pallets
+        if args.pallet:
+            print(f'Pallets: {args.pallet}')
+            new_pallets_map = {}
+            # keep only specified pallets if they exist in the runtime
+            for runtime in runtime_pallets_map:
+                if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
+                    new_pallets_map[runtime] = args.pallet
+
+            runtime_pallets_map = new_pallets_map
+
+            print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')
+
+        if not runtime_pallets_map:
+            if args.pallet and not args.runtime:
+                print(f"No pallets {args.pallet} found in any runtime")
+            elif args.runtime and not args.pallet:
+                print(f"{args.runtime} runtime does not have any pallets")
+            elif args.runtime and args.pallet:
+                print(f"No pallets {args.pallet} found in {args.runtime}")
+            else:
+                print('No runtimes found')
+            sys.exit(1)
-args, unknown = parser.parse_known_args()
-
-print(f'args: {args}')
-
-if args.command == 'bench':
-    runtime_pallets_map = {}
-    failed_benchmarks = {}
-    successful_benchmarks = {}
-
-    profile = "release"
-
-    print(f'Provided runtimes: {args.runtime}')
-    # convert to mapped dict
-    runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
-    runtimesMatrix = {x['name']: x for x in runtimesMatrix}
-    print(f'Filtered out runtimes: {runtimesMatrix}')
-
-    # loop over remaining runtimes to collect available pallets
-    for runtime in runtimesMatrix.values():
-        os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
-        print(f'-- listing pallets for benchmark for {runtime["name"]}')
-        wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
-        output = os.popen(
-            f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read()
-        raw_pallets = output.strip().split('\n')
-
-        all_pallets = set()
-        for pallet in raw_pallets:
-            if pallet:
-                all_pallets.add(pallet.split(',')[0].strip())
-
-        pallets = list(all_pallets)
-        print(f'Pallets in {runtime}: {pallets}')
-        runtime_pallets_map[runtime['name']] = pallets
-
-    # filter out only the specified pallets from collected runtimes/pallets
-    if args.pallet:
-        print(f'Pallet: {args.pallet}')
-        new_pallets_map = {}
-        # keep only specified pallets if they exist in the runtime
         for runtime in runtime_pallets_map:
-            if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
-                new_pallets_map[runtime] = args.pallet
-
-        runtime_pallets_map = new_pallets_map
-
-        print(f'Filtered out runtimes & pallets: {runtime_pallets_map}')
-
-    if not runtime_pallets_map:
-        if args.pallet and not args.runtime:
-            print(f"No pallets {args.pallet} found in any runtime")
-        elif args.runtime and not args.pallet:
-            print(f"{args.runtime} runtime does not have any pallets")
-        elif args.runtime and args.pallet:
-            print(f"No pallets {args.pallet} found in {args.runtime}")
-        else:
-            print('No runtimes found')
-        sys.exit(1)
-
-    header_path = os.path.abspath('./substrate/HEADER-APACHE2')
-
-    for runtime in runtime_pallets_map:
-        for pallet in runtime_pallets_map[runtime]:
-            config = runtimesMatrix[runtime]
-            print(f'-- config: {config}')
-            if runtime == 'dev':
-                # to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
-                search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
-                print(f'-- running: {search_manifest_path}')
-                manifest_path = os.popen(search_manifest_path).read()
-                if not manifest_path:
-                    print(f'-- pallet {pallet} not found in dev runtime')
-                    exit(1)
-                package_dir = os.path.dirname(manifest_path)
-                print(f'-- package_dir: {package_dir}')
-                print(f'-- manifest_path: {manifest_path}')
-                output_path = os.path.join(package_dir, "src", "weights.rs")
-            else:
-                default_path = f"./{config['path']}/src/weights"
-                xcm_path = f"./{config['path']}/src/weights/xcm"
-                output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path
-            print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
-            cmd = f"frame-omni-bencher v1 benchmark pallet --extrinsic=* --runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm --pallet={pallet} --header={header_path} --output={output_path} --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 --no-storage-info --no-min-squares --no-median-slopes"
-            print(f'-- Running: {cmd}')
-            status = os.system(cmd)
-            if status != 0 and not args.continue_on_fail:
-                print(f'Failed to benchmark {pallet} in {runtime}')
-                sys.exit(1)
-
-            # Otherwise collect failed benchmarks and print them at the end
-            # push failed pallets to failed_benchmarks
-            if status != 0:
-                failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
-            else:
-                successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
-
-    if failed_benchmarks:
-        print('❌ Failed benchmarks of runtimes/pallets:')
-        for runtime, pallets in failed_benchmarks.items():
-            print(f'-- {runtime}: {pallets}')
-
-    if successful_benchmarks:
-        print('✅ Successful benchmarks of runtimes/pallets:')
-        for runtime, pallets in successful_benchmarks.items():
-            print(f'-- {runtime}: {pallets}')
-
-elif args.command == 'fmt':
-    command = f"cargo +nightly fmt"
-    print(f'Formatting with `{command}`')
-    nightly_status = os.system(f'{command}')
-    taplo_status = os.system('taplo format --config .config/taplo.toml')
-
-    if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
-        print('❌ Failed to format code')
-        sys.exit(1)
-
-elif args.command == 'update-ui':
-    command = 'sh ./scripts/update-ui-tests.sh'
-    print(f'Updating ui with `{command}`')
-    status = os.system(f'{command}')
-
-    if status != 0 and not args.continue_on_fail:
-        print('❌ Failed to format code')
-        sys.exit(1)
-
-print('🚀 Done')
+            for pallet in runtime_pallets_map[runtime]:
+                config = runtimesMatrix[runtime]
+                header_path = os.path.abspath(config['header'])
+                template = None
+
+                print(f'-- config: {config}')
+                if runtime == 'dev':
+                    # to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
+                    search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
+                    print(f'-- running: {search_manifest_path}')
+                    manifest_path = os.popen(search_manifest_path).read()
+                    if not manifest_path:
+                        print(f'-- pallet {pallet} not found in dev runtime')
+                        sys.exit(1)
+                    package_dir = os.path.dirname(manifest_path)
+                    print(f'-- package_dir: {package_dir}')
+                    print(f'-- manifest_path: {manifest_path}')
+                    output_path = os.path.join(package_dir, "src", "weights.rs")
+                    template = config['template']
+                else:
+                    default_path = f"./{config['path']}/src/weights"
+                    xcm_path = f"./{config['path']}/src/weights/xcm"
+                    output_path = default_path
+                    if pallet.startswith("pallet_xcm_benchmarks"):
+                        template = config['template']
+                        output_path = xcm_path
+
+                print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
+                cmd = f"frame-omni-bencher v1 benchmark pallet " \
+                      f"--extrinsic=* " \
+                      f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \
+                      f"--pallet={pallet} " \
+                      f"--header={header_path} " \
+                      f"--output={output_path} " \
+                      f"--wasm-execution=compiled " \
+                      f"--steps=50 " \
+                      f"--repeat=20 " \
+                      f"--heap-pages=4096 " \
+                      f"{f'--template={template} ' if template else ''}" \
+                      f"--no-storage-info --no-min-squares --no-median-slopes"
+                print(f'-- Running: {cmd} \n')
+                status = os.system(cmd)
+                if status != 0 and not args.continue_on_fail:
+                    print(f'Failed to benchmark {pallet} in {runtime}')
+                    sys.exit(1)
+
+                # Otherwise collect failed benchmarks and print them at the end
+                # push failed pallets to failed_benchmarks
+                if status != 0:
+                    failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
+                else:
+                    successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
+
+        if failed_benchmarks:
+            print('❌ Failed benchmarks of runtimes/pallets:')
+            for runtime, pallets in failed_benchmarks.items():
+                print(f'-- {runtime}: {pallets}')
+
+        if successful_benchmarks:
+            print('✅ Successful benchmarks of runtimes/pallets:')
+            for runtime, pallets in successful_benchmarks.items():
+                print(f'-- {runtime}: {pallets}')
+
+    elif args.command == 'fmt':
+        command = "cargo +nightly fmt"
+        print(f'Formatting with `{command}`')
+        nightly_status = os.system(f'{command}')
+        taplo_status = os.system('taplo format --config .config/taplo.toml')
+
+        if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
+            print('❌ Failed to format code')
+            sys.exit(1)
+
+    elif args.command == 'update-ui':
+        command = 'sh ./scripts/update-ui-tests.sh'
+        print(f'Updating ui with `{command}`')
+        status = os.system(f'{command}')
+
+        if status != 0 and not args.continue_on_fail:
+            print('❌ Failed to update UI tests')
+            sys.exit(1)
+
+    elif args.command == 'prdoc':
+        # Call the main function from the .github/scripts/generate-prdoc.py module
+        exit_code = generate_prdoc.main(args)
+        if exit_code != 0 and not args.continue_on_fail:
+            print('❌ Failed to generate prdoc')
+            sys.exit(exit_code)
+
+    print('🚀 Done')
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py
new file mode 100644
index 000000000000..a2f29b075dae
--- /dev/null
+++ b/.github/scripts/cmd/test_cmd.py
@@ -0,0 +1,342 @@
+import unittest
+from unittest.mock import patch, mock_open, MagicMock, call
+import json
+import sys
+import os
+import argparse
+
+# Mock data for runtimes-matrix.json
+mock_runtimes_matrix = [
+    {"name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs"},
+    {"name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"},
+    {"name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"},
+    {"name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs"},
+]
+
+def get_mock_bench_output(runtime, pallets, output_path, header, template = None):
+    return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \
+           f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \
+           f"--pallet={pallets} --header={header} " \
+           f"--output={output_path} " \
+           f"--wasm-execution=compiled " \
+           f"--steps=50 --repeat=20 --heap-pages=4096 " \
+           f"{f'--template={template} ' if template else ''}" \
+           f"--no-storage-info --no-min-squares --no-median-slopes"
"polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, + {"name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs"}, +] + +def get_mock_bench_output(runtime, pallets, output_path, header, template = None): + return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ + f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--pallet={pallets} --header={header} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 --repeat=20 --heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes" + +class TestCmd(unittest.TestCase): + + def setUp(self): + self.patcher1 = patch('builtins.open', new_callable=mock_open, read_data=json.dumps(mock_runtimes_matrix)) + self.patcher2 = patch('json.load', return_value=mock_runtimes_matrix) + self.patcher3 = patch('argparse.ArgumentParser.parse_known_args') + self.patcher4 = patch('os.system', return_value=0) + self.patcher5 = patch('os.popen') + self.patcher6 = patch('importlib.util.spec_from_file_location', return_value=MagicMock()) + self.patcher7 = patch('importlib.util.module_from_spec', return_value=MagicMock()) + self.patcher8 = patch('cmd.generate_prdoc.main', return_value=0) + + self.mock_open = self.patcher1.start() + self.mock_json_load = self.patcher2.start() + self.mock_parse_args = self.patcher3.start() + self.mock_system = self.patcher4.start() + self.mock_popen = self.patcher5.start() + self.mock_spec_from_file_location = self.patcher6.start() + self.mock_module_from_spec = self.patcher7.start() + self.mock_generate_prdoc_main = self.patcher8.start() + + # Ensure that cmd.py uses the mock_runtimes_matrix + import cmd + cmd.runtimesMatrix = mock_runtimes_matrix + + def tearDown(self): + self.patcher1.stop() + self.patcher2.stop() + self.patcher3.stop() + self.patcher4.stop() + self.patcher5.stop() + self.patcher6.stop() + self.patcher7.stop() + self.patcher8.stop() + + def test_bench_command_normal_execution_all_runtimes(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), + pallet=['pallet_balances'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\n", # Output for dev runtime + "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime + "pallet_staking\npallet_something\n", # Output for rococo runtime - no pallet here + "pallet_balances\npallet_staking\npallet_something\n", # Output for asset-hub-westend runtime + "./substrate/frame/balances/Cargo.toml\n", # Mock manifest path for dev -> pallet_balances + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + + 
+            call(get_mock_bench_output('kitchensink', 'pallet_balances', './substrate/frame/balances/src/weights.rs', os.path.abspath('substrate/HEADER-APACHE2'), "substrate/.maintain/frame-weight-template.hbs")),
+            call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', os.path.abspath('polkadot/file_header.txt'))),
+            # skips rococo benchmark
+            call(get_mock_bench_output('asset-hub-westend', 'pallet_balances', './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', os.path.abspath('cumulus/file_header.txt'))),
+        ]
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    def test_bench_command_normal_execution(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['westend'],
+            pallet=['pallet_balances', 'pallet_staking'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        header_path = os.path.abspath('polkadot/file_header.txt')
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_balances\npallet_staking\npallet_something\n",  # Output for westend runtime
+        ]
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"),
+
+            # Westend runtime calls
+            call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)),
+            call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)),
+        ]
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+
+    def test_bench_command_normal_execution_xcm(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['westend'],
+            pallet=['pallet_xcm_benchmarks::generic'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        header_path = os.path.abspath('polkadot/file_header.txt')
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n",  # Output for westend runtime
+        ]
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"),
+
+            # Westend runtime calls
+            call(get_mock_bench_output(
+                'westend',
+                'pallet_xcm_benchmarks::generic',
+                './polkadot/runtime/westend/src/weights/xcm',
+                header_path,
+                "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"
+            )),
+        ]
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    def test_bench_command_two_runtimes_two_pallets(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['westend', 'rococo'],
+            pallet=['pallet_balances', 'pallet_staking'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_staking\npallet_balances\n",  # Output for westend runtime
+            "pallet_staking\npallet_balances\n",  # Output for rococo runtime
+        ]
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+        header_path = os.path.abspath('polkadot/file_header.txt')
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"),
+            call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"),
+            # Westend runtime calls
+            call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)),
+            call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)),
+            # Rococo runtime calls
+            call(get_mock_bench_output('rococo', 'pallet_staking', './polkadot/runtime/rococo/src/weights', header_path)),
+            call(get_mock_bench_output('rococo', 'pallet_balances', './polkadot/runtime/rococo/src/weights', header_path)),
+        ]
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    def test_bench_command_one_dev_runtime(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['dev'],
+            pallet=['pallet_balances'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        manifest_dir = "substrate/frame/kitchensink"
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_balances\npallet_something",  # Output for dev runtime
+            manifest_dir + "/Cargo.toml"  # Output for manifest path in dev runtime
+        ]
+        header_path = os.path.abspath('substrate/HEADER-APACHE2')
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"),
+            # Dev runtime calls (kitchensink package)
+            call(get_mock_bench_output(
+                'kitchensink',
+                'pallet_balances',
+                manifest_dir + "/src/weights.rs",
+                header_path,
+                "substrate/.maintain/frame-weight-template.hbs"
+            )),
+        ]
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    def test_bench_command_one_cumulus_runtime(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['asset-hub-westend'],
+            pallet=['pallet_assets'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_assets\n",  # Output for asset-hub-westend runtime
+        ]
+        header_path = os.path.abspath('cumulus/file_header.txt')
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"),
+            # Asset-hub-westend runtime calls
+            call(get_mock_bench_output(
+                'asset-hub-westend',
+                'pallet_assets',
+                './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights',
+                header_path
+            )),
+        ]
+
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    def test_bench_command_one_cumulus_runtime_xcm(self):
+        self.mock_parse_args.return_value = (argparse.Namespace(
+            command='bench',
+            runtime=['asset-hub-westend'],
+            pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
+            continue_on_fail=False,
+            quiet=False,
+            clean=False,
+            image=None
+        ), [])
+        self.mock_popen.return_value.read.side_effect = [
+            "pallet_assets\npallet_xcm_benchmarks::generic\n",  # Output for asset-hub-westend runtime
+        ]
+        header_path = os.path.abspath('cumulus/file_header.txt')
+
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        expected_calls = [
+            # Build calls
+            call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"),
+            # Asset-hub-westend runtime calls
+            call(get_mock_bench_output(
+                'asset-hub-westend',
+                'pallet_xcm_benchmarks::generic',
+                './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm',
+                header_path,
+                "cumulus/templates/xcm-bench-template.hbs"
+            )),
+            call(get_mock_bench_output(
+                'asset-hub-westend',
+                'pallet_assets',
+                './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights',
+                header_path
+            )),
+        ]
+
+        self.mock_system.assert_has_calls(expected_calls, any_order=True)
+
+    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), []))
+    @patch('os.system', return_value=0)
+    def test_fmt_command(self, mock_system, mock_parse_args):
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        mock_system.assert_any_call('cargo +nightly fmt')
+        mock_system.assert_any_call('taplo format --config .config/taplo.toml')
+
+    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), []))
+    @patch('os.system', return_value=0)
+    def test_update_ui_command(self, mock_system, mock_parse_args):
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh')
+
+    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc', continue_on_fail=False), []))
+    @patch('os.system', return_value=0)
+    def test_prdoc_command(self, mock_system, mock_parse_args):
+        with patch('sys.exit') as mock_exit:
+            import cmd
+            cmd.main()
+            mock_exit.assert_not_called()
+
+        self.mock_generate_prdoc_main.assert_called_with(mock_parse_args.return_value[0])
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index ba7def20fcb9..d3b6b523ecfd 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -48,9 +48,8 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
     else:
         print(f"No preexisting PrDoc for PR {pr}")
 
-    prdoc = { "doc": [{}], "crates": [] }
+    prdoc = { "title": title, "doc": [{}], "crates": [] }
 
-    prdoc["title"] = title
     prdoc["doc"][0]["audience"] = audience
     prdoc["doc"][0]["description"] = description
 
@@ -58,13 +57,20 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
     modified_paths = []
     for diff in whatthepatch.parse_patch(patch):
-        modified_paths.append(diff.header.new_path)
+        new_path = diff.header.new_path
+        # Sometimes this lib returns `/dev/null` as the new path...
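+        # (a "/dev/null" new path denotes a deleted file, which would otherwise be walked for a Cargo.toml below)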
+        if not new_path.startswith("/dev"):
+            modified_paths.append(new_path)
 
     modified_crates = {}
     for p in modified_paths:
         # Go up until we find a Cargo.toml
         p = os.path.join(workspace.path, p)
         while not os.path.exists(os.path.join(p, "Cargo.toml")):
+            print(f"Could not find Cargo.toml in {p}")
+            if p == '/':
+                exit(1)
             p = os.path.dirname(p)
 
         with open(os.path.join(p, "Cargo.toml")) as f:
@@ -95,19 +101,41 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
 
     # write the parsed PR documentation back to the file
     with open(path, "w") as f:
-        yaml.dump(prdoc, f)
+        yaml.dump(prdoc, f, sort_keys=False)
     print(f"PrDoc for PR {pr} written to {path}")
 
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--pr", type=int, required=True)
-    parser.add_argument("--audience", type=str, default="TODO")
-    parser.add_argument("--bump", type=str, default="TODO")
-    parser.add_argument("--force", type=str)
-    return parser.parse_args()
+# Make the `description` a multiline string instead of escaping \r\n.
+def setup_yaml():
+    def yaml_multiline_string_presenter(dumper, data):
+        if len(data.splitlines()) > 1:
+            data = '\n'.join([line.rstrip() for line in data.strip().splitlines()])
+            return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
+        return dumper.represent_scalar('tag:yaml.org,2002:str', data)
+
+    yaml.add_representer(str, yaml_multiline_string_presenter)
+
+# setup_parser is also used by cmd/cmd.py
+def setup_parser(parser=None):
+    if parser is None:
+        parser = argparse.ArgumentParser()
+    parser.add_argument("--pr", type=int, required=True, help="The PR number to generate the PrDoc for.")
+    parser.add_argument("--audience", type=str, default="TODO", help="The audience whom the changes may concern.")
+    parser.add_argument("--bump", type=str, default="TODO", help="A default bump level for all crates.")
+    parser.add_argument("--force", type=str, help="Whether to overwrite any existing PrDoc.")
+
+    return parser
 
-if __name__ == "__main__":
-    args = parse_args()
-    force = True if args.force.lower() == "true" else False
+def main(args):
+    force = (args.force or "false").lower() == "true"
     print(f"Args: {args}, force: {force}")
-    from_pr_number(args.pr, args.audience, args.bump, force)
+    setup_yaml()
+    try:
+        from_pr_number(args.pr, args.audience, args.bump, force)
+        return 0
+    except Exception as e:
+        print(f"Error generating prdoc: {e}")
+        return 1
+
+if __name__ == "__main__":
+    args = setup_parser().parse_args()
+    main(args)
\ No newline at end of file
diff --git a/.github/scripts/generate-prdoc.requirements.txt b/.github/scripts/generate-prdoc.requirements.txt
new file mode 100644
index 000000000000..c17aceff63a0
--- /dev/null
+++ b/.github/scripts/generate-prdoc.requirements.txt
@@ -0,0 +1,6 @@
+requests
+cargo-workspace
+PyGithub
+whatthepatch
+pyyaml
+toml
\ No newline at end of file
diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
new file mode 100644
index 000000000000..81a3c14edec8
--- /dev/null
+++ b/.github/scripts/release/release_lib.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+
+# Set the new version by replacing the value of the constant given as pattern
+# in the file.
+#
+# input: pattern, version, file
+# output: none
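+#
+# usage example (values are illustrative; mirrors the call in
+# set_polkadot_parachain_binary_version below):
+#   set_version "\(^version = \)\".*\"" "1.16.0" "Cargo.toml"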
+set_version() {
+    pattern=$1
+    version=$2
+    file=$3
+
+    sed -i "s/$pattern/\1\"${version}\"/g" $file
+    return 0
+}
+
+# Commit changes to git with a specific message.
+# "|| true" keeps the script from failing with exit code 1
+# in case there is nothing to commit.
+#
+# input: MESSAGE (any message which should be used for the commit)
+# output: none
+commit_with_message() {
+    MESSAGE=$1
+    git commit -a -m "$MESSAGE" || true
+}
+
+# Return the list of runtimes, filtered
+# input: none
+# output: list of filtered runtimes
+get_filtered_runtimes_list() {
+    grep_filters=("runtime.*" "test|template|starters|substrate")
+
+    git grep spec_version: | grep .rs: | grep -e "${grep_filters[0]}" | grep "lib.rs" | grep -vE "${grep_filters[1]}" | cut -d: -f1
+}
+
+# Sets the provided spec version
+# input: version
+set_spec_versions() {
+    NEW_VERSION=$1
+    runtimes_list=(${@:2})
+
+    printf "Setting spec_version to $NEW_VERSION\n"
+
+    for f in ${runtimes_list[@]}; do
+        printf " processing $f"
+        sed -ri "s/spec_version: [0-9]+_[0-9]+_[0-9]+,/spec_version: $NEW_VERSION,/" $f
+    done
+
+    commit_with_message "Bump spec_version to $NEW_VERSION"
+
+    git_show_log 'spec_version'
+}
+
+# Displays formatted results of the git log command
+# for the given pattern which needs to be found in logs
+# input: pattern, count (optional, default is 10)
+git_show_log() {
+    PATTERN="$1"
+    COUNT=${2:-10}
+    git log --pretty=format:"%h %ad | %s%d [%an]" --graph --date=iso-strict | \
+        head -n $COUNT | grep -iE "$PATTERN" --color=always -z
+}
+
+# Get a spec_version number from the crate version
+#
+# ## inputs
+# - v1.12.0 or 1.12.0
+#
+# ## output:
+# 1_012_000 or 1_012_001 if SUFFIX is set
+function get_spec_version() {
+    INPUT=$1
+    SUFFIX=${SUFFIX:-000} # this variable makes it possible to set a specific runtime version like 93826; it can be initialised as a system variable
+    [[ $INPUT =~ .*([0-9]+\.[0-9]+\.[0-9]{1,2}).* ]]
+    VERSION="${BASH_REMATCH[1]}"
+    MATCH="${BASH_REMATCH[0]}"
+    if [ -z $MATCH ]; then
+        return 1
+    else
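+        # each dot becomes "_0" and the final component is then replaced by
+        # SUFFIX, e.g. 1.12.0 -> 1_012_000 with the default SUFFIX of 000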
+        SPEC_VERSION="$(sed -e "s/\./_0/g" -e "s/_[^_]*\$/_$SUFFIX/" <<< $VERSION)"
+        echo "$SPEC_VERSION"
+        return 0
+    fi
+}
+
+# Reorganize the prdoc files for the release
+#
+# input: VERSION (e.g. v1.0.0)
+# output: none
+reorder_prdocs() {
+    VERSION="$1"
+
+    printf "[+] ℹī¸ Reordering prdocs:"
+
+    VERSION=$(sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/' <<< "$VERSION") # getting rid of the 'v' prefix
+    mkdir -p "prdoc/$VERSION"
+    mv prdoc/pr_*.prdoc prdoc/$VERSION
+    git add -A
+    commit_with_message "Reordering prdocs for the release $VERSION"
+}
+
+# Bump the binary version of the polkadot-parachain binary with the
+# new bumped version and commit changes.
+#
+# input: version e.g. 1.16.0
+set_polkadot_parachain_binary_version() {
+    bumped_version="$1"
+    cargo_toml_file="$2"
+
+    set_version "\(^version = \)\".*\"" $bumped_version $cargo_toml_file
+
+    cargo update --workspace --offline # we need this to update Cargo.lock with the new versions as well
+
+    MESSAGE="Bump versions in: ${cargo_toml_file}"
+    commit_with_message "$MESSAGE"
+    git_show_log "$MESSAGE"
+}
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5b581c45fb85..43123cdbfc41 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -21,7 +21,7 @@ workflow:
     - if: $CI_COMMIT_BRANCH
 
 variables:
-  CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
+  CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ]
   # BUILDAH_IMAGE is defined in group variables
   BUILDAH_COMMAND: "buildah --storage-driver overlay2"
   RELENG_SCRIPTS_BRANCH: "master"
@@ -39,7 +39,7 @@ default:
       - runner_system_failure
      - unknown_failure
      - api_failure
-  cache: {}
+  cache: { }
   interruptible: true
 
 .collect-artifacts:
@@ -68,8 +68,8 @@ default:
 
 .common-before-script:
   before_script:
-    - !reference [.job-switcher, before_script]
-    - !reference [.pipeline-stopper-vars, script]
+    - !reference [ .job-switcher, before_script ]
+    - !reference [ .pipeline-stopper-vars, script ]
 
 .job-switcher:
   before_script:
@@ -78,8 +78,8 @@ default:
 .kubernetes-env:
   image: "${CI_IMAGE}"
   before_script:
-    - !reference [.common-before-script, before_script]
-    - !reference [.prepare-env, before_script]
+    - !reference [ .common-before-script, before_script ]
+    - !reference [ .prepare-env, before_script ]
   tags:
     - kubernetes-parity-build
@@ -107,12 +107,12 @@ default:
 .docker-env:
   image: "${CI_IMAGE}"
   variables:
-    FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION]
+    FL_FORKLIFT_VERSION: !reference [ .forklift, variables, FL_FORKLIFT_VERSION ]
   before_script:
-    - !reference [.common-before-script, before_script]
-    - !reference [.prepare-env, before_script]
-    - !reference [.rust-info-script, script]
-    - !reference [.forklift-cache, before_script]
+    - !reference [ .common-before-script, before_script ]
+    - !reference [ .prepare-env, before_script ]
+    - !reference [ .rust-info-script, script ]
+    - !reference [ .forklift-cache, before_script ]
   tags:
     - linux-docker
@@ -269,56 +269,6 @@ remove-cancel-pipeline-message:
   trigger:
     project: "parity/infrastructure/ci_cd/pipeline-stopper"
 
-cancel-pipeline-cargo-check-benches1:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-benches 1/2"
-
-cancel-pipeline-cargo-check-benches2:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-benches 2/2"
-
-cancel-pipeline-cargo-check-each-crate-1:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 1/6"
-
-cancel-pipeline-cargo-check-each-crate-2:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 2/6"
-
-cancel-pipeline-cargo-check-each-crate-3:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 3/6"
-
-cancel-pipeline-cargo-check-each-crate-4:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 4/6"
-
-cancel-pipeline-cargo-check-each-crate-5:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 5/6"
-
-cancel-pipeline-cargo-check-each-crate-6:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 6/6"
-
-cancel-pipeline-cargo-check-each-crate-macos:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: cargo-check-each-crate-macos
-
-cancel-pipeline-check-tracing:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: check-tracing
-
 cancel-pipeline-build-linux-stable:
   extends: .cancel-pipeline-template
   needs:
@@ -334,22 +284,12 @@ cancel-pipeline-build-linux-substrate:
   needs:
     - job: build-linux-substrate
 
-cancel-pipeline-test-node-metrics:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: test-node-metrics
-
-cancel-pipeline-test-frame-ui:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: test-frame-ui
-
-cancel-pipeline-test-frame-examples-compile-to-wasm:
+cancel-pipeline-build-short-benchmark:
   extends: .cancel-pipeline-template
   needs:
-    - job: test-frame-examples-compile-to-wasm
+    - job: build-short-benchmark
 
-cancel-pipeline-build-short-benchmark:
+cancel-pipeline-cargo-check-each-crate-macos:
   extends: .cancel-pipeline-template
   needs:
-    - job: build-short-benchmark
+    - job: cargo-check-each-crate-macos
\ No newline at end of file
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 2212c1aeb0a8..7d1f37dddd51 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -1,25 +1,3 @@
-# from substrate
-# not sure if it's needed in monorepo
-check-dependency-rules:
-  stage: check
-  extends:
-    - .kubernetes-env
-    - .test-refs-no-trigger-prs-only
-  variables:
-    CI_IMAGE: "paritytech/tools:latest"
-  allow_failure: true
-  script:
-    - cd substrate/
-    - ../.gitlab/ensure-deps.sh
-
-test-rust-features:
-  stage: check
-  extends:
-    - .kubernetes-env
-    - .test-refs-no-trigger-prs-only
-  script:
-    - bash .gitlab/rust-features.sh .
-
 job-starter:
   stage: check
   image: paritytech/tools:latest
@@ -29,20 +7,3 @@ job-starter:
   allow_failure: true
   script:
     - echo ok
-
-check-rust-feature-propagation:
-  stage: check
-  extends:
-    - .kubernetes-env
-    - .common-refs
-  script:
-    - zepter run check
-
-check-toml-format:
-  stage: check
-  extends:
-    - .kubernetes-env
-    - .common-refs
-  script:
-    - taplo format --check --config .config/taplo.toml
-    - echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues"
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index ca3a2394fb39..0879870ae13c 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -137,159 +137,6 @@ test-rustdoc:
   script:
     - time cargo doc --workspace --all-features --no-deps
 
-test-node-metrics:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-    - .collect-artifacts-short
-  variables:
-    RUST_TOOLCHAIN: stable
-    # Enable debug assertions since we are running optimized builds for testing
-    # but still want to have debug assertions.
-    RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
-  script:
-    # Build the required workers.
-    - cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker --profile testnet --verbose --locked
-    - mkdir -p artifacts
-    - time cargo test --profile testnet
-      --locked
-      --features=runtime-metrics -p polkadot-node-metrics > artifacts/log.txt
-
-test-deterministic-wasm:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-  # DAG
-  needs:
-    - job: test-frame-ui
-      artifacts: false
-  script:
-    # build runtime
-    - WASM_BUILD_NO_COLOR=1 cargo build -q --locked --release -p westend-runtime -p rococo-runtime
-    # make checksum
-    - sha256sum target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256
-    - cargo clean
-    # build again
-    - WASM_BUILD_NO_COLOR=1 cargo build -q --locked --release -p westend-runtime -p rococo-runtime
-    # confirm checksum
-    - sha256sum -c checksum.sha256
-
-cargo-check-benches:
-  stage: test
-  artifacts:
-    expire_in: 10 days
-  variables:
-    CI_JOB_NAME: "cargo-check-benches"
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-    - .collect-artifacts
-    - .pipeline-stopper-artifacts
-  before_script:
-    # TODO: DON'T FORGET TO CHANGE FOR PROD VALUES!!!
-    # merges in the master branch on PRs. skip if base is not master
-    - 'if [ $CI_COMMIT_REF_NAME != "master" ]; then
-      BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech-stg/polkadot-sdk/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref);
-      printf "Merging base branch %s\n" "${BASE:=master}";
-      if [ $BASE != "master" ]; then
-      echo "$BASE is not master, skipping merge";
-      else
-      git config user.email "ci@gitlab.parity.io";
-      git fetch origin "refs/heads/${BASE}";
-      git merge --verbose --no-edit FETCH_HEAD;
-      fi
-      fi'
-    - !reference [.forklift-cache, before_script]
-  parallel: 2
-  script:
-    - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA
-    # this job is executed in parallel on two runners
-    - echo "___Running benchmarks___";
-    - case ${CI_NODE_INDEX} in
-      1)
-      SKIP_WASM_BUILD=1 time cargo check --locked --benches --all;
-      cargo run --locked --release -p node-bench -- ::trie::read::small --json
-      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json;
-      echo "___Cache could be uploaded___";
-      ;;
-      2)
-      cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json
-      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::sr25519::transfer_keep_alive::paritydb::small.json
-      ;;
-      esac
-
-node-bench-regression-guard:
-  # it's not belong to `build` semantically, but dag jobs can't depend on each other
-  # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632
-  # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402
-  stage: build
-  extends:
-    - .docker-env
-    - .common-refs
-  needs:
-    # this is a DAG
-    - job: cargo-check-benches
-      artifacts: true
-    # polls artifact from master to compare with current result
-    # need to specify both parallel jobs from master because of the bug
-    # https://gitlab.com/gitlab-org/gitlab/-/issues/39063
-    - project: $CI_PROJECT_PATH
-      job: "cargo-check-benches 1/2"
-      ref: master
-      artifacts: true
-    - project: $CI_PROJECT_PATH
-      job: "cargo-check-benches 2/2"
-      ref: master
-      artifacts: true
-  variables:
-    CI_IMAGE: "paritytech/node-bench-regression-guard:latest"
-  before_script: [""]
-  script:
-    - if [ $(ls -la artifacts/benches/ | grep master | wc -l) == 0 ]; then
-      echo "Couldn't find master artifacts";
-      exit 1;
-      fi
-    - echo "------- IMPORTANT -------"
-    - echo "node-bench-regression-guard depends on the results of a cargo-check-benches job"
-    - echo "In case of this job failure, check your pipeline's cargo-check-benches"
-    - "node-bench-regression-guard --reference artifacts/benches/master-*
-      --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA"
-  after_script: [""]
-
-# if this fails run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally
-# see ./docs/contributor/CONTRIBUTING.md#ui-tests
-test-frame-ui:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-  # DAG
-  needs:
-    - job: test-frame-examples-compile-to-wasm
-      artifacts: false
-  variables:
-    # Enable debug assertions since we are running optimized builds for testing
-    # but still want to have debug assertions.
-    RUSTFLAGS: "-C debug-assertions -D warnings"
-    RUST_BACKTRACE: 1
-    WASM_BUILD_NO_COLOR: 1
-    WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
-    # Ensure we run the UI tests.
-    RUN_UI_TESTS: 1
-  script:
-    - time cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental
-    - time cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental
-    - time cargo test --locked -q --profile testnet -p xcm-procedural
-    - time cargo test --locked -q --profile testnet -p frame-election-provider-solution-type
-    - time cargo test --locked -q --profile testnet -p sp-api-test
-    # There is multiple version of sp-runtime-interface in the repo. So we point to the manifest.
-    - time cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml
-    - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true
-
 quick-benchmarks-omni:
   stage: test
   extends:
@@ -307,90 +154,6 @@ quick-benchmarks-omni:
     - time cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks
     - time cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet
 
-test-frame-examples-compile-to-wasm:
-  # into one job
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-  # DAG
-  needs:
-    - job: test-full-crypto-feature
-      artifacts: false
-  variables:
-    # Enable debug assertions since we are running optimized builds for testing
-    # but still want to have debug assertions.
-    RUSTFLAGS: "-C debug-assertions"
-    RUST_BACKTRACE: 1
-  script:
-    - cd ./substrate/frame/examples/offchain-worker/
-    - cargo build --locked --target=wasm32-unknown-unknown --no-default-features
-    - cd ../basic
-    - cargo build --locked --target=wasm32-unknown-unknown --no-default-features
-  # FIXME
-  allow_failure: true
-
-# more information about this job can be found here:
-# https://github.com/paritytech/substrate/pull/6916
-check-tracing:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-    - .pipeline-stopper-artifacts
-  script:
-    # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases
-    - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features
-    - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features --features=with-tracing
-
-# Check that `westend-runtime` compiles with the `metadata-hash` feature enabled.
-check-metadata-hash:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-    - .pipeline-stopper-artifacts
-  script:
-    - time cargo build --locked -p westend-runtime --features metadata-hash
-
-# more information about this job can be found here:
-# https://github.com/paritytech/substrate/pull/3778
-test-full-crypto-feature:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-  variables:
-    # Enable debug assertions since we are running optimized builds for testing
-    # but still want to have debug assertions.
-    RUSTFLAGS: "-C debug-assertions"
-    RUST_BACKTRACE: 1
-  script:
-    - cd substrate/primitives/core/
-    - time cargo build --locked --no-default-features --features full_crypto
-    - cd ../application-crypto
-    - time cargo build --locked --no-default-features --features full_crypto
-
-cargo-check-each-crate:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-  # - .collect-artifacts
-  variables:
-    RUSTFLAGS: "-D warnings"
-    # $CI_JOB_NAME is set manually so that cache could be shared for all jobs
-    # "cargo-check-each-crate I/N" jobs
-    CI_JOB_NAME: cargo-check-each-crate
-  timeout: 2h
-  script:
-    - PYTHONUNBUFFERED=x time .gitlab/check-each-crate.py "$CI_NODE_INDEX" "$CI_NODE_TOTAL"
-  parallel: 6
-
 cargo-check-each-crate-macos:
   stage: test
   extends:
@@ -412,38 +175,3 @@ cargo-check-each-crate-macos:
   timeout: 2h
   tags:
     - osx
-
-cargo-hfuzz:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-  # DAG
-  needs:
-    - job: check-tracing
-      artifacts: false
-  variables:
-    # max 10s per iteration, 60s per file
-    HFUZZ_RUN_ARGS: >
-      --exit_upon_crash
-      --exit_code_upon_crash 1
-      --timeout 10
-      --run_time 60
-    # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes:
-    # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian
-    # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr
-    # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR
-    HFUZZ_BUILD_ARGS: >
-      --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs"
-      --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981"
-  artifacts:
-    name: "hfuzz-$CI_COMMIT_SHORT_SHA"
-    expire_in: 7 days
-    when: on_failure
-    paths:
-      - substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/
-  script:
-    - cd ./substrate/primitives/arithmetic/fuzzer
-    - cargo hfuzz build
-    - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name');
-      do cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done
diff --git a/Cargo.lock b/Cargo.lock
index 7c4a730af42f..85520f21853e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -157,7 +157,7 @@ dependencies = [
  "dunce",
  "heck 0.4.1",
  "proc-macro-error",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
  "syn-solidity",
@@ -284,7 +284,7 @@ dependencies = [
  "include_dir",
  "itertools 0.10.5",
  "proc-macro-error",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -518,7 +518,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565"
 dependencies = [
  "num-bigint",
  "num-traits",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -620,7 +620,7 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -724,7 +724,7 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
  "synstructure 0.12.6",
 ]
@@ -736,7 +736,7 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
  "synstructure 0.13.1",
 ]
@@ -748,7 +748,7 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -759,7 +759,7 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -1290,7 +1290,7 @@ version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -1303,11 +1303,11 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae"
 
 [[package]]
 name = "async-trait"
-version = "0.1.80"
+version = "0.1.82"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
+checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -1366,7 +1366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89"
 dependencies = [
  "proc-macro-error",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -1502,8 +1502,8 @@ dependencies = [
  "lazy_static",
  "lazycell",
  "peeking_take_while",
- "prettyplease 0.2.12",
- "proc-macro2 1.0.82",
+ "prettyplease",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "regex",
  "rustc-hash 1.1.0",
@@ -2421,12 +2421,6 @@ dependencies = [
  "tuplex",
 ]
 
-[[package]]
-name = "bs58"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
-
 [[package]]
 name = "bs58"
 version = "0.5.1"
@@ -2488,9 +2482,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 
 [[package]]
 name = "bytes"
-version = "1.6.0"
+version = "1.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
 
 [[package]]
 name = "bzip2-sys"
@@ -2850,7 +2844,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
 dependencies = [
  "heck 0.4.1",
  "proc-macro-error",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -2862,7 +2856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e"
 dependencies = [
  "heck 0.5.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -3050,7 +3044,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b"
 dependencies = [
  "nom",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -3633,21 +3627,6 @@ dependencies = [
  "wasmtime-types",
 ]
 
-[[package]]
-name = "crc"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2b432c56615136f8dba245fed7ec3d5518c500a31108661067e61e72fe7e6bc"
-dependencies = [
- "crc-catalog",
-]
-
-[[package]]
-name = "crc-catalog"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
-
 [[package]]
 name = "crc32fast"
 version = "1.3.2"
@@ -4173,7 +4152,7 @@ name = "cumulus-pallet-parachain-system-proc-macro"
 version = "0.6.0"
 dependencies = [
  "proc-macro-crate 3.1.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -4709,7 +4688,7 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -4748,7 +4727,7 @@ dependencies = [
  "cc",
  "codespan-reporting",
  "once_cell",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "scratch",
  "syn 2.0.65",
@@ -4766,7 +4745,7 @@ version = "1.0.106"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -4872,7 +4851,7 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -4883,7 +4862,7 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -4894,7 +4873,7 @@ version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -4906,7 +4885,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
 dependencies = [
  "convert_case",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "rustc_version 0.4.0",
  "syn 1.0.109",
@@ -5002,7 +4981,7 @@ version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5062,7 +5041,7 @@ dependencies = [
  "common-path",
  "derive-syn-parse",
  "once_cell",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "regex",
  "syn 2.0.65",
@@ -5111,7 +5090,7 @@ version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -5259,7 +5238,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116"
 dependencies = [
  "heck 0.4.1",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -5271,7 +5250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
 dependencies = [
  "heck 0.4.1",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5291,7 +5270,7 @@ version = "0.7.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5302,7 +5281,7 @@ version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5504,8 +5483,8 @@ dependencies = [
  "blake2 0.10.6",
  "file-guard",
  "fs-err",
- "prettyplease 0.2.12",
- "proc-macro2 1.0.82",
+ "prettyplease",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5577,7 +5556,7 @@ dependencies = [
  "expander",
  "indexmap 2.2.3",
  "proc-macro-crate 3.1.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -5756,21 +5735,6 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
 [[package]]
 name = "fork-tree"
 version = "12.0.0"
@@ -5909,7 +5873,7 @@ dependencies = [
  "frame-support",
  "parity-scale-codec",
  "proc-macro-crate 3.1.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "scale-info",
  "sp-arithmetic",
@@ -6104,7 +6068,7 @@ dependencies = [
  "parity-scale-codec",
  "pretty_assertions",
  "proc-macro-warning 1.0.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "regex",
  "scale-info",
@@ -6123,7 +6087,7 @@ version = "10.0.0"
 dependencies = [
  "frame-support-procedural-tools-derive",
  "proc-macro-crate 3.1.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -6132,7 +6096,7 @@ dependencies = [
 name = "frame-support-procedural-tools-derive"
 version = "11.0.0"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -6383,7 +6347,7 @@ version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -6795,6 +6759,51 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46"
 
+[[package]]
+name = "hickory-proto"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512"
+dependencies = [
+ "async-trait",
+ "cfg-if",
+ "data-encoding",
+ "enum-as-inner 0.6.0",
+ "futures-channel",
+ "futures-io",
+ "futures-util",
+ "idna 0.4.0",
+ "ipnet",
+ "once_cell",
+ "rand",
+ "thiserror",
+ "tinyvec",
+ "tokio",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "hickory-resolver"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243"
+dependencies = [
+ "cfg-if",
+ "futures-util",
+ "hickory-proto",
+ "ipconfig",
+ "lru-cache",
+ "once_cell",
+ "parking_lot 0.12.3",
+ "rand",
+ "resolv-conf",
+ "smallvec",
+ "thiserror",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "hkdf"
 version = "0.12.4"
@@ -7176,7 +7185,7 @@ version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
@@ -7196,7 +7205,7 @@ version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
 ]
@@ -7555,7 +7564,7 @@ checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715"
 dependencies = [
  "heck 0.5.0",
  "proc-macro-crate 3.1.0",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -7733,9 +7742,9 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 
 [[package]]
 name = "lazycell"
@@ -7751,9 +7760,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
 
 [[package]]
 name = "libc"
-version = "0.2.155"
+version = "0.2.158"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
 
 [[package]]
 name = "libflate"
@@ -7946,7 +7955,7 @@ version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8"
 dependencies = [
- "bs58 0.5.1",
+ "bs58",
  "ed25519-dalek",
  "hkdf",
  "multihash 0.19.1",
@@ -8083,7 +8092,7 @@ dependencies = [
  "libp2p-tls",
  "log",
  "parking_lot 0.12.3",
- "quinn 0.10.2",
+ "quinn",
  "rand",
  "ring 0.16.20",
  "rustls 0.21.7",
@@ -8141,7 +8150,7 @@ checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74"
 dependencies = [
  "heck 0.4.1",
  "proc-macro-warning 0.4.2",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -8404,21 +8413,22 @@ dependencies = [
 
 [[package]]
 name = "litep2p"
-version = "0.6.2"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f46c51c205264b834ceed95c8b195026e700494bc3991aaba3b4ea9e20626d9"
+checksum = "d4ab2528b02b6dbbc3e6ec4b55ccde885647c622a315b7da45081ed2dfe4b813"
 dependencies = [
  "async-trait",
- "bs58 0.4.0",
+ "bs58",
  "bytes",
  "cid 0.10.1",
  "ed25519-dalek",
  "futures",
  "futures-timer",
  "hex-literal",
+ "hickory-resolver",
  "indexmap 2.2.3",
  "libc",
- "mockall 0.12.1",
+ "mockall 0.13.0",
  "multiaddr 0.17.1",
  "multihash 0.17.0",
  "network-interface",
@@ -8426,8 +8436,7 @@ dependencies = [
  "parking_lot 0.12.3",
  "pin-project",
  "prost 0.12.6",
- "prost-build 0.11.9",
- "quinn 0.9.4",
+ "prost-build 0.13.2",
  "rand",
  "rcgen",
  "ring 0.16.20",
@@ -8439,18 +8448,15 @@ dependencies = [
  "snow",
  "socket2 0.5.7",
  "static_assertions",
- "str0m",
  "thiserror",
  "tokio",
  "tokio-stream",
  "tokio-tungstenite",
  "tokio-util",
  "tracing",
- "trust-dns-resolver",
  "uint",
  "unsigned-varint 0.8.0",
  "url",
- "webpki",
  "x25519-dalek",
  "x509-parser 0.16.0",
  "yasna",
@@ -8569,7 +8575,7 @@ dependencies = [
  "const-random",
  "derive-syn-parse",
  "macro_magic_core_macros",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -8580,7 +8586,7 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308"
 dependencies = [
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -8905,15 +8911,14 @@ dependencies = [
 
 [[package]]
 name = "mockall"
-version = "0.12.1"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48"
+checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a"
 dependencies = [
  "cfg-if",
  "downcast",
  "fragile",
- "lazy_static",
- "mockall_derive 0.12.1",
+ "mockall_derive 0.13.0",
  "predicates 3.0.3",
  "predicates-tree",
 ]
@@ -8925,19 +8930,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
 dependencies = [
  "cfg-if",
- "proc-macro2 1.0.82",
+ "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 1.0.109",
 ]
 
 [[package]]
 name = "mockall_derive"
-version = "0.12.1"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2"
+checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020"
 dependencies = [
  "cfg-if",
- "proc-macro2 
1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -9049,7 +9054,7 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "synstructure 0.12.6", @@ -9097,7 +9102,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -9488,7 +9493,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -9643,47 +9648,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2 1.0.82", - "quote 1.0.37", - "syn 2.0.65", -] - [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-src" -version = "300.2.3+3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.102" @@ -9692,7 +9662,6 @@ checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] @@ -9731,7 +9700,7 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -10446,7 +10415,7 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -11549,7 +11518,7 @@ dependencies = [ name = "pallet-revive-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -11793,7 +11762,7 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "sp-runtime", "syn 2.0.65", @@ -12439,7 +12408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -12468,7 +12437,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "syn 1.0.109", "synstructure 0.12.6", ] @@ -12888,7 +12857,7 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -12929,7 +12898,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -13799,7 +13768,7 @@ name = "polkadot-node-metrics" version = "7.0.0" dependencies = [ "assert_cmd", - "bs58 0.5.1", + "bs58", "futures", "futures-timer", "http-body-util", @@ -14259,7 +14228,7 @@ dependencies = [ name = "polkadot-runtime-metrics" version = "7.0.0" dependencies = [ - "bs58 0.5.1", + "bs58", "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", @@ -15346,7 +15315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common 0.9.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15358,7 +15327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7855353a5a783dd5d09e3b915474bddf66575f5a3cf45dec8d1c5e051ba320dc" dependencies = [ "polkavm-common 0.10.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15590,23 +15559,13 @@ dependencies = [ "yansi", ] -[[package]] -name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2 1.0.82", - "syn 1.0.109", -] - [[package]] name = "prettyplease" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "syn 2.0.65", ] @@ -15667,7 +15626,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "version_check", @@ -15679,7 +15638,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "version_check", ] @@ -15696,7 +15655,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15707,7 +15666,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15723,9 +15682,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.86" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -15788,7 +15747,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15845,33 +15804,42 @@ dependencies = [ "prost-derive 0.12.6", ] +[[package]] +name = "prost" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" +dependencies = [ + "bytes", + "prost-derive 0.13.2", +] + [[package]] name = "prost-build" -version = "0.11.9" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ "bytes", - "heck 0.4.1", - "itertools 0.10.5", - "lazy_static", + "heck 0.5.0", + "itertools 0.11.0", "log", "multimap", + "once_cell", "petgraph", - "prettyplease 0.1.25", - "prost 0.11.9", - "prost-types 0.11.9", + "prettyplease", + "prost 0.12.6", + "prost-types 0.12.4", "regex", - "syn 1.0.109", + "syn 2.0.65", "tempfile", - "which", ] [[package]] name = "prost-build" -version = "0.12.4" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" +checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", @@ -15880,9 +15848,9 @@ dependencies = [ "multimap", "once_cell", "petgraph", - "prettyplease 0.2.12", - "prost 0.12.6", - "prost-types 0.12.4", + "prettyplease", + "prost 0.13.2", + "prost-types 0.13.2", "regex", "syn 2.0.65", "tempfile", @@ -15896,7 +15864,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -15909,18 +15877,22 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] [[package]] -name = "prost-types" -version = "0.11.9" +name = "prost-derive" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ - "prost 0.11.9", + "anyhow", + "itertools 0.11.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.65", ] [[package]] @@ -15932,6 +15904,15 @@ dependencies = [ "prost 0.12.6", ] +[[package]] +name = "prost-types" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" +dependencies = [ + "prost 0.13.2", +] + [[package]] name = "psm" version = "0.1.21" @@ -16037,24 +16018,6 @@ dependencies = [ "rand", ] -[[package]] -name = "quinn" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" -dependencies = [ - "bytes", - "pin-project-lite", - "quinn-proto 0.9.6", - "quinn-udp 0.3.2", - "rustc-hash 1.1.0", - "rustls 0.20.9", - "thiserror", - "tokio", - "tracing", - "webpki", -] - [[package]] name = "quinn" version = "0.10.2" @@ -16064,8 +16027,8 @@ dependencies = [ "bytes", "futures-io", "pin-project-lite", - "quinn-proto 0.10.6", - "quinn-udp 0.4.1", + "quinn-proto", + "quinn-udp", "rustc-hash 1.1.0", "rustls 0.21.7", "thiserror", @@ -16073,24 +16036,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "quinn-proto" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" -dependencies = [ - "bytes", - "rand", - "ring 0.16.20", - "rustc-hash 1.1.0", - "rustls 0.20.9", - "slab", - "thiserror", - "tinyvec", - "tracing", - "webpki", -] - [[package]] name = "quinn-proto" version = "0.10.6" @@ -16108,19 +16053,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "quinn-udp" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" -dependencies = [ - "libc", - "quinn-proto 0.9.6", - "socket2 0.4.9", - "tracing", - "windows-sys 0.42.0", -] - [[package]] name = "quinn-udp" version = "0.4.1" @@ -16149,7 +16081,7 @@ version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", ] [[package]] @@ -16348,7 +16280,7 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -16898,7 +16830,7 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "regex", "relative-path", @@ -17392,7 +17324,7 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -18252,7 +18184,7 @@ dependencies = [ name = "sc-network-types" version = "0.10.0" dependencies = [ - "bs58 0.5.1", + "bs58", "ed25519-dalek", "libp2p-identity", "litep2p", @@ -18687,7 +18619,7 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -18801,7 +18733,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -18839,7 +18771,7 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "serde_derive_internals", "syn 1.0.109", @@ -18919,21 +18851,6 @@ dependencies = [ "untrusted 0.7.1", ] -[[package]] -name = "sctp-proto" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6220f78bb44c15f326b0596113305f6101097a18755d53727a575c97e09fb24" -dependencies = [ - "bytes", - "crc", - "fxhash", - "log", - "rand", - "slab", - "thiserror", -] - [[package]] name = "sec1" version = "0.7.3" @@ -19108,9 +19025,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -19135,11 +19052,11 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -19150,7 +19067,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -19166,9 +19083,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "indexmap 2.2.3", "itoa", @@ -19241,7 +19158,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -19259,18 +19176,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", - "sha1-asm", -] - [[package]] name = "sha1" version = "0.10.6" @@ -19282,15 +19187,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha1-asm" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba6947745e7f86be3b8af00b7355857085dbdf8901393c89514510eb61f4e21" -dependencies = [ - "cc", -] - [[package]] name = "sha2" version = "0.9.9" @@ -19420,9 +19316,9 @@ dependencies = [ [[package]] name = "simple-dns" -version = "0.5.7" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694" +checksum = "4c80e565e7dcc4f1ef247e2f395550d4cf7d777746d5988e7e4e3156b71077fc" dependencies = [ "bitflags 2.6.0", ] @@ -19533,7 +19429,7 @@ dependencies = [ "base64 0.21.2", "bip39", "blake2-rfc", - "bs58 0.5.1", + "bs58", "chacha20", "crossbeam-queue", "derive_more", @@ -19987,7 +19883,7 @@ dependencies = [ "httparse", "log", "rand", - "sha-1 0.9.8", + "sha-1", ] [[package]] @@ -20117,7 +20013,7 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", 
] @@ -20379,7 +20275,7 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", - "bs58 0.5.1", + "bs58", "criterion", "dyn-clonable", "ed25519-zebra", @@ -20520,7 +20416,7 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20529,7 +20425,7 @@ dependencies = [ name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20802,7 +20698,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20814,7 +20710,7 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -21075,7 +20971,8 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.82", + "proc-macro-warning 1.0.0", + "proc-macro2 1.0.86", "quote 1.0.37", "sp-version", "syn 2.0.65", @@ -21160,7 +21057,7 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "serde", "serde_json", @@ -21185,7 +21082,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -21380,31 +21277,11 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] -[[package]] -name = "str0m" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6706347e49b13373f7ddfafad47df7583ed52083d6fc8a594eb2c80497ef959d" -dependencies = [ - "combine", - "crc", - "fastrand 2.1.0", - "hmac 0.12.1", - "once_cell", - "openssl", - "openssl-sys", - "sctp-proto", - "serde", - "sha-1 0.10.1", - "thiserror", - "tracing", -] - [[package]] name = "string-interner" version = "0.17.0" @@ -21453,7 +21330,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -21489,7 +21366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", "syn 1.0.109", @@ -21502,7 +21379,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", "syn 2.0.65", @@ -21515,7 +21392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", 
"syn 2.0.65", @@ -21966,7 +21843,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "unicode-ident", ] @@ -21977,7 +21854,7 @@ version = "2.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "unicode-ident", ] @@ -21989,7 +21866,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22000,7 +21877,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "unicode-xid 0.2.4", @@ -22012,7 +21889,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22131,7 +22008,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22295,7 +22172,7 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -22306,7 +22183,7 @@ version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22472,7 +22349,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22690,7 +22567,7 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22732,7 +22609,7 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -23303,7 +23180,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "wasm-bindgen-shared", @@ -23337,7 +23214,7 @@ version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "wasm-bindgen-backend", @@ -23370,7 +23247,7 @@ version = 
"0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", ] @@ -23948,17 +23825,6 @@ dependencies = [ "westend-emulated-chain", ] -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - [[package]] name = "wide" version = "0.7.11" @@ -24053,21 +23919,6 @@ dependencies = [ "windows-targets 0.52.0", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -24445,7 +24296,7 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "staging-xcm", "syn 2.0.65", @@ -24611,7 +24462,7 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -24631,7 +24482,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] diff --git a/Cargo.toml b/Cargo.toml index 6bd401d8e15f..7e48fa14ccc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -822,7 +822,7 @@ kvdb-memorydb = { version = "0.13.0" } kvdb-rocksdb = { version = "0.19.0" } kvdb-shared-tests = { version = "0.11.0" } landlock = { version = "0.3.0" } -lazy_static = { version = "1.4.0" } +lazy_static = { version = "1.5.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } libp2p = { version = "0.52.4" } @@ -832,7 +832,7 @@ linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.6.2" } +litep2p = { version = "0.7.0", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } @@ -1079,7 +1079,7 @@ pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.12.1", default-features = false } proc-macro-crate = { version = "3.0.0" } proc-macro-warning = { version = "1.0.0", default-features = false } -proc-macro2 = { version = "1.0.64" } +proc-macro2 = { version = "1.0.86" } procfs = { version = "0.16.0" } prometheus = { version = "0.13.0", default-features = false } prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } @@ -1185,10 +1185,10 @@ secp256k1 = { version = "0.28.0", default-features = false } secrecy = { version = "0.8.0", default-features = false } seedling-runtime = { path = "cumulus/parachains/runtimes/starters/seedling" 
} separator = { version = "0.4.1" } -serde = { version = "1.0.209", default-features = false } +serde = { version = "1.0.210", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } -serde_json = { version = "1.0.127", default-features = false } +serde_json = { version = "1.0.128", default-features = false } serde_yaml = { version = "0.9" } serial_test = { version = "2.0.0" } sha1 = { version = "0.10.6" } diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 16e9a6bb6361..8f3235279956 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -177,8 +177,8 @@ pub mod pallet { /// This chain's Universal Location. type UniversalLocation: Get<InteriorLocation>; - // The bridges configured Ethereum network with chain id. - type EthereumNetwork: Get<NetworkId>; + // The bridge's configured Ethereum location. + type EthereumLocation: Get<Location>; #[cfg(feature = "runtime-benchmarks")] type Helper: BenchmarkHelper<Self::RuntimeOrigin>; @@ -266,15 +266,15 @@ pub mod pallet { pub type PricingParameters<T: Config> = StorageValue<_, PricingParametersOf<T>, ValueQuery, T::DefaultPricingParameters>; - /// Lookup table for foreign to native token ID + /// Lookup table for foreign token ID to native location relative to Ethereum #[pallet::storage] pub type ForeignToNativeId<T: Config> = - StorageMap<_, Twox64Concat, TokenId, xcm::v4::Location, OptionQuery>; + StorageMap<_, Blake2_128Concat, TokenId, xcm::v4::Location, OptionQuery>; - /// Lookup table for native to foreign token ID + /// Lookup table for native location relative to Ethereum to foreign token ID #[pallet::storage] pub type NativeToForeignId<T: Config> = - StorageMap<_, Twox64Concat, xcm::v4::Location, TokenId, OptionQuery>; + StorageMap<_, Blake2_128Concat, xcm::v4::Location, TokenId, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -732,18 +732,20 @@ pub mod pallet { metadata: AssetMetadata, pays_fee: PaysFee<T>, ) -> Result<(), DispatchError> { - let bridge_location = Location::new(2, [GlobalConsensus(T::EthereumNetwork::get())]); - let mut location = location.clone(); - location - .reanchor(&bridge_location, &T::UniversalLocation::get()) + let ethereum_location = T::EthereumLocation::get(); + // Reanchor to the Ethereum context. + let location = location + .clone() + .reanchored(&ethereum_location, &T::UniversalLocation::get()) .map_err(|_| Error::<T>::LocationConversionFailed)?; - // Record the token id or fail if it has already been created let token_id = TokenIdOf::convert_location(&location) .ok_or(Error::<T>::LocationConversionFailed)?; - ForeignToNativeId::<T>::insert(token_id, location.clone()); - NativeToForeignId::<T>::insert(location.clone(), token_id); + if !ForeignToNativeId::<T>::contains_key(token_id) { + NativeToForeignId::<T>::insert(location.clone(), token_id); + ForeignToNativeId::<T>::insert(token_id, location.clone()); + } let command = Command::RegisterForeignToken { token_id,
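[Editor's note — illustrative aside, not part of the patch.] The `register_token` hunk above hinges on XCM reanchoring: a location given relative to this chain is re-expressed relative to Ethereum before being hashed into a token ID, so the ID is independent of which chain's perspective it was registered from. A minimal, hypothetical sketch of what `reanchored` computes, using the `staging-xcm` v4 types; the context values (BridgeHub at Parachain(1013) under Polkadot, Sepolia chain id 11155111) are borrowed from the mock runtime later in this patch, not requirements:

use xcm::v4::{InteriorLocation, Junction::*, Location, NetworkId};

fn main() {
    // Where this chain sits in the universal consensus hierarchy.
    let universal: InteriorLocation =
        [GlobalConsensus(NetworkId::Polkadot), Parachain(1013)].into();
    // The bridged Ethereum network, expressed relative to this chain.
    let ethereum =
        Location::new(2, [GlobalConsensus(NetworkId::Ethereum { chain_id: 11155111 })]);

    // DOT, relative to this parachain, is simply the parent (the relay chain).
    let dot = Location::parent();
    // Relative to Ethereum it becomes: one hop up to the universal root, then
    // down into the Polkadot consensus.
    let reanchored = dot.reanchored(&ethereum, &universal).unwrap();
    assert_eq!(reanchored, Location::new(1, [GlobalConsensus(NetworkId::Polkadot)]));
}

This is also why the storage maps above are keyed by the reanchored location: the same asset always maps to the same entry regardless of the registering chain's frame of reference.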
diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index d8c905449d9a..47b089866a53 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -166,11 +166,12 @@ impl snowbridge_pallet_outbound_queue::Config for Test { parameter_types! { pub const SS58Prefix: u8 = 42; pub const AnyNetwork: Option<NetworkId> = None; - pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::Kusama); + pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::Polkadot); pub const RelayLocation: Location = Location::parent(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(1013)].into(); pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumDestination: Location = Location::new(2,[GlobalConsensus(EthereumNetwork::get())]); } pub const DOT: u128 = 10_000_000_000; @@ -209,7 +210,7 @@ impl crate::Config for Test { type WeightInfo = (); type InboundDeliveryCost = InboundDeliveryCost; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumDestination; #[cfg(feature = "runtime-benchmarks")] type Helper = (); } diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 0745f435ba05..b3699ec2f24d 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -633,10 +633,10 @@ fn no_genesis_build_is_uninitialized() { } #[test] -fn register_token_with_root_yeilds_success() { +fn register_token_with_signed_yields_bad_origin() { new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(1, []); + let origin = RuntimeOrigin::signed([14; 32].into()); + let location = Location::new(1, [Parachain(2000)]); let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); let asset_metadata = AssetMetadata { decimals: 10, @@ -644,164 +644,135 @@ fn register_token_with_root_yeilds_success() { symbol: b"DOT".to_vec().try_into().unwrap(), }; - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::<Test>::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); + assert_noop!( + EthereumSystem::register_token(origin, versioned_location, asset_metadata), + BadOrigin + ); }); } -#[test] -fn register_token_with_relative_address_reanchors_to_ethereum_and_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(1, []); - let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::<Test>::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); +pub struct TokenInfo { + pub location: Location, + pub metadata: AssetMetadata, + pub foreign_token_id: TokenId, } #[test] -fn register_token_with_complex_location_simplifies_and_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(2,
[GlobalConsensus(Kusama)]); - let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::<Test>::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, +fn register_all_tokens_succeeds() { + let assets = vec![ + // DOT + TokenInfo { + location: Location::parent(), + metadata: AssetMetadata { + decimals: 10, + name: b"DOT".to_vec().try_into().unwrap(), + symbol: b"DOT".to_vec().try_into().unwrap(), }, - )); - }); -} - -#[test] -fn register_token_with_doubled_bridged_polkadot_location_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(2, [GlobalConsensus(Rococo)]); - let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("62e8f33b7fb0e7e2d2276564061a2f3c7bcb612e733b8bf5733ea16cee0ecba6").into(); - let expected_location = Location::new(1, [GlobalConsensus(Rococo)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::<Test>::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, + foreign_token_id: hex!( + "4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2" + ) + .into(), + }, + // GLMR (Some Polkadot parachain currency) + TokenInfo { + location: Location::new(1, [Parachain(2004)]), + metadata: AssetMetadata { + decimals: 12, + name: b"GLMR".to_vec().try_into().unwrap(), + symbol: b"GLMR".to_vec().try_into().unwrap(), }, - )); - }); -} - -#[test] -fn register_token_with_ethereum_address_reanchors_to_relative_and_fails() { + foreign_token_id: hex!( + "34c08fc90409b6924f0e8eabb7c2aaa0c749e23e31adad9f6d217b577737fafb" + ) + .into(), + }, + // USDT + TokenInfo { + location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), + metadata: AssetMetadata { + decimals: 6, + name: b"USDT".to_vec().try_into().unwrap(), + symbol: b"USDT".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "14b0579be12d7d7f9971f1d4b41f0e88384b9b74799b0150d4aa6cd01afb4444" + ) + .into(), + }, + // KSM + TokenInfo { + location: Location::new(2, [GlobalConsensus(Kusama)]), + metadata: AssetMetadata { + decimals: 12, + name: b"KSM".to_vec().try_into().unwrap(), + symbol: b"KSM".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852" + ) + .into(), + }, + // KAR (Some Kusama parachain currency) + TokenInfo { + location: Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + metadata: AssetMetadata { + decimals: 12, + name: b"KAR".to_vec().try_into().unwrap(), + symbol: b"KAR".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "d3e39ad6ea4cee68c9741181e94098823b2ea34a467577d0875c036f0fce5be0" + ) + .into(), + }, + ]; + for asset in assets.iter() {
+ new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let versioned_location: Box<VersionedLocation> = + Box::new(asset.location.clone().into()); + let asset_metadata = asset.metadata.clone(); + + assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); + + let location = asset + .location + .clone() + .reanchored(&EthereumDestination::get(), &UniversalLocation::get()) + .unwrap(); + + System::assert_last_event(RuntimeEvent::EthereumSystem(Event::<Test>::RegisterToken { + location: location.into(), + foreign_token_id: asset.foreign_token_id, + })); + }); + } +} + +#[test] +fn register_ethereum_native_token_fails() { new_test_ext(true).execute_with(|| { let origin = RuntimeOrigin::root(); - let location = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]); - let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_noop!( - EthereumSystem::register_token(origin, versioned_location, asset_metadata), - Error::<Test>::LocationConversionFailed + let location = Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: 11155111 }), + AccountKey20 { + network: None, + key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"), + }, + ], ); - }); -} - -#[test] -fn register_token_with_double_bridged_ethereum_address_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - const NETWORK: NetworkId = Ethereum { chain_id: 1 }; - let location = Location::new(2, [GlobalConsensus(NETWORK)]); - let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id: H256 = - hex!("37fd94739deb1c2a8929b45a4f70ffcb52de8b54791609ee13ee0a2b33730269").into(); - let expected_location = Location::new(1, [GlobalConsensus(NETWORK)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::<Test>::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); -} - -#[test] -fn register_token_with_signed_yeilds_bad_origin() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::signed([14; 32].into()); - let location = Location::new(1, [Parachain(2000)]); let versioned_location: Box<VersionedLocation> = Box::new(location.clone().into()); let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), + decimals: 18, + name: b"WETH".to_vec().try_into().unwrap(), + symbol: b"WETH".to_vec().try_into().unwrap(), }; assert_noop!( EthereumSystem::register_token(origin, versioned_location, asset_metadata), - BadOrigin + Error::<Test>::LocationConversionFailed ); }); }
diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs index c126618bba35..fe8db0e35283 100644 --- a/bridges/snowbridge/primitives/core/src/location.rs +++ b/bridges/snowbridge/primitives/core/src/location.rs @@ -29,7 +29,8 @@ pub type AgentIdOf = pub type TokenId = H256; -/// Convert a token location to a stable ID that can be used on the Ethereum side +/// Convert a token location (relative to Ethereum) to a stable ID that can be used on the Ethereum +/// side pub type TokenIdOf = HashedDescription< TokenId, DescribeGlobalPrefix<(DescribeTerminus, DescribeFamily<DescribeAllTerminal>)>,
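[Editor's note — illustrative aside, not part of the patch.] `TokenIdOf` exists because Ethereum contracts cannot interpret XCM locations; they only ever see an opaque 32-byte handle. The real converter hashes a canonical byte description built by the `DescribeGlobalPrefix`/`DescribeFamily` converters; the sketch below substitutes a hypothetical stand-in for those descriptor bytes and only conveys the shape of the scheme — the same (reanchored) location always yields the same `H256`:

use codec::Encode;
use sp_core::H256;
use sp_runtime::traits::{BlakeTwo256, Hash};
use xcm::v4::Location;

/// Hypothetical stand-in for `TokenIdOf::convert_location`: derive a stable,
/// fixed-width ID from a location already reanchored to the Ethereum context.
fn pseudo_token_id(location: &Location) -> H256 {
    // Any deterministic, canonical encoding works; the real converter hashes a
    // structured description of the location rather than its raw SCALE bytes.
    let description = (b"pseudo-global-prefix", location).encode();
    BlakeTwo256::hash(&description)
}

On the Solidity side the resulting value can be treated as a plain `bytes32` key, which is exactly what the `RegisterForeignToken` command carries across the bridge.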
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 3ff6852dd001..c4a76bcadaac 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -102,14 +102,14 @@ pub struct MessageToXcm< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > where CreateAssetCall: Get<CallIndex>, CreateAssetDeposit: Get<u128>, Balance: BalanceT, ConvertAssetId: MaybeEquivalence<TokenId, Location>, - UniversalLocation: Get<InteriorLocation>, + EthereumUniversalLocation: Get<InteriorLocation>, GlobalAssetHubLocation: Get<Location>, { _phantom: PhantomData<( @@ -119,7 +119,7 @@ pub struct MessageToXcm< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, )>, } @@ -156,7 +156,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > ConvertMessage for MessageToXcm< @@ -166,7 +166,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > where @@ -176,7 +176,7 @@ where Balance: BalanceT + From<u128>, AccountId: Into<[u8; 32]>, ConvertAssetId: MaybeEquivalence<TokenId, Location>, - UniversalLocation: Get<InteriorLocation>, + EthereumUniversalLocation: Get<InteriorLocation>, GlobalAssetHubLocation: Get<Location>, { type Balance = Balance; @@ -215,7 +215,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > MessageToXcm< @@ -225,7 +225,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > where @@ -235,7 +235,7 @@ where Balance: BalanceT + From<u128>, AccountId: Into<[u8; 32]>, ConvertAssetId: MaybeEquivalence<TokenId, Location>, - UniversalLocation: Get<InteriorLocation>, + EthereumUniversalLocation: Get<InteriorLocation>, GlobalAssetHubLocation: Get<Location>, { fn convert_register_token( @@ -398,90 +398,43 @@ where let network = Ethereum { chain_id }; let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - let (dest_para_id, beneficiary, dest_para_fee) = match destination { + let beneficiary = match destination { // Final destination is a 32-byte account on AssetHub Destination::AccountId32 { id } => - (None, Location::new(0, [AccountId32 { network: None, id }]), 0), - // Final destination is a 32-byte account on a sibling of AssetHub - Destination::ForeignAccountId32 { para_id, id, fee } => - (Some(para_id), Location::new(0, [AccountId32 { network: None, id }]), fee), - // Final destination is a 20-byte account on a sibling of AssetHub - Destination::ForeignAccountId20 { para_id, id, fee } => - (Some(para_id), Location::new(0, [AccountKey20 { network: None, key: id }]), fee), - }; + Ok(Location::new(0, [AccountId32 { network: None, id }])), + _ => Err(ConvertMessageError::InvalidDestination), + }?; - let total_fees = asset_hub_fee.saturating_add(dest_para_fee); - let total_fee_asset: Asset = (Location::parent(), total_fees).into(); + let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); let asset_loc = ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; let mut reanchored_asset_loc = asset_loc.clone(); reanchored_asset_loc - .reanchor(&GlobalAssetHubLocation::get(), &UniversalLocation::get()) + .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) .map_err(|_| ConvertMessageError::CannotReanchor)?; let asset: Asset = (reanchored_asset_loc, amount).into(); let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - let mut instructions = vec![ + let instructions = vec![ ReceiveTeleportedAsset(total_fee_asset.clone().into()), BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), UniversalOrigin(GlobalConsensus(network)), WithdrawAsset(asset.clone().into()), + // Deposit both the asset and the fees to the beneficiary so the fees will not get + // trapped. A further benefit: any fees left over above the ED on AssetHub can be + // used to create the beneficiary account in case it does not exist. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + SetTopic(message_id.into()), ]; - let bridge_location = Location::new(2, GlobalConsensus(network)); - - match dest_para_id { - Some(dest_para_id) => { - let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); - - instructions.extend(vec![ - // `SetFeesMode` to pay transport fee from bridge sovereign, which depends on - // unspent AH fees deposited to the bridge sovereign, - // more context and analysis in https://github.com/paritytech/polkadot-sdk/pull/5546#discussion_r1744682864 - SetFeesMode { jit_withdraw: true }, - // `SetAppendix` ensures that `fees` are not trapped in any case - SetAppendix(Xcm(vec![DepositAsset { - assets: AllCounted(2).into(), - beneficiary: bridge_location, - }])), - // Perform a reserve withdraw to send to destination chain. Leave half of the - // asset_hub_fee for the delivery cost - InitiateReserveWithdraw { - assets: Definite( - vec![asset.clone(), (Location::parent(), dest_para_fee).into()].into(), - ), - reserve: Location::new(1, [Parachain(dest_para_id)]), - xcm: vec![ - // Buy execution on target. - BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, - // Deposit asset and fee to beneficiary. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - // Forward message id to destination parachain. - SetTopic(message_id.into()), - ] - .into(), - }, - ]); - }, - None => { - instructions.extend(vec![ - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - ]); - }, - } - - // Forward message id to Asset Hub.
- instructions.push(SetTopic(message_id.into())); - - Ok((instructions.into(), total_fees.into())) + // The returned `asset_hub_fee` is burned on this chain when sending `instructions` to run + // on AH (which also teleports the fees). + Ok((instructions.into(), asset_hub_fee.into())) } }
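[Editor's note — illustrative aside, not part of the patch.] With the `ForeignAccountId32`/`ForeignAccountId20` branches removed, `convert_send_token` only ever targets AssetHub, and the program it emits is short enough to restate. A trimmed-down, hypothetical sketch (the function name and parameters are invented) of the instruction sequence, and of why `AllCounted(2)` is the right filter: the holding register contains at most two fungibles here — the bridged token and whatever remains of the teleported fee — so depositing both leaves nothing behind to be trapped:

use xcm::v4::prelude::*;

fn send_token_program(beneficiary: Location, asset: Asset, fee: Asset, topic: [u8; 32]) -> Xcm<()> {
    Xcm(vec![
        // The fee was burned on the sending chain and is minted here by teleport.
        ReceiveTeleportedAsset(fee.clone().into()),
        BuyExecution { fees: fee, weight_limit: Unlimited },
        WithdrawAsset(asset.into()),
        // At most two asset kinds remain in holding: the token and the fee
        // remainder. Deposit both so no funds are trapped on AssetHub.
        DepositAsset { assets: Wild(AllCounted(2)), beneficiary },
        SetTopic(topic),
    ])
}

(The real program also opens with `DescendOrigin`/`UniversalOrigin` to attribute the message to the bridge, as the hunk above shows.)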
[GlobalConsensus(Polkadot), Parachain(1000)].into();
+	let global_ah = Location::new(1, ah_context.clone());
+	let bh_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1002)].into();
+	let assets = vec![
+		// DOT
+		Location::new(1, []),
+		// GLMR (Some Polkadot parachain currency)
+		Location::new(1, [Parachain(2004)]),
+		// AH asset
+		Location::new(0, [PalletInstance(50), GeneralIndex(42)]),
+		// KSM
+		Location::new(2, [GlobalConsensus(Kusama)]),
+		// KAR (Some Kusama parachain currency)
+		Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]),
+	];
+	for asset in assets.iter() {
+		// reanchor logic in pallet_xcm on AH
+		let mut reanchored_asset = asset.clone();
+		assert_ok!(reanchored_asset.reanchor(&ethereum, &ah_context));
+		// reanchor back to original location in context of Ethereum
+		let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone();
+		assert_ok!(reanchored_asset_with_ethereum_context.reanchor(&global_ah, &ethereum_context));
+		assert_eq!(reanchored_asset_with_ethereum_context, asset.clone());
+		// reanchor back to original location in context of BH
+		let mut reanchored_asset_with_bh_context = reanchored_asset.clone();
+		assert_ok!(reanchored_asset_with_bh_context.reanchor(&global_ah, &bh_context));
+		assert_eq!(reanchored_asset_with_bh_context, asset.clone());
+	}
 }
diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs
index 564d7b58c94d..b08ad75c430d 100644
--- a/cumulus/client/cli/src/lib.rs
+++ b/cumulus/client/cli/src/lib.rs
@@ -96,7 +96,7 @@ impl PurgeChainCmd {
 			Some('y') | Some('Y') => {},
 			_ => {
 				println!("Aborted");
-				return Ok(())
+				return Ok(());
 			},
 		}
 	}
@@ -432,19 +432,19 @@ impl sc_cli::CliConfiguration for NormalizedRunCmd {
 	}
 
 	fn rpc_max_request_size(&self) -> sc_cli::Result<u32> {
-		Ok(self.base.rpc_max_request_size)
+		self.base.rpc_max_request_size()
 	}
 
 	fn rpc_max_response_size(&self) -> sc_cli::Result<u32> {
-		Ok(self.base.rpc_max_response_size)
+		self.base.rpc_max_response_size()
 	}
 
 	fn rpc_max_subscriptions_per_connection(&self) -> sc_cli::Result<u32> {
-		Ok(self.base.rpc_max_subscriptions_per_connection)
+		self.base.rpc_max_subscriptions_per_connection()
 	}
 
 	fn rpc_buffer_capacity_per_connection(&self) -> sc_cli::Result<u32> {
-		Ok(self.base.rpc_message_buffer_capacity_per_connection)
+		Ok(self.base.rpc_params.rpc_message_buffer_capacity_per_connection)
 	}
 
 	fn rpc_batch_config(&self) -> sc_cli::Result<RpcBatchRequestConfig> {
diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs
index cde73c4c5180..81c2d9f24f28 100644
--- a/cumulus/client/network/src/tests.rs
+++ b/cumulus/client/network/src/tests.rs
@@ -323,7 +323,7 @@ impl RelayChainInterface for DummyRelayChainInterface {
 			impl_version: 0,
 			apis: Cow::Owned(apis),
 			transaction_version: 5,
-			state_version: 1,
+			system_version: 1,
 		})
 	}
 }
diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs
index 6f274ed18b6b..f300bdc5f2ba 100644
--- a/cumulus/client/pov-recovery/src/tests.rs
+++ b/cumulus/client/pov-recovery/src/tests.rs
@@ -329,7 +329,7 @@ impl RelayChainInterface for Relaychain {
 			impl_version: 0,
 			apis: Cow::Owned(apis),
 			transaction_version: 5,
-			state_version: 1,
+			system_version: 1,
 		})
 	}
 
diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs
index b4d118aadf04..247de3a29b69 100644
--- a/cumulus/pallets/parachain-system/src/mock.rs
+++ b/cumulus/pallets/parachain-system/src/mock.rs
@@ -64,7 +64,7 @@ parameter_types!
{ impl_version: 1, apis: sp_version::create_apis_vec!([]), transaction_version: 1, - state_version: 1, + system_version: 1, }; pub const ParachainId: ParaId = ParaId::new(200); pub const ReservedXcmpWeight: Weight = Weight::zero(); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index 73c5ef863309..a9cfcda0dacd 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -86,7 +86,7 @@ pub fn genesis() -> Storage { ( PenpalBTeleportableAssetLocation::get(), PenpalBSiblingSovereignAccount::get(), - true, + false, ED, ), ], diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 98f640a46800..5e0462d14882 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -52,10 +52,7 @@ mod imports { BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendXcmConfig, }, penpal_emulated_chain::{ - penpal_runtime::xcm_config::{ - LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, - }, + penpal_runtime::xcm_config::UniversalLocation as PenpalUniversalLocation, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, westend_emulated_chain::{ @@ -69,9 +66,8 @@ mod imports { BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB, - PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender, - WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, - WestendRelaySender as WestendSender, + PenpalBParaSender as PenpalBSender, WestendRelay as Westend, + WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index e1b50fbe39f9..ee877d936e75 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -16,15 +16,10 @@ use crate::imports::*; use asset_hub_westend_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; use bridge_hub_westend_runtime::EthereumInboundQueue; use codec::{Decode, Encode}; -use emulated_integration_tests_common::{ - PenpalBSiblingSovereignAccount, RESERVABLE_ASSET_ID, TELEPORTABLE_ASSET_ID, -}; +use emulated_integration_tests_common::RESERVABLE_ASSET_ID; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; -use rococo_westend_system_emulated_network::{ - asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner, - penpal_emulated_chain::penpal_runtime::xcm_config::RelayLocation, -}; +use 
rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner; use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf}; use snowbridge_router_primitives::inbound::{ Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, @@ -599,346 +594,3 @@ fn transfer_ah_token() { ); }); } - -#[test] -fn transfer_penpal_native_token() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); - BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); - - let ethereum_destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); - let ethereum_sovereign: AccountId = - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(ðereum_destination) - .unwrap() - .into(); - AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); - - let penpal_asset_location = Location::new(1, [Parachain(PenpalB::para_id().into())]); - - let penpal_asset_location_after_reanchored = - Location::new(1, [GlobalConsensus(Westend), Parachain(PenpalB::para_id().into())]); - - let token_id = TokenIdOf::convert_location(&penpal_asset_location_after_reanchored).unwrap(); - - // Register token on AH - AssetHubWestend::force_create_foreign_asset( - penpal_asset_location.clone().try_into().unwrap(), - PenpalBSiblingSovereignAccount::get().clone(), - false, - ASSET_MIN_BALANCE, - vec![], - ); - - // Fund sender on AH - AssetHubWestend::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalBSiblingSovereignAccount::get()), - penpal_asset_location.clone().try_into().unwrap(), - AssetHubWestendSender::get(), - TOKEN_AMOUNT, - ); - - // Fund sov of AH on penpal - let ah_sovereign = - PenpalB::sovereign_account_id_of(PenpalB::sibling_location_of(AssetHubWestend::para_id())); - PenpalB::fund_accounts(vec![(ah_sovereign.clone(), INITIAL_FUND)]); - PenpalB::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - RelayLocation::get(), - ah_sovereign.clone(), - INITIAL_FUND, - ); - - // Create token - BridgeHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - - assert_ok!(::EthereumSystem::register_token( - RuntimeOrigin::root(), - Box::new(VersionedLocation::V4(penpal_asset_location.clone())), - AssetMetadata { - name: "penpal_asset".as_bytes().to_vec().try_into().unwrap(), - symbol: "penpal_asset".as_bytes().to_vec().try_into().unwrap(), - decimals: 12, - }, - )); - }); - - // Send token to Ethereum - AssetHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - type RuntimeEvent = ::RuntimeEvent; - - let assets = vec![Asset { - id: penpal_asset_location.clone().into(), - fun: Fungible(TOKEN_AMOUNT / 10), - }]; - - let beneficiary = VersionedLocation::V4(Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - )); - - assert_ok!(::PolkadotXcm::limited_reserve_transfer_assets( - RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(VersionedLocation::from(ethereum_destination)), - Box::new(beneficiary), - Box::new(VersionedAssets::from(Assets::from(assets))), - 0, - Unlimited, - )); - - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Transferred{..}) => {},] - ); - - let events = AssetHubWestend::events(); - // Check that the native asset transferred to some reserved account(sovereign of Ethereum) - 
assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::ForeignAssets(pallet_assets::Event::Transferred { amount, to, ..}) - if *amount == TOKEN_AMOUNT/10 && *to == ethereum_sovereign - )), - "native token reserved to Ethereum sovereign account." - ); - }); - - // Send token back from Ethereum - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued{..}) => {},] - ); - - let message = VersionedMessage::V1(MessageV1 { - chain_id: CHAIN_ID, - command: Command::SendNativeToken { - token_id, - destination: Destination::ForeignAccountId32 { - para_id: PenpalB::para_id().into(), - id: PenpalBReceiver::get().into(), - fee: XCM_FEE, - }, - amount: TOKEN_AMOUNT / 10, - fee: XCM_FEE, - }, - }); - - // Convert the message to XCM - let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); - // Send the XCM - let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); - - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},] - ); - }); - - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that token burnt from some reserved account - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { .. }) => {},] - ); - }); - - PenpalB::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that token issued to beneficial - assert_expected_events!( - PenpalB, - vec![RuntimeEvent::Balances(pallet_balances::Event::Minted { .. }) => {},] - ); - - let events = PenpalB::events(); - - // Check that token issued to destination account - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { amount, who, ..}) - if *amount == TOKEN_AMOUNT/10 && *who == PenpalBReceiver::get() - )), - "Token minted to beneficiary." 
- ); - }) -} - -#[test] -fn transfer_penpal_asset() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); - BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); - - let ethereum_destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); - let ethereum_sovereign: AccountId = - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(ðereum_destination) - .unwrap() - .into(); - - AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); - - let penpal_asset_location = Location::new(1, [Parachain(PenpalB::para_id().into())]) - .appended_with(PenpalLocalTeleportableToAssetHub::get()) - .unwrap(); - - let penpal_asset_location_after_reanchored = - Location::new(1, [GlobalConsensus(Westend), Parachain(PenpalB::para_id().into())]) - .appended_with(PenpalLocalTeleportableToAssetHub::get()) - .unwrap(); - - let token_id = TokenIdOf::convert_location(&penpal_asset_location_after_reanchored).unwrap(); - - // Fund sender on AH - AssetHubWestend::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalBSiblingSovereignAccount::get()), - penpal_asset_location.clone().try_into().unwrap(), - AssetHubWestendSender::get(), - TOKEN_AMOUNT, - ); - - // Fund sov of AH on penpal - let ah_sovereign = - PenpalB::sovereign_account_id_of(PenpalB::sibling_location_of(AssetHubWestend::para_id())); - PenpalB::fund_accounts(vec![(ah_sovereign.clone(), INITIAL_FUND)]); - PenpalB::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - RelayLocation::get(), - ah_sovereign.clone(), - INITIAL_FUND, - ); - PenpalB::mint_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - TELEPORTABLE_ASSET_ID, - ah_sovereign.clone(), - TOKEN_AMOUNT, - ); - - // create token - BridgeHubWestend::execute_with(|| { - type Runtime = ::Runtime; - - snowbridge_pallet_system::ForeignToNativeId::::insert( - token_id, - penpal_asset_location_after_reanchored.clone(), - ); - }); - - // Send token to Ethereum - AssetHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - type RuntimeEvent = ::RuntimeEvent; - - let assets = vec![Asset { - id: penpal_asset_location.clone().into(), - fun: Fungible(TOKEN_AMOUNT / 10), - }]; - - let beneficiary = VersionedLocation::V4(Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - )); - - assert_ok!(::PolkadotXcm::limited_reserve_transfer_assets( - RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(VersionedLocation::from(ethereum_destination)), - Box::new(beneficiary), - Box::new(VersionedAssets::from(Assets::from(assets))), - 0, - Unlimited, - )); - - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Transferred{..}) => {},] - ); - - let events = AssetHubWestend::events(); - // Check that the native asset transferred to some reserved account(sovereign of Ethereum) - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::ForeignAssets(pallet_assets::Event::Transferred { amount, to, ..}) - if *amount == TOKEN_AMOUNT/10 && *to == ethereum_sovereign - )), - "native token reserved to Ethereum sovereign account." 
- ); - }); - - // Send token back from Ethereum - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued{..}) => {},] - ); - - let message = VersionedMessage::V1(MessageV1 { - chain_id: CHAIN_ID, - command: Command::SendNativeToken { - token_id, - destination: Destination::ForeignAccountId32 { - para_id: PenpalB::para_id().into(), - id: PenpalBReceiver::get().into(), - fee: XCM_FEE, - }, - amount: TOKEN_AMOUNT / 10, - fee: XCM_FEE, - }, - }); - - // Convert the message to XCM - let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); - // Send the XCM - let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); - - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},] - ); - }); - - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that token burnt from some reserved account - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { .. }) => {},] - ); - }); - - PenpalB::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that token issued to beneficial - assert_expected_events!( - PenpalB, - vec![RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {},] - ); - - let events = PenpalB::events(); - - // Check that token issued to destination account - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::Assets(pallet_assets::Event::Issued { amount, owner, ..}) - if *amount == TOKEN_AMOUNT/10 && *owner == PenpalBReceiver::get() - )), - "Token minted to beneficiary." - ); - }) -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 2f3fb6b68c4a..a4a2554b7afc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 5d988f89d25c..c050f169fc80 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
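The `state_version` → `system_version` rename above repeats mechanically across every runtime in this patch. A minimal sketch of what the renamed field looks like in a `RuntimeVersion` constant (the values are illustrative placeholders, not taken from any runtime here; per `prdoc/pr_4257` later in this patch, serde also keeps accepting the old `stateVersion` name as an alias):

```rust
// Sketch only: an illustrative `RuntimeVersion` after the rename.
use sp_runtime::create_runtime_str;
use sp_version::RuntimeVersion;

pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("example"),
	impl_name: create_runtime_str!("example"),
	authoring_version: 1,
	spec_version: 1,
	impl_version: 0,
	apis: sp_version::create_apis_vec!([]),
	transaction_version: 1,
	// Renamed from `state_version`. Per prdoc/pr_4257, a value of `2` would
	// additionally derive the extrinsics root with `StateVersion::V1`.
	system_version: 1,
};
```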
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index 1e096f19ef80..d9a3869bd6e1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -29,7 +29,7 @@ use sp_core::H160; use testnet_parachains_constants::rococo::{ currency::*, fee::WeightToFee, - snowbridge::{EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, + snowbridge::{EthereumLocation, EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, }; use crate::xcm_config::RelayNetwork; @@ -41,7 +41,7 @@ use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, }; -use xcm::prelude::{GlobalConsensus, Location, Parachain}; +use xcm::prelude::{GlobalConsensus, InteriorLocation, Location, Parachain}; /// Exports message to the Ethereum Gateway contract. pub type SnowbridgeExporter = EthereumBlobExporter< @@ -67,6 +67,7 @@ parameter_types! { multiplier: FixedU128::from_rational(1, 1), }; pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(rococo_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } impl snowbridge_pallet_inbound_queue::Config for Runtime { @@ -88,7 +89,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { AccountId, Balance, EthereumSystem, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHub, >; type WeightToFee = WeightToFee; @@ -189,7 +190,7 @@ impl snowbridge_pallet_system::Config for Runtime { type DefaultPricingParameters = Parameters; type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumLocation; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index a29ef7945ce8..9dec806475c5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -238,7 +238,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index fee9f9a2f610..1bd425ab4075 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -30,7 +30,7 @@ use sp_core::H160; use testnet_parachains_constants::westend::{ currency::*, fee::WeightToFee, - snowbridge::{EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, + snowbridge::{EthereumLocation, EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, }; use crate::xcm_config::RelayNetwork; @@ -42,7 +42,7 @@ use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, }; -use xcm::prelude::{GlobalConsensus, Location, Parachain}; +use xcm::prelude::{GlobalConsensus, InteriorLocation, Location, Parachain}; pub const SLOTS_PER_EPOCH: u32 = snowbridge_pallet_ethereum_client::config::SLOTS_PER_EPOCH as u32; @@ -70,8 +70,8 @@ parameter_types! { multiplier: FixedU128::from_rational(1, 1), }; pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(westend_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } - impl snowbridge_pallet_inbound_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Verifier = snowbridge_pallet_ethereum_client::Pallet; @@ -91,7 +91,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { AccountId, Balance, EthereumSystem, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHub, >; type WeightToFee = WeightToFee; @@ -189,7 +189,7 @@ impl snowbridge_pallet_system::Config for Runtime { type DefaultPricingParameters = Parameters; type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumLocation; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 5717db456a77..ddd40dbf60e0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -213,7 +213,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index dea2eb03db3a..f22feb70382a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -129,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
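Both bridge-hub runtimes now pass `EthereumUniversalLocation` (rather than Bridge Hub's own `UniversalLocation`) to the inbound-queue converter. The new `test_reanchor_all_assets` test earlier in this patch illustrates the underlying mechanics: an asset location round-trips cleanly only when it is reanchored against the right contexts. A minimal standalone sketch of that round trip for DOT, assuming the xcm v4 `Location::reanchor` API (chain and para ids are illustrative, mirroring the test):

```rust
// Sketch only: reanchor DOT from Asset Hub's view into Ethereum's context
// and back, as the new test does for a list of assets.
use xcm::prelude::*;

fn main() {
	let ethereum_context: InteriorLocation =
		[GlobalConsensus(Ethereum { chain_id: 1 })].into();
	let ethereum = Location::new(2, ethereum_context.clone());
	let ah_context: InteriorLocation =
		[GlobalConsensus(Polkadot), Parachain(1000)].into();
	let global_ah = Location::new(1, ah_context.clone());

	// DOT as seen from Asset Hub.
	let mut dot = Location::new(1, []);
	// As seen from Ethereum it becomes `../Polkadot`.
	dot.reanchor(&ethereum, &ah_context).expect("reanchor");
	assert_eq!(dot, Location::new(1, [GlobalConsensus(Polkadot)]));

	// Reanchoring back against Asset Hub's context restores the original.
	dot.reanchor(&global_ah, &ethereum_context).expect("reanchor");
	assert_eq!(dot, Location::new(1, []));
}
```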
diff --git a/cumulus/parachains/runtimes/constants/src/rococo.rs b/cumulus/parachains/runtimes/constants/src/rococo.rs index 56f4868371c1..be4b5c9711cc 100644 --- a/cumulus/parachains/runtimes/constants/src/rococo.rs +++ b/cumulus/parachains/runtimes/constants/src/rococo.rs @@ -148,7 +148,7 @@ pub mod time { pub mod snowbridge { use frame_support::parameter_types; - use xcm::opaque::lts::NetworkId; + use xcm::prelude::{Location, NetworkId}; /// The pallet index of the Ethereum inbound queue pallet in the bridge hub runtime. pub const INBOUND_QUEUE_PALLET_INDEX: u8 = 80; @@ -159,6 +159,7 @@ pub mod snowbridge { /// /// pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumLocation: Location = Location::new(2, EthereumNetwork::get()); } } diff --git a/cumulus/parachains/runtimes/constants/src/westend.rs b/cumulus/parachains/runtimes/constants/src/westend.rs index fec66cec2eb6..47ba8f7e97ae 100644 --- a/cumulus/parachains/runtimes/constants/src/westend.rs +++ b/cumulus/parachains/runtimes/constants/src/westend.rs @@ -171,7 +171,7 @@ pub mod time { pub mod snowbridge { use frame_support::parameter_types; - use xcm::opaque::lts::NetworkId; + use xcm::prelude::{Location, NetworkId}; /// The pallet index of the Ethereum inbound queue pallet in the bridge hub runtime. pub const INBOUND_QUEUE_PALLET_INDEX: u8 = 80; @@ -182,5 +182,6 @@ pub mod snowbridge { /// /// pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumLocation: Location = Location::new(2, EthereumNetwork::get()); } } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index bf173fb618af..55770515d73f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -148,7 +148,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 25324bf17764..aea2bf232cbc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index a3051e4bf271..218afaab924d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 942e11e0b257..abf13a596a7d 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -106,7 +106,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 77bfb99669c6..cb9177d0c23b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -138,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 3343d2be749d..9813c5cb6acc 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -138,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 1fe72604d373..f126ee861fa7 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -81,7 +81,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 0, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 1dfbe2b6c41c..fac2d1312c0f 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -89,7 +89,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 0, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 7d19c0ed8d85..266894c3e4ed 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -249,7 +249,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// This determines the average expected block time that we are targeting. 
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index dff7046f1972..34646f84aedb 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 6,
-	state_version: 0,
+	system_version: 0,
 };
 
 pub const MILLISECS_PER_BLOCK: u64 = 6000;
diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
index a557e881e26b..2529297691e8 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
@@ -183,13 +183,15 @@ where
 		if consumed_weight > benchmarked_weight {
 			log::error!(
 				target: LOG_TARGET,
-				"Benchmarked storage weight smaller than consumed storage weight. benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}"
+				"Benchmarked storage weight smaller than consumed storage weight. extrinsic: {} benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}",
+				frame_system::Pallet::<T>::extrinsic_index().unwrap_or(0)
 			);
 			current.accrue(Weight::from_parts(0, storage_size_diff), info.class)
 		} else {
 			log::trace!(
 				target: LOG_TARGET,
-				"Reclaiming storage weight. benchmarked: {benchmarked_weight}, consumed: {consumed_weight} unspent: {unspent}"
+				"Reclaiming storage weight. extrinsic: {} benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}",
+				frame_system::Pallet::<T>::extrinsic_index().unwrap_or(0)
 			);
 			current.reduce(Weight::from_parts(0, storage_size_diff), info.class)
 		}
diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs
index 274f16ab630d..ba0a3487011a 100644
--- a/cumulus/test/runtime/src/lib.rs
+++ b/cumulus/test/runtime/src/lib.rs
@@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 1,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 #[cfg(feature = "increment-spec-version")]
@@ -146,7 +146,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 1,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES;
diff --git a/docs/contributor/commands-readme.md b/docs/contributor/commands-readme.md
index 2bb9bd7e7d58..861c3ac784d5 100644
--- a/docs/contributor/commands-readme.md
+++ b/docs/contributor/commands-readme.md
@@ -9,13 +9,14 @@ Note: it works only for members of the `paritytech` organization.
 
 `/cmd --help` to see the usage of a specific command
 
-
 ### Commands
 
 - `/cmd fmt` to format the code in the PR. It commits back with the formatted code (fmt) and configs (taplo).
 
 - `/cmd bench` to generate weights for a runtime. Read more about [Weight Generation](weight-generation.md)
 
+- `/cmd prdoc` to generate a prdoc for a PR. Read more about [PRDoc](prdoc.md)
+
 ### Flags
 
 1.`--quiet` to suppress the output of the command in the comments.
@@ -32,12 +33,14 @@ The pipeline logs will include what is failed (like which runtimes/pallets), the
 or they keep failing, and you're rerunning them again, it's handy to add this flag to keep a PR clean.
 ### Adding new Commands
+
 Feel free to add new commands to the workflow, however **_note_** that triggered workflows will use the actions
 from `main` (default) branch, meaning they will take effect only after the PR with new changes/command is merged.
 
 If you want to test the new command, it's better to test in your fork and local-to-fork PRs, where you control
 the default branch.
 
 ### Examples
+
 The regex in cmd.yml is: `^(\/cmd )([-\/\s\w.=:]+)$`
 accepts only alphanumeric, space, "-", "/", "=", ":", "." chars.
 
 `/cmd bench --runtime bridge-hub-westend --pallet=pallet_name`
diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index 0c8165af40f4..4a1a3c1f0688 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -14,28 +14,39 @@ the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
 A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:
 
-1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install prdoc`.
+1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`.
 1. Open a Pull Request and get the PR number.
 1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
 1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
-[VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
+   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
 1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
 1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check it.
 
 > **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:
 > `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
 
+Alternatively, you can generate a prdoc from a PR by writing `/cmd prdoc` (see args with `/cmd prdoc --help`)
+in a comment on that PR to trigger it from CI.
+
+Options:
+
+- `pr`: The PR number to generate the PrDoc for.
+- `audience`: The audience whom the changes may concern.
+- `bump`: A default bump level for all crates.
+  The PrDoc will likely need to be edited to reflect the actual changes after generation.
+- `force`: Whether to overwrite any existing PrDoc.
+
 ## Pick An Audience
 
 While describing a PR, the author needs to consider which audience(s) need to be addressed.
 The list of valid audiences is described and documented in the JSON schema as follow:
 
 - `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs.
-  These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol
-  itself.
+  These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol
+  itself.
 - `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a
-  pallet. These are people who care about the protocol (WASM), not the meta-protocol (client).
+  pallet. These are people who care about the protocol (WASM), not the meta-protocol (client).
 - `Node Operator`: Those who don't write any code and only run code.
@@ -64,10 +75,10 @@ For example when you modified two crates and record the changes: ```yaml crates: -- name: frame-example - bump: major -- name: frame-example-pallet - bump: minor + - name: frame-example + bump: major + - name: frame-example-pallet + bump: minor ``` It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs index 195d1b124474..5be3a59dc7bb 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs @@ -46,7 +46,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The signed extensions that are added to the runtime. diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 103d29e8d269..a9732e934414 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -47,7 +47,7 @@ use polkadot_primitives::{ }, AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateReceipt, ExecutorParams, Hash, OccupiedCoreAssumption, PersistedValidationData, - PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, + PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId, }; use sp_application_crypto::{AppCrypto, ByteArray}; use sp_keystore::KeystorePtr; @@ -427,14 +427,15 @@ where .iter() .any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)])); - let is_present_authority = session_info - .discovery_keys + // We could've checked discovery_keys but on Kusama validators.len() < discovery_keys.len(). + let is_present_validator = session_info + .validators .iter() - .any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)])); + .any(|v| keystore.has_keys(&[(v.to_raw_vec(), ValidatorId::ID)])); // There is still a chance to be a previous session authority, but this extra work does not // affect the finalization. - is_past_present_or_future_authority && !is_present_authority + is_past_present_or_future_authority && !is_present_validator } // Sends PVF with unknown code hashes to the validation host returning the list of code hashes sent. 
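For reference, the membership check introduced in the candidate-validation change above, restated as a free-standing helper (a sketch only; `keystore` and `session_info` are assumed local values, mirroring the diff): the node now checks its keystore against `session_info.validators` using `ValidatorId`, because on Kusama `validators.len()` can be smaller than `discovery_keys.len()`.

```rust
// Sketch of the predicate from the diff above; not a new API, just the same
// check extracted for clarity.
use polkadot_primitives::{SessionInfo, ValidatorId};
use sp_application_crypto::{AppCrypto, ByteArray};
use sp_keystore::KeystorePtr;

fn is_present_validator(keystore: &KeystorePtr, session_info: &SessionInfo) -> bool {
	session_info
		.validators
		.iter()
		.any(|v| keystore.has_keys(&[(v.to_raw_vec(), ValidatorId::ID)]))
}
```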
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 55282fdf4ee1..0dcd84bab6cf 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -25,13 +25,12 @@ use polkadot_node_subsystem::messages::AllMessages; use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ - CoreIndex, GroupIndex, HeadData, Id as ParaId, IndexedVec, SessionInfo, UpwardMessage, - ValidatorId, ValidatorIndex, + CoreIndex, GroupIndex, HeadData, Id as ParaId, SessionInfo, UpwardMessage, ValidatorId, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor, }; -use sp_core::testing::TaskExecutor; +use sp_core::{sr25519::Public, testing::TaskExecutor}; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -1194,10 +1193,10 @@ fn dummy_candidate_backed( ) } -fn dummy_session_info(discovery_keys: Vec) -> SessionInfo { +fn dummy_session_info(keys: Vec) -> SessionInfo { SessionInfo { - validators: IndexedVec::::from(vec![]), - discovery_keys, + validators: keys.iter().cloned().map(Into::into).collect(), + discovery_keys: keys.iter().cloned().map(Into::into).collect(), assignment_keys: vec![], validator_groups: Default::default(), n_cores: 4u32, @@ -1246,7 +1245,7 @@ fn maybe_prepare_validation_golden_path() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); @@ -1364,7 +1363,7 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 2); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); }; @@ -1510,7 +1509,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); }; @@ -1557,7 +1556,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_ ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public()])))); } ); }; @@ -1604,7 +1603,7 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = 
tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()]))));
 			}
 		);
 
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 6b046e190830..6ec49c5830f7 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -174,7 +174,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 26,
-	state_version: 1,
+	system_version: 1,
 };
 
 /// The BABE epoch configuration at genesis.
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 72d024e9a878..b03231569113 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -125,7 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 /// The BABE epoch configuration at genesis.
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index d4273210a2de..bdff37eb11e2 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -175,7 +175,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 26,
-	state_version: 1,
+	system_version: 1,
 };
 
 /// The BABE epoch configuration at genesis.
diff --git a/prdoc/pr_4257.prdoc b/prdoc/pr_4257.prdoc
new file mode 100644
index 000000000000..860b85a4888e
--- /dev/null
+++ b/prdoc/pr_4257.prdoc
@@ -0,0 +1,76 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Rename `state_version` in `RuntimeVersion` to `system_version`.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR renames `state_version` in `RuntimeVersion` to `system_version`. `system_version=2` signifies
+      that extrinsic root derivation uses `StateVersion::V1`.
+
+  - audience: Runtime User
+    description: |
+      `RuntimeVersion`'s `state_version` is renamed to `system_version`. Applications using that type and its field
+      must update their code to reflect the changes. For easier migration, serde serialization produces both the new
+      `systemVersion` and the old `stateVersion` fields, and deserialization supports `stateVersion` as an alias too.
+
+crates:
+  - name: frame-system
+    bump: major
+  - name: sp-api
+    bump: none
+  - name: sp-version
+    bump: major
+  - name: sp-storage
+    bump: minor
+  - name: sp-version-proc-macro
+    bump: minor
+  - name: sc-block-builder
+    bump: major
+  - name: sc-executor
+    bump: major
+  - name: sc-rpc
+    bump: none
+  - name: sc-rpc-spec-v2
+    bump: none
+  - name: cumulus-pallet-parachain-system
+    bump: none
+  - name: cumulus-client-pov-recovery
+    bump: none
+  - name: cumulus-client-network
+    bump: none
+  - name: rococo-runtime
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: asset-hub-rococo-runtime
+    bump: major
+  - name: asset-hub-westend-runtime
+    bump: major
+  - name: bridge-hub-rococo-runtime
+    bump: major
+  - name: bridge-hub-westend-runtime
+    bump: major
+  - name: collectives-westend-runtime
+    bump: major
+  - name: coretime-rococo-runtime
+    bump: major
+  - name: coretime-westend-runtime
+    bump: major
+  - name: people-rococo-runtime
+    bump: major
+  - name: people-westend-runtime
+    bump: major
+  - name: penpal-runtime
+    bump: major
+  - name: contracts-rococo-runtime
+    bump: major
+  - name: glutton-westend-runtime
+    bump: major
+  - name: seedling-runtime
+    bump: major
+  - name: shell-runtime
+    bump: major
+  - name: rococo-parachain-runtime
+    bump: major
diff --git a/prdoc/pr_5038.prdoc b/prdoc/pr_5038.prdoc
new file mode 100644
index 000000000000..2bab8ef69f89
--- /dev/null
+++ b/prdoc/pr_5038.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Plumb RPC listener up to caller
+
+doc:
+  - audience: Node Dev
+    description:
+      This PR allows the RPC server's socket address to be returned when initializing the server.
+      This allows the library consumer to easily and programmatically determine which port the RPC server is listening on.
+crates:
+  - name: sc-rpc-server
+    bump: major
+  - name: sc-service
+    bump: major
diff --git a/prdoc/pr_5343.prdoc b/prdoc/pr_5343.prdoc
new file mode 100644
index 000000000000..3cec70de93cb
--- /dev/null
+++ b/prdoc/pr_5343.prdoc
@@ -0,0 +1,19 @@
+title: Allow disabling gap creation during block import
+
+doc:
+  - audience: Node Dev
+    description: |
+      The new property `BlockImportParams::create_gap` allows changing whether to create a block gap in case the
+      block has no parent (defaults to `true`, keeping the existing behavior), which is helpful for sync protocols
+      that do not need to sync the gap after this happens. A `BlockImportOperation::create_gap()` method was also
+      introduced, though in most cases `BlockImportParams::create_gap` will be used.
+
+crates:
+  - name: sc-client-api
+    bump: major
+  - name: sc-consensus
+    bump: minor
+  - name: sc-client-db
+    bump: minor
+  - name: sc-service
+    bump: minor
diff --git a/prdoc/pr_5469.prdoc b/prdoc/pr_5469.prdoc
new file mode 100644
index 000000000000..1e6aa3c0c072
--- /dev/null
+++ b/prdoc/pr_5469.prdoc
@@ -0,0 +1,11 @@
+title: Syncing strategy refactoring
+
+doc:
+  - audience: Node Dev
+    description: |
+      Mostly internal changes to syncing strategies that are a step towards making them configurable/extensible in the
+      future. It is unlikely that external developers will need to change their code.
+ +crates: + - name: sc-network-sync + bump: major diff --git a/prdoc/pr_5546.prdoc b/prdoc/pr_5546.prdoc index 16e2bd993463..95f02dbe13b2 100644 --- a/prdoc/pr_5546.prdoc +++ b/prdoc/pr_5546.prdoc @@ -13,11 +13,14 @@ crates: - name: snowbridge-pallet-outbound-queue bump: patch - name: snowbridge-pallet-system - bump: major + bump: minor + validate: false - name: snowbridge-core - bump: major + bump: minor + validate: false - name: snowbridge-router-primitives - bump: major + bump: minor + validate: false - name: bridge-hub-westend-runtime bump: patch - name: bridge-hub-rococo-runtime diff --git a/prdoc/pr_5580.prdoc b/prdoc/pr_5580.prdoc new file mode 100644 index 000000000000..e03b946070aa --- /dev/null +++ b/prdoc/pr_5580.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix error message on pallet macro + +doc: + - audience: Runtime Dev + description: | + Improve error message for pallet macro generated code. + +crates: + - name: frame-support-procedural + bump: patch diff --git a/prdoc/pr_5592.prdoc b/prdoc/pr_5592.prdoc new file mode 100644 index 000000000000..9d51917db7b1 --- /dev/null +++ b/prdoc/pr_5592.prdoc @@ -0,0 +1,26 @@ +title: Introduce `BlockGap` + +doc: + - audience: Node Dev + description: | + This is the first step towards https://github.com/paritytech/polkadot-sdk/issues/5406, + refactoring the representation of block gap. This refactor converts the existing + `(NumberFor, NumberFor)` into a dedicated `BlockGap>` + struct. This change is purely structural and does not alter existing logic, but lays + the groundwork for the follow-up PR. The compatibility concern in the database caused + by the new structure transition is addressed as well. + + The `BlockGap` refactoring results in breaking changes in the `Info` structure returned + in `client.info()`. + +crates: + - name: sc-consensus-babe + bump: none + - name: sc-client-db + bump: none + - name: sc-network-sync + bump: none + - name: sc-service + bump: none + - name: sp-blockchain + bump: major diff --git a/prdoc/pr_5594.prdoc b/prdoc/pr_5594.prdoc new file mode 100644 index 000000000000..dbdc7937b73d --- /dev/null +++ b/prdoc/pr_5594.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add debugging info for `StorageWeightReclaim`" + +doc: + - audience: Runtime Dev + description: | + - Includes extrinsic index to be displayed in the logs when the consumed weight is higher than the measured one. + +crates: + - name: cumulus-primitives-storage-weight-reclaim + bump: patch diff --git a/prdoc/pr_5601.prdoc b/prdoc/pr_5601.prdoc new file mode 100644 index 000000000000..3a0ec9ee8714 --- /dev/null +++ b/prdoc/pr_5601.prdoc @@ -0,0 +1,12 @@ +title: Introduce `RpcParams` in sc-cli + +doc: + - audience: Node Dev + description: | + Refactors and consolidates all RPC-related parameters in the run command into a dedicated `RpcParams` struct. This change allows downstream users to build custom run command without duplicating code. 
+
+crates:
+  - name: cumulus-client-cli
+    bump: none
+  - name: sc-cli
+    bump: major
diff --git a/prdoc/pr_5606.prdoc b/prdoc/pr_5606.prdoc
new file mode 100644
index 000000000000..46883c5722cd
--- /dev/null
+++ b/prdoc/pr_5606.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix PVF precompilation for Kusama
+
+doc:
+  - audience: Node Operator
+    description: |
+      Tweaks the PVF precompilation on Kusama to allow preparing PVFs when the node is an authority but not a validator.
+
+crates:
+  - name: polkadot-node-core-candidate-validation
+    bump: patch
diff --git a/prdoc/pr_5608.prdoc b/prdoc/pr_5608.prdoc
new file mode 100644
index 000000000000..9a0748e46bab
--- /dev/null
+++ b/prdoc/pr_5608.prdoc
@@ -0,0 +1,16 @@
+title: "[pallet-revive] update runtime types"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Refactor the Ext trait to use U256 instead of BalanceOf or MomentOf
+
+crates:
+  - name: pallet-revive
+    bump: major
+  - name: pallet-revive-uapi
+    bump: patch
+  - name: pallet-revive-fixtures
+    bump: patch
+
+
diff --git a/prdoc/pr_5609.prdoc b/prdoc/pr_5609.prdoc
new file mode 100644
index 000000000000..799071f04c1e
--- /dev/null
+++ b/prdoc/pr_5609.prdoc
@@ -0,0 +1,21 @@
+title: Update litep2p network backend to v0.7.0
+
+doc:
+  - audience: [ Node Dev, Node Operator ]
+    description: |
+      This PR updates the Litep2p network backend to version 0.7.0.
+      This new release introduces several new features, improvements, and fixes to the litep2p library.
+      Key updates include enhanced error handling propagated through metrics, configurable connection limits,
+      and a new API for managing public addresses.
+
+      The Identify protocol no longer includes public addresses in its configuration.
+      Instead, we rely on the `litep2p.public_addresses` interface to propagate the node's external addresses.
+
+      Litep2p now uses the hickory DNS resolver (formerly known as trust DNS).
+      As with trust DNS, the hickory logs are silenced.
+
+crates:
+  - name: sc-network
+    bump: patch
+  - name: sc-tracing
+    bump: minor
diff --git a/prdoc/pr_5632.prdoc b/prdoc/pr_5632.prdoc
new file mode 100644
index 000000000000..f76428bbc8f6
--- /dev/null
+++ b/prdoc/pr_5632.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix `alloc` not found error in `format_runtime_string!`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixes the macro hygiene in the `format_runtime_string!` macro to fix the `alloc` not found build error.
+
+crates:
+  - name: sp-runtime
+    bump: patch
diff --git a/prdoc/pr_5635.prdoc b/prdoc/pr_5635.prdoc
new file mode 100644
index 000000000000..168d65970c95
--- /dev/null
+++ b/prdoc/pr_5635.prdoc
@@ -0,0 +1,13 @@
+title: Fix edge case where state sync is not triggered
+
+doc:
+  - audience: Node Dev
+    description: |
+      There is an edge case where the finalized block notification is received, but the conditions required to initiate the
+      state sync are not fully met. In such cases, state sync would fail to start as expected and remain stalled.
+      This patch addresses it by storing the pending attempt and trying to start the state sync later when the conditions
+      are satisfied.
+ +crates: + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_5640.prdoc b/prdoc/pr_5640.prdoc new file mode 100644 index 000000000000..fdd7f5e1b893 --- /dev/null +++ b/prdoc/pr_5640.prdoc @@ -0,0 +1,10 @@ +title: "[pallet-revive] Move event's topics" + +doc: + - audience: Runtime Dev + description: | + Move event's topics inside body + +crates: + - name: pallet-revive + bump: major diff --git a/prdoc/pr_5644.prdoc b/prdoc/pr_5644.prdoc new file mode 100644 index 000000000000..3300d557fce4 --- /dev/null +++ b/prdoc/pr_5644.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-utility: Improve weight annotations' +doc: +- audience: Runtime Dev + description: |- + Prevent allocations when calculating the weights. +crates: +- name: pallet-utility + bump: patch diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 001b2273c9b2..c8409078af57 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -171,7 +171,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The BABE epoch configuration at genesis. @@ -507,8 +507,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_babe::EquivocationReportSystem; } @@ -1418,10 +1417,6 @@ impl pallet_revive::Config for Runtime { type UploadOrigin = EnsureSigned; type InstantiateOrigin = EnsureSigned; type RuntimeHoldReason = RuntimeHoldReason; - #[cfg(not(feature = "runtime-benchmarks"))] - type Migrations = (); - #[cfg(feature = "runtime-benchmarks")] - type Migrations = pallet_revive::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Xcm = (); @@ -1534,7 +1529,7 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_grandpa::EquivocationReportSystem; } @@ -2593,7 +2588,6 @@ type Migrations = ( pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, - pallet_revive::Migration, pallet_identity::migration::versioned::V0ToV1, ); @@ -2614,7 +2608,7 @@ impl pallet_beefy::Config for Runtime { type OnNewValidatorSet = MmrLeaf; type AncestryHelper = MmrLeaf; type WeightInfo = (); - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_beefy::EquivocationReportSystem; } diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 0b2a34952401..9c9601a912ac 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -232,6 +232,9 @@ pub trait BlockImportOperation { /// Add a transaction index operation. fn update_transaction_index(&mut self, index: Vec) -> sp_blockchain::Result<()>; + + /// Configure whether to create a block gap if newly imported block is missing parent + fn set_create_gap(&mut self, create_gap: bool); } /// Interface for performing operations on the backend. 
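The new `set_create_gap` hook above is the backend-side counterpart of `BlockImportParams::create_gap` from `prdoc/pr_5343` earlier in this patch. A minimal sketch of a caller opting out of gap creation, assuming the public `create_gap` field that prdoc describes (the surrounding setup is illustrative):

```rust
// Sketch only: a sync protocol that does not want a block gap recorded when
// the imported block's parent is missing. The `BlockOrigin` is illustrative.
use sc_consensus::BlockImportParams;
use sp_consensus::BlockOrigin;
use sp_runtime::traits::Block as BlockT;

fn import_params_without_gap<B: BlockT>(header: B::Header) -> BlockImportParams<B> {
	let mut params = BlockImportParams::new(BlockOrigin::NetworkBroadcast, header);
	// Per prdoc/pr_5343 this defaults to `true` (the previous behavior).
	params.create_gap = false;
	params
}
```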
diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index ba89aede9147..c045a393bb21 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -584,6 +584,8 @@ impl backend::BlockImportOperation for BlockImportOperatio ) -> sp_blockchain::Result<()> { Ok(()) } + + fn set_create_gap(&mut self, _create_gap: bool) {} } /// In-memory backend. Keeps all states and blocks in memory. diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 2f22cd42591f..d02d0e321805 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -320,7 +320,7 @@ where header.extrinsics_root().clone(), HashingFor::::ordered_trie_root( self.extrinsics.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, + self.api.version(self.parent_hash)?.extrinsics_root_state_version(), ), ); diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index f47baf2644e7..f91d18aca749 100644 --- a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -17,15 +17,12 @@ // along with this program. If not, see . use crate::{ - arg_enums::{Cors, RpcMethods}, error::{Error, Result}, params::{ ImportParams, KeystoreParams, NetworkParams, OffchainWorkerParams, RpcEndpoint, SharedParams, TransactionPoolParams, }, - CliConfiguration, PrometheusParams, RuntimeParams, TelemetryParams, - RPC_DEFAULT_MAX_CONNECTIONS, RPC_DEFAULT_MAX_REQUEST_SIZE_MB, RPC_DEFAULT_MAX_RESPONSE_SIZE_MB, - RPC_DEFAULT_MAX_SUBS_PER_CONN, RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN, + CliConfiguration, PrometheusParams, RpcParams, RuntimeParams, TelemetryParams, }; use clap::Parser; use regex::Regex; @@ -36,10 +33,7 @@ use sc_service::{ ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; -use std::{ - net::{Ipv4Addr, Ipv6Addr, SocketAddr}, - num::NonZeroU32, -}; +use std::num::NonZeroU32; /// The `run` command used to run a node. #[derive(Debug, Clone, Parser)] @@ -59,154 +53,16 @@ pub struct RunCmd { #[arg(long)] pub no_grandpa: bool, - /// Listen to all RPC interfaces (default: local). - /// - /// Not all RPC methods are safe to be exposed publicly. - /// - /// Use an RPC proxy server to filter out dangerous methods. More details: - /// . - /// - /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. - #[arg(long)] - pub rpc_external: bool, - - /// Listen to all RPC interfaces. - /// - /// Same as `--rpc-external`. - #[arg(long)] - pub unsafe_rpc_external: bool, - - /// RPC methods to expose. - #[arg( - long, - value_name = "METHOD SET", - value_enum, - ignore_case = true, - default_value_t = RpcMethods::Auto, - verbatim_doc_comment - )] - pub rpc_methods: RpcMethods, - - /// RPC rate limiting (calls/minute) for each connection. - /// - /// This is disabled by default. - /// - /// For example `--rpc-rate-limit 10` will maximum allow - /// 10 calls per minute per connection. - #[arg(long)] - pub rpc_rate_limit: Option, - - /// Disable RPC rate limiting for certain ip addresses. - /// - /// Each IP address must be in CIDR notation such as `1.2.3.4/24`. - #[arg(long, num_args = 1..)] - pub rpc_rate_limit_whitelisted_ips: Vec, - - /// Trust proxy headers for disable rate limiting. - /// - /// By default the rpc server will not trust headers such `X-Real-IP`, `X-Forwarded-For` and - /// `Forwarded` and this option will make the rpc server to trust these headers. 
- /// - /// For instance this may be secure if the rpc server is behind a reverse proxy and that the - /// proxy always sets these headers. - #[arg(long)] - pub rpc_rate_limit_trust_proxy_headers: bool, - - /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB)] - pub rpc_max_request_size: u32, - - /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] - pub rpc_max_response_size: u32, - - /// Set the maximum concurrent subscriptions per connection. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN)] - pub rpc_max_subscriptions_per_connection: u32, - - /// Specify JSON-RPC server TCP port. - #[arg(long, value_name = "PORT")] - pub rpc_port: Option, - - /// EXPERIMENTAL: Specify the JSON-RPC server interface and this option which can be enabled - /// several times if you want expose several RPC interfaces with different configurations. - /// - /// The format for this option is: - /// `--experimental-rpc-endpoint" listen-addr=,,..."` where each option is - /// separated by a comma and `listen-addr` is the only required param. - /// - /// The following options are available: - /// • listen-addr: The socket address (ip:port) to listen on. Be careful to not expose the - /// server to the public internet unless you know what you're doing. (required) - /// • disable-batch-requests: Disable batch requests (optional) - /// • max-connections: The maximum number of concurrent connections that the server will - /// accept (optional) - /// • max-request-size: The maximum size of a request body in megabytes (optional) - /// • max-response-size: The maximum size of a response body in megabytes (optional) - /// • max-subscriptions-per-connection: The maximum number of subscriptions per connection - /// (optional) - /// • max-buffer-capacity-per-connection: The maximum buffer capacity per connection - /// (optional) - /// • max-batch-request-len: The maximum number of requests in a batch (optional) - /// • cors: The CORS allowed origins, this can enabled more than once (optional) - /// • methods: Which RPC methods to allow, valid values are "safe", "unsafe" and "auto" - /// (optional) - /// • optional: If the listen address is optional i.e the interface is not required to be - /// available For example this may be useful if some platforms doesn't support ipv6 - /// (optional) - /// • rate-limit: The rate limit in calls per minute for each connection (optional) - /// • rate-limit-trust-proxy-headers: Trust proxy headers for disable rate limiting (optional) - /// • rate-limit-whitelisted-ips: Disable rate limiting for certain ip addresses, this can be - /// enabled more than once (optional) • retry-random-port: If the port is already in use, - /// retry with a random port (optional) - /// - /// Use with care, this flag is unstable and subject to change. - #[arg( - long, - num_args = 1.., - verbatim_doc_comment, - conflicts_with_all = &["rpc_external", "unsafe_rpc_external", "rpc_port", "rpc_cors", "rpc_rate_limit_trust_proxy_headers", "rpc_rate_limit", "rpc_rate_limit_whitelisted_ips", "rpc_message_buffer_capacity_per_connection", "rpc_disable_batch_requests", "rpc_max_subscriptions_per_connection", "rpc_max_request_size", "rpc_max_response_size"] - )] - pub experimental_rpc_endpoint: Vec, - - /// Maximum number of RPC server connections.
- #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS)] - pub rpc_max_connections: u32, - - /// The number of messages the RPC server is allowed to keep in memory. - /// - /// If the buffer becomes full then the server will not process - /// new messages until the connected client start reading the - /// underlying messages. - /// - /// This applies per connection which includes both - /// JSON-RPC methods calls and subscriptions. - #[arg(long, default_value_t = RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)] - pub rpc_message_buffer_capacity_per_connection: u32, - - /// Disable RPC batch requests - #[arg(long, alias = "rpc_no_batch_requests", conflicts_with_all = &["rpc_max_batch_request_len"])] - pub rpc_disable_batch_requests: bool, - - /// Limit the max length per RPC batch request - #[arg(long, conflicts_with_all = &["rpc_disable_batch_requests"], value_name = "LEN")] - pub rpc_max_batch_request_len: Option, - - /// Specify browser *origins* allowed to access the HTTP & WS RPC servers. - /// - /// A comma-separated list of origins (protocol://domain or special `null` - /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and origins. When running in - /// `--dev` mode the default is to allow all origins. - #[arg(long, value_name = "ORIGINS")] - pub rpc_cors: Option, - /// The human-readable name for this node. /// /// It's used as network node name. #[arg(long, value_name = "NAME")] pub name: Option, + #[allow(missing_docs)] + #[clap(flatten)] + pub rpc_params: RpcParams, + #[allow(missing_docs)] #[clap(flatten)] pub telemetry_params: TelemetryParams, @@ -437,136 +293,51 @@ impl CliConfiguration for RunCmd { } fn rpc_max_connections(&self) -> Result { - Ok(self.rpc_max_connections) + Ok(self.rpc_params.rpc_max_connections) } fn rpc_cors(&self, is_dev: bool) -> Result>> { - Ok(self - .rpc_cors - .clone() - .unwrap_or_else(|| { - if is_dev { - log::warn!("Running in --dev mode, RPC CORS has been disabled."); - Cors::All - } else { - Cors::List(vec![ - "http://localhost:*".into(), - "http://127.0.0.1:*".into(), - "https://localhost:*".into(), - "https://127.0.0.1:*".into(), - "https://polkadot.js.org".into(), - ]) - } - }) - .into()) + self.rpc_params.rpc_cors(is_dev) } fn rpc_addr(&self, default_listen_port: u16) -> Result>> { - if !self.experimental_rpc_endpoint.is_empty() { - for endpoint in &self.experimental_rpc_endpoint { - // Technically, `0.0.0.0` isn't a public IP address, but it's a way to listen on - // all interfaces. Thus, we consider it as a public endpoint and warn about it. - if endpoint.rpc_methods == RpcMethods::Unsafe && endpoint.is_global() || - endpoint.listen_addr.ip().is_unspecified() - { - log::warn!( - "It isn't safe to expose RPC publicly without a proxy server that filters \ - available set of RPC methods." 
- ); - } - } - - return Ok(Some(self.experimental_rpc_endpoint.clone())); - } - - let (ipv4, ipv6) = rpc_interface( - self.rpc_external, - self.unsafe_rpc_external, - self.rpc_methods, - self.validator, - )?; - - let cors = self.rpc_cors(self.is_dev()?)?; - let port = self.rpc_port.unwrap_or(default_listen_port); - - Ok(Some(vec![ - RpcEndpoint { - batch_config: self.rpc_batch_config()?, - max_connections: self.rpc_max_connections, - listen_addr: SocketAddr::new(std::net::IpAddr::V4(ipv4), port), - rpc_methods: self.rpc_methods, - rate_limit: self.rpc_rate_limit, - rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers, - rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(), - max_payload_in_mb: self.rpc_max_request_size, - max_payload_out_mb: self.rpc_max_response_size, - max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection, - max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection, - cors: cors.clone(), - retry_random_port: true, - is_optional: false, - }, - RpcEndpoint { - batch_config: self.rpc_batch_config()?, - max_connections: self.rpc_max_connections, - listen_addr: SocketAddr::new(std::net::IpAddr::V6(ipv6), port), - rpc_methods: self.rpc_methods, - rate_limit: self.rpc_rate_limit, - rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers, - rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(), - max_payload_in_mb: self.rpc_max_request_size, - max_payload_out_mb: self.rpc_max_response_size, - max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection, - max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection, - cors: cors.clone(), - retry_random_port: true, - is_optional: true, - }, - ])) + self.rpc_params.rpc_addr(self.is_dev()?, self.validator, default_listen_port) } fn rpc_methods(&self) -> Result { - Ok(self.rpc_methods.into()) + Ok(self.rpc_params.rpc_methods.into()) } fn rpc_max_request_size(&self) -> Result { - Ok(self.rpc_max_request_size) + Ok(self.rpc_params.rpc_max_request_size) } fn rpc_max_response_size(&self) -> Result { - Ok(self.rpc_max_response_size) + Ok(self.rpc_params.rpc_max_response_size) } fn rpc_max_subscriptions_per_connection(&self) -> Result { - Ok(self.rpc_max_subscriptions_per_connection) + Ok(self.rpc_params.rpc_max_subscriptions_per_connection) } fn rpc_buffer_capacity_per_connection(&self) -> Result { - Ok(self.rpc_message_buffer_capacity_per_connection) + Ok(self.rpc_params.rpc_message_buffer_capacity_per_connection) } fn rpc_batch_config(&self) -> Result { - let cfg = if self.rpc_disable_batch_requests { - RpcBatchRequestConfig::Disabled - } else if let Some(l) = self.rpc_max_batch_request_len { - RpcBatchRequestConfig::Limit(l) - } else { - RpcBatchRequestConfig::Unlimited - }; - - Ok(cfg) + self.rpc_params.rpc_batch_config() } fn rpc_rate_limit(&self) -> Result> { - Ok(self.rpc_rate_limit) + Ok(self.rpc_params.rpc_rate_limit) } fn rpc_rate_limit_whitelisted_ips(&self) -> Result> { - Ok(self.rpc_rate_limit_whitelisted_ips.clone()) + Ok(self.rpc_params.rpc_rate_limit_whitelisted_ips.clone()) } fn rpc_rate_limit_trust_proxy_headers(&self) -> Result { - Ok(self.rpc_rate_limit_trust_proxy_headers) + Ok(self.rpc_params.rpc_rate_limit_trust_proxy_headers) } fn transaction_pool(&self, is_dev: bool) -> Result { @@ -600,57 +371,28 @@ pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); if name.is_empty() { - return Err("Node name cannot be 
empty") + return Err("Node name cannot be empty"); } if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long") + return Err("Node name too long"); } let invalid_chars = r"[\\.@]"; let re = Regex::new(invalid_chars).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'") + return Err("Node name should not contain invalid chars such as '.' and '@'"); } let invalid_patterns = r"^https?:"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain urls") + return Err("Node name should not contain urls"); } Ok(()) } -fn rpc_interface( - is_external: bool, - is_unsafe_external: bool, - rpc_methods: RpcMethods, - is_validator: bool, -) -> Result<(Ipv4Addr, Ipv6Addr)> { - if is_external && is_validator && rpc_methods != RpcMethods::Unsafe { - return Err(Error::Input( - "--rpc-external option shouldn't be used if the node is running as \ - a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \ - the risks. See the options description for more information." - .to_owned(), - )) - } - - if is_external || is_unsafe_external { - if rpc_methods == RpcMethods::Unsafe { - log::warn!( - "It isn't safe to expose RPC publicly without a proxy server that filters \ - available set of RPC methods." - ); - } - - Ok((Ipv4Addr::UNSPECIFIED, Ipv6Addr::UNSPECIFIED)) - } else { - Ok((Ipv4Addr::LOCALHOST, Ipv6Addr::LOCALHOST)) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/cli/src/params/rpc_params.rs b/substrate/client/cli/src/params/rpc_params.rs index d0ec1fd00443..f9937b59bbaf 100644 --- a/substrate/client/cli/src/params/rpc_params.rs +++ b/substrate/client/cli/src/params/rpc_params.rs @@ -17,12 +17,16 @@ // along with this program. If not, see . use crate::{ - arg_enums::RpcMethods, + arg_enums::{Cors, RpcMethods}, params::{IpNetwork, RpcBatchRequestConfig}, RPC_DEFAULT_MAX_CONNECTIONS, RPC_DEFAULT_MAX_REQUEST_SIZE_MB, RPC_DEFAULT_MAX_RESPONSE_SIZE_MB, RPC_DEFAULT_MAX_SUBS_PER_CONN, RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN, }; -use std::{net::SocketAddr, num::NonZeroU32}; +use clap::Args; +use std::{ + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + num::NonZeroU32, +}; const RPC_LISTEN_ADDR: &str = "listen-addr"; const RPC_CORS: &str = "cors"; @@ -40,6 +44,288 @@ const RPC_OPTIONAL: &str = "optional"; const RPC_DISABLE_BATCH: &str = "disable-batch-requests"; const RPC_BATCH_LIMIT: &str = "max-batch-request-len"; +/// Parameters of RPC. +#[derive(Debug, Clone, Args)] +pub struct RpcParams { + /// Listen to all RPC interfaces (default: local). + /// + /// Not all RPC methods are safe to be exposed publicly. + /// + /// Use an RPC proxy server to filter out dangerous methods. More details: + /// . + /// + /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. + #[arg(long)] + pub rpc_external: bool, + + /// Listen to all RPC interfaces. + /// + /// Same as `--rpc-external`. + #[arg(long)] + pub unsafe_rpc_external: bool, + + /// RPC methods to expose. + #[arg( + long, + value_name = "METHOD SET", + value_enum, + ignore_case = true, + default_value_t = RpcMethods::Auto, + verbatim_doc_comment + )] + pub rpc_methods: RpcMethods, + + /// RPC rate limiting (calls/minute) for each connection. + /// + /// This is disabled by default. + /// + /// For example `--rpc-rate-limit 10` will maximum allow + /// 10 calls per minute per connection. 
+ #[arg(long)] + pub rpc_rate_limit: Option, + + /// Disable RPC rate limiting for certain ip addresses. + /// + /// Each IP address must be in CIDR notation such as `1.2.3.4/24`. + #[arg(long, num_args = 1..)] + pub rpc_rate_limit_whitelisted_ips: Vec, + + /// Trust proxy headers for disabling rate limiting. + /// + /// By default the rpc server will not trust headers such as `X-Real-IP`, `X-Forwarded-For` and + /// `Forwarded`; this option will make the rpc server trust these headers. + /// + /// For instance, this may be secure if the rpc server is behind a reverse proxy and the + /// proxy always sets these headers. + #[arg(long)] + pub rpc_rate_limit_trust_proxy_headers: bool, + + /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. + #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB)] + pub rpc_max_request_size: u32, + + /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. + #[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] + pub rpc_max_response_size: u32, + + /// Set the maximum concurrent subscriptions per connection. + #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN)] + pub rpc_max_subscriptions_per_connection: u32, + + /// Specify JSON-RPC server TCP port. + #[arg(long, value_name = "PORT")] + pub rpc_port: Option, + + /// EXPERIMENTAL: Specify the JSON-RPC server interface. This option can be enabled + /// several times if you want to expose several RPC interfaces with different configurations. + /// + /// The format for this option is: + /// `--experimental-rpc-endpoint" listen-addr=,,..."` where each option is + /// separated by a comma and `listen-addr` is the only required param. + /// + /// The following options are available: + /// • listen-addr: The socket address (ip:port) to listen on. Be careful to not expose the + /// server to the public internet unless you know what you're doing. (required) + /// • disable-batch-requests: Disable batch requests (optional) + /// • max-connections: The maximum number of concurrent connections that the server will + /// accept (optional) + /// • max-request-size: The maximum size of a request body in megabytes (optional) + /// • max-response-size: The maximum size of a response body in megabytes (optional) + /// • max-subscriptions-per-connection: The maximum number of subscriptions per connection + /// (optional) + /// • max-buffer-capacity-per-connection: The maximum buffer capacity per connection + /// (optional) + /// • max-batch-request-len: The maximum number of requests in a batch (optional) + /// • cors: The CORS allowed origins, this can be enabled more than once (optional) + /// • methods: Which RPC methods to allow, valid values are "safe", "unsafe" and "auto" + /// (optional) + /// • optional: If the listen address is optional, i.e. the interface is not required to be + /// available. For example, this may be useful if some platforms don't support ipv6 + /// (optional) + /// • rate-limit: The rate limit in calls per minute for each connection (optional) + /// • rate-limit-trust-proxy-headers: Trust proxy headers for disabling rate limiting (optional) + /// • rate-limit-whitelisted-ips: Disable rate limiting for certain ip addresses, this can be + /// enabled more than once (optional) • retry-random-port: If the port is already in use, + /// retry with a random port (optional) + /// + /// Use with care, this flag is unstable and subject to change.
+ #[arg( + long, + num_args = 1.., + verbatim_doc_comment, + conflicts_with_all = &["rpc_external", "unsafe_rpc_external", "rpc_port", "rpc_cors", "rpc_rate_limit_trust_proxy_headers", "rpc_rate_limit", "rpc_rate_limit_whitelisted_ips", "rpc_message_buffer_capacity_per_connection", "rpc_disable_batch_requests", "rpc_max_subscriptions_per_connection", "rpc_max_request_size", "rpc_max_response_size"] + )] + pub experimental_rpc_endpoint: Vec, + + /// Maximum number of RPC server connections. + #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS)] + pub rpc_max_connections: u32, + + /// The number of messages the RPC server is allowed to keep in memory. + /// + /// If the buffer becomes full then the server will not process + /// new messages until the connected client starts reading the + /// underlying messages. + /// + /// This applies per connection which includes both + /// JSON-RPC method calls and subscriptions. + #[arg(long, default_value_t = RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)] + pub rpc_message_buffer_capacity_per_connection: u32, + + /// Disable RPC batch requests + #[arg(long, alias = "rpc_no_batch_requests", conflicts_with_all = &["rpc_max_batch_request_len"])] + pub rpc_disable_batch_requests: bool, + + /// Limit the max length per RPC batch request + #[arg(long, conflicts_with_all = &["rpc_disable_batch_requests"], value_name = "LEN")] + pub rpc_max_batch_request_len: Option, + + /// Specify browser *origins* allowed to access the HTTP & WS RPC servers. + /// + /// A comma-separated list of origins (protocol://domain or special `null` + /// value). Value of `all` will disable origin validation. Default is to + /// allow localhost and origins. When running in + /// `--dev` mode the default is to allow all origins. + #[arg(long, value_name = "ORIGINS")] + pub rpc_cors: Option, +} + +impl RpcParams { + /// Returns the RPC CORS configuration. + pub fn rpc_cors(&self, is_dev: bool) -> crate::Result>> { + Ok(self + .rpc_cors + .clone() + .unwrap_or_else(|| { + if is_dev { + log::warn!("Running in --dev mode, RPC CORS has been disabled."); + Cors::All + } else { + Cors::List(vec![ + "http://localhost:*".into(), + "http://127.0.0.1:*".into(), + "https://localhost:*".into(), + "https://127.0.0.1:*".into(), + "https://polkadot.js.org".into(), + ]) + } + }) + .into()) + } + + /// Returns the RPC endpoints. + pub fn rpc_addr( + &self, + is_dev: bool, + is_validator: bool, + default_listen_port: u16, + ) -> crate::Result>> { + if !self.experimental_rpc_endpoint.is_empty() { + for endpoint in &self.experimental_rpc_endpoint { + // Technically, `0.0.0.0` isn't a public IP address, but it's a way to listen on + // all interfaces. Thus, we consider it as a public endpoint and warn about it. + if endpoint.rpc_methods == RpcMethods::Unsafe && endpoint.is_global() || + endpoint.listen_addr.ip().is_unspecified() + { + eprintln!( + "It isn't safe to expose RPC publicly without a proxy server that filters \ + available set of RPC methods."
+ ); + } + } + + return Ok(Some(self.experimental_rpc_endpoint.clone())); + } + + let (ipv4, ipv6) = rpc_interface( + self.rpc_external, + self.unsafe_rpc_external, + self.rpc_methods, + is_validator, + )?; + + let cors = self.rpc_cors(is_dev)?; + let port = self.rpc_port.unwrap_or(default_listen_port); + + Ok(Some(vec![ + RpcEndpoint { + batch_config: self.rpc_batch_config()?, + max_connections: self.rpc_max_connections, + listen_addr: SocketAddr::new(std::net::IpAddr::V4(ipv4), port), + rpc_methods: self.rpc_methods, + rate_limit: self.rpc_rate_limit, + rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers, + rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(), + max_payload_in_mb: self.rpc_max_request_size, + max_payload_out_mb: self.rpc_max_response_size, + max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection, + max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection, + cors: cors.clone(), + retry_random_port: true, + is_optional: false, + }, + RpcEndpoint { + batch_config: self.rpc_batch_config()?, + max_connections: self.rpc_max_connections, + listen_addr: SocketAddr::new(std::net::IpAddr::V6(ipv6), port), + rpc_methods: self.rpc_methods, + rate_limit: self.rpc_rate_limit, + rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers, + rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(), + max_payload_in_mb: self.rpc_max_request_size, + max_payload_out_mb: self.rpc_max_response_size, + max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection, + max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection, + cors: cors.clone(), + retry_random_port: true, + is_optional: true, + }, + ])) + } + + /// Returns the configuration for batch RPC requests. + pub fn rpc_batch_config(&self) -> crate::Result { + let cfg = if self.rpc_disable_batch_requests { + RpcBatchRequestConfig::Disabled + } else if let Some(l) = self.rpc_max_batch_request_len { + RpcBatchRequestConfig::Limit(l) + } else { + RpcBatchRequestConfig::Unlimited + }; + + Ok(cfg) + } +} + +fn rpc_interface( + is_external: bool, + is_unsafe_external: bool, + rpc_methods: RpcMethods, + is_validator: bool, +) -> crate::Result<(Ipv4Addr, Ipv6Addr)> { + if is_external && is_validator && rpc_methods != RpcMethods::Unsafe { + return Err(crate::Error::Input( + "--rpc-external option shouldn't be used if the node is running as \ + a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \ + the risks. See the options description for more information." + .to_owned(), + )); + } + + if is_external || is_unsafe_external { + if rpc_methods == RpcMethods::Unsafe { + eprintln!( + "It isn't safe to expose RPC publicly without a proxy server that filters \ + available set of RPC methods." + ); + } + + Ok((Ipv4Addr::UNSPECIFIED, Ipv6Addr::UNSPECIFIED)) + } else { + Ok((Ipv4Addr::LOCALHOST, Ipv6Addr::LOCALHOST)) + } +} + /// Represent a single RPC endpoint with its configuration. 
#[derive(Debug, Clone)] pub struct RpcEndpoint { diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 9770b16871e1..4cf66302ec85 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1146,7 +1146,9 @@ where let info = self.client.info(); let number = *block.header.number(); - if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block.with_state() { + if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) || + block.with_state() + { // Verification for imported blocks is skipped in two cases: // 1. When importing blocks below the last finalized block during network initial // synchronization. @@ -1420,7 +1422,7 @@ where // Skip babe logic if block already in chain or importing blocks during initial sync, // otherwise the check for epoch changes will error because trying to re-import an // epoch change or because of missing epoch data in the tree, respectively. - if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || + if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) || block_status == BlockStatus::InChain { // When re-importing existing block strip away intermediates. diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index 4d7b89f37d86..0fcf96a96368 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -214,6 +214,8 @@ pub struct BlockImportParams { pub fork_choice: Option, /// Re-validate existing block. pub import_existing: bool, + /// Whether to create a "block gap" in case this block doesn't have a parent. + pub create_gap: bool, /// Cached full header hash (with post-digests applied). pub post_hash: Option, } @@ -234,6 +236,7 @@ impl BlockImportParams { auxiliary: Vec::new(), fork_choice: None, import_existing: false, + create_gap: true, post_hash: None, } } diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index eadb26254a18..72707c306f58 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -61,6 +61,7 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, + blockchain::{BlockGap, BlockGapType}, leaves::{FinalizationOutcome, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, @@ -91,6 +92,7 @@ use sp_state_machine::{ StorageValue, UsageInfo as StateUsageInfo, }; use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB}; +use utils::BLOCK_GAP_CURRENT_VERSION; // Re-export the Database trait so that one can pass an implementation of it.
pub use sc_state_db::PruningMode; @@ -522,7 +524,7 @@ impl BlockchainDb { } } - fn update_block_gap(&self, gap: Option<(NumberFor, NumberFor)>) { + fn update_block_gap(&self, gap: Option>>) { let mut meta = self.meta.write(); meta.block_gap = gap; } @@ -832,6 +834,7 @@ pub struct BlockImportOperation { finalized_blocks: Vec<(Block::Hash, Option)>, set_head: Option, commit_state: bool, + create_gap: bool, index_ops: Vec, } @@ -986,6 +989,10 @@ impl sc_client_api::backend::BlockImportOperation self.index_ops = index_ops; Ok(()) } + + fn set_create_gap(&mut self, create_gap: bool) { + self.create_gap = create_gap; + } } struct StorageDb { @@ -1671,35 +1678,57 @@ impl Backend { ); } - if let Some((mut start, end)) = block_gap { - if number == start { - start += One::one(); - utils::insert_number_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - if start > end { - transaction.remove(columns::META, meta_keys::BLOCK_GAP); - block_gap = None; - debug!(target: "db", "Removed block gap."); - } else { - block_gap = Some((start, end)); - debug!(target: "db", "Update block gap. {block_gap:?}"); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &(start, end).encode(), - ); - } - block_gap_updated = true; + if let Some(mut gap) = block_gap { + match gap.gap_type { + BlockGapType::MissingHeaderAndBody => + if number == gap.start { + gap.start += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + if gap.start > gap.end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + block_gap = Some(gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP, + &gap.encode(), + ); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + } + block_gap_updated = true; + }, + BlockGapType::MissingBody => { + unreachable!("Unsupported block gap. 
TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") + }, } - } else if number > best_num + One::one() && - number > One::one() && self.blockchain.header(parent_hash)?.is_none() + } else if operation.create_gap && + number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() { - let gap = (best_num + One::one(), number - One::one()); + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); block_gap = Some(gap); block_gap_updated = true; debug!(target: "db", "Detected block gap {block_gap:?}"); @@ -2049,6 +2078,7 @@ impl sc_client_api::backend::Backend for Backend { finalized_blocks: Vec::new(), set_head: None, commit_state: false, + create_gap: true, index_ops: Default::default(), }) } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index b532e0d46662..0b591c967e60 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -25,10 +25,14 @@ use log::{debug, info}; use crate::{Database, DatabaseSource, DbHash}; use codec::Decode; +use sc_client_api::blockchain::{BlockGap, BlockGapType}; use sp_database::Transaction; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero}, + traits::{ + Block as BlockT, Header as HeaderT, NumberFor, UniqueSaturatedFrom, UniqueSaturatedInto, + Zero, + }, }; use sp_trie::DBValue; @@ -38,6 +42,9 @@ pub const NUM_COLUMNS: u32 = 13; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; +/// Current block gap version. +pub const BLOCK_GAP_CURRENT_VERSION: u32 = 1; + /// Keys of entries in COLUMN_META. pub mod meta_keys { /// Type of storage (full or light). @@ -50,6 +57,8 @@ pub mod meta_keys { pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; /// Block gap. pub const BLOCK_GAP: &[u8; 3] = b"gap"; + /// Block gap version. + pub const BLOCK_GAP_VERSION: &[u8; 7] = b"gap_ver"; /// Genesis block hash. pub const GENESIS_HASH: &[u8; 3] = b"gen"; /// Leaves prefix list key. @@ -73,8 +82,8 @@ pub struct Meta { pub genesis_hash: H, /// Finalized state, if any pub finalized_state: Option<(H, N)>, - /// Block gap, start and end inclusive, if any. - pub block_gap: Option<(N, N)>, + /// Block gap, if any. 
+ pub block_gap: Option>, } /// A block lookup key: used for canonical lookup from block number to hash @@ -197,7 +206,7 @@ fn open_database_at( open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, DatabaseSource::Custom { db, require_create_flag } => { if *require_create_flag && !create { - return Err(OpenDbError::DoesNotExist) + return Err(OpenDbError::DoesNotExist); } db.clone() }, @@ -364,7 +373,7 @@ pub fn check_database_type( return Err(OpenDbError::UnexpectedDbType { expected: db_type, found: stored_type.to_owned(), - }) + }); }, None => { let mut transaction = Transaction::new(); @@ -515,9 +524,31 @@ where } else { None }; - let block_gap = db - .get(COLUMN_META, meta_keys::BLOCK_GAP) - .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); + let block_gap = match db + .get(COLUMN_META, meta_keys::BLOCK_GAP_VERSION) + .and_then(|d| u32::decode(&mut d.as_slice()).ok()) + { + None => { + let old_block_gap: Option<(NumberFor, NumberFor)> = db + .get(COLUMN_META, meta_keys::BLOCK_GAP) + .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); + + old_block_gap.map(|(start, end)| BlockGap { + start, + end, + gap_type: BlockGapType::MissingHeaderAndBody, + }) + }, + Some(version) => match version { + BLOCK_GAP_CURRENT_VERSION => db + .get(COLUMN_META, meta_keys::BLOCK_GAP) + .and_then(|d| Decode::decode(&mut d.as_slice()).ok()), + v => + return Err(sp_blockchain::Error::Backend(format!( + "Unsupported block gap DB version: {v}" + ))), + }, + }; debug!(target: "db", "block_gap={:?}", block_gap); Ok(Meta { diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index be8344ba79b7..77dfc09c8807 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -480,7 +480,7 @@ mod tests { let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(1, version.transaction_version); - assert_eq!(0, version.state_version); + assert_eq!(0, version.system_version); } #[test] @@ -507,12 +507,12 @@ mod tests { impl_version: 1, apis: create_apis_vec!([(>::ID, 3)]), transaction_version: 3, - state_version: 4, + system_version: 4, }; let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(3, version.transaction_version); - assert_eq!(0, version.state_version); + assert_eq!(0, version.system_version); let old_runtime_version = RuntimeVersion { spec_name: "test".into(), @@ -522,12 +522,12 @@ mod tests { impl_version: 1, apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 3, - state_version: 4, + system_version: 4, }; let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(3, version.transaction_version); - assert_eq!(4, version.state_version); + assert_eq!(4, version.system_version); } #[test] @@ -545,7 +545,7 @@ mod tests { impl_version: 100, apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 100, - state_version: 1, + system_version: 1, }; let embedded = sp_version::embed::embed_runtime_version(&wasm, runtime_version.clone()) diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 62d5f0fb6f06..bf2005df34d7 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -243,11 +243,9 @@ impl Discovery { ) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option) { let (ping_config, ping_event_stream) = PingConfig::default(); let user_agent = format!("{} ({})", config.client_version, 
config.node_name); - let (identify_config, identify_event_stream) = IdentifyConfig::new( - "/substrate/1.0".to_string(), - Some(user_agent), - config.public_addresses.clone().into_iter().map(Into::into).collect(), - ); + + let (identify_config, identify_event_stream) = + IdentifyConfig::new("/substrate/1.0".to_string(), Some(user_agent)); let (mdns_config, mdns_event_stream) = match config.transport { crate::config::TransportConfig::Normal { enable_mdns, .. } => match enable_mdns { diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 521f1a5fe0f7..277f0759729c 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -54,6 +54,7 @@ use libp2p::kad::{PeerRecord, Record as P2PRecord, RecordKey}; use litep2p::{ config::ConfigBuilder, crypto::ed25519::Keypair, + error::{DialError, NegotiationError}, executor::Executor, protocol::{ libp2p::{ @@ -64,15 +65,14 @@ use litep2p::{ }, transport::{ tcp::config::Config as TcpTransportConfig, - websocket::config::Config as WebSocketTransportConfig, Endpoint, + websocket::config::Config as WebSocketTransportConfig, ConnectionLimitsConfig, Endpoint, }, types::{ multiaddr::{Multiaddr, Protocol}, ConnectionId, }, - Error as Litep2pError, Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, + Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, }; -use parking_lot::RwLock; use prometheus_endpoint::Registry; use sc_client_api::BlockBackend; @@ -183,9 +183,6 @@ pub struct Litep2pNetworkBackend { /// Prometheus metrics. metrics: Option, - - /// External addresses. - external_addresses: Arc>>, } impl Litep2pNetworkBackend { @@ -557,6 +554,9 @@ impl NetworkBackend for Litep2pNetworkBac .with_libp2p_ping(ping_config) .with_libp2p_identify(identify_config) .with_libp2p_kademlia(kademlia_config) + .with_connection_limits(ConnectionLimitsConfig::default().max_incoming_connections( + Some(crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING as usize), + )) .with_executor(executor); if let Some(config) = maybe_mdns_config { @@ -570,15 +570,22 @@ impl NetworkBackend for Litep2pNetworkBac let litep2p = Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?; - let external_addresses: Arc>> = Arc::new(RwLock::new( - HashSet::from_iter(network_config.public_addresses.iter().cloned().map(Into::into)), - )); litep2p.listen_addresses().for_each(|address| { log::debug!(target: LOG_TARGET, "listening on: {address}"); listen_addresses.write().insert(address.clone()); }); + let public_addresses = litep2p.public_addresses(); + for address in network_config.public_addresses.iter() { + if let Err(err) = public_addresses.add_address(address.clone().into()) { + log::warn!( + target: LOG_TARGET, + "failed to add public address {address:?}: {err:?}", + ); + } + } + let network_service = Arc::new(Litep2pNetworkService::new( local_peer_id, keypair.clone(), @@ -588,7 +595,7 @@ impl NetworkBackend for Litep2pNetworkBac block_announce_protocol.clone(), request_response_senders, Arc::clone(&listen_addresses), - Arc::clone(&external_addresses), + public_addresses, )); // register rest of the metrics now that `Litep2p` has been created @@ -614,7 +621,6 @@ impl NetworkBackend for Litep2pNetworkBac event_streams: out_events::OutChannels::new(None)?, peers: HashMap::new(), litep2p, - external_addresses, }) } @@ -917,10 +923,16 @@ impl NetworkBackend for Litep2pNetworkBac self.discovery.add_self_reported_address(peer, supported_protocols, listen_addresses).await; } 
Some(DiscoveryEvent::ExternalAddressDiscovered { address }) => { - let mut addresses = self.external_addresses.write(); - - if addresses.insert(address.clone()) { - log::info!(target: LOG_TARGET, "🔍 Discovered new external address for our node: {address}"); + match self.litep2p.public_addresses().add_address(address.clone().into()) { + Ok(inserted) => if inserted { + log::info!(target: LOG_TARGET, "🔍 Discovered new external address for our node: {address}"); + }, + Err(err) => { + log::warn!( + target: LOG_TARGET, + "🔍 Failed to add discovered external address {address:?}: {err:?}", + ); + }, } } Some(DiscoveryEvent::Ping { peer, rtt }) => { @@ -1006,20 +1018,40 @@ impl NetworkBackend for Litep2pNetworkBac } } Some(Litep2pEvent::DialFailure { address, error }) => { - log::trace!( + log::debug!( target: LOG_TARGET, "failed to dial peer at {address:?}: {error:?}", ); - let reason = match error { - Litep2pError::PeerIdMismatch(_, _) => "invalid-peer-id", - Litep2pError::Timeout | Litep2pError::TransportError(_) | - Litep2pError::IoError(_) | Litep2pError::WebSocket(_) => "transport-error", - _ => "other", - }; + if let Some(metrics) = &self.metrics { + let reason = match error { + DialError::Timeout => "timeout", + DialError::AddressError(_) => "invalid-address", + DialError::DnsError(_) => "cannot-resolve-dns", + DialError::NegotiationError(error) => match error { + NegotiationError::Timeout => "timeout", + NegotiationError::PeerIdMissing => "missing-peer-id", + NegotiationError::StateMismatch => "state-mismatch", + NegotiationError::PeerIdMismatch(_,_) => "peer-id-mismatch", + NegotiationError::MultistreamSelectError(_) => "multistream-select-error", + NegotiationError::SnowError(_) => "noise-error", + NegotiationError::ParseError(_) => "parse-error", + NegotiationError::IoError(_) => "io-error", + NegotiationError::WebSocket(_) => "websocket-error", + } + }; + + metrics.pending_connections_errors_total.with_label_values(&[&reason]).inc(); + } + } + Some(Litep2pEvent::ListDialFailures { errors }) => { + log::debug!( + target: LOG_TARGET, + "failed to dial peer on multiple addresses {errors:?}", + ); if let Some(metrics) = &self.metrics { - metrics.pending_connections_errors_total.with_label_values(&[reason]).inc(); + metrics.pending_connections_errors_total.with_label_values(&["transport-errors"]).inc(); } } _ => {} diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 67fc44e6bfe0..693217f5ad94 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -36,7 +36,10 @@ use crate::litep2p::Record; use codec::DecodeAll; use futures::{channel::oneshot, stream::BoxStream}; use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; -use litep2p::{crypto::ed25519::Keypair, types::multiaddr::Multiaddr as LiteP2pMultiaddr}; +use litep2p::{ + addresses::PublicAddresses, crypto::ed25519::Keypair, + types::multiaddr::Multiaddr as LiteP2pMultiaddr, +}; use parking_lot::RwLock; use sc_network_common::{ @@ -196,7 +199,7 @@ pub struct Litep2pNetworkService { listen_addresses: Arc>>, /// External addresses.
- external_addresses: Arc>>, + external_addresses: PublicAddresses, } impl Litep2pNetworkService { @@ -210,7 +213,7 @@ impl Litep2pNetworkService { block_announce_protocol: ProtocolName, request_response_protocols: HashMap>, listen_addresses: Arc>>, - external_addresses: Arc>>, + external_addresses: PublicAddresses, ) -> Self { Self { local_peer_id, @@ -323,9 +326,8 @@ impl NetworkStatusProvider for Litep2pNetworkService { .collect(), external_addresses: self .external_addresses - .read() - .iter() - .cloned() + .get_addresses() + .into_iter() .map(|a| Multiaddr::from(a).into()) .collect(), connected_peers: HashMap::new(), @@ -491,7 +493,7 @@ impl NetworkEventStream for Litep2pNetworkService { impl NetworkStateInfo for Litep2pNetworkService { fn external_addresses(&self) -> Vec { - self.external_addresses.read().iter().cloned().map(Into::into).collect() + self.external_addresses.get_addresses().into_iter().map(Into::into).collect() } fn listen_addresses(&self) -> Vec { diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index a77acb464144..bfd7a60ef9fe 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -29,8 +29,10 @@ use crate::{ use futures::{channel::oneshot, future::BoxFuture, stream::FuturesUnordered, StreamExt}; use litep2p::{ + error::{ImmediateDialError, NegotiationError, SubstreamError}, protocol::request_response::{ - DialOptions, RequestResponseError, RequestResponseEvent, RequestResponseHandle, + DialOptions, RejectReason, RequestResponseError, RequestResponseEvent, + RequestResponseHandle, }, types::RequestId, }; @@ -372,7 +374,32 @@ impl RequestResponseProtocol { let status = match error { RequestResponseError::NotConnected => Some((RequestFailure::NotConnected, "not-connected")), - RequestResponseError::Rejected => Some((RequestFailure::Refused, "rejected")), + RequestResponseError::Rejected(reason) => { + let reason = match reason { + RejectReason::ConnectionClosed => "connection-closed", + RejectReason::SubstreamClosed => "substream-closed", + RejectReason::SubstreamOpenError(substream_error) => match substream_error { + SubstreamError::NegotiationError(NegotiationError::Timeout) => + "substream-timeout", + _ => "substream-open-error", + }, + RejectReason::DialFailed(None) => "dial-failed", + RejectReason::DialFailed(Some(ImmediateDialError::AlreadyConnected)) => + "dial-already-connected", + RejectReason::DialFailed(Some(ImmediateDialError::PeerIdMissing)) => + "dial-peerid-missing", + RejectReason::DialFailed(Some(ImmediateDialError::TriedToDialSelf)) => + "dial-tried-to-dial-self", + RejectReason::DialFailed(Some(ImmediateDialError::NoAddressAvailable)) => + "dial-no-address-available", + RejectReason::DialFailed(Some(ImmediateDialError::TaskClosed)) => + "dial-task-closed", + RejectReason::DialFailed(Some(ImmediateDialError::ChannelClogged)) => + "dial-channel-clogged", + }; + + Some((RequestFailure::Refused, reason)) + }, RequestResponseError::Timeout => Some((RequestFailure::Network(OutboundFailure::Timeout), "timeout")), RequestResponseError::Canceled => { diff --git a/substrate/client/network/src/litep2p/shim/request_response/tests.rs b/substrate/client/network/src/litep2p/shim/request_response/tests.rs index e3e82aa395c5..78b6ef0a481c 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/tests.rs +++ 
b/substrate/client/network/src/litep2p/shim/request_response/tests.rs @@ -271,7 +271,12 @@ async fn too_many_inbound_requests() { match handle2.next().await { Some(RequestResponseEvent::RequestFailed { peer, error, .. }) => { assert_eq!(peer, peer1); - assert_eq!(error, RequestResponseError::Rejected); + assert_eq!( + error, + RequestResponseError::Rejected( + litep2p::protocol::request_response::RejectReason::SubstreamClosed + ) + ); }, event => panic!("invalid event: {event:?}"), } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 4b6ccb085834..86c1a7abf744 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -33,7 +33,7 @@ use crate::{ }, strategy::{ warp::{EncodedProof, WarpProofRequest, WarpSyncConfig}, - StrategyKey, SyncingAction, SyncingConfig, SyncingStrategy, + PolkadotSyncingStrategy, StrategyKey, SyncingAction, SyncingConfig, SyncingStrategy, }, types::{ BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, @@ -189,7 +189,7 @@ pub struct Peer { pub struct SyncingEngine { /// Syncing strategy. - strategy: SyncingStrategy, + strategy: PolkadotSyncingStrategy, /// Blockchain client. client: Arc, @@ -389,7 +389,8 @@ where ); // Initialize syncing strategy. - let strategy = SyncingStrategy::new(syncing_config, client.clone(), warp_sync_config)?; + let strategy = + PolkadotSyncingStrategy::new(syncing_config, client.clone(), warp_sync_config)?; let block_announce_protocol_name = block_announce_config.protocol_name().clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); @@ -697,7 +698,7 @@ where number, ) }, - // Nothing to do, this is handled internally by `SyncingStrategy`. + // Nothing to do, this is handled internally by `PolkadotSyncingStrategy`. SyncingAction::Finished => {}, } } diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index ad3a9461c93b..f8d6976bbaa0 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! [`SyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`] +//! [`PolkadotSyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`] //! and specific syncing algorithms. pub mod chain_sync; @@ -29,7 +29,7 @@ use crate::{ types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncStatus}, LOG_TARGET, }; -use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; +use chain_sync::{ChainSync, ChainSyncMode}; use log::{debug, error, info}; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; @@ -59,6 +59,108 @@ fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode { } } +/// Syncing strategy for the syncing engine to use. +pub trait SyncingStrategy: Send +where + B: BlockT, +{ + /// Notify syncing state machine that a new sync peer has connected. + fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor); + + /// Notify that a sync peer has disconnected. + fn remove_peer(&mut self, peer_id: &PeerId); + + /// Submit a validated block announcement. + /// + /// Returns new best hash & best number of the peer if they are updated.
+ #[must_use] + fn on_validated_block_announce( + &mut self, + is_best: bool, + peer_id: PeerId, + announce: &BlockAnnounce, + ) -> Option<(B::Hash, NumberFor)>; + + /// Configure an explicit fork sync request in case external code has detected that there is a + /// stale fork missing. + /// + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// + /// Passing an empty `peers` set effectively removes the sync request. + fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor); + + /// Request extra justification. + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor); + + /// Clear extra justification requests. + fn clear_justification_requests(&mut self); + + /// Report a justification import (successful or not). + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool); + + /// Process block response. + fn on_block_response( + &mut self, + peer_id: PeerId, + key: StrategyKey, + request: BlockRequest, + blocks: Vec>, + ); + + /// Process state response. + fn on_state_response( + &mut self, + peer_id: PeerId, + key: StrategyKey, + response: OpaqueStateResponse, + ); + + /// Process warp proof response. + fn on_warp_proof_response( + &mut self, + peer_id: &PeerId, + key: StrategyKey, + response: EncodedProof, + ); + + /// A batch of blocks has been processed, with or without errors. + /// + /// Call this when a batch of blocks has been processed by the import queue, with or + /// without errors. + fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ); + + /// Notify a syncing strategy that a block has been finalized. + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor); + + /// Inform sync about a new best imported block. + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor); + + /// Are we in major sync mode? + fn is_major_syncing(&self) -> bool; + + /// Get the number of peers known to the syncing strategy. + fn num_peers(&self) -> usize; + + /// Returns the current sync status. + fn status(&self) -> SyncStatus; + + /// Get the total number of downloaded blocks. + fn num_downloaded_blocks(&self) -> usize; + + /// Get an estimate of the number of parallel sync requests. + fn num_sync_requests(&self) -> usize; + + /// Get actions that should be performed by the owner on the strategy's behalf. + #[must_use] + fn actions(&mut self) -> Result>, ClientError>; +} + /// Syncing configuration containing data for all strategies. #[derive(Clone, Debug)] pub struct SyncingConfig { @@ -104,7 +206,7 @@ pub enum SyncingAction { number: NumberFor, justifications: Justifications, }, - /// Strategy finished. Nothing to do, this is handled by `SyncingStrategy`. + /// Strategy finished. Nothing to do, this is handled by `PolkadotSyncingStrategy`.
Finished, } @@ -140,26 +242,8 @@ impl From> for SyncingAction { } } -impl From> for SyncingAction { - fn from(action: ChainSyncAction) -> Self { - match action { - ChainSyncAction::SendBlockRequest { peer_id, request } => - SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request }, - ChainSyncAction::SendStateRequest { peer_id, request } => - SyncingAction::SendStateRequest { peer_id, key: StrategyKey::ChainSync, request }, - ChainSyncAction::CancelRequest { peer_id } => - SyncingAction::CancelRequest { peer_id, key: StrategyKey::ChainSync }, - ChainSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), - ChainSyncAction::ImportBlocks { origin, blocks } => - SyncingAction::ImportBlocks { origin, blocks }, - ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => - SyncingAction::ImportJustifications { peer_id, hash, number, justifications }, - } - } -} - -/// Proxy to specific syncing strategies. -pub struct SyncingStrategy { +/// Proxy to specific syncing strategies used in Polkadot. +pub struct PolkadotSyncingStrategy { /// Initial syncing configuration. config: SyncingConfig, /// Client used by syncing strategies. @@ -171,11 +255,11 @@ pub struct SyncingStrategy { /// `ChainSync` strategy.` chain_sync: Option>, /// Connected peers and their best blocks used to seed a new strategy when switching to it in - /// [`SyncingStrategy::proceed_to_next`]. + /// `PolkadotSyncingStrategy::proceed_to_next`. peer_best_blocks: HashMap)>, } -impl SyncingStrategy +impl SyncingStrategy for PolkadotSyncingStrategy where B: BlockT, Client: HeaderBackend @@ -186,46 +270,7 @@ where + Sync + 'static, { - /// Initialize a new syncing strategy. - pub fn new( - config: SyncingConfig, - client: Arc, - warp_sync_config: Option>, - ) -> Result { - if let SyncMode::Warp = config.mode { - let warp_sync_config = warp_sync_config - .expect("Warp sync configuration must be supplied in warp sync mode."); - let warp_sync = WarpSync::new(client.clone(), warp_sync_config); - Ok(Self { - config, - client, - warp: Some(warp_sync), - state: None, - chain_sync: None, - peer_best_blocks: Default::default(), - }) - } else { - let chain_sync = ChainSync::new( - chain_sync_mode(config.mode), - client.clone(), - config.max_parallel_downloads, - config.max_blocks_per_request, - config.metrics_registry.as_ref(), - std::iter::empty(), - )?; - Ok(Self { - config, - client, - warp: None, - state: None, - chain_sync: Some(chain_sync), - peer_best_blocks: Default::default(), - }) - } - } - - /// Notify that a new peer has connected. - pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); @@ -233,8 +278,7 @@ where self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); } - /// Notify that a peer has disconnected. - pub fn remove_peer(&mut self, peer_id: &PeerId) { + fn remove_peer(&mut self, peer_id: &PeerId) { self.warp.as_mut().map(|s| s.remove_peer(peer_id)); self.state.as_mut().map(|s| s.remove_peer(peer_id)); self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id)); @@ -242,10 +286,7 @@ where self.peer_best_blocks.remove(peer_id); } - /// Submit a validated block announcement. - /// - /// Returns new best hash & best number of the peer if they are updated. 
- pub fn on_validated_block_announce( + fn on_validated_block_announce( &mut self, is_best: bool, peer_id: PeerId, @@ -278,46 +319,35 @@ where new_best } - /// Configure an explicit fork sync request in case external code has detected that there is a - /// stale fork missing. - pub fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &B::Hash, - number: NumberFor, - ) { + fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { // Fork requests are only handled by `ChainSync`. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.set_sync_fork_request(peers.clone(), hash, number); } } - /// Request extra justification. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { // Justifications can only be requested via `ChainSync`. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.request_justification(hash, number); } } - /// Clear extra justification requests. - pub fn clear_justification_requests(&mut self) { + fn clear_justification_requests(&mut self) { // Justification requests can only be cleared by `ChainSync`. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.clear_justification_requests(); } } - /// Report a justification import (successful or not). - pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { // Only `ChainSync` is interested in justification import. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.on_justification_import(hash, number, success); } } - /// Process block response. - pub fn on_block_response( + fn on_block_response( &mut self, peer_id: PeerId, key: StrategyKey, @@ -329,7 +359,7 @@ where } else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) = (key, &mut self.chain_sync) { - chain_sync.on_block_response(peer_id, request, blocks); + chain_sync.on_block_response(peer_id, key, request, blocks); } else { error!( target: LOG_TARGET, @@ -340,8 +370,7 @@ where } } - /// Process state response. - pub fn on_state_response( + fn on_state_response( &mut self, peer_id: PeerId, key: StrategyKey, @@ -352,7 +381,7 @@ where } else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) = (key, &mut self.chain_sync) { - chain_sync.on_state_response(peer_id, response); + chain_sync.on_state_response(peer_id, key, response); } else { error!( target: LOG_TARGET, @@ -363,8 +392,7 @@ where } } - /// Process warp proof response. - pub fn on_warp_proof_response( + fn on_warp_proof_response( &mut self, peer_id: &PeerId, key: StrategyKey, @@ -382,8 +410,7 @@ where } } - /// A batch of blocks have been processed, with or without errors. - pub fn on_blocks_processed( + fn on_blocks_processed( &mut self, imported: usize, count: usize, @@ -397,24 +424,21 @@ where } } - /// Notify a syncing strategy that a block has been finalized. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { // Only `ChainSync` is interested in block finalization notifications. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.on_block_finalized(hash, number); } } - /// Inform sync about a new best imported block. 
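Responses are routed back by the `StrategyKey` stamped on the original request, so a response that arrives after a strategy switch is detected and dropped instead of being fed to the wrong strategy. A minimal sketch of that routing rule, with invented names:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum StrategyKey {
    Warp,
    ChainSync,
}

struct Router {
    warp_active: bool,
    chain_sync_active: bool,
}

impl Router {
    /// Route a block response to the strategy that issued the request.
    /// A key that matches no live strategy means the response is stale
    /// or misrouted and is logged and dropped.
    fn on_block_response(&self, key: StrategyKey, payload: &str) {
        match key {
            StrategyKey::Warp if self.warp_active => println!("warp handles {payload}"),
            StrategyKey::ChainSync if self.chain_sync_active =>
                println!("chain sync handles {payload}"),
            _ => eprintln!("unexpected {key:?} response: no matching live strategy"),
        }
    }
}

fn main() {
    let router = Router { warp_active: false, chain_sync_active: true };
    router.on_block_response(StrategyKey::ChainSync, "blocks 1..64");
    // Warp sync already finished, so this one is rejected.
    router.on_block_response(StrategyKey::Warp, "stale warp response");
}
```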
- pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { // This is relevant to `ChainSync` only. if let Some(ref mut chain_sync) = self.chain_sync { chain_sync.update_chain_info(best_hash, best_number); } } - // Are we in major sync mode? - pub fn is_major_syncing(&self) -> bool { + fn is_major_syncing(&self) -> bool { self.warp.is_some() || self.state.is_some() || match self.chain_sync { @@ -423,13 +447,11 @@ where } } - /// Get the number of peers known to the syncing strategy. - pub fn num_peers(&self) -> usize { + fn num_peers(&self) -> usize { self.peer_best_blocks.len() } - /// Returns the current sync status. - pub fn status(&self) -> SyncStatus { + fn status(&self) -> SyncStatus { // This function presumes that strategies are executed serially and must be refactored // once we have parallel strategies. if let Some(ref warp) = self.warp { @@ -443,21 +465,17 @@ where } } - /// Get the total number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { + fn num_downloaded_blocks(&self) -> usize { self.chain_sync .as_ref() .map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) } - /// Get an estimate of the number of parallel sync requests. - pub fn num_sync_requests(&self) -> usize { + fn num_sync_requests(&self) -> usize { self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests()) } - /// Get actions that should be performed by the owner on the strategy's behalf - #[must_use] - pub fn actions(&mut self) -> Result>, ClientError> { + fn actions(&mut self) -> Result>, ClientError> { // This function presumes that strategies are executed serially and must be refactored once // we have parallel strategies. let actions: Vec<_> = if let Some(ref mut warp) = self.warp { @@ -465,7 +483,7 @@ where } else if let Some(ref mut state) = self.state { state.actions().map(Into::into).collect() } else if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.actions().map(Into::into).collect() + chain_sync.actions()? } else { unreachable!("At least one syncing strategy is always active; qed") }; @@ -476,6 +494,56 @@ where Ok(actions) } +} + +impl PolkadotSyncingStrategy +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Initialize a new syncing strategy. + pub fn new( + config: SyncingConfig, + client: Arc, + warp_sync_config: Option>, + ) -> Result { + if let SyncMode::Warp = config.mode { + let warp_sync_config = warp_sync_config + .expect("Warp sync configuration must be supplied in warp sync mode."); + let warp_sync = WarpSync::new(client.clone(), warp_sync_config); + Ok(Self { + config, + client, + warp: Some(warp_sync), + state: None, + chain_sync: None, + peer_best_blocks: Default::default(), + }) + } else { + let chain_sync = ChainSync::new( + chain_sync_mode(config.mode), + client.clone(), + config.max_parallel_downloads, + config.max_blocks_per_request, + config.metrics_registry.as_ref(), + std::iter::empty(), + )?; + Ok(Self { + config, + client, + warp: None, + state: None, + chain_sync: Some(chain_sync), + peer_best_blocks: Default::default(), + }) + } + } /// Proceed with the next strategy if the active one finished. 
pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 21e474048625..a8ba5558d1bc 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -35,7 +35,8 @@ use crate::{ strategy::{ disconnected_peers::DisconnectedPeers, state_sync::{ImportResult, StateSync, StateSyncProvider}, - warp::{WarpSyncPhase, WarpSyncProgress}, + warp::{EncodedProof, WarpSyncPhase, WarpSyncProgress}, + StrategyKey, SyncingAction, SyncingStrategy, }, types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus}, LOG_TARGET, @@ -44,7 +45,7 @@ use crate::{ use codec::Encode; use log::{debug, error, info, trace, warn}; use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, @@ -197,28 +198,6 @@ struct GapSync { target: NumberFor, } -/// Action that the parent of [`ChainSync`] should perform after reporting a network or block event. -#[derive(Debug)] -pub enum ChainSyncAction { - /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Send state request to peer. - SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, - /// Drop stale request. - CancelRequest { peer_id: PeerId }, - /// Peer misbehaved. Disconnect, report it and cancel the block request to it. - DropPeer(BadPeer), - /// Import blocks. - ImportBlocks { origin: BlockOrigin, blocks: Vec> }, - /// Import justifications. - ImportJustifications { - peer_id: PeerId, - hash: B::Hash, - number: NumberFor, - justifications: Justifications, - }, -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum ChainSyncMode { @@ -233,50 +212,6 @@ pub enum ChainSyncMode { }, } -/// The main data structure which contains all the state for a chains -/// active syncing strategy. -pub struct ChainSync { - /// Chain client. - client: Arc, - /// The active peers that we are using to sync and their PeerSync status - peers: HashMap>, - disconnected_peers: DisconnectedPeers, - /// A `BlockCollection` of blocks that are being downloaded from peers - blocks: BlockCollection, - /// The best block number in our queue of blocks to import - best_queued_number: NumberFor, - /// The best block hash in our queue of blocks to import - best_queued_hash: B::Hash, - /// Current mode (full/light) - mode: ChainSyncMode, - /// Any extra justification requests. - extra_justifications: ExtraRequests, - /// A set of hashes of blocks that are being downloaded or have been - /// downloaded and are queued for import. - queue_blocks: HashSet, - /// Fork sync targets. - fork_targets: HashMap>, - /// A set of peers for which there might be potential block requests - allowed_requests: AllowedRequests, - /// Maximum number of peers to ask the same blocks in parallel. - max_parallel_downloads: u32, - /// Maximum blocks per request. - max_blocks_per_request: u32, - /// Total number of downloaded blocks. - downloaded_blocks: usize, - /// State sync in progress, if any. 
-	state_sync: Option<StateSync<B, Client>>,
-	/// Enable importing existing blocks. This is used used after the state download to
-	/// catch up to the latest state while re-importing blocks.
-	import_existing: bool,
-	/// Gap download process.
-	gap_sync: Option<GapSync<B>>,
-	/// Pending actions.
-	actions: Vec<ChainSyncAction<B>>,
-	/// Prometheus metrics.
-	metrics: Option<Metrics>,
-}
-
 /// All the data we have about a Peer that we are trying to sync with
 #[derive(Debug, Clone)]
 pub(crate) struct PeerSync<B: BlockT> {
@@ -346,7 +281,59 @@ impl<B: BlockT> PeerSyncState<B> {
 	}
 }
 
-impl<B, Client> ChainSync<B, Client>
+/// The main data structure which contains all the state for a chain's
+/// active syncing strategy.
+pub struct ChainSync<B: BlockT, Client> {
+	/// Chain client.
+	client: Arc<Client>,
+	/// The active peers that we are using to sync and their PeerSync status
+	peers: HashMap<PeerId, PeerSync<B>>,
+	disconnected_peers: DisconnectedPeers,
+	/// A `BlockCollection` of blocks that are being downloaded from peers
+	blocks: BlockCollection<B>,
+	/// The best block number in our queue of blocks to import
+	best_queued_number: NumberFor<B>,
+	/// The best block hash in our queue of blocks to import
+	best_queued_hash: B::Hash,
+	/// Current mode (full/light)
+	mode: ChainSyncMode,
+	/// Any extra justification requests.
+	extra_justifications: ExtraRequests<B>,
+	/// A set of hashes of blocks that are being downloaded or have been
+	/// downloaded and are queued for import.
+	queue_blocks: HashSet<B::Hash>,
+	/// A pending attempt to start the state sync.
+	///
+	/// The initiation of state sync may be deferred in cases where other conditions
+	/// are not yet met when the finalized block notification is received, such as
+	/// when `queue_blocks` is not empty or there are no peers. This field holds the
+	/// necessary information to attempt the state sync at a later point when
+	/// conditions are satisfied.
+	pending_state_sync_attempt: Option<(B::Hash, NumberFor<B>, bool)>,
+	/// Fork sync targets.
+	fork_targets: HashMap<B::Hash, ForkTarget<B>>,
+	/// A set of peers for which there might be potential block requests
+	allowed_requests: AllowedRequests,
+	/// Maximum number of peers to ask the same blocks in parallel.
+	max_parallel_downloads: u32,
+	/// Maximum blocks per request.
+	max_blocks_per_request: u32,
+	/// Total number of downloaded blocks.
+	downloaded_blocks: usize,
+	/// State sync in progress, if any.
+	state_sync: Option<StateSync<B, Client>>,
+	/// Enable importing existing blocks. This is used after the state download to
+	/// catch up to the latest state while re-importing blocks.
+	import_existing: bool,
+	/// Gap download process.
+	gap_sync: Option<GapSync<B>>,
+	/// Pending actions.
+	actions: Vec<SyncingAction<B>>,
+	/// Prometheus metrics.
+	metrics: Option<Metrics>,
+}
+
+impl<B, Client> SyncingStrategy<B> for ChainSync<B, Client>
 where
 	B: BlockT,
 	Client: HeaderBackend<B>
@@ -357,119 +344,608 @@ where
 		+ Sync
 		+ 'static,
 {
-	/// Create a new instance.
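The new `pending_state_sync_attempt` field is a deferred-retry slot: when the finality notification arrives before the preconditions hold (peers connected, import queue drained), the parameters are stashed and the attempt is replayed on a later poll. A self-contained sketch of the pattern, with invented names:

```rust
/// Parameters of a state sync we could not start yet.
struct PendingAttempt {
    target_number: u64,
    skip_proofs: bool,
}

#[derive(Default)]
struct Sync {
    num_peers: usize,
    queued_blocks: usize,
    pending_state_sync_attempt: Option<PendingAttempt>,
}

impl Sync {
    fn on_block_finalized(&mut self, number: u64, skip_proofs: bool) {
        if self.num_peers > 0 && self.queued_blocks == 0 {
            self.attempt_state_sync(number, skip_proofs);
        } else {
            // Preconditions not met yet: stash the attempt and retry later.
            self.pending_state_sync_attempt =
                Some(PendingAttempt { target_number: number, skip_proofs });
        }
    }

    /// Called on every poll of the strategy; retries a stashed attempt
    /// once the preconditions are finally satisfied.
    fn poll(&mut self) {
        if self.num_peers > 0 && self.queued_blocks == 0 {
            if let Some(attempt) = self.pending_state_sync_attempt.take() {
                self.attempt_state_sync(attempt.target_number, attempt.skip_proofs);
            }
        }
    }

    fn attempt_state_sync(&mut self, number: u64, skip_proofs: bool) {
        println!("starting state sync to #{number} (skip_proofs: {skip_proofs})");
    }
}

fn main() {
    let mut sync = Sync { num_peers: 0, queued_blocks: 3, ..Default::default() };
    sync.on_block_finalized(1_000, false); // stashed: no peers, queue busy
    sync.num_peers = 5;
    sync.queued_blocks = 0;
    sync.poll(); // retried: prints the start message
}
```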
- pub fn new( - mode: ChainSyncMode, - client: Arc, - max_parallel_downloads: u32, - max_blocks_per_request: u32, - metrics_registry: Option<&Registry>, - initial_peers: impl Iterator)>, - ) -> Result { - let mut sync = Self { - client, - peers: HashMap::new(), - disconnected_peers: DisconnectedPeers::new(), - blocks: BlockCollection::new(), - best_queued_hash: Default::default(), - best_queued_number: Zero::zero(), - extra_justifications: ExtraRequests::new("justification", metrics_registry), - mode, - queue_blocks: Default::default(), - fork_targets: Default::default(), - allowed_requests: Default::default(), - max_parallel_downloads, - max_blocks_per_request, - downloaded_blocks: 0, - state_sync: None, - import_existing: false, - gap_sync: None, - actions: Vec::new(), - metrics: metrics_registry.and_then(|r| match Metrics::register(r) { - Ok(metrics) => Some(metrics), - Err(err) => { - log::error!( - target: LOG_TARGET, - "Failed to register `ChainSync` metrics {err:?}", - ); - None - }, + fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + match self.add_peer_inner(peer_id, best_hash, best_number) { + Ok(Some(request)) => self.actions.push(SyncingAction::SendBlockRequest { + peer_id, + key: StrategyKey::ChainSync, + request, }), - }; + Ok(None) => {}, + Err(bad_peer) => self.actions.push(SyncingAction::DropPeer(bad_peer)), + } + } - sync.reset_sync_start_point()?; - initial_peers.for_each(|(peer_id, best_hash, best_number)| { - sync.add_peer(peer_id, best_hash, best_number); + fn remove_peer(&mut self, peer_id: &PeerId) { + self.blocks.clear_peer_download(peer_id); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(peer_id) + } + + if let Some(state) = self.peers.remove(peer_id) { + if !state.state.is_available() { + if let Some(bad_peer) = + self.disconnected_peers.on_disconnect_during_request(*peer_id) + { + self.actions.push(SyncingAction::DropPeer(bad_peer)); + } + } + } + + self.extra_justifications.peer_disconnected(peer_id); + self.allowed_requests.set_all(); + self.fork_targets.retain(|_, target| { + target.peers.remove(peer_id); + !target.peers.is_empty() }); + if let Some(metrics) = &self.metrics { + metrics.fork_targets.set(self.fork_targets.len().try_into().unwrap_or(u64::MAX)); + } - Ok(sync) + let blocks = self.ready_blocks(); + + if !blocks.is_empty() { + self.validate_and_queue_blocks(blocks, false); + } } - /// Returns the current sync status. - pub fn status(&self) -> SyncStatus { - let median_seen = self.median_seen(); - let best_seen_block = - median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); - let sync_state = if let Some(target) = median_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing - // if the same can be said about queued blocks. - let best_block = self.client.info().best_number; - if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { - // If target is not queued, we're downloading, otherwise importing. 
- if target > self.best_queued_number { - SyncState::Downloading { target } - } else { - SyncState::Importing { target } - } - } else { - SyncState::Idle - } + fn on_validated_block_announce( + &mut self, + is_best: bool, + peer_id: PeerId, + announce: &BlockAnnounce, + ) -> Option<(B::Hash, NumberFor)> { + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { + peer } else { - SyncState::Idle + error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID {peer_id}"); + return Some((hash, number)) }; - let warp_sync_progress = self.gap_sync.as_ref().map(|gap_sync| WarpSyncProgress { - phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), - total_bytes: 0, + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", peer_id); + return None + } + + let peer_info = is_best.then(|| { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + + (hash, number) }); - SyncStatus { - state: sync_state, - best_seen_block, - num_peers: self.peers.len() as u32, - queued_blocks: self.queue_blocks.len() as u32, - state_sync: self.state_sync.as_ref().map(|s| s.progress()), - warp_sync: warp_sync_progress, + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. + if is_best { + if known && self.best_queued_number >= number { + self.update_peer_common_number(&peer_id, number); + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number + { + self.update_peer_common_number(&peer_id, number.saturating_sub(One::one())); + } } - } + self.allowed_requests.add(&peer_id); - /// Get an estimate of the number of parallel sync requests. - pub fn num_sync_requests(&self) -> usize { - self.fork_targets - .values() - .filter(|f| f.number <= self.best_queued_number) - .count() - } + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: LOG_TARGET, "Known block announce from {}: {}", peer_id, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(peer_id); + } + return peer_info + } - /// Get the total number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { - self.downloaded_blocks - } + if ancient_parent { + trace!( + target: LOG_TARGET, + "Ignored ancient block announced from {}: {} {:?}", + peer_id, + hash, + announce.header, + ); + return peer_info + } + + if self.status().state == SyncState::Idle { + trace!( + target: LOG_TARGET, + "Added sync target for block announced from {}: {} {:?}", + peer_id, + hash, + announce.summary(), + ); + self.fork_targets + .entry(hash) + .or_insert_with(|| { + if let Some(metrics) = &self.metrics { + metrics.fork_targets.inc(); + } + + ForkTarget { + number, + parent_hash: Some(*announce.header.parent_hash()), + peers: Default::default(), + } + }) + .peers + .insert(peer_id); + } + + peer_info + } + + // The implementation is similar to `on_validated_block_announce` with unknown parent hash. 
+ fn set_sync_fork_request( + &mut self, + mut peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + if peers.is_empty() { + peers = self + .peers + .iter() + // Only request blocks from peers who are ahead or on a par. + .filter(|(_, peer)| peer.best_number >= number) + .map(|(id, _)| *id) + .collect(); + + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with no peers specified. \ + Syncing from these peers {peers:?} instead.", + ); + } else { + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with {peers:?}", + ); + } + + if self.is_known(hash) { + debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); + return + } + + trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); + for peer_id in &peers { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + continue + } + + if number > peer.best_number { + peer.best_number = number; + peer.best_hash = *hash; + } + self.allowed_requests.add(peer_id); + } + } + + self.fork_targets + .entry(*hash) + .or_insert_with(|| { + if let Some(metrics) = &self.metrics { + metrics.fork_targets.inc(); + } + + ForkTarget { number, peers: Default::default(), parent_hash: None } + }) + .peers + .extend(peers); + } + + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_justifications + .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) + } + + fn clear_justification_requests(&mut self) { + self.extra_justifications.reset(); + } + + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); + self.allowed_requests.set_all(); + } + + fn on_block_response( + &mut self, + peer_id: PeerId, + key: StrategyKey, + request: BlockRequest, + blocks: Vec>, + ) { + if key != StrategyKey::ChainSync { + error!( + target: LOG_TARGET, + "`on_block_response()` called with unexpected key {key:?} for chain sync", + ); + debug_assert!(false); + } + let block_response = BlockResponse:: { id: request.id, blocks }; + + let blocks_range = || match ( + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!( + target: LOG_TARGET, + "BlockResponse {} from {} with {} blocks {}", + block_response.id, + peer_id, + block_response.blocks.len(), + blocks_range(), + ); + + let res = if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(peer_id, block_response) + } else { + self.on_block_data(&peer_id, Some(request), block_response) + }; + + if let Err(bad_peer) = res { + self.actions.push(SyncingAction::DropPeer(bad_peer)); + } + } + + fn on_state_response( + &mut self, + peer_id: PeerId, + key: StrategyKey, + response: OpaqueStateResponse, + ) { + if key != StrategyKey::ChainSync { + error!( + target: LOG_TARGET, + "`on_state_response()` called with unexpected key {key:?} for chain sync", + ); + debug_assert!(false); + } + if let Err(bad_peer) = self.on_state_data(&peer_id, response) { + 
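`fork_targets` maps each requested fork head to the set of peers believed to have it; an empty `peers` argument falls back to every connected peer whose announced best is at or above the fork number. A simplified sketch of that bookkeeping (hashes and peer IDs are plain arrays and integers here):

```rust
use std::collections::{HashMap, HashSet};

type PeerId = u64;
type Hash = [u8; 32];

struct ForkTarget {
    number: u64,
    peers: HashSet<PeerId>,
}

struct Sync {
    /// Peer -> best announced block number.
    peers: HashMap<PeerId, u64>,
    fork_targets: HashMap<Hash, ForkTarget>,
}

impl Sync {
    fn set_sync_fork_request(&mut self, mut peers: Vec<PeerId>, hash: Hash, number: u64) {
        if peers.is_empty() {
            // No peers specified: fall back to everyone at or above the fork.
            peers = self
                .peers
                .iter()
                .filter(|(_, best)| **best >= number)
                .map(|(id, _)| *id)
                .collect();
        }

        // Record (or extend) the target; requests are generated elsewhere.
        self.fork_targets
            .entry(hash)
            .or_insert_with(|| ForkTarget { number, peers: HashSet::new() })
            .peers
            .extend(peers);
    }
}

fn main() {
    let mut sync = Sync {
        peers: HashMap::from([(1, 50), (2, 200)]),
        fork_targets: HashMap::new(),
    };
    let fork_head: Hash = [0xab; 32];
    sync.set_sync_fork_request(Vec::new(), fork_head, 100);
    // Only peer 2 is at or above #100, so only it is recorded.
    assert_eq!(sync.fork_targets[&fork_head].peers.len(), 1);
}
```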
self.actions.push(SyncingAction::DropPeer(bad_peer)); + } + } + + fn on_warp_proof_response( + &mut self, + _peer_id: &PeerId, + _key: StrategyKey, + _response: EncodedProof, + ) { + error!( + target: LOG_TARGET, + "`on_warp_proof_response()` called for chain sync strategy", + ); + debug_assert!(false); + } + + fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + trace!(target: LOG_TARGET, "Imported {imported} of {count}"); + + let mut has_error = false; + for (_, hash) in &results { + if self.queue_blocks.remove(hash) { + if let Some(metrics) = &self.metrics { + metrics.queued_blocks.dec(); + } + } + self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } + } + for (result, hash) in results { + if has_error { + break + } + + has_error |= result.is_err(); + + match result { + Ok(BlockImportStatus::ImportedKnown(number, peer_id)) => + if let Some(peer) = peer_id { + self.update_peer_common_number(&peer, number); + }, + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer_id)) => { + if aux.clear_justification_requests { + trace!( + target: LOG_TARGET, + "Block imported clears all pending justification requests {number}: {hash:?}", + ); + self.clear_justification_requests(); + } + + if aux.needs_justification { + trace!( + target: LOG_TARGET, + "Block imported but requires justification {number}: {hash:?}", + ); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(ref peer) = peer_id { + warn!("💔 Sent block with bad justification to import"); + self.actions.push(SyncingAction::DropPeer(BadPeer( + *peer, + rep::BAD_JUSTIFICATION, + ))); + } + } + + if let Some(peer) = peer_id { + self.update_peer_common_number(&peer, number); + } + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target_hash() == hash); + if state_sync_complete { + info!( + target: LOG_TARGET, + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = ChainSyncMode::Full; + self.restart(); + } + let gap_sync_complete = + self.gap_sync.as_ref().map_or(false, |s| s.target == number); + if gap_sync_complete { + info!( + target: LOG_TARGET, + "Block history download is complete." 
+ ); + self.gap_sync = None; + } + }, + Err(BlockImportError::IncompleteHeader(peer_id)) => + if let Some(peer) = peer_id { + warn!( + target: LOG_TARGET, + "💔 Peer sent block with incomplete header to import", + ); + self.actions + .push(SyncingAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); + self.restart(); + }, + Err(BlockImportError::VerificationFailed(peer_id, e)) => { + let extra_message = peer_id + .map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); + + warn!( + target: LOG_TARGET, + "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", + ); + + if let Some(peer) = peer_id { + self.actions + .push(SyncingAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); + } + + self.restart(); + }, + Err(BlockImportError::BadBlock(peer_id)) => + if let Some(peer) = peer_id { + warn!( + target: LOG_TARGET, + "💔 Block {hash:?} received from peer {peer} has been blacklisted", + ); + self.actions.push(SyncingAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); + }, + Err(BlockImportError::MissingState) => { + // This may happen if the chain we were requesting upon has been discarded + // in the meantime because other chain has been finalized. + // Don't mark it as bad as it still may be synced if explicitly requested. + trace!(target: LOG_TARGET, "Obsolete block {hash:?}"); + }, + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { + warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); + self.state_sync = None; + self.restart(); + }, + Err(BlockImportError::Cancelled) => {}, + }; + } + + self.allowed_requests.set_all(); + } + + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let ChainSyncMode::LightState { skip_proofs, .. } = &self.mode { + if self.state_sync.is_none() { + if !self.peers.is_empty() && self.queue_blocks.is_empty() { + self.attempt_state_sync(*hash, number, *skip_proofs); + } else { + self.pending_state_sync_attempt.replace((*hash, number, *skip_proofs)); + } + } + } + + if let Err(err) = r { + warn!( + target: LOG_TARGET, + "💔 Error cleaning up pending extra justification data requests: {err}", + ); + } + } + + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + self.on_block_queued(best_hash, best_number); + } + + fn is_major_syncing(&self) -> bool { + self.status().state.is_major_syncing() + } + + fn num_peers(&self) -> usize { + self.peers.len() + } + + fn status(&self) -> SyncStatus { + let median_seen = self.median_seen(); + let best_seen_block = + median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); + let sync_state = if let Some(target) = median_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing + // if the same can be said about queued blocks. + let best_block = self.client.info().best_number; + if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { + // If target is not queued, we're downloading, otherwise importing. 
+ if target > self.best_queued_number { + SyncState::Downloading { target } + } else { + SyncState::Importing { target } + } + } else { + SyncState::Idle + } + } else { + SyncState::Idle + }; + + let warp_sync_progress = self.gap_sync.as_ref().map(|gap_sync| WarpSyncProgress { + phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), + total_bytes: 0, + }); + + SyncStatus { + state: sync_state, + best_seen_block, + num_peers: self.peers.len() as u32, + queued_blocks: self.queue_blocks.len() as u32, + state_sync: self.state_sync.as_ref().map(|s| s.progress()), + warp_sync: warp_sync_progress, + } + } + + fn num_downloaded_blocks(&self) -> usize { + self.downloaded_blocks + } + + fn num_sync_requests(&self) -> usize { + self.fork_targets + .values() + .filter(|f| f.number <= self.best_queued_number) + .count() + } + + fn actions(&mut self) -> Result>, ClientError> { + if !self.peers.is_empty() && self.queue_blocks.is_empty() { + if let Some((hash, number, skip_proofs)) = self.pending_state_sync_attempt.take() { + self.attempt_state_sync(hash, number, skip_proofs); + } + } + + let block_requests = self.block_requests().into_iter().map(|(peer_id, request)| { + SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request } + }); + self.actions.extend(block_requests); + + let justification_requests = + self.justification_requests().into_iter().map(|(peer_id, request)| { + SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request } + }); + self.actions.extend(justification_requests); + + let state_request = self.state_request().into_iter().map(|(peer_id, request)| { + SyncingAction::SendStateRequest { peer_id, key: StrategyKey::ChainSync, request } + }); + self.actions.extend(state_request); + + Ok(std::mem::take(&mut self.actions)) + } +} + +impl ChainSync +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Create a new instance. + pub fn new( + mode: ChainSyncMode, + client: Arc, + max_parallel_downloads: u32, + max_blocks_per_request: u32, + metrics_registry: Option<&Registry>, + initial_peers: impl Iterator)>, + ) -> Result { + let mut sync = Self { + client, + peers: HashMap::new(), + disconnected_peers: DisconnectedPeers::new(), + blocks: BlockCollection::new(), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), + extra_justifications: ExtraRequests::new("justification", metrics_registry), + mode, + queue_blocks: Default::default(), + pending_state_sync_attempt: None, + fork_targets: Default::default(), + allowed_requests: Default::default(), + max_parallel_downloads, + max_blocks_per_request, + downloaded_blocks: 0, + state_sync: None, + import_existing: false, + gap_sync: None, + actions: Vec::new(), + metrics: metrics_registry.and_then(|r| match Metrics::register(r) { + Ok(metrics) => Some(metrics), + Err(err) => { + log::error!( + target: LOG_TARGET, + "Failed to register `ChainSync` metrics {err:?}", + ); + None + }, + }), + }; - /// Get the number of peers known to the syncing state machine. - pub fn num_peers(&self) -> usize { - self.peers.len() - } + sync.reset_sync_start_point()?; + initial_peers.for_each(|(peer_id, best_hash, best_number)| { + sync.add_peer(peer_id, best_hash, best_number); + }); - /// Notify syncing state machine that a new sync peer has connected. 
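The `status()` logic above classifies the sync state from the median best block announced by peers: being more than `MAJOR_SYNC_BLOCKS` behind the median means major sync, and whether the target is already queued distinguishes importing from downloading. A runnable distillation of just that classification (the constant's value is chosen arbitrarily for the sketch):

```rust
const MAJOR_SYNC_BLOCKS: u64 = 5;

#[derive(Debug, PartialEq)]
enum SyncState {
    Idle,
    Downloading { target: u64 },
    Importing { target: u64 },
}

/// Median of the best block numbers our peers have announced.
fn median_seen(mut peer_bests: Vec<u64>) -> Option<u64> {
    if peer_bests.is_empty() {
        return None;
    }
    peer_bests.sort_unstable();
    Some(peer_bests[peer_bests.len() / 2])
}

fn sync_state(peer_bests: Vec<u64>, our_best: u64, best_queued: u64) -> SyncState {
    match median_seen(peer_bests) {
        Some(target) if target > our_best && target - our_best > MAJOR_SYNC_BLOCKS => {
            // If the target is not queued yet we are still downloading;
            // otherwise the blocks are queued and we are importing.
            if target > best_queued {
                SyncState::Downloading { target }
            } else {
                SyncState::Importing { target }
            }
        },
        _ => SyncState::Idle,
    }
}

fn main() {
    assert_eq!(sync_state(vec![100, 120, 130], 10, 10), SyncState::Downloading { target: 120 });
    assert_eq!(sync_state(vec![100, 120, 130], 10, 120), SyncState::Importing { target: 120 });
    assert_eq!(sync_state(vec![12, 13, 14], 10, 10), SyncState::Idle);
}
```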
- pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - match self.add_peer_inner(peer_id, best_hash, best_number) { - Ok(Some(request)) => - self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), - Ok(None) => {}, - Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), - } + Ok(sync) } #[must_use] @@ -497,7 +973,7 @@ where "💔 New peer {} with unknown genesis hash {} ({}).", peer_id, best_hash, best_number, ); - return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)) + return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)); } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have @@ -521,7 +997,7 @@ where state: PeerSyncState::Available, }, ); - return Ok(None) + return Ok(None); } // If we are at genesis, just start downloading. @@ -590,91 +1066,6 @@ where } } - /// Inform sync about a new best imported block. - pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { - self.on_block_queued(best_hash, best_number); - } - - /// Request extra justification. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_justifications - .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) - } - - /// Clear extra justification requests. - pub fn clear_justification_requests(&mut self) { - self.extra_justifications.reset(); - } - - /// Configure an explicit fork sync request in case external code has detected that there is a - /// stale fork missing. - /// - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// - /// Passing empty `peers` set effectively removes the sync request. - // The implementation is similar to `on_validated_block_announce` with unknown parent hash. - pub fn set_sync_fork_request( - &mut self, - mut peers: Vec, - hash: &B::Hash, - number: NumberFor, - ) { - if peers.is_empty() { - peers = self - .peers - .iter() - // Only request blocks from peers who are ahead or on a par. - .filter(|(_, peer)| peer.best_number >= number) - .map(|(id, _)| *id) - .collect(); - - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with no peers specified. \ - Syncing from these peers {peers:?} instead.", - ); - } else { - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with {peers:?}", - ); - } - - if self.is_known(hash) { - debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); - return - } - - trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); - for peer_id in &peers { - if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch { .. } = peer.state { - continue - } - - if number > peer.best_number { - peer.best_number = number; - peer.best_hash = *hash; - } - self.allowed_requests.add(peer_id); - } - } - - self.fork_targets - .entry(*hash) - .or_insert_with(|| { - if let Some(metrics) = &self.metrics { - metrics.fork_targets.inc(); - } - - ForkTarget { number, peers: Default::default(), parent_hash: None } - }) - .peers - .extend(peers); - } - /// Submit a block response for processing. 
#[must_use] fn on_block_data( @@ -748,14 +1139,14 @@ where blocks } else { debug!(target: LOG_TARGET, "Unexpected gap block response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + return Err(BadPeer(*peer_id, rep::NO_BLOCK)); } }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { debug!(target: LOG_TARGET, "Empty block response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + return Err(BadPeer(*peer_id, rep::NO_BLOCK)); } validate_blocks::(&blocks, peer_id, Some(request))?; blocks @@ -796,14 +1187,14 @@ where target: LOG_TARGET, "Invalid response when searching for ancestor from {peer_id}", ); - return Err(BadPeer(*peer_id, rep::UNKNOWN_ANCESTOR)) + return Err(BadPeer(*peer_id, rep::UNKNOWN_ANCESTOR)); }, (_, Err(e)) => { info!( target: LOG_TARGET, "❌ Error answering legitimate blockchain query: {e}", ); - return Err(BadPeer(*peer_id, rep::BLOCKCHAIN_READ_ERROR)) + return Err(BadPeer(*peer_id, rep::BLOCKCHAIN_READ_ERROR)); }, }; if matching_hash.is_some() { @@ -837,7 +1228,7 @@ where target: LOG_TARGET, "Ancestry search: genesis mismatch for peer {peer_id}", ); - return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH)) + return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH)); } if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) @@ -848,11 +1239,12 @@ where state: next_state, }; let request = ancestry_request::(next_num); - self.actions.push(ChainSyncAction::SendBlockRequest { + self.actions.push(SyncingAction::SendBlockRequest { peer_id: *peer_id, + key: StrategyKey::ChainSync, request, }); - return Ok(()) + return Ok(()); } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -892,7 +1284,7 @@ where .insert(*peer_id); } peer.state = PeerSyncState::Available; - return Ok(()) + return Ok(()); } }, PeerSyncState::Available | @@ -925,7 +1317,7 @@ where } } else { // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); }; self.validate_and_queue_blocks(new_blocks, gap); @@ -942,240 +1334,59 @@ where ) -> Result<(), BadPeer> { let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { peer - } else { - error!( - target: LOG_TARGET, - "💔 Called on_block_justification with a peer ID of an unknown peer", - ); - return Ok(()) - }; - - self.allowed_requests.add(&peer_id); - if let PeerSyncState::DownloadingJustification(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one justification at a time - let justification = if let Some(block) = response.blocks.into_iter().next() { - if hash != block.hash { - warn!( - target: LOG_TARGET, - "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", - peer_id, - hash, - block.hash, - ); - return Err(BadPeer(peer_id, rep::BAD_JUSTIFICATION)) - } - - block - .justifications - .or_else(|| legacy_justification_mapping(block.justification)) - } else { - // we might have asked the peer for a justification on a block that we assumed it - // had but didn't (regardless of whether it had a justification for it or not). 
- trace!( - target: LOG_TARGET, - "Peer {peer_id:?} provided empty response for justification request {hash:?}", - ); - - None - }; - - if let Some((peer_id, hash, number, justifications)) = - self.extra_justifications.on_response(peer_id, justification) - { - self.actions.push(ChainSyncAction::ImportJustifications { - peer_id, - hash, - number, - justifications, - }); - return Ok(()) - } - } - - Ok(()) - } - - /// Report a justification import (successful or not). - pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications - .try_finalize_root((hash, number), finalization_result, true); - self.allowed_requests.set_all(); - } - - /// Notify sync that a block has been finalized. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let ChainSyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { - // Finalized a recent block. - let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); - heads.sort(); - let median = heads[heads.len() / 2]; - if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(*hash) { - log::debug!( - target: LOG_TARGET, - "Starting state sync for #{number} ({hash})", - ); - self.state_sync = Some(StateSync::new( - self.client.clone(), - header, - None, - None, - *skip_proofs, - )); - self.allowed_requests.set_all(); - } - } - } - } - - if let Err(err) = r { - warn!( - target: LOG_TARGET, - "💔 Error cleaning up pending extra justification data requests: {err}", - ); - } - } - - /// Submit a validated block announcement. - /// - /// Returns new best hash & best number of the peer if they are updated. - #[must_use] - pub fn on_validated_block_announce( - &mut self, - is_best: bool, - peer_id: PeerId, - announce: &BlockAnnounce, - ) -> Option<(B::Hash, NumberFor)> { - let number = *announce.header.number(); - let hash = announce.header.hash(); - let parent_status = - self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { - peer - } else { - error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID {peer_id}"); - return Some((hash, number)) - }; - - if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", peer_id); - return None - } - - let peer_info = is_best.then(|| { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - - (hash, number) - }); - - // If the announced block is the best they have and is not ahead of us, our common number - // is either one further ahead or it's the one they just announced, if we know about it. 
- if is_best { - if known && self.best_queued_number >= number { - self.update_peer_common_number(&peer_id, number); - } else if announce.header.parent_hash() == &self.best_queued_hash || - known_parent && self.best_queued_number >= number - { - self.update_peer_common_number(&peer_id, number.saturating_sub(One::one())); - } - } - self.allowed_requests.add(&peer_id); - - // known block case - if known || self.is_already_downloading(&hash) { - trace!(target: LOG_TARGET, "Known block announce from {}: {}", peer_id, hash); - if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(peer_id); - } - return peer_info - } - - if ancient_parent { - trace!( - target: LOG_TARGET, - "Ignored ancient block announced from {}: {} {:?}", - peer_id, - hash, - announce.header, - ); - return peer_info - } - - if self.status().state == SyncState::Idle { - trace!( - target: LOG_TARGET, - "Added sync target for block announced from {}: {} {:?}", - peer_id, - hash, - announce.summary(), - ); - self.fork_targets - .entry(hash) - .or_insert_with(|| { - if let Some(metrics) = &self.metrics { - metrics.fork_targets.inc(); - } - - ForkTarget { - number, - parent_hash: Some(*announce.header.parent_hash()), - peers: Default::default(), - } - }) - .peers - .insert(peer_id); - } - - peer_info - } + } else { + error!( + target: LOG_TARGET, + "💔 Called on_block_justification with a peer ID of an unknown peer", + ); + return Ok(()); + }; - /// Notify that a sync peer has disconnected. - pub fn remove_peer(&mut self, peer_id: &PeerId) { - self.blocks.clear_peer_download(peer_id); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(peer_id) - } + self.allowed_requests.add(&peer_id); + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; - if let Some(state) = self.peers.remove(peer_id) { - if !state.state.is_available() { - if let Some(bad_peer) = - self.disconnected_peers.on_disconnect_during_request(*peer_id) - { - self.actions.push(ChainSyncAction::DropPeer(bad_peer)); + // We only request one justification at a time + let justification = if let Some(block) = response.blocks.into_iter().next() { + if hash != block.hash { + warn!( + target: LOG_TARGET, + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", + peer_id, + hash, + block.hash, + ); + return Err(BadPeer(peer_id, rep::BAD_JUSTIFICATION)); } - } - } - self.extra_justifications.peer_disconnected(peer_id); - self.allowed_requests.set_all(); - self.fork_targets.retain(|_, target| { - target.peers.remove(peer_id); - !target.peers.is_empty() - }); - if let Some(metrics) = &self.metrics { - metrics.fork_targets.set(self.fork_targets.len().try_into().unwrap_or(u64::MAX)); - } + block + .justifications + .or_else(|| legacy_justification_mapping(block.justification)) + } else { + // we might have asked the peer for a justification on a block that we assumed it + // had but didn't (regardless of whether it had a justification for it or not). 
+ trace!( + target: LOG_TARGET, + "Peer {peer_id:?} provided empty response for justification request {hash:?}", + ); - let blocks = self.ready_blocks(); + None + }; - if !blocks.is_empty() { - self.validate_and_queue_blocks(blocks, false); + if let Some((peer_id, hash, number, justifications)) = + self.extra_justifications.on_response(peer_id, justification) + { + self.actions.push(SyncingAction::ImportJustifications { + peer_id, + hash, + number, + justifications, + }); + return Ok(()); + } } + + Ok(()) } /// Returns the median seen block number. @@ -1249,7 +1460,7 @@ where .set(self.queue_blocks.len().try_into().unwrap_or(u64::MAX)); } - self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks }) + self.actions.push(SyncingAction::ImportBlocks { origin, blocks: new_blocks }) } fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { @@ -1281,7 +1492,7 @@ where for (n, peer) in self.peers.iter_mut() { if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. - continue + continue; } let new_common_number = if peer.best_number >= number { number } else { peer.best_number }; @@ -1328,7 +1539,10 @@ where PeerSyncState::DownloadingGap(_) | PeerSyncState::DownloadingState => { // Cancel a request first, as `add_peer` may generate a new request. - self.actions.push(ChainSyncAction::CancelRequest { peer_id }); + self.actions.push(SyncingAction::CancelRequest { + peer_id, + key: StrategyKey::ChainSync, + }); self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number); }, PeerSyncState::DownloadingJustification(_) => { @@ -1381,7 +1595,7 @@ where } } - if let Some((start, end)) = info.block_gap { + if let Some(BlockGap { start, end, .. }) = info.block_gap { debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}"); self.gap_sync = Some(GapSync { best_queued_number: start - One::one(), @@ -1401,7 +1615,7 @@ where /// What is the status of the block corresponding to the given hash? fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } self.client.block_status(*hash) } @@ -1444,53 +1658,6 @@ where .collect() } - /// Submit blocks received in a response. - pub fn on_block_response( - &mut self, - peer_id: PeerId, - request: BlockRequest, - blocks: Vec>, - ) { - let block_response = BlockResponse:: { id: request.id, blocks }; - - let blocks_range = || match ( - block_response - .blocks - .first() - .and_then(|b| b.header.as_ref().map(|h| h.number())), - block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!( - target: LOG_TARGET, - "BlockResponse {} from {} with {} blocks {}", - block_response.id, - peer_id, - block_response.blocks.len(), - blocks_range(), - ); - - let res = if request.fields == BlockAttributes::JUSTIFICATION { - self.on_block_justification(peer_id, block_response) - } else { - self.on_block_data(&peer_id, Some(request), block_response) - }; - - if let Err(bad_peer) = res { - self.actions.push(ChainSyncAction::DropPeer(bad_peer)); - } - } - - /// Submit a state received in a response. 
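Justification handling keeps at most one outstanding request per peer: an empty response is tolerated (the peer may simply not have the justification), while a response for a different block than requested is punished. A minimal sketch of that bookkeeping, with invented types:

```rust
use std::collections::HashMap;

type PeerId = u64;
type Hash = u64;

enum Outcome {
    /// Hand the justification to the import queue.
    Import { hash: Hash, justification: Vec<u8> },
    /// Peer answered with the wrong block: report and disconnect.
    BadPeer(PeerId),
    /// Empty or unsolicited answer; nothing to do.
    Nothing,
}

#[derive(Default)]
struct JustificationRequests {
    /// At most one in-flight justification request per peer.
    in_flight: HashMap<PeerId, Hash>,
}

impl JustificationRequests {
    fn request(&mut self, peer: PeerId, hash: Hash) {
        self.in_flight.insert(peer, hash);
    }

    fn on_response(&mut self, peer: PeerId, response: Option<(Hash, Vec<u8>)>) -> Outcome {
        let Some(requested) = self.in_flight.remove(&peer) else { return Outcome::Nothing };
        match response {
            Some((hash, justification)) if hash == requested =>
                Outcome::Import { hash, justification },
            // Wrong hash: the peer is misbehaving.
            Some(_) => Outcome::BadPeer(peer),
            // Empty response: the peer did not have the justification.
            None => Outcome::Nothing,
        }
    }
}

fn main() {
    let mut reqs = JustificationRequests::default();
    reqs.request(9, 0xdead);
    if let Outcome::Import { hash, .. } = reqs.on_response(9, Some((0xdead, vec![1, 2, 3]))) {
        println!("queue justification for {hash:#x} for import");
    }
}
```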
- pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { - if let Err(bad_peer) = self.on_state_data(&peer_id, response) { - self.actions.push(ChainSyncAction::DropPeer(bad_peer)); - } - } - /// Get justification requests scheduled by sync to be sent out. fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; @@ -1521,12 +1688,12 @@ where /// Get block requests scheduled by sync to be sent out. fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { if self.allowed_requests.is_empty() || self.state_sync.is_some() { - return Vec::new() + return Vec::new(); } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: LOG_TARGET, "Too many blocks in the queue."); - return Vec::new() + return Vec::new(); } let is_major_syncing = self.status().state.is_major_syncing(); let attrs = self.required_block_attributes(); @@ -1550,7 +1717,7 @@ where !allowed_requests.contains(&id) || !disconnected_peers.is_peer_available(&id) { - return None + return None; } // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from @@ -1648,17 +1815,17 @@ where /// Get a state request scheduled by sync to be sent out (if any). fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { - return None + return None; } if self.state_sync.is_some() && self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { // Only one pending state request is allowed. - return None + return None; } if let Some(sync) = &self.state_sync { if sync.is_complete() { - return None + return None; } for (id, peer) in self.peers.iter_mut() { @@ -1670,7 +1837,7 @@ where let request = sync.next_request(); trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) + return Some((*id, OpaqueStateRequest(Box::new(request)))); } } } @@ -1709,7 +1876,7 @@ where sync.import(*response) } else { debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); }; match import_result { @@ -1728,7 +1895,7 @@ where state: Some(state), }; debug!(target: LOG_TARGET, "State download is complete. Import is queued"); - self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: vec![block] }); + self.actions.push(SyncingAction::ImportBlocks { origin, blocks: vec![block] }); Ok(()) }, ImportResult::Continue => Ok(()), @@ -1739,174 +1906,39 @@ where } } - /// A batch of blocks have been processed, with or without errors. - /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. 
- pub fn on_blocks_processed( + fn attempt_state_sync( &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + finalized_hash: B::Hash, + finalized_number: NumberFor, + skip_proofs: bool, ) { - trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - - let mut has_error = false; - for (_, hash) in &results { - if self.queue_blocks.remove(hash) { - if let Some(metrics) = &self.metrics { - metrics.queued_blocks.dec(); - } - } - self.blocks.clear_queued(hash); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_queued(hash); - } - } - for (result, hash) in results { - if has_error { - break + let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if finalized_number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(finalized_hash) { + log::debug!( + target: LOG_TARGET, + "Starting state sync for #{finalized_number} ({finalized_hash})", + ); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, None, None, skip_proofs)); + self.allowed_requests.set_all(); + } else { + log::error!( + target: LOG_TARGET, + "Failed to start state sync: header for finalized block \ + #{finalized_number} ({finalized_hash}) is not available", + ); + debug_assert!(false); } - - has_error |= result.is_err(); - - match result { - Ok(BlockImportStatus::ImportedKnown(number, peer_id)) => - if let Some(peer) = peer_id { - self.update_peer_common_number(&peer, number); - }, - Ok(BlockImportStatus::ImportedUnknown(number, aux, peer_id)) => { - if aux.clear_justification_requests { - trace!( - target: LOG_TARGET, - "Block imported clears all pending justification requests {number}: {hash:?}", - ); - self.clear_justification_requests(); - } - - if aux.needs_justification { - trace!( - target: LOG_TARGET, - "Block imported but requires justification {number}: {hash:?}", - ); - self.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(ref peer) = peer_id { - warn!("💔 Sent block with bad justification to import"); - self.actions.push(ChainSyncAction::DropPeer(BadPeer( - *peer, - rep::BAD_JUSTIFICATION, - ))); - } - } - - if let Some(peer) = peer_id { - self.update_peer_common_number(&peer, number); - } - let state_sync_complete = - self.state_sync.as_ref().map_or(false, |s| s.target_hash() == hash); - if state_sync_complete { - info!( - target: LOG_TARGET, - "State sync is complete ({} MiB), restarting block sync.", - self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), - ); - self.state_sync = None; - self.mode = ChainSyncMode::Full; - self.restart(); - } - let gap_sync_complete = - self.gap_sync.as_ref().map_or(false, |s| s.target == number); - if gap_sync_complete { - info!( - target: LOG_TARGET, - "Block history download is complete." 
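`attempt_state_sync` only fires when the finalized block is within `STATE_SYNC_FINALITY_THRESHOLD` of the median peer height, i.e. when the finalized state is recent enough to be worth downloading directly. A small sketch of that decision (the threshold value is invented for the example):

```rust
const STATE_SYNC_FINALITY_THRESHOLD: u64 = 8;

/// Decide whether to start a state sync targeting `finalized_number`.
///
/// Returns `true` when the finalized block is recent relative to the
/// median best block our peers have announced.
fn should_start_state_sync(finalized_number: u64, mut peer_bests: Vec<u64>) -> bool {
    if peer_bests.is_empty() {
        return false;
    }
    peer_bests.sort_unstable();
    let median = peer_bests[peer_bests.len() / 2];
    finalized_number + STATE_SYNC_FINALITY_THRESHOLD >= median
}

fn main() {
    // Finalized head is close to the network: state sync is worthwhile.
    assert!(should_start_state_sync(100, vec![101, 104, 106]));
    // Finalized head lags far behind the median: keep block syncing instead.
    assert!(!should_start_state_sync(10, vec![100, 120, 130]));
}
```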
- ); - self.gap_sync = None; - } - }, - Err(BlockImportError::IncompleteHeader(peer_id)) => - if let Some(peer) = peer_id { - warn!( - target: LOG_TARGET, - "💔 Peer sent block with incomplete header to import", - ); - self.actions - .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); - self.restart(); - }, - Err(BlockImportError::VerificationFailed(peer_id, e)) => { - let extra_message = peer_id - .map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); - - warn!( - target: LOG_TARGET, - "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", - ); - - if let Some(peer) = peer_id { - self.actions - .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); - } - - self.restart(); - }, - Err(BlockImportError::BadBlock(peer_id)) => - if let Some(peer) = peer_id { - warn!( - target: LOG_TARGET, - "💔 Block {hash:?} received from peer {peer} has been blacklisted", - ); - self.actions.push(ChainSyncAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); - }, - Err(BlockImportError::MissingState) => { - // This may happen if the chain we were requesting upon has been discarded - // in the meantime because other chain has been finalized. - // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: LOG_TARGET, "Obsolete block {hash:?}"); - }, - e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { - warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); - self.state_sync = None; - self.restart(); - }, - Err(BlockImportError::Cancelled) => {}, - }; } - - self.allowed_requests.set_all(); - } - - /// Get pending actions to perform. - #[must_use] - pub fn actions(&mut self) -> impl Iterator> { - let block_requests = self - .block_requests() - .into_iter() - .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); - self.actions.extend(block_requests); - - let justification_requests = self - .justification_requests() - .into_iter() - .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); - self.actions.extend(justification_requests); - - let state_request = self - .state_request() - .into_iter() - .map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request }); - self.actions.extend(state_request); - - std::mem::take(&mut self.actions).into_iter() } /// A version of `actions()` that doesn't schedule extra requests. For testing only. #[cfg(test)] #[must_use] - fn take_actions(&mut self) -> impl Iterator> { + fn take_actions(&mut self) -> impl Iterator> { std::mem::take(&mut self.actions).into_iter() } } @@ -1964,7 +1996,7 @@ fn handle_ancestor_search_state( if block_hash_match && next_distance_to_tip == One::one() { // We found the ancestor in the first step so there is no need to execute binary // search. - return None + return None; } if block_hash_match { let left = curr_block_num; @@ -1983,7 +2015,7 @@ fn handle_ancestor_search_state( }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None + return None; } if block_hash_match { left = curr_block_num; @@ -2014,7 +2046,7 @@ fn peer_block_request( ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. 
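`handle_ancestor_search_state` below converges on the last common block with a peer via binary search between the highest height known to match and the lowest known to diverge. The sketch shows just that search invariant over an abstract `is_common` predicate; it is a simplification of the real state machine, which also batches queries into block requests.

```rust
/// Find the highest block number at or below `start` on which both chains
/// agree. `is_common` answers "do we and the peer have the same block at
/// this height?" and must be monotone (common below some height, diverged
/// above it) for the search to be correct.
fn find_common_ancestor(start: u64, is_common: impl Fn(u64) -> bool) -> Option<u64> {
    if !is_common(0) {
        return None; // Not even genesis matches.
    }
    let (mut left, mut right) = (0u64, start + 1);
    // Invariant: is_common(left); everything at or above `right` is treated
    // as diverged.
    while right - left > 1 {
        let mid = left + (right - left) / 2;
        if is_common(mid) {
            left = mid;
        } else {
            right = mid;
        }
    }
    Some(left)
}

fn main() {
    // Chains agree up to and including block 37, diverge above.
    let common = find_common_ancestor(100, |n| n <= 37);
    assert_eq!(common, Some(37));
}
```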
- return None + return None; } else if peer.common_number < finalized { trace!( target: LOG_TARGET, @@ -2103,7 +2135,7 @@ fn fork_sync_request( hash, r.number, ); - return false + return false; } if check_block(hash) != BlockStatus::Unknown { trace!( @@ -2112,7 +2144,7 @@ fn fork_sync_request( hash, r.number, ); - return false + return false; } true }); @@ -2121,7 +2153,7 @@ fn fork_sync_request( } for (hash, r) in fork_targets { if !r.peers.contains(&id) { - continue + continue; } // Download the fork only if it is behind or not too far ahead our tip of the chain // Otherwise it should be downloaded in full sync mode. @@ -2148,7 +2180,7 @@ fn fork_sync_request( direction: Direction::Descending, max: Some(count), }, - )) + )); } else { trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); } @@ -2167,7 +2199,7 @@ where T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false) + return Ok(false); } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -2194,7 +2226,7 @@ pub fn validate_blocks( blocks.len(), ); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); } let block_header = @@ -2214,7 +2246,7 @@ pub fn validate_blocks( block_header, ); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); } if request.fields.contains(BlockAttributes::HEADER) && @@ -2225,7 +2257,7 @@ pub fn validate_blocks( "Missing requested header for a block in response from {peer_id}.", ); - return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); } if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) @@ -2235,7 +2267,7 @@ pub fn validate_blocks( "Missing requested body for a block in response from {peer_id}.", ); - return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); } } @@ -2250,7 +2282,7 @@ pub fn validate_blocks( b.hash, hash, ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } if let (Some(header), Some(body)) = (&b.header, &b.body) { @@ -2268,7 +2300,7 @@ pub fn validate_blocks( expected, got, ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } } diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 39d0c8f8d4d6..59436f387db6 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -128,10 +128,10 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { // we wil send block requests to these peers // for these blocks we don't know about - let actions = sync.actions().collect::>(); + let actions = sync.actions().unwrap(); assert_eq!(actions.len(), 2); assert!(actions.iter().all(|action| match action { - ChainSyncAction::SendBlockRequest { peer_id, .. } => + SyncingAction::SendBlockRequest { peer_id, .. 
} => peer_id == &peer_id1 || peer_id == &peer_id2, _ => false, })); @@ -162,15 +162,15 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { sync.restart(); // which should make us cancel and send out again block requests to the first two peers - let actions = sync.actions().collect::>(); + let actions = sync.actions().unwrap(); assert_eq!(actions.len(), 4); let mut cancelled_first = HashSet::new(); assert!(actions.iter().all(|action| match action { - ChainSyncAction::CancelRequest { peer_id, .. } => { + SyncingAction::CancelRequest { peer_id, .. } => { cancelled_first.insert(peer_id); peer_id == &peer_id1 || peer_id == &peer_id2 }, - ChainSyncAction::SendBlockRequest { peer_id, .. } => { + SyncingAction::SendBlockRequest { peer_id, .. } => { assert!(cancelled_first.remove(peer_id)); peer_id == &peer_id1 || peer_id == &peer_id2 }, @@ -329,7 +329,7 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { assert_eq!(actions.len(), 1); assert!(matches!( &actions[0], - ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, + SyncingAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, )); best_block_num += max_blocks_to_request as u32; @@ -476,7 +476,7 @@ fn can_sync_huge_fork() { } else { assert_eq!(actions.len(), 1); match &actions[0] { - ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(), action @ _ => panic!("Unexpected action: {action:?}"), } }; @@ -508,7 +508,7 @@ fn can_sync_huge_fork() { assert_eq!(actions.len(), 1); assert!(matches!( &actions[0], - ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize + SyncingAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize )); best_block_num += sync.max_blocks_per_request as u32; @@ -610,7 +610,7 @@ fn syncs_fork_without_duplicate_requests() { } else { assert_eq!(actions.len(), 1); match &actions[0] { - ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(), action @ _ => panic!("Unexpected action: {action:?}"), } }; @@ -646,7 +646,7 @@ fn syncs_fork_without_duplicate_requests() { assert_eq!(actions.len(), 1); assert!(matches!( &actions[0], - ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize + SyncingAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize )); best_block_num += max_blocks_to_request as u32; @@ -839,10 +839,10 @@ fn sync_restart_removes_block_but_not_justification_requests() { let actions = sync.take_actions().collect::>(); for action in actions.iter() { match action { - ChainSyncAction::CancelRequest { peer_id } => { + SyncingAction::CancelRequest { peer_id, key: _ } => { pending_responses.remove(&peer_id); }, - ChainSyncAction::SendBlockRequest { peer_id, .. } => { + SyncingAction::SendBlockRequest { peer_id, .. } => { // we drop obsolete response, but don't register a new request, it's checked in // the `assert!` below pending_responses.remove(&peer_id); @@ -852,7 +852,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } assert!(actions.iter().any(|action| { match action { - ChainSyncAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0], + SyncingAction::SendBlockRequest { peer_id, .. 
} => peer_id == &peers[0], _ => false, } })); @@ -943,7 +943,7 @@ fn request_across_forks() { assert_eq!(actions.len(), 1); assert!(matches!( &actions[0], - ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize + SyncingAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize )); assert_eq!(sync.best_queued_number, 107); assert_eq!(sync.best_queued_hash, block.hash()); @@ -988,7 +988,7 @@ fn request_across_forks() { assert_eq!(actions.len(), 1); assert!(matches!( &actions[0], - ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize + SyncingAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize )); assert!(sync.is_known(&block.header.parent_hash())); } diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index ca74c2371c25..0472a0a2f63c 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -23,11 +23,13 @@ pub mod middleware; pub mod utils; -use std::{error::Error as StdError, time::Duration}; +use std::{error::Error as StdError, net::SocketAddr, time::Duration}; use jsonrpsee::{ core::BoxError, - server::{serve_with_graceful_shutdown, stop_channel, ws, PingConfig, StopHandle}, + server::{ + serve_with_graceful_shutdown, stop_channel, ws, PingConfig, ServerHandle, StopHandle, + }, Methods, RpcModule, }; use middleware::NodeHealthProxyLayer; @@ -46,8 +48,38 @@ pub use utils::{RpcEndpoint, RpcMethods}; const MEGABYTE: u32 = 1024 * 1024; -/// Type alias for the JSON-RPC server. -pub type Server = jsonrpsee::server::ServerHandle; +/// Type to encapsulate the server handle and listening address. +pub struct Server { + /// Handle to the rpc server + handle: ServerHandle, + /// Listening address of the server + listen_addrs: Vec<SocketAddr>, +} + +impl Server { + /// Creates a new Server. + pub fn new(handle: ServerHandle, listen_addrs: Vec<SocketAddr>) -> Server { + Server { handle, listen_addrs } + } + + /// Returns the `jsonrpsee::server::ServerHandle` for this Server. Can be used to stop the + /// server. + pub fn handle(&self) -> &ServerHandle { + &self.handle + } + + /// The listen address for the running RPC service. + pub fn listen_addrs(&self) -> &[SocketAddr] { + &self.listen_addrs + } +} + +impl Drop for Server { + fn drop(&mut self) { + // This doesn't wait for the server to be stopped but fires the signal. + let _ = self.handle.stop(); + } +} /// Trait for providing subscription IDs that can be cloned. pub trait SubscriptionIdProvider: @@ -273,5 +305,5 @@ where // This is to make it work with old scripts/utils that parse the logs.
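The new `Server` type above folds the old `waiting::Server` RAII guard (removed later in this diff) into the public API: dropping the server fires the stop signal without waiting for shutdown to complete. A minimal, dependency-free sketch of the same pattern, with an mpsc channel standing in for jsonrpsee's `ServerHandle`:

```rust
use std::net::SocketAddr;
use std::sync::mpsc;

/// Stand-in for `jsonrpsee::server::ServerHandle`: firing `stop` signals the
/// server task to shut down, without waiting for it to finish.
struct StopHandle(mpsc::Sender<()>);

impl StopHandle {
    fn stop(&self) -> Result<(), mpsc::SendError<()>> {
        self.0.send(())
    }
}

/// Same shape as the new `sc_rpc_server::Server`: a handle plus the bound addresses.
struct Server {
    handle: StopHandle,
    listen_addrs: Vec<SocketAddr>,
}

impl Server {
    fn new(handle: StopHandle, listen_addrs: Vec<SocketAddr>) -> Self {
        Server { handle, listen_addrs }
    }

    fn listen_addrs(&self) -> &[SocketAddr] {
        &self.listen_addrs
    }
}

impl Drop for Server {
    fn drop(&mut self) {
        // Fire the stop signal; like the real type, this does not wait for shutdown.
        let _ = self.handle.stop();
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let server = Server::new(StopHandle(tx), vec!["127.0.0.1:9944".parse().unwrap()]);
    println!("listening on {:?}", server.listen_addrs());
    drop(server); // RAII: the stop signal is sent here
    assert!(rx.try_recv().is_ok());
}
```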
log::info!("Running JSON-RPC server: addr={}", format_listen_addrs(&local_addrs)); - Ok(server_handle) + Ok(Server::new(server_handle, local_addrs)) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index a638a9c7ec54..30a01b93b315 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -346,7 +346,7 @@ async fn follow_with_runtime() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"systemVersion\":1}"; let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index eef795070343..6b711f2425e9 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -476,7 +476,8 @@ async fn should_return_runtime_version() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"systemVersion\":1,\ + \"stateVersion\":1}"; let runtime_version = api.runtime_version(None.into()).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 0dc28d1361cb..28a76847ac06 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -19,7 +19,7 @@ use crate::{ build_network_future, build_system_rpc_future, client::{Client, ClientConfig}, - config::{Configuration, ExecutorConfiguration, KeystoreConfig, PrometheusConfig}, + config::{Configuration, ExecutorConfiguration, KeystoreConfig, Multiaddr, PrometheusConfig}, error::Error, metrics::MetricsService, start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, @@ -43,6 +43,7 @@ use sc_executor::{ use sc_keystore::LocalKeystore; use sc_network::{ config::{FullNetworkConfiguration, SyncMode}, + multiaddr::Protocol, service::{ traits::{PeerStore, RequestResponseConfig}, NotificationMetrics, @@ -527,13 +528,24 @@ where gen_rpc_module, rpc_id_provider, )?; + + let listen_addrs = rpc_server_handle + .listen_addrs() + .into_iter() + .map(|socket_addr| { + let mut multiaddr: Multiaddr = socket_addr.ip().into(); + multiaddr.push(Protocol::Tcp(socket_addr.port())); + multiaddr + }) + .collect(); + let in_memory_rpc = { let mut module = gen_rpc_module()?; module.extensions_mut().insert(DenyUnsafe::No); module }; - let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc)); + let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc), listen_addrs); // Spawn informant task spawn_handle.spawn( diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 
22defd7c5514..ce5b92551bf2 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -513,6 +513,7 @@ where fork_choice, intermediates, import_existing, + create_gap, .. } = import_block; @@ -537,6 +538,8 @@ where *self.importing_block.write() = Some(hash); + operation.op.set_create_gap(create_gap); + let result = self.execute_and_import_block( operation, origin, @@ -604,9 +607,8 @@ where } let info = self.backend.blockchain().info(); - let gap_block = info - .block_gap - .map_or(false, |(start, _)| *import_headers.post().number() == start); + let gap_block = + info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start); // the block is lower than our last finalized block so it must revert // finality, refusing import. diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 251eef97be84..babb76f022f0 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -34,6 +34,7 @@ mod client; mod metrics; mod task_manager; +use crate::config::Multiaddr; use std::{ collections::HashMap, net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, @@ -50,6 +51,7 @@ use sc_network::{ }; use sc_network_sync::SyncingService; use sc_network_types::PeerId; +use sc_rpc_server::Server; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SyncOracle; @@ -101,14 +103,22 @@ use tokio::runtime::Handle; const DEFAULT_PROTOCOL_ID: &str = "sup"; -/// RPC handlers that can perform RPC queries. +/// A running RPC service that can perform in-memory RPC queries. #[derive(Clone)] -pub struct RpcHandlers(Arc<RpcModule<()>>); +pub struct RpcHandlers { + // This is legacy and may be removed at some point, it was for WASM stuff before smoldot was a + // thing. https://github.com/paritytech/polkadot-sdk/pull/5038#discussion_r1694971805 + rpc_module: Arc<RpcModule<()>>, + + // This can be used to introspect the port the RPC server is listening on. SDK consumers are + // depending on this and it should be supported even if in-memory query support is removed. + listen_addresses: Vec<Multiaddr>, +} impl RpcHandlers { /// Create RPC handlers instance. - pub fn new(inner: Arc<RpcModule<()>>) -> Self { - Self(inner) + pub fn new(rpc_module: Arc<RpcModule<()>>, listen_addresses: Vec<Multiaddr>) -> Self { + Self { rpc_module, listen_addresses } } /// Starts an RPC query. @@ -130,12 +140,17 @@ impl RpcHandlers { // This limit is used to prevent panics and is large enough. const TOKIO_MPSC_MAX_SIZE: usize = tokio::sync::Semaphore::MAX_PERMITS; - self.0.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await + self.rpc_module.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await } /// Provides access to the underlying `RpcModule` pub fn handle(&self) -> Arc<RpcModule<()>> { - self.0.clone() + self.rpc_module.clone() + } + + /// Provides access to listen addresses + pub fn listen_addresses(&self) -> &[Multiaddr] { + &self.listen_addresses[..] } } @@ -363,20 +378,6 @@ pub async fn build_system_rpc_future< debug!("`NetworkWorker` has terminated, shutting down the system RPC future."); } -// Wrapper for HTTP and WS servers that makes sure they are properly shut down. -mod waiting { - pub struct Server(pub Option<sc_rpc_server::Server>); - - impl Drop for Server { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - // This doesn't wait for the server to be stopped but fires the signal. - let _ = server.stop(); - } - } - } -} - /// Starts RPC servers.
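`RpcHandlers::listen_addresses` exposes `Multiaddr`s, so the builder (earlier in this diff) converts each bound `SocketAddr` by turning the IP into a `Multiaddr` and pushing a `/tcp` protocol component onto it. A dependency-free sketch of that mapping, rendering the multiaddr text form instead of using the real `Multiaddr` type:

```rust
use std::net::SocketAddr;

/// Render a socket address in multiaddr text form, mirroring the
/// `SocketAddr -> Multiaddr` mapping done in `builder.rs` above.
fn to_multiaddr_string(addr: &SocketAddr) -> String {
    match addr {
        SocketAddr::V4(v4) => format!("/ip4/{}/tcp/{}", v4.ip(), v4.port()),
        SocketAddr::V6(v6) => format!("/ip6/{}/tcp/{}", v6.ip(), v6.port()),
    }
}

fn main() {
    let addr: SocketAddr = "127.0.0.1:9944".parse().unwrap();
    assert_eq!(to_multiaddr_string(&addr), "/ip4/127.0.0.1/tcp/9944");
}
```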
pub fn start_rpc_servers( rpc_configuration: &RpcConfiguration, @@ -384,7 +385,7 @@ pub fn start_rpc_servers( tokio_handle: &Handle, gen_rpc_module: R, rpc_id_provider: Option>, -) -> Result, error::Error> +) -> Result where R: Fn() -> Result, Error>, { @@ -451,7 +452,7 @@ where match tokio::task::block_in_place(|| { tokio_handle.block_on(sc_rpc_server::start_server(server_config)) }) { - Ok(server) => Ok(Box::new(waiting::Server(Some(server)))), + Ok(server) => Ok(server), Err(e) => Err(Error::Application(e)), } } diff --git a/substrate/client/tracing/src/logging/mod.rs b/substrate/client/tracing/src/logging/mod.rs index 74ce5f90ede9..33fec2d41881 100644 --- a/substrate/client/tracing/src/logging/mod.rs +++ b/substrate/client/tracing/src/logging/mod.rs @@ -138,6 +138,9 @@ where .add_directive( parse_default_directive("trust_dns_proto=off").expect("provided directive is valid"), ) + .add_directive( + parse_default_directive("hickory_proto=off").expect("provided directive is valid"), + ) .add_directive( parse_default_directive("libp2p_mdns::behaviour::iface=off") .expect("provided directive is valid"), diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 912cb3e27cd5..4e4052b2b566 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -25,12 +25,12 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ - crypto::{KeyTypeId, Pair, VrfSecret}, + crypto::{Pair, VrfSecret}, U256, }; use sp_io; @@ -182,7 +182,7 @@ impl Config for Test { type WeightInfo = (); type MaxAuthorities = ConstU32<10>; type MaxNominators = ConstU32<100>; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index e82c97160efc..55decef273f6 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,27 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 +//! DATE: 2024-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `8f4ffe8f7785`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --runtime=target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/balances/src/weights.rs // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./substrate/HEADER-APACHE2 -// --output=./substrate/frame/balances/src/weights.rs -// --template=./substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,8 +71,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. + Weight::from_parts(77_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. + Weight::from_parts(31_293_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. 
+ Weight::from_parts(78_184_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,10 +135,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 Âą0)` // Estimated: `990 + u * (2603 Âą0)` - // Minimum execution time: 16_333_000 picoseconds. - Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -161,22 +161,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. + Weight::from_parts(31_917_000, 0) } } @@ -188,8 +188,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. + Weight::from_parts(77_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -199,8 +199,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. 
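The regenerated weights all follow the same shape: a measured base `Weight::from_parts(ref_time_picoseconds, proof_size_bytes)` plus per-operation database weights for each storage read and write. A simplified model of how a weight like `transfer_allow_death` composes, with illustrative DB constants rather than the real `RocksDbWeight` values:

```rust
/// Simplified model of a FRAME weight: execution time plus proof size.
#[derive(Clone, Copy, Debug)]
struct Weight {
    ref_time: u64,   // picoseconds of execution time
    proof_size: u64, // bytes of PoV / storage proof
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    const fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
}

// Illustrative per-operation DB weights; the real values come from `RocksDbWeight`.
const READ: Weight = Weight::from_parts(25_000_000, 0);
const WRITE: Weight = Weight::from_parts(100_000_000, 0);

/// Shape of the regenerated `transfer_allow_death` weight: base + 1 read + 1 write.
fn transfer_allow_death() -> Weight {
    Weight::from_parts(77_290_000, 3593)
        .saturating_add(READ)
        .saturating_add(WRITE)
}

fn main() {
    println!("{:?}", transfer_allow_death());
}
```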
+ Weight::from_parts(61_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -208,10 +208,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -219,10 +219,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. + Weight::from_parts(31_293_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -230,10 +230,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -243,8 +243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -252,10 +252,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -266,10 +266,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 Âą0)` // Estimated: `990 + u * (2603 Âą0)` - // Minimum execution time: 16_333_000 picoseconds. - Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. 
+ Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -278,21 +278,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. + Weight::from_parts(31_917_000, 0) } } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index ae230a0209a7..caac4107cfb7 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -28,11 +28,11 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_core::{crypto::KeyTypeId, H256}; +use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -186,7 +186,7 @@ impl Config for Test { type MaxAuthorities = ConstU32<100>; type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 6b7542e89202..667328ac2d0d 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -3,7 +3,6 @@ name = "pallet-revive" version = "0.1.0" authors.workspace = true edition.workspace = true -build = "build.rs" license = "Apache-2.0" homepage.workspace = true repository.workspace = true diff --git a/substrate/frame/revive/build.rs b/substrate/frame/revive/build.rs deleted file mode 100644 index ca8e62df6047..000000000000 --- a/substrate/frame/revive/build.rs +++ /dev/null @@ -1,78 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use std::io::Write; - -/// We start with version 2 instead of 0 when adding the pallet. -/// -/// Because otherwise we can't test any migrations since they require the storage version -/// to be lower than the pallet version in order to be triggerd. With the pallet version -/// at the minimum (0) this would not work. -const LOWEST_STORAGE_VERSION: u16 = 2; - -/// Get the latest migration version. -/// -/// Find the highest version number from the available migration files. -/// Each migration file should follow the naming convention `vXX.rs`, where `XX` is the version -/// number. -fn get_latest_version() -> u16 { - let Ok(dir) = std::fs::read_dir("src/migration") else { return LOWEST_STORAGE_VERSION }; - dir.filter_map(|entry| { - let file_name = entry.as_ref().ok()?.file_name(); - let file_name = file_name.to_str()?; - if file_name.starts_with('v') && file_name.ends_with(".rs") { - let version = &file_name[1..&file_name.len() - 3]; - let version = version.parse::().ok()?; - - // Ensure that the version matches the one defined in the file. - let path = entry.unwrap().path(); - let file_content = std::fs::read_to_string(&path).ok()?; - assert!( - file_content.contains(&format!("const VERSION: u16 = {}", version)), - "Invalid MigrationStep::VERSION in {:?}", - path - ); - - return Some(version) - } - None - }) - .max() - .unwrap_or(LOWEST_STORAGE_VERSION) -} - -/// Generates a module that exposes the latest migration version, and the benchmark migrations type. -fn main() -> Result<(), Box> { - let out_dir = std::env::var("OUT_DIR")?; - let path = std::path::Path::new(&out_dir).join("migration_codegen.rs"); - let mut f = std::fs::File::create(path)?; - let version = get_latest_version(); - write!( - f, - " - pub mod codegen {{ - use crate::NoopMigration; - /// The latest migration version, pulled from the latest migration file. - pub const LATEST_MIGRATION_VERSION: u16 = {version}; - /// The Migration Steps used for benchmarking the migration framework. - pub type BenchMigrations = (NoopMigration<{}>, NoopMigration<{version}>); - }}", - version - 1, - )?; - - Ok(()) -} diff --git a/substrate/frame/revive/fixtures/contracts/balance.rs b/substrate/frame/revive/fixtures/contracts/balance.rs index 4011b8379cbf..4606135d9807 100644 --- a/substrate/frame/revive/fixtures/contracts/balance.rs +++ b/substrate/frame/revive/fixtures/contracts/balance.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::u64_output; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,9 +28,6 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - // Initialize buffer with 1s so that we can check that it is overwritten. - output!(balance, [1u8; 8], api::balance,); - - // Assert that the balance is 0. - assert_eq!(&[0u8; 8], balance); + let balance = u64_output!(api::balance,); + assert_eq!(balance, 0); } diff --git a/substrate/frame/revive/fixtures/contracts/call.rs b/substrate/frame/revive/fixtures/contracts/call.rs index 93687441fa50..ee51548879d9 100644 --- a/substrate/frame/revive/fixtures/contracts/call.rs +++ b/substrate/frame/revive/fixtures/contracts/call.rs @@ -38,10 +38,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. 
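The build script deleted above had one job: scan `src/migration` for files named `vXX.rs` and expose the highest version as `LATEST_MIGRATION_VERSION` via generated code. A sketch of that core logic, taking the directory listing as a plain slice so it stays self-contained (the real script read the filesystem and also cross-checked each file's `VERSION` constant):

```rust
/// Derive the latest migration version from `vXX.rs` file names,
/// falling back to `lowest` when no migration files are found.
fn latest_version(file_names: &[&str], lowest: u16) -> u16 {
    file_names
        .iter()
        .filter_map(|name| {
            let version = name.strip_prefix('v')?.strip_suffix(".rs")?;
            version.parse::<u16>().ok()
        })
        .max()
        .unwrap_or(lowest)
}

fn main() {
    assert_eq!(latest_version(&["v2.rs", "v3.rs", "mod.rs"], 2), 3);
    assert_eq!(latest_version(&["mod.rs"], 2), 2); // no migrations found
}
```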
- &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_return_code.rs b/substrate/frame/revive/fixtures/contracts/call_return_code.rs index 29b77c343fe9..25370459acb4 100644 --- a/substrate/frame/revive/fixtures/contracts/call_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/call_return_code.rs @@ -21,7 +21,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -41,10 +41,10 @@ pub extern "C" fn call() { let err_code = match api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &100u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &u256_bytes(100u64), // Value transferred to the contract. input, None, ) { diff --git a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs index 7cd46849655f..8c8aee962849 100644 --- a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs @@ -42,10 +42,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs index c3204c29281c..330393e706e9 100644 --- a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs +++ b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs @@ -19,7 +19,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -40,10 +40,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::from_bits(flags).unwrap(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &value.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &u256_bytes(value), // Value transferred to the contract. 
forwarded_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs index a941aa9a3421..6ab892a6b7ae 100644 --- a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs +++ b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs @@ -43,8 +43,8 @@ pub extern "C" fn call() { callee_addr, ref_time, proof_size, - None, // No deposit limit. - &0u64.to_le_bytes(), // value transferred to the contract. + None, // No deposit limit. + &[0u8; 32], // value transferred to the contract. forwarded_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs index 3b83f208d623..eb29fca87c15 100644 --- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode}; #[no_mangle] @@ -32,7 +32,7 @@ pub extern "C" fn call() { // The value to transfer on instantiation and calls. Chosen to be greater than existential // deposit. - let value = 32768u64.to_le_bytes(); + let value = u256_bytes(32768u64); let salt = [0u8; 32]; // Callee will use the first 4 bytes of the input to return an exit status. diff --git a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs index bb5c1ccbc1d6..22d6c5b548d8 100644 --- a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs +++ b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs @@ -54,10 +54,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs index 947247e9cf74..abfba282bec1 100644 --- a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs +++ b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs @@ -167,3 +167,30 @@ macro_rules! unwrap_output { $host_fn($($arg,)* $output).unwrap(); }; } + +/// Call the host function and convert the [u8; 32] output to u64. +#[macro_export] +macro_rules! u64_output { + ($host_fn:path, $($arg:expr),*) => {{ + let mut buffer = [1u8; 32]; + $host_fn($($arg,)* &mut buffer); + assert!(buffer[8..].iter().all(|&x| x == 0)); + u64::from_le_bytes(buffer[..8].try_into().unwrap()) + }}; +} + +/// Convert a u64 into a [u8; 32]. 
+pub const fn u256_bytes(value: u64) -> [u8; 32] { + let mut buffer = [0u8; 32]; + let bytes = value.to_le_bytes(); + + buffer[0] = bytes[0]; + buffer[1] = bytes[1]; + buffer[2] = bytes[2]; + buffer[3] = bytes[3]; + buffer[4] = bytes[4]; + buffer[5] = bytes[5]; + buffer[6] = bytes[6]; + buffer[7] = bytes[7]; + buffer +} diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs index 28d161791e5b..4fa2db0c8c1c 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs @@ -33,7 +33,7 @@ pub extern "C" fn call() { buffer, input: [u8; 4], callee: &[u8; 20], - deposit_limit: [u8; 8], + deposit_limit: &[u8; 32], ); // create 4 byte of storage before calling @@ -46,7 +46,7 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. Some(deposit_limit), - &0u64.to_le_bytes(), // Value transferred to the contract. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs index d87c2e8cd35a..e1372e2eb8b6 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs @@ -19,7 +19,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -32,10 +32,10 @@ pub extern "C" fn call() { input!( input: [u8; 4], code_hash: &[u8; 32], - deposit_limit: [u8; 8], + deposit_limit: &[u8; 32], ); - let value = 10_000u64.to_le_bytes(); + let value = u256_bytes(10_000u64); let salt = [0u8; 32]; let mut address = [0u8; 20]; diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs index 753490cf26b7..d2efb26e5ceb 100644 --- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs @@ -52,7 +52,7 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. None, - &0u64.to_le_bytes(), // Value transferred to the contract. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs index c5525423a9ee..95c1bd2aa6cd 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::u64_output; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; #[no_mangle] @@ -39,9 +39,8 @@ pub extern "C" fn call() { // Assert that `value_transferred` is equal to the value // passed to the `caller` contract: 1337. 
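With pallet-revive moving balances to 256-bit values, the fixtures now pass and receive 32-byte little-endian buffers instead of 8-byte `u64` encodings. A round-trip sketch combining the new `u256_bytes` helper with the check the `u64_output!` macro performs; the fixture's `const fn` unrolls the byte copy by hand, while this sketch uses a `while` loop, which is equally valid in a `const fn`:

```rust
/// Encode a u64 as a 32-byte little-endian U256, like the fixture helper.
const fn u256_bytes(value: u64) -> [u8; 32] {
    let mut buffer = [0u8; 32];
    let bytes = value.to_le_bytes();
    let mut i = 0;
    while i < 8 {
        buffer[i] = bytes[i];
        i += 1;
    }
    buffer
}

/// Mirrors what `u64_output!` asserts: the upper 24 bytes must be zero,
/// i.e. the returned value must actually fit in a u64.
fn u64_from_u256(buffer: [u8; 32]) -> u64 {
    assert!(buffer[8..].iter().all(|&x| x == 0), "value does not fit in u64");
    u64::from_le_bytes(buffer[..8].try_into().unwrap())
}

fn main() {
    let encoded = u256_bytes(10_000);
    assert_eq!(u64_from_u256(encoded), 10_000);
}
```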
- output!(value_transferred, [0u8; 8], api::value_transferred,); - let value_transferred = u64::from_le_bytes(value_transferred[..].try_into().unwrap()); - assert_eq!(value_transferred, 1337); + let value = u64_output!(api::value_transferred,); + assert_eq!(value, 1337); // Assert that ALICE is the caller of the contract. let mut caller = [0u8; 20]; diff --git a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs index 4959a5e2e0ce..d381db8e398f 100644 --- a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs +++ b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs @@ -18,11 +18,11 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; const ADDRESS_KEY: [u8; 32] = [0u8; 32]; -const VALUE: [u8; 8] = [0, 0, 1u8, 0, 0, 0, 0, 0]; +const VALUE: [u8; 32] = u256_bytes(65536); #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/fixtures/contracts/drain.rs b/substrate/frame/revive/fixtures/contracts/drain.rs index b46d4f7c8418..0d644a4238c4 100644 --- a/substrate/frame/revive/fixtures/contracts/drain.rs +++ b/substrate/frame/revive/fixtures/contracts/drain.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::{u256_bytes, u64_output}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,17 +28,14 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - output!(balance, [0u8; 8], api::balance,); - let balance = u64::from_le_bytes(balance[..].try_into().unwrap()); - - output!(minimum_balance, [0u8; 8], api::minimum_balance,); - let minimum_balance = u64::from_le_bytes(minimum_balance[..].try_into().unwrap()); + let balance = u64_output!(api::balance,); + let minimum_balance = u64_output!(api::minimum_balance,); // Make the transferred value exceed the balance by adding the minimum balance. let balance = balance + minimum_balance; // Try to self-destruct by sending more balance to the 0 address. // The call will fail because a contract transfer has a keep alive requirement. 
- let res = api::transfer(&[0u8; 20], &balance.to_le_bytes()); + let res = api::transfer(&[0u8; 20], &u256_bytes(balance)); assert!(matches!(res, Err(uapi::ReturnErrorCode::TransferFailed))); } diff --git a/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs b/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs index 9186835d2911..5c438c1a75a1 100644 --- a/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs +++ b/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs @@ -25,7 +25,8 @@ use uapi::{HostFn, HostFnImpl as api}; #[polkavm_derive::polkavm_export] pub extern "C" fn deploy() { let buffer = [1u8, 2, 3, 4]; - api::deposit_event(&[0u8; 0], &buffer); + let topics = [[42u8; 32]; 1]; + api::deposit_event(&topics, &buffer); api::return_value(uapi::ReturnFlags::empty(), &buffer); } diff --git a/substrate/frame/revive/fixtures/contracts/event_size.rs b/substrate/frame/revive/fixtures/contracts/event_size.rs index 2b56de4bd3fd..7f04ae42765a 100644 --- a/substrate/frame/revive/fixtures/contracts/event_size.rs +++ b/substrate/frame/revive/fixtures/contracts/event_size.rs @@ -33,6 +33,7 @@ pub extern "C" fn call() { input!(len: u32,); let data = &BUFFER[..len as usize]; + let topics = [[0u8; 32]; 0]; - api::deposit_event(&[0u8; 0], data); + api::deposit_event(&topics, data); } diff --git a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs index a81ffea943d4..c5736850960a 100644 --- a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -36,8 +36,8 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, /* How much proof_size weight to devote for the execution. 0 = * all. */ - None, // No deposit limit. - &10_000u64.to_le_bytes(), // Value to transfer. + None, // No deposit limit. + &u256_bytes(10_000u64), // Value to transfer. input, None, None, diff --git a/substrate/frame/revive/fixtures/contracts/read_only_call.rs b/substrate/frame/revive/fixtures/contracts/read_only_call.rs index 7476b7a8366d..ea74d56867f5 100644 --- a/substrate/frame/revive/fixtures/contracts/read_only_call.rs +++ b/substrate/frame/revive/fixtures/contracts/read_only_call.rs @@ -39,10 +39,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::READ_ONLY, callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/recurse.rs b/substrate/frame/revive/fixtures/contracts/recurse.rs index c15784b7f245..2e70d67d8c73 100644 --- a/substrate/frame/revive/fixtures/contracts/recurse.rs +++ b/substrate/frame/revive/fixtures/contracts/recurse.rs @@ -43,10 +43,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. 
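Event topics change shape as well: instead of an SCALE-encoded vector of hashes, `deposit_event` now takes a slice of 32-byte arrays, which the host side receives as a flat `num_topics * 32` byte buffer (the benchmark later in this diff builds exactly that flat layout). A sketch of the flattening:

```rust
/// Flatten fixed-size 32-byte topics into the buffer layout the host expects.
fn encode_topics(topics: &[[u8; 32]]) -> Vec<u8> {
    // The host receives `topics.len() * 32` bytes and splits them back apart.
    topics.iter().flat_map(|t| t.iter().copied()).collect()
}

fn main() {
    let topics = [[42u8; 32]; 1]; // same shape as the updated fixture
    let flat = encode_topics(&topics);
    assert_eq!(flat.len(), 32);
    assert!(flat.iter().all(|&b| b == 42));
}
```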
- 0u64, // How much deposit_limit to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much deposit_limit to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. &(calls_left - 1).to_le_bytes(), None, ) diff --git a/substrate/frame/revive/fixtures/contracts/self_destruct.rs b/substrate/frame/revive/fixtures/contracts/self_destruct.rs index 0e1e4d30e6f3..524979991ec7 100644 --- a/substrate/frame/revive/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/revive/fixtures/contracts/self_destruct.rs @@ -42,10 +42,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value to transfer. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value to transfer. &[0u8; 0], None, ) diff --git a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs index 3e1f2757c27a..bfeca9b8b4a4 100644 --- a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -extern crate common; +use common::u256_bytes; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,7 +28,7 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - let ret_code = match api::transfer(&[0u8; 20], &100u64.to_le_bytes()) { + let ret_code = match api::transfer(&[0u8; 20], &u256_bytes(100u64)) { Ok(_) => 0u32, Err(code) => code as u32, }; diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs index c000817a8a39..020a578c3a3a 100644 --- a/substrate/frame/revive/src/benchmarking/call_builder.rs +++ b/substrate/frame/revive/src/benchmarking/call_builder.rs @@ -22,12 +22,14 @@ use crate::{ storage::meter::Meter, transient_storage::MeterEntry, wasm::{ApiVersion, PreparedCall, Runtime}, - BalanceOf, Config, DebugBuffer, Error, GasMeter, Origin, TypeInfo, WasmBlob, Weight, + BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, TypeInfo, WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; use codec::{Encode, HasCompact}; use core::fmt::Debug; use frame_benchmarking::benchmarking; +use frame_support::traits::IsType; +use sp_core::{H256, U256}; type StackExt<'a, T> = Stack<'a, T, WasmBlob>; @@ -48,6 +50,9 @@ impl Default for CallSetup where T: Config + pallet_balances::Config, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + T::Hash: IsType, + MomentOf: Into, { fn default() -> Self { Self::new(WasmModule::dummy()) @@ -57,7 +62,10 @@ where impl CallSetup where T: Config + pallet_balances::Config, + T::Hash: IsType, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, { /// Setup a new call for the given module. 
pub fn new(module: WasmModule) -> Self { diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 2c5285622843..8601f5f53542 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -25,7 +25,6 @@ use self::{call_builder::CallSetup, code::WasmModule}; use crate::{ exec::{Key, MomentOf}, limits, - migration::codegen::LATEST_MIGRATION_VERSION, storage::WriteOutcome, Pallet as Contracts, *, }; @@ -34,7 +33,6 @@ use codec::{Encode, MaxEncodedLen}; use frame_benchmarking::v2::*; use frame_support::{ self, assert_ok, - pallet_prelude::StorageVersion, storage::child, traits::{fungible::InspectHold, Currency}, weights::{Weight, WeightMeter}, @@ -65,14 +63,21 @@ const UNBALANCED_TRIE_LAYERS: u32 = 20; struct Contract { caller: T::AccountId, account_id: T::AccountId, - addr: T::AccountId, } impl Contract where T: Config + pallet_balances::Config, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, + T::Hash: IsType, { + /// Returns the address of the contract. + fn address(&self) -> H160 { + T::AddressMapper::to_address(&self.account_id) + } + /// Create new contract and use a default account id as instantiator. fn new(module: WasmModule, data: Vec) -> Result, &'static str> { Self::with_index(0, module, data) @@ -110,7 +115,7 @@ where let address = outcome.result?.addr; let account_id = T::AddressMapper::to_account_id_contract(&address); - let result = Contract { caller, account_id: account_id.clone(), addr: account_id }; + let result = Contract { caller, account_id: account_id.clone() }; ContractInfoOf::::insert(&address, result.info()?); @@ -216,9 +221,12 @@ fn default_deposit_limit() -> BalanceOf { #[benchmarks( where - as codec::HasCompact>::Type: Clone + Eq + PartialEq + core::fmt::Debug + scale_info::TypeInfo + codec::Encode, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: From< as Currency>::Balance> + Into + TryFrom, T: Config + pallet_balances::Config, - BalanceOf: From< as Currency>::Balance>, + MomentOf: Into, + ::RuntimeEvent: From>, + T::Hash: IsType, as Currency>::Balance: From>, )] mod benchmarks { @@ -246,73 +254,6 @@ mod benchmarks { Ok(()) } - // This benchmarks the weight of executing Migration::migrate to execute a noop migration. - #[benchmark(pov_mode = Measured)] - fn migration_noop() { - let version = LATEST_MIGRATION_VERSION; - StorageVersion::new(version).put::>(); - #[block] - { - Migration::::migrate(&mut WeightMeter::new()); - } - assert_eq!(StorageVersion::get::>(), version); - } - - // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigration` - #[benchmark(pov_mode = Measured)] - fn migrate() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - - #[extrinsic_call] - _(RawOrigin::Signed(whitelisted_caller()), Weight::MAX); - - assert_eq!(StorageVersion::get::>(), latest_version - 1); - } - - // This benchmarks the weight of running on_runtime_upgrade when there are no migration in - // progress. 
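The benchmarks drop the stored `addr` field and instead derive the `H160` contract address on demand via the new `Contract::address` helper, which delegates to `T::AddressMapper::to_address`. A self-contained sketch of that indirection, with a hypothetical truncating mapper standing in for the real pallet-revive address mapping, which is more involved:

```rust
/// Minimal stand-in for the pallet's address-mapping abstraction.
trait AddressMapper<AccountId> {
    fn to_address(account: &AccountId) -> [u8; 20];
}

struct Truncating;

// Illustrative mapping only: take the first 20 bytes of a 32-byte account id.
impl AddressMapper<[u8; 32]> for Truncating {
    fn to_address(account: &[u8; 32]) -> [u8; 20] {
        account[..20].try_into().unwrap()
    }
}

struct Contract {
    account_id: [u8; 32],
}

impl Contract {
    /// Mirrors the new `Contract::address` helper: derived, never stored.
    fn address(&self) -> [u8; 20] {
        Truncating::to_address(&self.account_id)
    }
}

fn main() {
    let c = Contract { account_id: [7u8; 32] };
    assert_eq!(c.address(), [7u8; 20]);
}
```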
- #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade_noop() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version).put::>(); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_none()); - } - - // This benchmarks the weight of running on_runtime_upgrade when there is a migration in - // progress. - #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade_in_progress() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - let v = vec![42u8].try_into().ok(); - MigrationInProgress::::set(v.clone()); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_some()); - assert_eq!(MigrationInProgress::::get(), v); - } - - // This benchmarks the weight of running on_runtime_upgrade when there is a migration to - // process. - #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_some()); - } - // This benchmarks the overhead of loading a code of size `c` byte from storage and into // the execution engine. This does **not** include the actual execution for which the gas meter // is responsible. This is achieved by generating all code to the `deploy` function @@ -326,7 +267,7 @@ mod benchmarks { let instance = Contract::::with_caller(whitelisted_caller(), WasmModule::sized(c), vec![])?; let value = Pallet::::min_balance(); - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] @@ -434,7 +375,7 @@ mod benchmarks { Contract::::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); let before = T::Currency::balance(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] @@ -510,7 +451,7 @@ mod benchmarks { let storage_deposit = default_deposit_limit::(); let hash = >::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); assert_ne!(instance.info()?.code_hash, hash); #[extrinsic_call] _(RawOrigin::Root, callee, hash); @@ -661,85 +602,67 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_balance() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_balance(memory.as_mut_slice(), 4, 0); + result = runtime.bench_balance(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().balance().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().balance()); } #[benchmark(pov_mode = Measured)] fn seal_value_transferred() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + 
build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_value_transferred(memory.as_mut_slice(), 4, 0); + result = runtime.bench_value_transferred(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().value_transferred().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().value_transferred()); } #[benchmark(pov_mode = Measured)] fn seal_minimum_balance() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_minimum_balance(memory.as_mut_slice(), 4, 0); + result = runtime.bench_minimum_balance(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().minimum_balance().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().minimum_balance()); } #[benchmark(pov_mode = Measured)] fn seal_block_number() { - let len = as MaxEncodedLen>::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_block_number(memory.as_mut_slice(), 4, 0); + result = runtime.bench_block_number(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - >::decode(&mut &memory[4..]).unwrap(), - runtime.ext().block_number() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().block_number()); } #[benchmark(pov_mode = Measured)] fn seal_now() { - let len = as MaxEncodedLen>::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_now(memory.as_mut_slice(), 4, 0); + result = runtime.bench_now(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!(>::decode(&mut &memory[4..]).unwrap(), *runtime.ext().now()); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().now()); } #[benchmark(pov_mode = Measured)] fn seal_weight_to_fee() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let weight = Weight::from_parts(500_000, 300_000); let result; #[block] @@ -748,15 +671,11 @@ mod benchmarks { memory.as_mut_slice(), weight.ref_time(), weight.proof_size(), - 4, 0, ); } assert_ok!(result); - assert_eq!( - >::decode(&mut &memory[4..]).unwrap(), - runtime.ext().get_weight_price(weight) - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().get_weight_price(weight)); } #[benchmark(pov_mode = Measured)] @@ -828,28 +747,33 @@ mod benchmarks { t: Linear<0, { limits::NUM_EVENT_TOPICS as u32 }>, n: Linear<0, { limits::PAYLOAD_BYTES }>, ) { - let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); - let topics_len = topics.len() as u32; - - build_runtime!(runtime, memory: [ - n.to_le_bytes(), - topics, - vec![0u8; n as _], - ]); + let num_topic = t as u32; + let topics = (0..t).map(|i| H256::repeat_byte(i as u8)).collect::>(); + let topics_data = + topics.iter().flat_map(|hash| hash.as_bytes().to_vec()).collect::>(); + let data = vec![42u8; n as _]; + build_runtime!(runtime, instance, memory: [ topics_data, data, ]); let result; #[block] { result = runtime.bench_deposit_event( 
memory.as_mut_slice(), - 4, // topics_ptr - topics_len, // topics_len - 4 + topics_len, // data_ptr - 0, // data_len + 0, // topics_ptr + num_topic, + topics_data.len() as u32, // data_ptr + n, // data_len ); } - assert_ok!(result); + + let events = System::::events(); + let record = &events[events.len() - 1]; + + assert_eq!( + record.event, + crate::Event::ContractEmitted { contract: instance.address(), data, topics }.into(), + ); } // Benchmark debug_message call @@ -1435,7 +1359,7 @@ mod benchmarks { let account_bytes = account.encode(); let account_len = account_bytes.len() as u32; - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let mut memory = memory!(account_bytes, value_bytes,); let result; @@ -1461,10 +1385,10 @@ mod benchmarks { let callee_len = callee_bytes.len() as u32; let value: BalanceOf = t.into(); - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let deposit: BalanceOf = (u32::MAX - 100).into(); - let deposit_bytes = deposit.encode(); + let deposit_bytes = Into::::into(deposit).encode(); let deposit_len = deposit_bytes.len() as u32; let mut setup = CallSetup::::default(); @@ -1536,11 +1460,11 @@ mod benchmarks { let hash_len = hash_bytes.len() as u32; let value: BalanceOf = 1u32.into(); - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let value_len = value_bytes.len() as u32; let deposit: BalanceOf = 0u32.into(); - let deposit_bytes = deposit.encode(); + let deposit_bytes = Into::::into(deposit).encode(); let deposit_len = deposit_bytes.len() as u32; let mut setup = CallSetup::::default(); diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 649479f7790f..468f5aa8240e 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -37,7 +37,7 @@ use frame_support::{ traits::{ fungible::{Inspect, Mutate}, tokens::{Fortitude, Preservation}, - Contains, OriginTrait, Time, + Contains, IsType, OriginTrait, Time, }, weights::Weight, Blake2_128Concat, BoundedVec, StorageHasher, @@ -49,7 +49,7 @@ use frame_system::{ use sp_core::{ ecdsa::Public as ECDSAPublic, sr25519::{Public as SR25519Public, Signature as SR25519Signature}, - ConstU32, Get, H160, H256, + ConstU32, Get, H160, H256, U256, }; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; use sp_runtime::{ @@ -61,9 +61,6 @@ pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; pub type ExecResult = Result; -/// A type that represents a topic of an event. At the moment a hash is used. -pub type TopicOf = ::Hash; - /// Type for variable sized storage key. Used for transparent hashing. type VarSizedKey = BoundedVec>; @@ -184,9 +181,9 @@ pub trait Ext: sealing::Sealed { fn call( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, to: &H160, - value: BalanceOf, + value: U256, input_data: Vec, allows_reentry: bool, read_only: bool, @@ -209,9 +206,9 @@ pub trait Ext: sealing::Sealed { fn instantiate( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, code: H256, - value: BalanceOf, + value: U256, input_data: Vec, salt: Option<&[u8; 32]>, ) -> Result<(H160, ExecReturnValue), ExecError>; @@ -226,7 +223,7 @@ pub trait Ext: sealing::Sealed { fn terminate(&mut self, beneficiary: &H160) -> DispatchResult; /// Transfer some amount of funds into the specified account. 
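// Illustrative sketch of the flat topics ABI the reworked
// `seal_deposit_event` benchmark above drives: topics enter guest memory as
// consecutive raw 32-byte words plus a separate count (`num_topic`), not as
// a SCALE-encoded vector. Assumes the sp-core H256 API:
use sp_core::H256;

fn pack_topics(topics: &[H256]) -> Vec<u8> {
    // Concatenate raw words; the count travels separately.
    topics.iter().flat_map(|h| h.as_bytes().to_vec()).collect()
}

fn unpack_topics(raw: &[u8]) -> Vec<H256> {
    raw.chunks_exact(32).map(H256::from_slice).collect()
}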
- fn transfer(&mut self, to: &H160, value: BalanceOf) -> DispatchResult; + fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult; /// Returns the storage entry of the executing account by the given `key`. /// @@ -304,30 +301,30 @@ pub trait Ext: sealing::Sealed { /// Returns the balance of the current contract. /// /// The `value_transferred` is already added. - fn balance(&self) -> BalanceOf; + fn balance(&self) -> U256; /// Returns the value transferred along with this call. - fn value_transferred(&self) -> BalanceOf; + fn value_transferred(&self) -> U256; - /// Returns a reference to the timestamp of the current block - fn now(&self) -> &MomentOf; + /// Returns the timestamp of the current block + fn now(&self) -> U256; /// Returns the minimum balance that is required for creating an account. - fn minimum_balance(&self) -> BalanceOf; + fn minimum_balance(&self) -> U256; /// Deposit an event with the given topics. /// /// There should not be any duplicates in `topics`. - fn deposit_event(&mut self, topics: Vec>, data: Vec); + fn deposit_event(&mut self, topics: Vec, data: Vec); /// Returns the current block number. - fn block_number(&self) -> BlockNumberFor; + fn block_number(&self) -> U256; /// Returns the maximum allowed size of a storage item. fn max_value_size(&self) -> u32; /// Returns the price for the specified amount of weight. - fn get_weight_price(&self, weight: Weight) -> BalanceOf; + fn get_weight_price(&self, weight: Weight) -> U256; /// Get an immutable reference to the nested gas meter. fn gas_meter(&self) -> &GasMeter; @@ -697,6 +694,9 @@ impl CachedContract { impl<'a, T, E> Stack<'a, T, E> where T: Config, + T::Hash: IsType, + BalanceOf: Into + TryFrom, + MomentOf: Into, E: Executable, { /// Create and run a new call stack by calling into `dest`. @@ -1239,16 +1239,19 @@ where impl<'a, T, E> Ext for Stack<'a, T, E> where T: Config, + T::Hash: IsType, E: Executable, + BalanceOf: Into + TryFrom, + MomentOf: Into, { type T = T; fn call( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, dest: &H160, - value: BalanceOf, + value: U256, input_data: Vec, allows_reentry: bool, read_only: bool, @@ -1277,9 +1280,9 @@ where }); let executable = self.push_frame( FrameArgs::Call { dest, cached_info, delegated_call: None }, - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, gas_limit, - deposit_limit, + deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, // Enable read-only access if requested; cannot disable it if already set. 
read_only || self.is_read_only(), )?; @@ -1322,9 +1325,9 @@ where fn instantiate( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, code_hash: H256, - value: BalanceOf, + value: U256, input_data: Vec, salt: Option<&[u8; 32]>, ) -> Result<(H160, ExecReturnValue), ExecError> { @@ -1337,9 +1340,9 @@ where salt, input_data: input_data.as_ref(), }, - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, gas_limit, - deposit_limit, + deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, self.is_read_only(), )?; let address = T::AddressMapper::to_address(&self.top_frame().account_id); @@ -1374,12 +1377,12 @@ where Ok(()) } - fn transfer(&mut self, to: &H160, value: BalanceOf) -> DispatchResult { + fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult { Self::transfer( Preservation::Preserve, &self.top_frame().account_id, &T::AddressMapper::to_account_id(to), - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, ) } @@ -1462,46 +1465,45 @@ where self.caller_is_origin() && self.origin == Origin::Root } - fn balance(&self) -> BalanceOf { + fn balance(&self) -> U256 { T::Currency::reducible_balance( &self.top_frame().account_id, Preservation::Preserve, Fortitude::Polite, ) + .into() } - fn value_transferred(&self) -> BalanceOf { - self.top_frame().value_transferred + fn value_transferred(&self) -> U256 { + self.top_frame().value_transferred.into() } - fn now(&self) -> &MomentOf { - &self.timestamp + fn now(&self) -> U256 { + self.timestamp.into() } - fn minimum_balance(&self) -> BalanceOf { - T::Currency::minimum_balance() + fn minimum_balance(&self) -> U256 { + T::Currency::minimum_balance().into() } - fn deposit_event(&mut self, topics: Vec, data: Vec) { - Contracts::::deposit_indexed_event( + fn deposit_event(&mut self, topics: Vec, data: Vec) { + Contracts::::deposit_event(Event::ContractEmitted { + contract: T::AddressMapper::to_address(self.account_id()), + data, topics, - Event::ContractEmitted { - contract: T::AddressMapper::to_address(self.account_id()), - data, - }, - ); + }); } - fn block_number(&self) -> BlockNumberFor { - self.block_number + fn block_number(&self) -> U256 { + self.block_number.into() } fn max_value_size(&self) -> u32 { limits::PAYLOAD_BYTES } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { - T::WeightPrice::convert(weight) + fn get_weight_price(&self, weight: Weight) -> U256 { + T::WeightPrice::convert(weight).into() } fn gas_meter(&self) -> &GasMeter { @@ -1864,7 +1866,7 @@ mod tests { let value = 55; let success_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); @@ -1896,12 +1898,12 @@ mod tests { let value = 35; let success_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); @@ -2112,9 +2114,9 @@ mod tests { // Try to call into yourself. 
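// Design note on the conversions above: `Ext::call`, `instantiate` and
// `transfer` narrow U256 back to the native Balance via
// `try_into().map_err(|_| Error::<T>::BalanceConversionFailed)`. A sketch of
// the trade-off, with u128 standing in for Balance (an assumption):
use sp_core::U256;

fn narrow_strict(v: U256) -> Result<u128, &'static str> {
    // What the pallet does in spirit: reject out-of-range values outright.
    u128::try_from(v).map_err(|_| "BalanceConversionFailed")
}

fn narrow_saturating(v: U256) -> u128 {
    // The rejected alternative: clamping would silently transfer less than
    // a contract asked for.
    u128::try_from(v).unwrap_or(u128::MAX)
}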
let r = ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &BOB_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2175,9 +2177,9 @@ mod tests { assert_matches!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2316,9 +2318,9 @@ mod tests { // BOB calls CHARLIE ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2410,9 +2412,9 @@ mod tests { // BOB calls CHARLIE. ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2448,9 +2450,9 @@ mod tests { assert_matches!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2621,9 +2623,9 @@ mod tests { .ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), dummy_ch, - ::Currency::minimum_balance(), + ::Currency::minimum_balance().into(), vec![], Some(&[48; 32]), ) @@ -2699,9 +2701,9 @@ mod tests { assert_matches!( ctx.ext.instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), dummy_ch, - ::Currency::minimum_balance(), + ::Currency::minimum_balance().into(), vec![], Some(&[0; 32]), ), @@ -2804,9 +2806,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2820,15 +2822,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); exec_trapped() }); @@ -2860,7 +2854,7 @@ mod tests { let addr = ::AddressMapper::to_address(&account_id); assert_matches!( - ctx.ext.call(Weight::zero(), BalanceOf::::zero(), &addr, 0, vec![], + ctx.ext.call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], true, false), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2998,7 +2992,7 @@ mod tests { let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = H160::from_slice(ctx.input_data.as_ref()); ctx.ext - .call(Weight::zero(), BalanceOf::::zero(), &dest, 0, vec![], false, false) + .call(Weight::zero(), U256::zero(), &dest, U256::zero(), vec![], false, false) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -3043,9 +3037,9 @@ mod tests { if ctx.input_data[0] == 0 { ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], false, false, @@ -3059,9 +3053,9 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &BOB_ADDR, - 0, + U256::zero(), vec![1], true, false, @@ -3251,7 +3245,7 @@ mod tests { ctx.ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), fail_code, ctx.ext.minimum_balance() * 100, vec![], @@ -3268,7 +3262,7 @@ mod tests { .ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), success_code, ctx.ext.minimum_balance() * 100, vec![], @@ -3284,7 +3278,7 @@ mod tests { // a plain call should not influence the account counter ctx.ext - .call(Weight::zero(), BalanceOf::::zero(), &addr, 0, vec![], false, false) + .call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], false, false) .unwrap(); assert_eq!(System::account_nonce(ALICE), 
alice_nonce); @@ -3822,9 +3816,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -3849,15 +3843,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); // CHARLIE can not read BOB`s storage. assert_eq!(ctx.ext.get_transient_storage(storage_key_1), None); @@ -3934,9 +3920,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -3957,15 +3943,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); exec_trapped() }); diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 393acc8c9852..d1e17fb7b390 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -27,7 +27,10 @@ mod benchmarking_dummy; mod exec; mod gas; mod primitives; +use crate::exec::MomentOf; +use frame_support::traits::IsType; pub use primitives::*; +use sp_core::U256; mod limits; mod storage; @@ -36,7 +39,6 @@ mod wasm; pub mod chain_extension; pub mod debug; -pub mod migration; pub mod test_utils; pub mod weights; @@ -54,7 +56,7 @@ use environmental::*; use frame_support::{ dispatch::{ DispatchErrorWithPostInfo, DispatchResultWithPostInfo, GetDispatchInfo, Pays, - PostDispatchInfo, RawOrigin, WithPostDispatchInfo, + PostDispatchInfo, RawOrigin, }, ensure, traits::{ @@ -79,7 +81,6 @@ use sp_runtime::{ pub use crate::{ address::{AddressMapper, DefaultAddressMapper}, debug::Tracing, - migration::{MigrateSequence, Migration, NoopMigration}, pallet::*, }; pub use weights::WeightInfo; @@ -129,6 +130,7 @@ pub mod pallet { use crate::debug::Debugger; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_core::U256; use sp_runtime::Perbill; /// The in-code storage version. @@ -206,7 +208,7 @@ pub mod pallet { /// /// # Note /// - /// It is safe to chage this value on a live chain as all refunds are pro rata. + /// It is safe to change this value on a live chain as all refunds are pro rata. #[pallet::constant] #[pallet::no_default_bounds] type DepositPerByte: Get>; @@ -215,7 +217,7 @@ pub mod pallet { /// /// # Note /// - /// It is safe to chage this value on a live chain as all refunds are pro rata. + /// It is safe to change this value on a live chain as all refunds are pro rata. #[pallet::constant] #[pallet::no_default_bounds] type DepositPerItem: Get>; @@ -271,25 +273,6 @@ pub mod pallet { #[pallet::no_default_bounds] type InstantiateOrigin: EnsureOrigin; - /// The sequence of migration steps that will be applied during a migration. 
- ///
- /// # Examples
- /// ```ignore
- /// use pallet_revive::migration::{v10, v11};
- /// # struct Runtime {};
- /// # struct Currency {};
- /// type Migrations = (v10::Migration, v11::Migration);
- /// ```
- ///
- /// If you have a single migration step, you can use a tuple with a single element:
- /// ```ignore
- /// use pallet_revive::migration::v10;
- /// # struct Runtime {};
- /// # struct Currency {};
- /// type Migrations = (v10::Migration,);
- /// ```
- type Migrations: MigrateSequence;
-
 /// For most production chains, it's recommended to use the `()` implementation of this
 /// trait. This implementation offers additional logging when the log target
 /// "runtime::revive" is set to trace.
@@ -305,13 +288,13 @@ pub mod pallet {
 BlockNumberFor,
 >;

- /// The amount of memory in bytes that parachain nodes alot to the runtime.
+ /// The amount of memory in bytes that parachain nodes allot to the runtime.
 ///
 /// This is used in [`Pallet::integrity_test`] to make sure that the runtime has enough
 /// memory to support this pallet if set to the correct value.
 type RuntimeMemory: Get;

- /// The amount of memory in bytes that relay chain validators alot to the PoV.
+ /// The amount of memory in bytes that relay chain validators allot to the PoV.
 ///
 /// This is used in [`Pallet::integrity_test`] to make sure that the runtime has enough
 /// memory to support this pallet if set to the correct value.
@@ -382,7 +365,6 @@ pub mod pallet {
 type DepositPerByte = DepositPerByte;
 type DepositPerItem = DepositPerItem;
 type MaxCodeLen = ConstU32<{ 123 * 1024 }>;
- type Migrations = ();
 type Time = Self;
 type UnsafeUnstableInterface = ConstBool;
 type UploadOrigin = EnsureSigned;
@@ -424,6 +406,9 @@ pub mod pallet {
 /// Data supplied by the contract. Metadata generated during contract compilation
 /// is needed to decode it.
 data: Vec,
+ /// A list of topics used to index the event.
+ /// The number of topics is capped by [`limits::NUM_EVENT_TOPICS`].
+ topics: Vec,
 },

 /// A code with the specified hash was removed.
@@ -549,10 +534,6 @@ pub mod pallet {
 /// A more detailed error can be found on the node console if debug messages are enabled
 /// by supplying `-lruntime::revive=debug`.
 CodeRejected,
- /// A pending migration needs to complete before the extrinsic can be called.
- MigrationInProgress,
- /// Migrate dispatch call was attempted but no migration was performed.
- NoMigrationPerformed,
 /// The contract has reached its maximum number of delegate dependencies.
 MaxDelegateDependenciesReached,
 /// The dependency was not found in the contract's delegate dependencies.
@@ -569,6 +550,8 @@ pub mod pallet {
 InvalidStorageFlags,
 /// PolkaVM failed during code execution. Probably due to a malformed program.
 ExecutionFailed,
+ /// Failed to convert a U256 to a Balance.
+ BalanceConversionFailed,
 }

 /// A reason for the pallet contracts placing a hold on funds.
@@ -605,12 +588,6 @@ pub mod pallet {
 pub(crate) type DeletionQueueCounter =
 StorageValue<_, DeletionQueueManager, ValueQuery>;

- /// A migration can span across multiple blocks. This storage defines a cursor to track the
- /// progress of the migration, enabling us to resume from the last completed position.
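// Illustrative mirror of the reworked `ContractEmitted` variant added above
// (stand-in types; the real event is generic over the runtime config):
use sp_core::{H160, H256};

enum ContractsEvent {
    ContractEmitted {
        contract: H160,    // emitting contract's Ethereum-style address
        data: Vec<u8>,     // opaque payload, decoded via contract metadata
        topics: Vec<H256>, // indexed topics, capped by limits::NUM_EVENT_TOPICS
    },
}

// Clients now read topics straight out of the event payload instead of the
// System-level indexed-topic store.
fn topics_of(ev: &ContractsEvent) -> &[H256] {
    match ev {
        ContractsEvent::ContractEmitted { topics, .. } => topics,
    }
}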
- #[pallet::storage] - pub(crate) type MigrationInProgress = - StorageValue<_, migration::Cursor, OptionQuery>; - #[pallet::extra_constants] impl Pallet { #[pallet::constant_name(ApiVersion)] @@ -620,31 +597,17 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet { + impl Hooks> for Pallet + where + T::Hash: IsType, + { fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { - use migration::MigrateResult::*; let mut meter = WeightMeter::with_limit(limit); - - loop { - match Migration::::migrate(&mut meter) { - // There is not enough weight to perform a migration. - // We can't do anything more, so we return the used weight. - NoMigrationPerformed | InProgress { steps_done: 0 } => return meter.consumed(), - // Migration is still in progress, we can start the next step. - InProgress { .. } => continue, - // Either no migration is in progress, or we are done with all migrations, we - // can do some more other work with the remaining weight. - Completed | NoMigrationInProgress => break, - } - } - ContractInfo::::process_deletion_queue_batch(&mut meter); meter.consumed() } fn integrity_test() { - Migration::::integrity_test(); - // Total runtime memory limit let max_runtime_mem: u32 = T::RuntimeMemory::get(); // Memory limits for a single contract: @@ -771,7 +734,10 @@ pub mod pallet { #[pallet::call] impl Pallet where + T::Hash: IsType, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, { /// Makes a call to an account, optionally transferring some balance. /// @@ -957,7 +923,6 @@ pub mod pallet { origin: OriginFor, code_hash: sp_core::H256, ) -> DispatchResultWithPostInfo { - Migration::::ensure_migrated()?; let origin = ensure_signed(origin)?; >::remove(&origin, code_hash)?; // we waive the fee because removing unused code is beneficial @@ -981,7 +946,6 @@ pub mod pallet { dest: H160, code_hash: sp_core::H256, ) -> DispatchResult { - Migration::::ensure_migrated()?; ensure_root(origin)?; >::try_mutate(&dest, |contract| { let contract = if let Some(contract) = contract { @@ -1000,40 +964,6 @@ pub mod pallet { Ok(()) }) } - - /// When a migration is in progress, this dispatchable can be used to run migration steps. - /// Calls that contribute to advancing the migration have their fees waived, as it's helpful - /// for the chain. Note that while the migration is in progress, the pallet will also - /// leverage the `on_idle` hooks to run migration steps. - #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::migrate().saturating_add(*weight_limit))] - pub fn migrate(origin: OriginFor, weight_limit: Weight) -> DispatchResultWithPostInfo { - use migration::MigrateResult::*; - ensure_signed(origin)?; - - let weight_limit = weight_limit.saturating_add(T::WeightInfo::migrate()); - let mut meter = WeightMeter::with_limit(weight_limit); - let result = Migration::::migrate(&mut meter); - - match result { - Completed => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::No, - }), - InProgress { steps_done, .. } if steps_done > 0 => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::No, - }), - InProgress { .. 
} => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::Yes, - }), - NoMigrationInProgress | NoMigrationPerformed => { - let err: DispatchError = >::NoMigrationPerformed.into(); - Err(err.with_weight(meter.consumed())) - }, - } - } } } @@ -1053,7 +983,12 @@ fn dispatch_result( .map_err(|e| DispatchErrorWithPostInfo { post_info, error: e }) } -impl Pallet { +impl Pallet +where + BalanceOf: Into + TryFrom, + MomentOf: Into, + T::Hash: IsType, +{ /// A generalized version of [`Self::call`]. /// /// Identical to [`Self::call`] but tailored towards being called by other code within the @@ -1078,7 +1013,6 @@ impl Pallet { None }; let try_call = || { - Migration::::ensure_migrated()?; let origin = Origin::from_runtime_origin(origin)?; let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; let result = ExecStack::>::run_call( @@ -1131,7 +1065,6 @@ impl Pallet { let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; let try_instantiate = || { - Migration::::ensure_migrated()?; let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { @@ -1192,7 +1125,6 @@ impl Pallet { code: Vec, storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { - Migration::::ensure_migrated()?; let origin = T::UploadOrigin::ensure_origin(origin)?; let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, None)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) @@ -1200,9 +1132,6 @@ impl Pallet { /// Query storage of a specified contract under a specified key. pub fn get_storage(address: H160, key: [u8; 32]) -> GetStorageResult { - if Migration::::in_progress() { - return Err(ContractAccessError::MigrationInProgress) - } let contract_info = ContractInfoOf::::get(&address).ok_or(ContractAccessError::DoesntExist)?; @@ -1226,24 +1155,6 @@ impl Pallet { Ok((module, deposit)) } - /// Deposit a pallet contracts event. - fn deposit_event(event: Event) { - >::deposit_event(::RuntimeEvent::from(event)) - } - - /// Deposit a pallet contracts indexed event. - fn deposit_indexed_event(topics: Vec, event: Event) { - >::deposit_event_indexed( - &topics, - ::RuntimeEvent::from(event).into(), - ) - } - - /// Return the existential deposit of [`Config::Currency`]. - fn min_balance() -> BalanceOf { - >>::minimum_balance() - } - /// Run the supplied function `f` if no other instance of this pallet is on the stack. fn run_guarded Result>(f: F) -> Result { executing_contract::using_once(&mut false, || { @@ -1264,6 +1175,22 @@ impl Pallet { } } +impl Pallet +where + T: Config, + T::Hash: IsType, +{ + /// Return the existential deposit of [`Config::Currency`]. + fn min_balance() -> BalanceOf { + >>::minimum_balance() + } + + /// Deposit a pallet contracts event. + fn deposit_event(event: Event) { + >::deposit_event(::RuntimeEvent::from(event)) + } +} + // Set up a global reference to the boolean flag used for the re-entrancy guard. environmental!(executing_contract: bool); diff --git a/substrate/frame/revive/src/migration.rs b/substrate/frame/revive/src/migration.rs deleted file mode 100644 index b67467b322f5..000000000000 --- a/substrate/frame/revive/src/migration.rs +++ /dev/null @@ -1,650 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Multi-block Migration framework for pallet-revive. -//! -//! This module allows us to define a migration as a sequence of [`MigrationStep`]s that can be -//! executed across multiple blocks. -//! -//! # Usage -//! -//! A migration step is defined under `src/migration/vX.rs`, where `X` is the version number. -//! For example, `vX.rs` defines a migration from version `X - 1` to version `X`. -//! -//! ## Example: -//! -//! To configure a migration to `v11` for a runtime using `v10` of pallet-revive on the chain, -//! you would set the `Migrations` type as follows: -//! -//! ```ignore -//! use pallet_revive::migration::{v10, v11}; -//! # pub enum Runtime {}; -//! # struct Currency; -//! type Migrations = (v10::Migration, v11::Migration); -//! ``` -//! -//! ## Notes: -//! -//! - Migrations should always be tested with `try-runtime` before being deployed. -//! - By testing with `try-runtime` against a live network, you ensure that all migration steps work -//! and that you have included the required steps. -//! -//! ## Low Level / Implementation Details -//! -//! When a migration starts and [`OnRuntimeUpgrade::on_runtime_upgrade`] is called, instead of -//! performing the actual migration, we set a custom storage item [`MigrationInProgress`]. -//! This storage item defines a [`Cursor`] for the current migration. -//! -//! If the [`MigrationInProgress`] storage item exists, it means a migration is in progress, and its -//! value holds a cursor for the current migration step. These migration steps are executed during -//! [`Hooks::on_idle`] or when the [`Pallet::migrate`] dispatchable is -//! called. -//! -//! While the migration is in progress, all dispatchables except `migrate`, are blocked, and returns -//! a `MigrationInProgress` error. - -include!(concat!(env!("OUT_DIR"), "/migration_codegen.rs")); - -use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET}; -use codec::{Codec, Decode}; -use core::marker::PhantomData; -use frame_support::{ - pallet_prelude::*, - traits::{ConstU32, OnRuntimeUpgrade}, - weights::WeightMeter, -}; -use sp_runtime::Saturating; - -#[cfg(feature = "try-runtime")] -use alloc::vec::Vec; -#[cfg(feature = "try-runtime")] -use sp_runtime::TryRuntimeError; - -const PROOF_ENCODE: &str = "Tuple::max_encoded_len() < Cursor::max_encoded_len()` is verified in `Self::integrity_test()`; qed"; -const PROOF_DECODE: &str = - "We encode to the same type in this trait only. No other code touches this item; qed"; - -fn invalid_version(version: StorageVersion) -> ! { - panic!("Required migration {version:?} not supported by this runtime. This is a bug."); -} - -/// The cursor used to encode the position (usually the last iterated key) of the current migration -/// step. -pub type Cursor = BoundedVec>; - -/// IsFinished describes whether a migration is finished or not. 
-pub enum IsFinished { - Yes, - No, -} - -/// A trait that allows to migrate storage from one version to another. -/// -/// The migration is done in steps. The migration is finished when -/// `step()` returns `IsFinished::Yes`. -pub trait MigrationStep: Codec + MaxEncodedLen + Default { - /// Returns the version of the migration. - const VERSION: u16; - - /// Returns the maximum weight that can be consumed in a single step. - fn max_step_weight() -> Weight; - - /// Process one step of the migration. - /// - /// Returns whether the migration is finished. - fn step(&mut self, meter: &mut WeightMeter) -> IsFinished; - - /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater - /// than `max_block_weight`. - fn integrity_test(max_block_weight: Weight) { - if Self::max_step_weight().any_gt(max_block_weight) { - panic!( - "Invalid max_step_weight for Migration {}. Value should be lower than {}", - Self::VERSION, - max_block_weight - ); - } - - let len = ::max_encoded_len(); - let max = Cursor::bound(); - if len > max { - panic!( - "Migration {} has size {} which is bigger than the maximum of {}", - Self::VERSION, - len, - max, - ); - } - } - - /// Execute some pre-checks prior to running the first step of this migration. - #[cfg(feature = "try-runtime")] - fn pre_upgrade_step() -> Result, TryRuntimeError> { - Ok(Vec::new()) - } - - /// Execute some post-checks after running the last step of this migration. - #[cfg(feature = "try-runtime")] - fn post_upgrade_step(_state: Vec) -> Result<(), TryRuntimeError> { - Ok(()) - } -} - -/// A noop migration that can be used when there is no migration to be done for a given version. -#[doc(hidden)] -#[derive(frame_support::DefaultNoBound, Encode, Decode, MaxEncodedLen)] -pub struct NoopMigration; - -impl MigrationStep for NoopMigration { - const VERSION: u16 = N; - fn max_step_weight() -> Weight { - Weight::zero() - } - fn step(&mut self, _meter: &mut WeightMeter) -> IsFinished { - log::debug!(target: LOG_TARGET, "Noop migration for version {}", N); - IsFinished::Yes - } -} - -mod private { - use crate::migration::MigrationStep; - pub trait Sealed {} - #[impl_trait_for_tuples::impl_for_tuples(10)] - #[tuple_types_custom_trait_bound(MigrationStep)] - impl Sealed for Tuple {} -} - -/// Defines a sequence of migrations. -/// -/// The sequence must be defined by a tuple of migrations, each of which must implement the -/// `MigrationStep` trait. Migrations must be ordered by their versions with no gaps. -pub trait MigrateSequence: private::Sealed { - /// Returns the range of versions that this migrations sequence can handle. - /// Migrations must be ordered by their versions with no gaps. - /// - /// The following code will fail to compile: - /// - /// ```compile_fail - /// # use pallet_revive::{NoopMigration, MigrateSequence}; - /// let _ = <(NoopMigration<1>, NoopMigration<3>)>::VERSION_RANGE; - /// ``` - /// The following code will compile: - /// ``` - /// # use pallet_revive::{NoopMigration, MigrateSequence}; - /// let _ = <(NoopMigration<1>, NoopMigration<2>)>::VERSION_RANGE; - /// ``` - const VERSION_RANGE: (u16, u16); - - /// Returns the default cursor for the given version. 
- fn new(version: StorageVersion) -> Cursor; - - #[cfg(feature = "try-runtime")] - fn pre_upgrade_step(_version: StorageVersion) -> Result, TryRuntimeError> { - Ok(Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade_step(_version: StorageVersion, _state: Vec) -> Result<(), TryRuntimeError> { - Ok(()) - } - - /// Execute the migration step until the available weight is consumed. - fn steps(version: StorageVersion, cursor: &[u8], meter: &mut WeightMeter) -> StepResult; - - /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater - /// than `max_block_weight`. - fn integrity_test(max_block_weight: Weight); - - /// Returns whether migrating from `in_storage` to `target` is supported. - /// - /// A migration is supported if `VERSION_RANGE` is (in_storage + 1, target). - fn is_upgrade_supported(in_storage: StorageVersion, target: StorageVersion) -> bool { - let (low, high) = Self::VERSION_RANGE; - target == high && in_storage + 1 == low - } -} - -/// Performs all necessary migrations based on `StorageVersion`. -/// -/// If `TEST_ALL_STEPS == true` and `try-runtime` is enabled, this will run all the migrations -/// inside `on_runtime_upgrade`. This should be set to false in tests that want to ensure the step -/// by step migration works. -pub struct Migration(PhantomData); - -#[cfg(feature = "try-runtime")] -impl Migration { - fn run_all_steps() -> Result<(), TryRuntimeError> { - let mut meter = &mut WeightMeter::new(); - let name = >::name(); - loop { - let in_progress_version = >::on_chain_storage_version() + 1; - let state = T::Migrations::pre_upgrade_step(in_progress_version)?; - let before = meter.consumed(); - let status = Self::migrate(&mut meter); - log::info!( - target: LOG_TARGET, - "{name}: Migration step {:?} weight = {}", - in_progress_version, - meter.consumed() - before - ); - T::Migrations::post_upgrade_step(in_progress_version, state)?; - if matches!(status, MigrateResult::Completed) { - break - } - } - - let name = >::name(); - log::info!(target: LOG_TARGET, "{name}: Migration steps weight = {}", meter.consumed()); - Ok(()) - } -} - -impl OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - let name = >::name(); - let in_code_version = >::in_code_storage_version(); - let on_chain_version = >::on_chain_storage_version(); - - if on_chain_version == in_code_version { - log::warn!( - target: LOG_TARGET, - "{name}: No Migration performed storage_version = latest_version = {:?}", - &on_chain_version - ); - return T::WeightInfo::on_runtime_upgrade_noop() - } - - // In case a migration is already in progress we create the next migration - // (if any) right when the current one finishes. - if Self::in_progress() { - log::warn!( - target: LOG_TARGET, - "{name}: Migration already in progress {:?}", - &on_chain_version - ); - - return T::WeightInfo::on_runtime_upgrade_in_progress() - } - - log::info!( - target: LOG_TARGET, - "{name}: Upgrading storage from {on_chain_version:?} to {in_code_version:?}.", - ); - - let cursor = T::Migrations::new(on_chain_version + 1); - MigrationInProgress::::set(Some(cursor)); - - #[cfg(feature = "try-runtime")] - if TEST_ALL_STEPS { - Self::run_all_steps().unwrap(); - } - - T::WeightInfo::on_runtime_upgrade() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - // We can't really do much here as our migrations do not happen during the runtime upgrade. 
- // Instead, we call the migrations `pre_upgrade` and `post_upgrade` hooks when we iterate - // over our migrations. - let on_chain_version = >::on_chain_storage_version(); - let in_code_version = >::in_code_storage_version(); - - if on_chain_version == in_code_version { - return Ok(Default::default()) - } - - log::debug!( - target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", - >::name(), on_chain_version, in_code_version - ); - - ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" - ); - - Ok(Default::default()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { - if !TEST_ALL_STEPS { - return Ok(()) - } - - log::info!(target: LOG_TARGET, "=== POST UPGRADE CHECKS ==="); - - // Ensure that the hashing algorithm is correct for each storage map. - if let Some(hash) = crate::CodeInfoOf::::iter_keys().next() { - crate::CodeInfoOf::::get(hash).expect("CodeInfo exists for hash; qed"); - } - if let Some(hash) = crate::PristineCode::::iter_keys().next() { - crate::PristineCode::::get(hash).expect("PristineCode exists for hash; qed"); - } - if let Some(account_id) = crate::ContractInfoOf::::iter_keys().next() { - crate::ContractInfoOf::::get(account_id) - .expect("ContractInfo exists for account_id; qed"); - } - if let Some(nonce) = crate::DeletionQueue::::iter_keys().next() { - crate::DeletionQueue::::get(nonce).expect("DeletionQueue exists for nonce; qed"); - } - - Ok(()) - } -} - -/// The result of running the migration. -#[derive(Debug, PartialEq)] -pub enum MigrateResult { - /// No migration was performed - NoMigrationPerformed, - /// No migration currently in progress - NoMigrationInProgress, - /// A migration is in progress - InProgress { steps_done: u32 }, - /// All migrations are completed - Completed, -} - -/// The result of running a migration step. -#[derive(Debug, PartialEq)] -pub enum StepResult { - InProgress { cursor: Cursor, steps_done: u32 }, - Completed { steps_done: u32 }, -} - -impl Migration { - /// Verify that each migration's step of the [`Config::Migrations`] sequence fits into - /// `Cursor`. - pub(crate) fn integrity_test() { - let max_weight = ::BlockWeights::get().max_block; - T::Migrations::integrity_test(max_weight) - } - - /// Execute the multi-step migration. 
- /// Returns whether or not a migration is in progress - pub(crate) fn migrate(mut meter: &mut WeightMeter) -> MigrateResult { - let name = >::name(); - - if meter.try_consume(T::WeightInfo::migrate()).is_err() { - return MigrateResult::NoMigrationPerformed - } - - MigrationInProgress::::mutate_exists(|progress| { - let Some(cursor_before) = progress.as_mut() else { - meter.consume(T::WeightInfo::migration_noop()); - return MigrateResult::NoMigrationInProgress - }; - - // if a migration is running it is always upgrading to the next version - let storage_version = >::on_chain_storage_version(); - let in_progress_version = storage_version + 1; - - log::info!( - target: LOG_TARGET, - "{name}: Migrating from {:?} to {:?},", - storage_version, - in_progress_version, - ); - - let result = - match T::Migrations::steps(in_progress_version, cursor_before.as_ref(), &mut meter) - { - StepResult::InProgress { cursor, steps_done } => { - *progress = Some(cursor); - MigrateResult::InProgress { steps_done } - }, - StepResult::Completed { steps_done } => { - in_progress_version.put::>(); - if >::in_code_storage_version() != in_progress_version { - log::info!( - target: LOG_TARGET, - "{name}: Next migration is {:?},", - in_progress_version + 1 - ); - *progress = Some(T::Migrations::new(in_progress_version + 1)); - MigrateResult::InProgress { steps_done } - } else { - log::info!( - target: LOG_TARGET, - "{name}: All migrations done. At version {:?},", - in_progress_version - ); - *progress = None; - MigrateResult::Completed - } - }, - }; - - result - }) - } - - pub(crate) fn ensure_migrated() -> DispatchResult { - if Self::in_progress() { - Err(Error::::MigrationInProgress.into()) - } else { - Ok(()) - } - } - - pub(crate) fn in_progress() -> bool { - MigrationInProgress::::exists() - } -} - -#[impl_trait_for_tuples::impl_for_tuples(10)] -#[tuple_types_custom_trait_bound(MigrationStep)] -impl MigrateSequence for Tuple { - const VERSION_RANGE: (u16, u16) = { - let mut versions: (u16, u16) = (0, 0); - for_tuples!( - #( - match versions { - (0, 0) => { - versions = (Tuple::VERSION, Tuple::VERSION); - }, - (min_version, last_version) if Tuple::VERSION == last_version + 1 => { - versions = (min_version, Tuple::VERSION); - }, - _ => panic!("Migrations must be ordered by their versions with no gaps.") - } - )* - ); - versions - }; - - fn new(version: StorageVersion) -> Cursor { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::default().encode().try_into().expect(PROOF_ENCODE) - } - )* - ); - invalid_version(version) - } - - #[cfg(feature = "try-runtime")] - /// Execute the pre-checks of the step associated with this version. - fn pre_upgrade_step(version: StorageVersion) -> Result, TryRuntimeError> { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::pre_upgrade_step() - } - )* - ); - invalid_version(version) - } - - #[cfg(feature = "try-runtime")] - /// Execute the post-checks of the step associated with this version. 
- fn post_upgrade_step(version: StorageVersion, state: Vec) -> Result<(), TryRuntimeError> { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::post_upgrade_step(state) - } - )* - ); - invalid_version(version) - } - - fn steps(version: StorageVersion, mut cursor: &[u8], meter: &mut WeightMeter) -> StepResult { - for_tuples!( - #( - if version == Tuple::VERSION { - let mut migration = ::decode(&mut cursor) - .expect(PROOF_DECODE); - let max_weight = Tuple::max_step_weight(); - let mut steps_done = 0; - while meter.can_consume(max_weight) { - steps_done.saturating_accrue(1); - if matches!(migration.step(meter), IsFinished::Yes) { - return StepResult::Completed{ steps_done } - } - } - return StepResult::InProgress{cursor: migration.encode().try_into().expect(PROOF_ENCODE), steps_done } - } - )* - ); - invalid_version(version) - } - - fn integrity_test(max_block_weight: Weight) { - for_tuples!( - #( - Tuple::integrity_test(max_block_weight); - )* - ); - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - migration::codegen::LATEST_MIGRATION_VERSION, - tests::{ExtBuilder, Test}, - }; - - #[derive(Default, Encode, Decode, MaxEncodedLen)] - struct MockMigration { - // MockMigration needs `N` steps to finish - count: u16, - } - - impl MigrationStep for MockMigration { - const VERSION: u16 = N; - fn max_step_weight() -> Weight { - Weight::from_all(1) - } - fn step(&mut self, meter: &mut WeightMeter) -> IsFinished { - assert!(self.count != N); - self.count += 1; - meter.consume(Weight::from_all(1)); - if self.count == N { - IsFinished::Yes - } else { - IsFinished::No - } - } - } - - #[test] - fn test_storage_version_matches_last_migration_file() { - assert_eq!(StorageVersion::new(LATEST_MIGRATION_VERSION), crate::pallet::STORAGE_VERSION); - } - - #[test] - fn version_range_works() { - let range = <(MockMigration<1>, MockMigration<2>)>::VERSION_RANGE; - assert_eq!(range, (1, 2)); - } - - #[test] - fn is_upgrade_supported_works() { - type Migrations = (MockMigration<9>, MockMigration<10>, MockMigration<11>); - assert!(Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(11))); - assert!(!Migrations::is_upgrade_supported(StorageVersion::new(9), StorageVersion::new(11))); - assert!(!Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(12))); - } - - #[test] - fn steps_works() { - type Migrations = (MockMigration<2>, MockMigration<3>); - let version = StorageVersion::new(2); - let mut cursor = Migrations::new(version); - - let mut meter = WeightMeter::with_limit(Weight::from_all(1)); - let result = Migrations::steps(version, &cursor, &mut meter); - cursor = alloc::vec![1u8, 0].try_into().unwrap(); - assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 }); - assert_eq!(meter.consumed(), Weight::from_all(1)); - - let mut meter = WeightMeter::with_limit(Weight::from_all(1)); - assert_eq!( - Migrations::steps(version, &cursor, &mut meter), - StepResult::Completed { steps_done: 1 } - ); - } - - #[test] - fn no_migration_in_progress_works() { - type TestMigration = Migration; - - ExtBuilder::default().build().execute_with(|| { - assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); - assert_eq!( - TestMigration::migrate(&mut WeightMeter::new()), - MigrateResult::NoMigrationInProgress - ) - }); - } - - #[test] - fn migration_works() { - type TestMigration = Migration; - - ExtBuilder::default() - .set_storage_version(LATEST_MIGRATION_VERSION - 2) - .build() - .execute_with(|| { - 
assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION - 2); - TestMigration::on_runtime_upgrade(); - for (version, status) in [ - (LATEST_MIGRATION_VERSION - 1, MigrateResult::InProgress { steps_done: 1 }), - (LATEST_MIGRATION_VERSION, MigrateResult::Completed), - ] { - assert_eq!(TestMigration::migrate(&mut WeightMeter::new()), status); - assert_eq!( - >::on_chain_storage_version(), - StorageVersion::new(version) - ); - } - - assert_eq!( - TestMigration::migrate(&mut WeightMeter::new()), - MigrateResult::NoMigrationInProgress - ); - assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); - }); - } -} diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 98e8879457bf..1b48527d23d7 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -103,8 +103,6 @@ pub enum ContractAccessError { DoesntExist, /// Storage key cannot be decoded from the provided input data. KeyDecodingFailed, - /// Storage is migrating. Try again later. - MigrationInProgress, } /// Output of a contract call or instantiation which ran to completion. diff --git a/substrate/frame/revive/src/storage.rs b/substrate/frame/revive/src/storage.rs index ef7ce2db32cf..9939de1dfd19 100644 --- a/substrate/frame/revive/src/storage.rs +++ b/substrate/frame/revive/src/storage.rs @@ -33,11 +33,12 @@ use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use frame_support::{ storage::child::{self, ChildInfo}, + traits::IsType, weights::{Weight, WeightMeter}, CloneNoBound, DefaultNoBound, }; use scale_info::TypeInfo; -use sp_core::{ConstU32, Get, H160}; +use sp_core::{ConstU32, Get, H160, H256}; use sp_io::KillStorageResult; use sp_runtime::{ traits::{Hash, Saturating, Zero}, @@ -77,7 +78,10 @@ pub struct ContractInfo { delegate_dependencies: DelegateDependencyMap, } -impl ContractInfo { +impl ContractInfo +where + T::Hash: IsType, +{ /// Constructs a new contract info **without** writing it to storage. /// /// This returns an `Err` if an contract with the supplied `account` already exists diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index f6ad4c5fc346..9d70ddf85870 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -21,17 +21,17 @@ use crate::{ address::AddressMapper, storage::ContractInfo, AccountIdOf, BalanceOf, CodeInfo, Config, Error, Event, HoldReason, Inspect, Origin, Pallet, StorageDeposit as Deposit, System, LOG_TARGET, }; - use alloc::vec::Vec; use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ traits::{ fungible::{Mutate, MutateHold}, tokens::{Fortitude, Fortitude::Polite, Precision, Preservation, Restriction}, - Get, + Get, IsType, }, DefaultNoBound, RuntimeDebugNoBound, }; +use sp_core::H256; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, FixedPointNumber, FixedU128, @@ -400,6 +400,7 @@ where impl RawMeter where T: Config, + T::Hash: IsType, E: Ext, { /// Charges `diff` from the meter. @@ -503,7 +504,10 @@ where } } -impl Ext for ReservingExt { +impl Ext for ReservingExt +where + T::Hash: IsType, +{ fn check_limit( origin: &T::AccountId, limit: BalanceOf, diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index b17067769c05..b17d7628fb80 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -54,6 +54,9 @@ macro_rules! 
builder { impl $name where as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + crate::MomentOf: Into, + T::Hash: frame_support::traits::IsType, { $( #[doc = concat!("Set the ", stringify!($field))] diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 447d55f0dd8d..73914c9aae07 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -33,7 +33,6 @@ use crate::{ }, exec::Key, limits, - migration::codegen::LATEST_MIGRATION_VERSION, primitives::CodeUploadReturnValue, storage::DeletionQueueManager, test_utils::*, @@ -41,8 +40,8 @@ use crate::{ wasm::Memory, weights::WeightInfo, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, - DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, MigrationInProgress, Origin, - Pallet, PristineCode, H160, + DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, PristineCode, + H160, }; use crate::test_utils::builder::Contract; @@ -490,7 +489,6 @@ impl Config for Test { type UnsafeUnstableInterface = UnstableInterface; type UploadOrigin = EnsureAccount; type InstantiateOrigin = EnsureAccount; - type Migrations = crate::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = TestDebug; } @@ -523,10 +521,6 @@ impl ExtBuilder { pub fn set_associated_consts(&self) { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); } - pub fn set_storage_version(mut self, version: u16) -> Self { - self.storage_version = Some(StorageVersion::new(version)); - self - } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); self.set_associated_consts(); @@ -593,6 +587,7 @@ impl Default for Origin { mod run_tests { use super::*; use pretty_assertions::{assert_eq, assert_ne}; + use sp_core::U256; // Perform a call to a plain account. // The actual transfer fails because we can only call contracts. @@ -616,66 +611,6 @@ mod run_tests { }); } - #[test] - fn migration_on_idle_hooks_works() { - // Defines expectations of how many migration steps can be done given the weight limit. 
-		let tests = [
-			(Weight::zero(), LATEST_MIGRATION_VERSION - 2),
-			(<Test as Config>::WeightInfo::migrate() + 1.into(), LATEST_MIGRATION_VERSION - 1),
-			(Weight::MAX, LATEST_MIGRATION_VERSION),
-		];
-
-		for (weight, expected_version) in tests {
-			ExtBuilder::default()
-				.set_storage_version(LATEST_MIGRATION_VERSION - 2)
-				.build()
-				.execute_with(|| {
-					MigrationInProgress::<Test>::set(Some(Default::default()));
-					Contracts::on_idle(System::block_number(), weight);
-					assert_eq!(StorageVersion::get::<Pallet<Test>>(), expected_version);
-				});
-		}
-	}
-
-	#[test]
-	fn migration_in_progress_works() {
-		let (wasm, code_hash) = compile_module("dummy").unwrap();
-
-		ExtBuilder::default().existential_deposit(1).build().execute_with(|| {
-			let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-			MigrationInProgress::<Test>::set(Some(Default::default()));
-
-			assert_err!(
-				Contracts::upload_code(
-					RuntimeOrigin::signed(ALICE),
-					vec![],
-					deposit_limit::<Test>(),
-				),
-				Error::<Test>::MigrationInProgress,
-			);
-			assert_err!(
-				Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash),
-				Error::<Test>::MigrationInProgress,
-			);
-			assert_err!(
-				Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB_ADDR, code_hash),
-				Error::<Test>::MigrationInProgress,
-			);
-			assert_err_ignore_postinfo!(
-				builder::call(BOB_ADDR).build(),
-				Error::<Test>::MigrationInProgress
-			);
-			assert_err_ignore_postinfo!(
-				builder::instantiate_with_code(wasm).value(100_000).build(),
-				Error::<Test>::MigrationInProgress,
-			);
-			assert_err_ignore_postinfo!(
-				builder::instantiate(code_hash).value(100_000).build(),
-				Error::<Test>::MigrationInProgress,
-			);
-		});
-	}
-
 	#[test]
 	fn instantiate_and_call_and_deposit_event() {
 		let (wasm, code_hash) = compile_module("event_and_return_on_deploy").unwrap();
@@ -743,7 +678,8 @@ mod run_tests {
 				phase: Phase::Initialization,
 				event: RuntimeEvent::Contracts(crate::Event::ContractEmitted {
 					contract: addr,
-					data: vec![1, 2, 3, 4]
+					data: vec![1, 2, 3, 4],
+					topics: vec![H256::repeat_byte(42)],
 				}),
 				topics: vec![],
 			},
@@ -3402,7 +3338,7 @@ mod run_tests {
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(13)
-				.data((100u32, &addr_callee, 0u64).encode())
+				.data((100u32, &addr_callee, U256::from(0u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3416,7 +3352,7 @@ mod run_tests {
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(14)
-				.data((101u32, &addr_callee, 0u64).encode())
+				.data((101u32, &addr_callee, U256::from(0u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3429,7 +3365,7 @@ mod run_tests {
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(16)
-				.data((102u32, &addr_callee, 1u64).encode())
+				.data((102u32, &addr_callee, U256::from(1u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3440,7 +3376,7 @@ mod run_tests {
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(0)
-				.data((87u32, &addr_callee, 0u64).encode())
+				.data((87u32, &addr_callee, U256::from(0u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3450,7 +3386,9 @@ mod run_tests {
 		// Require more than the sender's balance.
 		// We don't set a special limit for the nested call.
 		assert_err_ignore_postinfo!(
-			builder::call(addr_caller).data((512u32, &addr_callee, 1u64).encode()).build(),
+			builder::call(addr_caller)
+				.data((512u32, &addr_callee, U256::from(1u64)).encode())
+				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3459,7 +3397,7 @@ mod run_tests {
 		// enforced as callee frees up storage. This should pass.
 		assert_ok!(builder::call(addr_caller)
 			.storage_deposit_limit(1)
-			.data((87u32, &addr_callee, 1u64).encode())
+			.data((87u32, &addr_callee, U256::from(1u64)).encode())
 			.build());
 	});
 }
@@ -3500,7 +3438,7 @@ mod run_tests {
 			builder::call(addr_caller)
 				.origin(RuntimeOrigin::signed(BOB))
 				.storage_deposit_limit(callee_info_len + 2 + ED + 1)
-				.data((0u32, &code_hash_callee, 0u64).encode())
+				.data((0u32, &code_hash_callee, U256::from(0u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3514,7 +3452,7 @@ mod run_tests {
 			builder::call(addr_caller)
 				.origin(RuntimeOrigin::signed(BOB))
 				.storage_deposit_limit(callee_info_len + 2 + ED + 2)
-				.data((1u32, &code_hash_callee, 0u64).encode())
+				.data((1u32, &code_hash_callee, U256::from(0u64)).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3528,7 +3466,10 @@ mod run_tests {
 			builder::call(addr_caller)
 				.origin(RuntimeOrigin::signed(BOB))
 				.storage_deposit_limit(callee_info_len + 2 + ED + 2)
-				.data((0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode())
+				.data(
+					(0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1))
+						.encode()
+				)
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3543,7 +3484,10 @@ mod run_tests {
 			builder::call(addr_caller)
 				.origin(RuntimeOrigin::signed(BOB))
 				.storage_deposit_limit(callee_info_len + 2 + ED + 3) // enough parent limit
-				.data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode())
+				.data(
+					(1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2))
+						.encode()
+				)
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3554,7 +3498,7 @@ mod run_tests {
 		let result = builder::bare_call(addr_caller)
 			.origin(RuntimeOrigin::signed(BOB))
 			.storage_deposit_limit(callee_info_len + 2 + ED + 4)
-			.data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode())
+			.data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3)).encode())
 			.build();
 
 		let returned = result.result.unwrap();
diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs
index 9024390fd24f..5813903326bf 100644
--- a/substrate/frame/revive/src/wasm/mod.rs
+++ b/substrate/frame/revive/src/wasm/mod.rs
@@ -45,9 +45,9 @@ use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
 	dispatch::DispatchResult,
 	ensure,
-	traits::{fungible::MutateHold, tokens::Precision::BestEffort},
+	traits::{fungible::MutateHold, tokens::Precision::BestEffort, IsType},
 };
-use sp_core::Get;
+use sp_core::{Get, H256, U256};
 use sp_runtime::DispatchError;
 
 /// Validated Wasm module ready for execution.
@@ -123,7 +123,11 @@ impl<T: Config> Token<T> for CodeLoadToken {
 	}
 }
 
-impl<T: Config> WasmBlob<T> {
+impl<T: Config> WasmBlob<T>
+where
+	T::Hash: IsType<H256>,
+	BalanceOf<T>: Into<U256> + TryFrom<U256>,
+{
 	/// We only check for size and nothing else when the code is uploaded.
pub fn from_code( code: Vec, @@ -251,7 +255,11 @@ pub struct PreparedCall<'a, E: Ext> { api_version: ApiVersion, } -impl<'a, E: Ext> PreparedCall<'a, E> { +impl<'a, E: Ext> PreparedCall<'a, E> +where + BalanceOf: Into, + BalanceOf: TryFrom, +{ pub fn call(mut self) -> ExecResult { let exec_result = loop { let interrupt = self.instance.run(); @@ -315,7 +323,10 @@ impl WasmBlob { } } -impl Executable for WasmBlob { +impl Executable for WasmBlob +where + BalanceOf: Into + TryFrom, +{ fn from_storage( code_hash: sp_core::H256, gas_meter: &mut GasMeter, diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 51c723493847..528b0ababfa0 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -19,12 +19,12 @@ use crate::{ address::AddressMapper, - exec::{ExecError, ExecResult, Ext, Key, TopicOf}, + exec::{ExecError, ExecResult, Ext, Key}, gas::{ChargedAmount, Token}, limits, primitives::ExecReturnValue, weights::WeightInfo, - BalanceOf, Config, Error, LOG_TARGET, SENTINEL, + Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; @@ -35,15 +35,22 @@ use frame_support::{ }; use pallet_revive_proc_macro::define_env; use pallet_revive_uapi::{CallFlags, ReturnErrorCode, ReturnFlags, StorageFlags}; -use sp_core::{H160, H256}; +use sp_core::{H160, H256, U256}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; -use sp_runtime::{traits::Zero, DispatchError, RuntimeDebug}; +use sp_runtime::{DispatchError, RuntimeDebug}; type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; +/// Encode a `U256` into a 32 byte buffer. +fn as_bytes(u: U256) -> [u8; 32] { + let mut bytes = [0u8; 32]; + u.to_little_endian(&mut bytes); + bytes +} + #[derive(Clone, Copy)] pub enum ApiVersion { /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. @@ -84,6 +91,32 @@ pub trait Memory { Ok(buf) } + /// Same as `read` but reads into a fixed size buffer. + fn read_array(&self, ptr: u32) -> Result<[u8; N], DispatchError> { + let mut buf = [0u8; N]; + self.read_into_buf(ptr, &mut buf)?; + Ok(buf) + } + + /// Read a `u32` from the sandbox memory. + fn read_u32(&self, ptr: u32) -> Result { + let buf: [u8; 4] = self.read_array(ptr)?; + Ok(u32::from_le_bytes(buf)) + } + + /// Read a `U256` from the sandbox memory. + fn read_u256(&self, ptr: u32) -> Result { + let buf: [u8; 32] = self.read_array(ptr)?; + Ok(U256::from_little_endian(&buf)) + } + + /// Read a `H256` from the sandbox memory. + fn read_h256(&self, ptr: u32) -> Result { + let mut code_hash = H256::default(); + self.read_into_buf(ptr, code_hash.as_bytes_mut())?; + Ok(code_hash) + } + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. 
/// /// Returns `Err` if one of the following conditions occurs: @@ -647,7 +680,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { } let buf_len = buf.len() as u32; - let len: u32 = memory.read_as(out_len_ptr)?; + let len = memory.read_u32(out_len_ptr)?; if len < buf_len { return Err(Error::::OutputBufferTooSmall.into()) @@ -963,13 +996,13 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { CallType::Call { callee_ptr, value_ptr, deposit_ptr, weight } => { let mut callee = H160::zero(); memory.read_into_buf(callee_ptr, callee.as_bytes_mut())?; - let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { - BalanceOf::<::T>::zero() + let deposit_limit = if deposit_ptr == SENTINEL { + U256::zero() } else { - memory.read_as(deposit_ptr)? + memory.read_u256(deposit_ptr)? }; let read_only = flags.contains(CallFlags::READ_ONLY); - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; + let value = memory.read_u256(value_ptr)?; if value > 0u32.into() { // If the call value is non-zero and state change is not allowed, issue an // error. @@ -992,7 +1025,8 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { if flags.intersects(CallFlags::ALLOW_REENTRY | CallFlags::READ_ONLY) { return Err(Error::::InvalidCallFlags.into()) } - let code_hash = memory.read_as(code_hash_ptr)?; + + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.delegate_call(code_hash, input_data) }, }; @@ -1036,19 +1070,15 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { salt_ptr: u32, ) -> Result { self.charge_gas(RuntimeCosts::Instantiate { input_data_len })?; - let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { - BalanceOf::<::T>::zero() - } else { - memory.read_as(deposit_ptr)? - }; - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; - let code_hash: H256 = memory.read_as(code_hash_ptr)?; + let deposit_limit: U256 = + if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? }; + let value = memory.read_u256(value_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; let input_data = memory.read(input_data_ptr, input_data_len)?; let salt = if salt_ptr == SENTINEL { None } else { - let mut salt = [0u8; 32]; - memory.read_into_buf(salt_ptr, salt.as_mut_slice())?; + let salt: [u8; 32] = memory.read_array(salt_ptr)?; Some(salt) }; let instantiate_outcome = self.ext.instantiate( @@ -1194,7 +1224,7 @@ pub mod env { self.charge_gas(RuntimeCosts::Transfer)?; let mut callee = H160::zero(); memory.read_into_buf(address_ptr, callee.as_bytes_mut())?; - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; + let value: U256 = memory.read_u256(value_ptr)?; let result = self.ext.transfer(&callee, value); match result { Ok(()) => Ok(ReturnErrorCode::Success), @@ -1374,7 +1404,7 @@ pub mod env { self.write_fixed_sandbox_output( memory, out_ptr, - &value.encode(), + &value.as_bytes(), false, already_charged, )?; @@ -1389,11 +1419,11 @@ pub mod env { #[api_version(0)] fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::OwnCodeHash)?; - let code_hash_encoded = &self.ext.own_code_hash().encode(); + let code_hash = *self.ext.own_code_hash(); Ok(self.write_fixed_sandbox_output( memory, out_ptr, - code_hash_encoded, + code_hash.as_bytes(), false, already_charged, )?) 
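The runtime hunks above replace SCALE decoding of balances and hashes with fixed-width reads: `read_u256` and `read_h256` consume exactly 32 bytes, and the new `as_bytes` helper writes a `U256` back out in the same layout. A minimal standalone round-trip sketch of that convention, assuming only `sp_core::U256` with the buffer-filling `to_little_endian` signature used in the diff:

```rust
use sp_core::U256;

// Mirror of the `as_bytes` helper added above: fixed 32 bytes, little-endian.
fn as_bytes(u: U256) -> [u8; 32] {
	let mut bytes = [0u8; 32];
	u.to_little_endian(&mut bytes);
	bytes
}

fn main() {
	let value = U256::from(1_000_000u64);
	let raw = as_bytes(value);
	// No SCALE compact prefix is involved: the buffer is always exactly
	// 32 bytes, which is what `Memory::read_u256` reads back host-side.
	assert_eq!(U256::from_little_endian(&raw), value);
}
```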
@@ -1439,14 +1469,12 @@ pub mod env { ref_time_limit: u64, proof_size_limit: u64, out_ptr: u32, - out_len_ptr: u32, ) -> Result<(), TrapReason> { let weight = Weight::from_parts(ref_time_limit, proof_size_limit); self.charge_gas(RuntimeCosts::WeightToFee)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, &self.ext.get_weight_price(weight).encode(), false, already_charged, @@ -1477,18 +1505,12 @@ pub mod env { /// Stores the *free* balance of the current account into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. #[api_version(0)] - fn balance( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Balance)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.balance().encode(), + &as_bytes(self.ext.balance()), false, already_charged, )?) @@ -1497,18 +1519,12 @@ pub mod env { /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::value_transferred`]. #[api_version(0)] - fn value_transferred( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn value_transferred(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::ValueTransferred)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.value_transferred().encode(), + &as_bytes(self.ext.value_transferred()), false, already_charged, )?) @@ -1517,13 +1533,12 @@ pub mod env { /// Load the latest block timestamp into the supplied buffer /// See [`pallet_revive_uapi::HostFn::now`]. #[api_version(0)] - fn now(&mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Now)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.now().encode(), + &as_bytes(self.ext.now()), false, already_charged, )?) @@ -1532,18 +1547,12 @@ pub mod env { /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. #[api_version(0)] - fn minimum_balance( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::MinimumBalance)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.minimum_balance().encode(), + &as_bytes(self.ext.minimum_balance()), false, already_charged, )?) 
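Dropping `out_len_ptr` changes the contract-side calling convention as well: output buffers for balance-like values become fixed 32-byte arrays. A hypothetical contract-side sketch, assuming the `pallet_revive_uapi::HostFn` trait as modified further below (`free_balance` is an illustrative name, not part of the API):

```rust
use pallet_revive_uapi::HostFn;

// Illustrative helper: read the contract's free balance as raw bytes.
fn free_balance<H: HostFn>() -> [u8; 32] {
	let mut out = [0u8; 32];
	// The host always writes the full 32-byte little-endian U256, so there
	// is no out_len pointer to pass in or to read back afterwards.
	H::balance(&mut out);
	out
}
```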
@@ -1557,50 +1566,47 @@ pub mod env { &mut self, memory: &mut M, topics_ptr: u32, - topics_len: u32, + num_topic: u32, data_ptr: u32, data_len: u32, ) -> Result<(), TrapReason> { - let num_topic = topics_len - .checked_div(core::mem::size_of::>() as u32) - .ok_or("Zero sized topics are not allowed")?; self.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; + + if num_topic > limits::NUM_EVENT_TOPICS { + return Err(Error::::TooManyTopics.into()); + } + if data_len > self.ext.max_value_size() { return Err(Error::::ValueTooLarge.into()); } - let topics: Vec::T>> = match topics_len { + let topics: Vec = match num_topic { 0 => Vec::new(), - _ => memory.read_as_unbounded(topics_ptr, topics_len)?, + _ => { + let mut v = Vec::with_capacity(num_topic as usize); + let topics_len = num_topic * H256::len_bytes() as u32; + let buf = memory.read(topics_ptr, topics_len)?; + for chunk in buf.chunks_exact(H256::len_bytes()) { + v.push(H256::from_slice(chunk)); + } + v + }, }; - // If there are more than `event_topics`, then trap. - if topics.len() as u32 > limits::NUM_EVENT_TOPICS { - return Err(Error::::TooManyTopics.into()); - } - let event_data = memory.read(data_ptr, data_len)?; - self.ext.deposit_event(topics, event_data); - Ok(()) } /// Stores the current block number of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_number`]. #[api_version(0)] - fn block_number( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn block_number(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BlockNumber)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.block_number().encode(), + &as_bytes(self.ext.block_number()), false, already_charged, )?) 
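The `deposit_event` hunk above switches the ABI from `topics_len` bytes of SCALE-encoded topics to a plain `num_topic` count of raw 32-byte words. A standalone sketch of the same chunk-wise decoding the hunk performs (the function name is illustrative):

```rust
use sp_core::H256;

// Decode `num_topic` raw 32-byte topics, mirroring the host-side loop above.
fn decode_topics(buf: &[u8], num_topic: usize) -> Option<Vec<H256>> {
	// The host reads exactly num_topic * 32 bytes, so any other length is a bug.
	if buf.len() != num_topic * H256::len_bytes() {
		return None;
	}
	// chunks_exact guarantees full-width 32-byte chunks, so from_slice cannot panic.
	Some(buf.chunks_exact(H256::len_bytes()).map(H256::from_slice).collect())
}
```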
@@ -1884,7 +1890,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result { self.charge_gas(RuntimeCosts::SetCodeHash)?; - let code_hash: H256 = memory.read_as(code_hash_ptr)?; + let code_hash: H256 = memory.read_h256(code_hash_ptr)?; match self.ext.set_code_hash(code_hash) { Err(err) => { let code = Self::err_into_return_code(err)?; @@ -1926,7 +1932,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::LockDelegateDependency)?; - let code_hash = memory.read_as(code_hash_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.lock_delegate_dependency(code_hash)?; Ok(()) } @@ -1941,7 +1947,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; - let code_hash = memory.read_as(code_hash_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.unlock_delegate_dependency(&code_hash)?; Ok(()) } diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index 7974cc1260e4..8913592c13bb 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -51,19 +51,6 @@ use core::marker::PhantomData; pub trait WeightInfo { fn on_process_deletion_queue_batch() -> Weight; fn on_initialize_per_trie_key(k: u32, ) -> Weight; - fn v9_migration_step(c: u32, ) -> Weight; - fn v10_migration_step() -> Weight; - fn v11_migration_step(k: u32, ) -> Weight; - fn v12_migration_step(c: u32, ) -> Weight; - fn v13_migration_step() -> Weight; - fn v14_migration_step() -> Weight; - fn v15_migration_step() -> Weight; - fn v16_migration_step() -> Weight; - fn migration_noop() -> Weight; - fn migrate() -> Weight; - fn on_runtime_upgrade_noop() -> Weight; - fn on_runtime_upgrade_in_progress() -> Weight; - fn on_runtime_upgrade() -> Weight; fn call_with_code_per_byte(c: u32, ) -> Weight; fn instantiate_with_code(c: u32, i: u32) -> Weight; fn instantiate(i: u32) -> Weight; @@ -162,182 +149,6 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// The range of component `c` is `[0, 125952]`. - fn v9_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `211 + c * (1 Âą0)` - // Estimated: `6149 + c * (1 Âą0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v10_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `510` - // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. 
- Weight::from_parts(16_730_000, 6450) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `k` is `[0, 1024]`. - fn v11_migration_step(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `171 + k * (1 Âą0)` - // Estimated: `3635 + k * (1 Âą0)` - // Minimum execution time: 3_149_000 picoseconds. - Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn v12_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `325 + c * (1 Âą0)` - // Estimated: `6263 + c * (1 Âą0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) - // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v13_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `440` - // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - fn v14_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. 
- Weight::from_parts(48_482_000, 6292) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v15_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. - Weight::from_parts(54_230_000, 6534) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v16_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `409` - // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn migration_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - fn migrate() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - fn on_runtime_upgrade_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) - .saturating_add(T::DbWeight::get().reads(1_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade_in_progress() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. 
- Weight::from_parts(5_838_000, 3632) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. - Weight::from_parts(5_693_000, 3607) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1152,182 +963,6 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// The range of component `c` is `[0, 125952]`. - fn v9_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `211 + c * (1 Âą0)` - // Estimated: `6149 + c * (1 Âą0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v10_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `510` - // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_730_000, 6450) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `k` is `[0, 1024]`. - fn v11_migration_step(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `171 + k * (1 Âą0)` - // Estimated: `3635 + k * (1 Âą0)` - // Minimum execution time: 3_149_000 picoseconds. 
- Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn v12_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `325 + c * (1 Âą0)` - // Estimated: `6263 + c * (1 Âą0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) - // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v13_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `440` - // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - fn v14_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. - Weight::from_parts(48_482_000, 6292) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v15_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. 
- Weight::from_parts(54_230_000, 6534) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v16_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `409` - // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn migration_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - fn migrate() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - fn on_runtime_upgrade_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade_in_progress() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. - Weight::from_parts(5_838_000, 3632) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. 
- Weight::from_parts(5_693_000, 3607) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index f52ea9574025..101ae9aca465 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -58,21 +58,17 @@ pub trait HostFn: private::Sealed { /// Stores the *free* balance of the current account into the supplied buffer. /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the balance. - fn balance(output: &mut &mut [u8]); + fn balance(output: &mut [u8; 32]); /// Stores the current block number of the current contract into the supplied buffer. /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the block number. - fn block_number(output: &mut &mut [u8]); + fn block_number(output: &mut [u8; 32]); /// Call (possibly transferring some amount of funds) into the specified account. /// @@ -83,11 +79,10 @@ pub trait HostFn: private::Sealed { /// otherwise. /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a - /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for - /// the call, which implies storage usage up to the limit of the parent call. - /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. - /// Traps otherwise. + /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer used to call the contract. /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` /// is provided then the output buffer is not copied. @@ -106,8 +101,8 @@ pub trait HostFn: private::Sealed { callee: &[u8; 20], ref_time_limit: u64, proof_size_limit: u64, - deposit: Option<&[u8]>, - value: &[u8], + deposit: Option<&[u8; 32]>, + value: &[u8; 32], input_data: &[u8], output: Option<&mut &mut [u8]>, ) -> Result; @@ -287,8 +282,8 @@ pub trait HostFn: private::Sealed { /// /// # Parameters /// - /// - `topics`: The topics list encoded as `Vec`. It can't contain duplicates. - fn deposit_event(topics: &[u8], data: &[u8]); + /// - `topics`: The topics list. It can't contain duplicates. + fn deposit_event(topics: &[[u8; 32]], data: &[u8]); /// Recovers the ECDSA public key from the given message hash and signature. /// @@ -374,11 +369,10 @@ pub trait HostFn: private::Sealed { /// - `code_hash`: The hash of the code to be instantiated. /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. 
- /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a - /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for - /// the call, which implies storage usage up to the limit of the parent call. - /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. - /// Traps otherwise. + /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer. /// - `address`: A reference to the address buffer to write the address of the contract. If /// `None` is provided then the output buffer is not copied. @@ -402,8 +396,8 @@ pub trait HostFn: private::Sealed { code_hash: &[u8; 32], ref_time_limit: u64, proof_size_limit: u64, - deposit: Option<&[u8]>, - value: &[u8], + deposit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], address: Option<&mut [u8; 20]>, output: Option<&mut &mut [u8]>, @@ -422,14 +416,11 @@ pub trait HostFn: private::Sealed { fn is_contract(address: &[u8; 20]) -> bool; /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the minimum balance. - fn minimum_balance(output: &mut &mut [u8]); + fn minimum_balance(output: &mut [u8; 32]); /// Retrieve the code hash of the currently executing contract. /// @@ -440,12 +431,10 @@ pub trait HostFn: private::Sealed { /// Load the latest block timestamp into the supplied buffer /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the timestamp. - fn now(output: &mut &mut [u8]); + fn now(output: &mut [u8; 32]); /// Removes the delegate dependency from the contract. /// @@ -548,12 +537,12 @@ pub trait HostFn: private::Sealed { /// # Parameters /// /// - `address`: The address of the account to transfer funds to. - /// - `value`: The value to transfer. Should be decodable as a `T::Balance`. Traps otherwise. + /// - `value`: The U256 value to transfer. /// /// # Errors /// /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - fn transfer(address: &[u8; 20], value: &[u8]) -> Result; + fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result; /// Remove the calling account and transfer remaining **free** balance. /// @@ -573,26 +562,20 @@ pub trait HostFn: private::Sealed { fn terminate(beneficiary: &[u8; 20]) -> !; /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the transferred value. - fn value_transferred(output: &mut &mut [u8]); + fn value_transferred(output: &mut [u8; 32]); /// Stores the price for the specified amount of gas into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. 
/// /// # Parameters /// /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. /// - `output`: A reference to the output data buffer to write the price. - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]); + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); /// Execute an XCM program locally, using the contract's address as the origin. /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv32.rs index c8218bb8f737..b7b660c40837 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv32.rs @@ -79,24 +79,19 @@ mod sys { pub fn caller_is_origin() -> ReturnCode; pub fn caller_is_root() -> ReturnCode; pub fn address(out_ptr: *mut u8); - pub fn weight_to_fee( - ref_time: u64, - proof_size: u64, - out_ptr: *mut u8, - out_len_ptr: *mut u32, - ); + pub fn weight_to_fee(ref_time: u64, proof_size: u64, out_ptr: *mut u8); pub fn weight_left(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn balance(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn value_transferred(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn now(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn minimum_balance(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn balance(out_ptr: *mut u8); + pub fn value_transferred(out_ptr: *mut u8); + pub fn now(out_ptr: *mut u8); + pub fn minimum_balance(out_ptr: *mut u8); pub fn deposit_event( - topics_ptr: *const u8, - topics_len: u32, + topics_ptr: *const [u8; 32], + num_topic: u32, data_ptr: *const u8, data_len: u32, ); - pub fn block_number(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn block_number(out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -136,21 +131,20 @@ mod sys { } } +/// A macro to implement all Host functions with a signature of `fn(&mut [u8; n])`. macro_rules! impl_wrapper_for { - ( $( $name:ident, )* ) => { - $( - fn $name(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { - sys::$name( - output.as_mut_ptr(), - &mut output_len, - ) - } - extract_from_slice(output, output_len as usize) - } - )* - } + (@impl_fn $name:ident, $n: literal) => { + fn $name(output: &mut [u8; $n]) { + unsafe { sys::$name(output.as_mut_ptr()) } + } + }; + + () => {}; + + ([u8; $n: literal] => $($name:ident),*; $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn $name, $n);)* + impl_wrapper_for!($($tail)*); + }; } macro_rules! 
impl_hash_fn { @@ -185,7 +179,7 @@ fn ptr_len_or_sentinel(data: &mut Option<&mut &mut [u8]>) -> (*mut u8, u32) { } #[inline(always)] -fn ptr_or_sentinel(data: &Option<&[u8]>) -> *const u8 { +fn ptr_or_sentinel(data: &Option<&[u8; 32]>) -> *const u8 { match data { Some(ref data) => data.as_ptr(), None => crate::SENTINEL as _, @@ -197,8 +191,8 @@ impl HostFn for HostFnImpl { code_hash: &[u8; 32], ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: Option<&[u8]>, - value: &[u8], + deposit_limit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], mut address: Option<&mut [u8; 20]>, mut output: Option<&mut &mut [u8]>, @@ -253,8 +247,8 @@ impl HostFn for HostFnImpl { callee: &[u8; 20], ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: Option<&[u8]>, - value: &[u8], + deposit_limit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], mut output: Option<&mut &mut [u8]>, ) -> Result { @@ -327,12 +321,12 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn transfer(address: &[u8; 20], value: &[u8]) -> Result { + fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result { let ret_code = unsafe { sys::transfer(address.as_ptr(), value.as_ptr()) }; ret_code.into() } - fn deposit_event(topics: &[u8], data: &[u8]) { + fn deposit_event(topics: &[[u8; 32]], data: &[u8]) { unsafe { sys::deposit_event( topics.as_ptr(), @@ -449,33 +443,19 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn address(output: &mut [u8; 20]) { - unsafe { sys::address(output.as_mut_ptr()) } - } - - fn caller(output: &mut [u8; 20]) { - unsafe { sys::caller(output.as_mut_ptr()) } - } - impl_wrapper_for! { - block_number, balance, - value_transferred,now, minimum_balance, - weight_left, + [u8; 32] => block_number, balance, value_transferred, now, minimum_balance; + [u8; 20] => address, caller; } - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]) { + fn weight_left(output: &mut &mut [u8]) { let mut output_len = output.len() as u32; - { - unsafe { - sys::weight_to_fee( - ref_time_limit, - proof_size_limit, - output.as_mut_ptr(), - &mut output_len, - ) - }; - } - extract_from_slice(output, output_len as usize); + unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } + extract_from_slice(output, output_len as usize) + } + + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { + unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; } impl_hash_fn!(sha2_256, 32); diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index f395872c8a80..5dc8dc3146cf 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -18,7 +18,7 @@ use crate::{ pallet::{ expand::warnings::{weight_constant_warning, weight_witness_warning}, - parse::call::CallWeightDef, + parse::{call::CallWeightDef, helper::CallReturnType}, Def, }, COUNTER, @@ -197,18 +197,36 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; // Wrap all calls inside of storage layers - if let Some(syn::Item::Impl(item_impl)) = def - .call - .as_ref() - .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) - { - item_impl.items.iter_mut().for_each(|i| { - if let syn::ImplItem::Fn(method) = i { + if let Some(call) = def.call.as_ref() { + let item_impl = + &mut 
def.item.content.as_mut().expect("Checked by def parser").1[call.index];
+		let syn::Item::Impl(item_impl) = item_impl else {
+			unreachable!("Checked by def parser");
+		};
+
+		item_impl.items.iter_mut().enumerate().for_each(|(i, item)| {
+			if let syn::ImplItem::Fn(method) = item {
+				let return_type =
+					&call.methods.get(i).expect("def should be consistent with item").return_type;
+
+				let (ok_type, err_type) = match return_type {
+					CallReturnType::DispatchResult => (
+						quote::quote!(()),
+						quote::quote!(#frame_support::pallet_prelude::DispatchError),
+					),
+					CallReturnType::DispatchResultWithPostInfo => (
+						quote::quote!(#frame_support::dispatch::PostDispatchInfo),
+						quote::quote!(#frame_support::dispatch::DispatchErrorWithPostInfo),
+					),
+				};
+
 				let block = &method.block;
 				method.block = syn::parse_quote! {{
 					// We execute all dispatchable in a new storage layer, allowing them
 					// to return an error at any point, and undoing any storage changes.
-					#frame_support::storage::with_storage_layer(|| #block)
+					#frame_support::storage::with_storage_layer::<#ok_type, #err_type, _>(
+						|| #block
+					)
 				}};
 			}
 		});
diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs
index 4e09b86fddec..68c2cb8bd1b3 100644
--- a/substrate/frame/support/procedural/src/pallet/parse/call.rs
+++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs
@@ -89,6 +89,8 @@ pub struct CallVariantDef {
 	pub cfg_attrs: Vec<syn::Attribute>,
 	/// The optional `feeless_if` attribute on the `pallet::call`.
 	pub feeless_check: Option<syn::ExprClosure>,
+	/// The return type of the call: `DispatchResult` or `DispatchResultWithPostInfo`.
+	pub return_type: helper::CallReturnType,
 }
 
 /// Attributes for functions in call impl block.
@@ -260,13 +262,7 @@ impl CallDef {
 			},
 		}
 
-		if let syn::ReturnType::Type(_, type_) = &method.sig.output {
-			helper::check_pallet_call_return_type(type_)?;
-		} else {
-			let msg = "Invalid pallet::call, require return type \
-				DispatchResultWithPostInfo";
-			return Err(syn::Error::new(method.sig.span(), msg))
-		}
+		let return_type = helper::check_pallet_call_return_type(&method.sig)?;
 
 		let cfg_attrs: Vec<syn::Attribute> = helper::get_item_cfg_attrs(&method.attrs);
 		let mut call_idx_attrs = vec![];
@@ -447,6 +443,7 @@ impl CallDef {
 				attrs: method.attrs.clone(),
 				cfg_attrs,
 				feeless_check,
+				return_type,
 			});
 		} else {
 			let msg = "Invalid pallet::call, only method accepted";
diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs
index d4f58a4c56df..d5ae607d90f9 100644
--- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs
+++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs
@@ -597,25 +597,38 @@ pub fn check_type_value_gen(
 	Ok(i)
 }
 
+/// The possible return type of a dispatchable.
+#[derive(Clone)]
+pub enum CallReturnType {
+	DispatchResult,
+	DispatchResultWithPostInfo,
+}
+
 /// Check the keyword `DispatchResultWithPostInfo` or `DispatchResult`.
-pub fn check_pallet_call_return_type(type_: &syn::Type) -> syn::Result<()> {
-	pub struct Checker;
+pub fn check_pallet_call_return_type(sig: &syn::Signature) -> syn::Result<CallReturnType> {
+	let syn::ReturnType::Type(_, type_) = &sig.output else {
+		let msg = "Invalid pallet::call, require return type \
+			DispatchResultWithPostInfo";
+		return Err(syn::Error::new(sig.span(), msg))
+	};
+
+	pub struct Checker(CallReturnType);
 	impl syn::parse::Parse for Checker {
 		fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
 			let lookahead = input.lookahead1();
 			if lookahead.peek(keyword::DispatchResultWithPostInfo) {
 				input.parse::<keyword::DispatchResultWithPostInfo>()?;
-				Ok(Self)
+				Ok(Self(CallReturnType::DispatchResultWithPostInfo))
 			} else if lookahead.peek(keyword::DispatchResult) {
 				input.parse::<keyword::DispatchResult>()?;
-				Ok(Self)
+				Ok(Self(CallReturnType::DispatchResult))
 			} else {
 				Err(lookahead.error())
 			}
 		}
 	}
 
-	syn::parse2::<Checker>(type_.to_token_stream()).map(|_| ())
+	syn::parse2::<Checker>(type_.to_token_stream()).map(|c| c.0)
 }
 
 pub(crate) fn two128_str(s: &str) -> TokenStream {
diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs
index 37af683fbc7f..677ef4e94c89 100644
--- a/substrate/frame/support/test/compile_pass/src/lib.rs
+++ b/substrate/frame/support/test/compile_pass/src/lib.rs
@@ -40,7 +40,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: sp_version::create_apis_vec!([]),
 	transaction_version: 0,
-	state_version: 0,
+	system_version: 0,
 };
 
 pub type Signature = sr25519::Signature;
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs
new file mode 100644
index 000000000000..08b42c29a68b
--- /dev/null
+++ b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs
@@ -0,0 +1,37 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[frame_support::pallet(dev_mode)]
+mod pallet {
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		pub fn foo(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
+			return Err(DispatchError::BadOrigin);
+		}
+	}
+}
+
+fn main() {}
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr
new file mode 100644
index 000000000000..8f3003c02227
--- /dev/null
+++ b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr
@@ -0,0 +1,26 @@
+error[E0308]: mismatched types
+  --> tests/pallet_ui/call_span_for_error.rs:32:15
+   |
+32 |             return Err(DispatchError::BadOrigin);
+   |                    --- ^^^^^^^^^^^^^^^^^^^^^^^^ expected `DispatchErrorWithPostInfo`, found `DispatchError`
+   |                    |
+   |                    arguments to this enum variant are incorrect
+   |
+   = note: expected struct `DispatchErrorWithPostInfo<PostDispatchInfo>`
+                found enum `frame_support::pallet_prelude::DispatchError`
+help: the type constructed contains `frame_support::pallet_prelude::DispatchError` due to the type of the argument passed
+  --> tests/pallet_ui/call_span_for_error.rs:32:11
+   |
+32 |             return Err(DispatchError::BadOrigin);
+   |                    ^^^^------------------------^
+   |                        |
+   |                        this argument influences the type of `Err`
+note: tuple variant defined here
+  --> $RUST/core/src/result.rs
+   |
+   | Err(#[stable(feature = "rust1", since = "1.0.0")] E),
+   | ^^^
+help: call `Into::into` on this expression to convert `frame_support::pallet_prelude::DispatchError` into `DispatchErrorWithPostInfo`
+   |
+32 |             return Err(DispatchError::BadOrigin.into());
+   |                                                +++++++
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index abacfa7b62cc..662b7f1a94bf 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -175,6 +175,7 @@ pub use extensions::{
 pub use extensions::check_mortality::CheckMortality as CheckEra;
 pub use frame_support::dispatch::RawOrigin;
 use frame_support::traits::{PostInherents, PostTransactions, PreInherents};
+use sp_core::storage::StateVersion;
 pub use weights::WeightInfo;
 
 const LOG_TARGET: &str = "runtime::system";
@@ -182,17 +183,20 @@ const LOG_TARGET: &str = "runtime::system";
 /// Compute the trie root of a list of extrinsics.
 ///
 /// The merkle proof is using the same trie as runtime state with
-/// `state_version` 0.
-pub fn extrinsics_root<H: Hash, E: codec::Encode>(extrinsics: &[E]) -> H::Output {
-	extrinsics_data_root::<H>(extrinsics.iter().map(codec::Encode::encode).collect())
+/// `state_version` 0 or 1.
+pub fn extrinsics_root<H: Hash, E: codec::Encode>(
+	extrinsics: &[E],
+	state_version: StateVersion,
+) -> H::Output {
+	extrinsics_data_root::<H>(extrinsics.iter().map(codec::Encode::encode).collect(), state_version)
 }
 
 /// Compute the trie root of a list of extrinsics.
 ///
 /// The merkle proof is using the same trie as runtime state with
-/// `state_version` 0.
-pub fn extrinsics_data_root<H: Hash>(xts: Vec<Vec<u8>>) -> H::Output {
-	H::ordered_trie_root(xts, sp_core::storage::StateVersion::V0)
+/// `state_version` 0 or 1.
+pub fn extrinsics_data_root<H: Hash>(xts: Vec<Vec<u8>>, state_version: StateVersion) -> H::Output {
+	H::ordered_trie_root(xts, state_version)
 }
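Why threading `state_version` through matters: V0 and V1 tries diverge as soon as a value exceeds 32 bytes, because V1 stores large values out-of-line by hash. A minimal sketch of the difference, assuming `sp-core` and `sp-runtime` are available as dependencies (this snippet is illustrative and not part of the diff):

```rust
use sp_core::storage::StateVersion;
use sp_runtime::traits::{BlakeTwo256, Hash};

fn main() {
	// Two 64-byte "extrinsics": large enough that V1 hashes the values
	// out-of-line, so the two trie layouts yield different roots.
	let xts = vec![vec![1u8; 64], vec![2u8; 64]];
	let v0 = BlakeTwo256::ordered_trie_root(xts.clone(), StateVersion::V0);
	let v1 = BlakeTwo256::ordered_trie_root(xts, StateVersion::V1);
	assert_ne!(v0, v1);
}
```

For values of 32 bytes or less the two roots coincide, which is why the version choice must come from the runtime (`T::Version`) rather than being hardcoded.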
 /// An object to track the currently used extrinsic weight in a block.
@@ -1847,7 +1851,9 @@ impl<T: Config> Pallet<T> {
 		let extrinsics = (0..ExtrinsicCount::<T>::take().unwrap_or_default())
 			.map(ExtrinsicData::<T>::take)
 			.collect();
-		let extrinsics_root = extrinsics_data_root::<T::Hashing>(extrinsics);
+		let extrinsics_root_state_version = T::Version::get().extrinsics_root_state_version();
+		let extrinsics_root =
+			extrinsics_data_root::<T::Hashing>(extrinsics, extrinsics_root_state_version);
 
 		// move block hash pruning window by one block
 		let block_hash_count = T::BlockHashCount::get();
diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs
index fff848b3b0e5..f43ffe3c87ee 100644
--- a/substrate/frame/system/src/mock.rs
+++ b/substrate/frame/system/src/mock.rs
@@ -40,7 +40,7 @@ parameter_types! {
 		impl_version: 1,
 		apis: sp_version::create_apis_vec!([]),
 		transaction_version: 1,
-		state_version: 1,
+		system_version: 1,
 	};
 	pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight {
 		read: 10,
diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs
index b2cd017e1e20..534ba1e863fc 100644
--- a/substrate/frame/system/src/tests.rs
+++ b/substrate/frame/system/src/tests.rs
@@ -789,7 +789,10 @@ fn extrinsics_root_is_calculated_correctly() {
 		System::note_finished_extrinsics();
 		let header = System::finalize();
-		let ext_root = extrinsics_data_root::<BlakeTwo256>(vec![vec![1], vec![2]]);
+		let ext_root = extrinsics_data_root::<BlakeTwo256>(
+			vec![vec![1], vec![2]],
+			sp_core::storage::StateVersion::V0,
+		);
 		assert_eq!(ext_root, *header.extrinsics_root());
 	});
 }
diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs
index 3ce5b4ff8649..ed5544fe55ca 100644
--- a/substrate/frame/utility/src/lib.rs
+++ b/substrate/frame/utility/src/lib.rs
@@ -74,7 +74,7 @@ pub use pallet::*;
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
+	use frame_support::{dispatch::DispatchClass, pallet_prelude::*};
 	use frame_system::pallet_prelude::*;
 
 	#[pallet::pallet]
@@ -183,21 +183,8 @@ pub mod pallet {
 		/// event is deposited.
 		#[pallet::call_index(0)]
 		#[pallet::weight({
-			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
-			let dispatch_weight = dispatch_infos.iter()
-				.map(|di| di.weight)
-				.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
-				.saturating_add(T::WeightInfo::batch(calls.len() as u32));
-			let dispatch_class = {
-				let all_operational = dispatch_infos.iter()
-					.map(|di| di.class)
-					.all(|class| class == DispatchClass::Operational);
-				if all_operational {
-					DispatchClass::Operational
-				} else {
-					DispatchClass::Normal
-				}
-			};
+			let (dispatch_weight, dispatch_class) = Pallet::<T>::weight_and_dispatch_class(&calls);
+			let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32));
 			(dispatch_weight, dispatch_class)
 		})]
 		pub fn batch(
@@ -233,13 +220,13 @@ pub mod pallet {
 					// Take the weight of this function itself into account.
 					let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32);
 					// Return the actual used weight + base_weight of this call.
-					return Ok(Some(base_weight + weight).into())
+					return Ok(Some(base_weight.saturating_add(weight)).into())
 				}
 				Self::deposit_event(Event::ItemCompleted);
 			}
 			Self::deposit_event(Event::BatchCompleted);
 			let base_weight = T::WeightInfo::batch(calls_len as u32);
-			Ok(Some(base_weight + weight).into())
+			Ok(Some(base_weight.saturating_add(weight)).into())
 		}
 
 		/// Send a call through an indexed pseudonym of the sender.
@@ -305,21 +292,8 @@ pub mod pallet {
 		/// - O(C) where C is the number of calls to be batched.
 		#[pallet::call_index(2)]
 		#[pallet::weight({
-			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
-			let dispatch_weight = dispatch_infos.iter()
-				.map(|di| di.weight)
-				.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
-				.saturating_add(T::WeightInfo::batch_all(calls.len() as u32));
-			let dispatch_class = {
-				let all_operational = dispatch_infos.iter()
-					.map(|di| di.class)
-					.all(|class| class == DispatchClass::Operational);
-				if all_operational {
-					DispatchClass::Operational
-				} else {
-					DispatchClass::Normal
-				}
-			};
+			let (dispatch_weight, dispatch_class) = Pallet::<T>::weight_and_dispatch_class(&calls);
+			let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32));
 			(dispatch_weight, dispatch_class)
 		})]
 		pub fn batch_all(
@@ -359,7 +333,7 @@ pub mod pallet {
 					// Take the weight of this function itself into account.
 					let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32);
 					// Return the actual used weight + base_weight of this call.
-					err.post_info = Some(base_weight + weight).into();
+					err.post_info = Some(base_weight.saturating_add(weight)).into();
 					err
 				})?;
 				Self::deposit_event(Event::ItemCompleted);
@@ -414,21 +388,8 @@ pub mod pallet {
 		/// - O(C) where C is the number of calls to be batched.
 		#[pallet::call_index(4)]
 		#[pallet::weight({
-			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
-			let dispatch_weight = dispatch_infos.iter()
-				.map(|di| di.weight)
-				.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
-				.saturating_add(T::WeightInfo::force_batch(calls.len() as u32));
-			let dispatch_class = {
-				let all_operational = dispatch_infos.iter()
-					.map(|di| di.class)
-					.all(|class| class == DispatchClass::Operational);
-				if all_operational {
-					DispatchClass::Operational
-				} else {
-					DispatchClass::Normal
-				}
-			};
+			let (dispatch_weight, dispatch_class) = Pallet::<T>::weight_and_dispatch_class(&calls);
+			let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32));
 			(dispatch_weight, dispatch_class)
 		})]
 		pub fn force_batch(
@@ -494,6 +455,27 @@ pub mod pallet {
 			res.map(|_| ()).map_err(|e| e.error)
 		}
 	}
+
+	impl<T: Config> Pallet<T> {
+		/// Get the accumulated `weight` and the dispatch class for the given `calls`.
+		fn weight_and_dispatch_class(
+			calls: &[<T as Config>::RuntimeCall],
+		) -> (Weight, DispatchClass) {
+			let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info());
+			let (dispatch_weight, dispatch_class) = dispatch_infos.fold(
+				(Weight::zero(), DispatchClass::Operational),
+				|(total_weight, dispatch_class): (Weight, DispatchClass), di| {
+					(
+						total_weight.saturating_add(di.weight),
+						// If not all are `Operational`, we want to use `DispatchClass::Normal`.
+						if di.class == DispatchClass::Normal { di.class } else { dispatch_class },
+					)
+				},
+			);
+
+			(dispatch_weight, dispatch_class)
+		}
+	}
 }
 
 /// A pallet identifier. These are per pallet and should be stored in a registry somewhere.
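The shared helper replaces three copies of the collect-then-scan logic with a single fold, so its class semantics deserve spelling out: the accumulator starts at `Operational` and is demoted to `Normal` by the first `Normal` call, which matches the old `all(...)` check (including the empty-batch case). A standalone sketch of the same fold in plain Rust, with no FRAME types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Class {
	Normal,
	Operational,
}

/// Mirror of `weight_and_dispatch_class`: saturating weight sum, and
/// `Operational` only if every call in the batch is `Operational`.
fn combine(infos: &[(u64, Class)]) -> (u64, Class) {
	infos.iter().fold((0u64, Class::Operational), |(weight, class), (w, c)| {
		(weight.saturating_add(*w), if *c == Class::Normal { *c } else { class })
	})
}

fn main() {
	let all_op = [(10, Class::Operational), (20, Class::Operational)];
	let mixed = [(10, Class::Operational), (20, Class::Normal)];
	assert_eq!(combine(&all_op), (30, Class::Operational));
	assert_eq!(combine(&mixed), (30, Class::Normal));
}
```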
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index d254bf20601f..4b5c35562bde 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -341,7 +341,7 @@ pub use sp_api_proc_macro::decl_runtime_apis;
 /// // Here we are exposing the runtime api versions.
 /// apis: RUNTIME_API_VERSIONS,
 /// transaction_version: 1,
-/// state_version: 1,
+/// system_version: 1,
 /// };
 ///
 /// # fn main() {}
diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs
index fd0c5795cbfd..d7386a71a0d1 100644
--- a/substrate/primitives/blockchain/src/backend.rs
+++ b/substrate/primitives/blockchain/src/backend.rs
@@ -17,6 +17,7 @@
 
 //! Substrate blockchain trait
 
+use codec::{Decode, Encode};
 use parking_lot::RwLock;
 use sp_runtime::{
 	generic::BlockId,
@@ -109,7 +110,7 @@ pub trait ForkBackend<Block: BlockT>:
 					for block in tree_route.retracted() {
 						expanded_forks.insert(block.hash);
 					}
-					continue
+					continue;
 				},
 				Err(_) => {
 					// There are cases when blocks are missing (e.g. warp-sync).
@@ -196,7 +197,7 @@ pub trait Backend<Block: BlockT>:
 			let info = self.info();
 			if info.finalized_number > *base_header.number() {
 				// `base_header` is on a dead fork.
-				return Ok(None)
+				return Ok(None);
 			}
 			self.leaves()?
 		};
@@ -207,7 +208,7 @@ pub trait Backend<Block: BlockT>:
 		// go backwards through the chain (via parent links)
 		loop {
 			if current_hash == base_hash {
-				return Ok(Some(leaf_hash))
+				return Ok(Some(leaf_hash));
 			}
 
 			let current_header = self
@@ -216,7 +217,7 @@ pub trait Backend<Block: BlockT>:
 
 			// stop search in this chain once we go below the target's block number
 			if current_header.number() < base_header.number() {
-				break
+				break;
 			}
 
 			current_hash = *current_header.parent_hash();
@@ -266,7 +267,7 @@ pub trait Backend<Block: BlockT>:
 
 		// If we have only one leaf there are no forks, and we can return early.
 		if finalized_block_number == Zero::zero() || leaves.len() == 1 {
-			return Ok(DisplacedLeavesAfterFinalization::default())
+			return Ok(DisplacedLeavesAfterFinalization::default());
 		}
 
 		// Store hashes of finalized blocks for quick checking later, the last block is the
@@ -332,7 +333,7 @@ pub trait Backend<Block: BlockT>:
 					elapsed = ?now.elapsed(),
 					"Added genesis leaf to displaced leaves."
 				);
-				continue
+				continue;
 			}
 
 			debug!(
@@ -539,6 +540,29 @@ impl<Block: BlockT> DisplacedLeavesAfterFinalization<Block> {
 	}
 }
 
+/// Represents the type of block gaps that may result from either warp sync or fast sync.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
+pub enum BlockGapType {
+	/// Both the header and body are missing, as a result of warp sync.
+	MissingHeaderAndBody,
+	/// The block body is missing, as a result of fast sync.
+	MissingBody,
+}
+
+/// Represents the block gap resulting from warp sync or fast sync.
+///
+/// A block gap is a range of blocks where either the bodies, or both headers and bodies, are
+/// missing.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
+pub struct BlockGap<N> {
+	/// The starting block number of the gap (inclusive).
+	pub start: N,
+	/// The ending block number of the gap (inclusive).
+	pub end: N,
+	/// The type of gap.
+	pub gap_type: BlockGapType,
+}
+
 /// Blockchain info
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub struct Info<Block: BlockT> {
@@ -556,8 +580,8 @@ pub struct Info<Block: BlockT> {
 	pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
 	/// Number of concurrent leaf forks.
 	pub number_leaves: usize,
-	/// Missing blocks after warp sync. (start, end).
-	pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
+	/// Missing blocks after warp sync or fast sync.
+	pub block_gap: Option<BlockGap<NumberFor<Block>>>,
}
 
 /// Block status.
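Since `BlockGap` derives `Encode`/`Decode`, it can be SCALE round-tripped (e.g. by a client persisting the gap across restarts). A self-contained sketch using a local mirror of the type, so it compiles with only `parity-scale-codec` and without `sp-blockchain`:

```rust
use codec::{Decode, Encode};

// Local mirror of the new `BlockGap`/`BlockGapType` for illustration only.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
enum BlockGapType {
	MissingHeaderAndBody,
	MissingBody,
}

#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
struct BlockGap<N> {
	start: N,
	end: N,
	gap_type: BlockGapType,
}

fn main() {
	// A fast-sync gap: headers for 1..=1000 exist, bodies are still missing.
	let gap = BlockGap { start: 1u32, end: 1_000u32, gap_type: BlockGapType::MissingBody };
	let encoded = gap.encode();
	assert_eq!(BlockGap::<u32>::decode(&mut &encoded[..]).unwrap(), gap);
}
```

Carrying the `gap_type` alongside the `(start, end)` pair is the substantive change: the old tuple could not distinguish a warp-sync gap (headers and bodies missing) from a fast-sync gap (bodies only).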
diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs
index ba1ea3769724..260c9a91855a 100644
--- a/substrate/primitives/runtime/src/lib.rs
+++ b/substrate/primitives/runtime/src/lib.rs
@@ -49,7 +49,7 @@ extern crate alloc;
 
 #[doc(hidden)]
-pub use alloc::vec::Vec;
+pub use alloc::{format, vec::Vec};
 #[doc(hidden)]
 pub use codec;
 #[doc(hidden)]
@@ -79,8 +79,6 @@ use sp_core::{
 	sr25519,
 };
 
-#[cfg(all(not(feature = "std"), feature = "serde"))]
-use alloc::format;
 use alloc::vec;
 use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs
index 71aacf07a762..bb0347badcbb 100644
--- a/substrate/primitives/runtime/src/runtime_string.rs
+++ b/substrate/primitives/runtime/src/runtime_string.rs
@@ -50,7 +50,7 @@ macro_rules! format_runtime_string {
 		}
 		#[cfg(not(feature = "std"))]
 		{
-			sp_runtime::RuntimeString::Owned(alloc::format!($($args)*).as_bytes().to_vec())
+			sp_runtime::RuntimeString::Owned($crate::format!($($args)*).as_bytes().to_vec())
 		}
 	}};
 }
diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs
index 3b9afae4ca07..4b25f85fba68 100644
--- a/substrate/primitives/storage/src/lib.rs
+++ b/substrate/primitives/storage/src/lib.rs
@@ -444,6 +444,7 @@ impl TryFrom<u8> for StateVersion {
 		match val {
 			0 => Ok(StateVersion::V0),
 			1 => Ok(StateVersion::V1),
+			2 => Ok(StateVersion::V1),
 			_ => Err(()),
 		}
 	}
diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml
index 35c49360b7f8..a3be654547d9 100644
--- a/substrate/primitives/version/proc-macro/Cargo.toml
+++ b/substrate/primitives/version/proc-macro/Cargo.toml
@@ -20,6 +20,7 @@ proc-macro = true
 
 [dependencies]
 codec = { features = ["derive"], workspace = true, default-features = true }
+proc-macro-warning = { workspace = true }
 proc-macro2 = { workspace = true }
 quote = { workspace = true }
 syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true }
diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
index 3671d4aff6bb..b4f749c90f59 100644
--- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
+++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
@@ -17,6 +17,7 @@
 
 use codec::Encode;
 use proc_macro2::{Span, TokenStream};
+use proc_macro_warning::Warning;
 use quote::quote;
 use syn::{
 	parse::{Error, Result},
@@ -37,13 +38,19 @@ pub fn decl_runtime_version_impl(input: proc_macro::TokenStream) -> proc_macro::
 }
 
 fn decl_runtime_version_impl_inner(item: ItemConst) -> Result<TokenStream> {
-	let runtime_version = ParseRuntimeVersion::parse_expr(&item.expr)?.build(item.expr.span())?;
+	let (parsed_runtime_version, warnings) = ParseRuntimeVersion::parse_expr(&item.expr)?;
+	let runtime_version = parsed_runtime_version.build(item.expr.span())?;
 
 	let link_section =
 		generate_emit_link_section_decl(&runtime_version.encode(), "runtime_version");
 
 	Ok(quote! {
 		#item
 		#link_section
+		const _: () = {
+			#(
+				#warnings
+			)*
+		};
 	})
 }
@@ -63,7 +70,7 @@ struct RuntimeVersion {
 	impl_version: u32,
 	apis: u8,
 	transaction_version: u32,
-	state_version: u8,
+	system_version: u8,
 }
 
 #[derive(Default, Debug)]
@@ -74,11 +81,11 @@ struct ParseRuntimeVersion {
 	spec_version: Option<u32>,
 	impl_version: Option<u32>,
 	transaction_version: Option<u32>,
-	state_version: Option<u8>,
+	system_version: Option<u8>,
 }
 
 impl ParseRuntimeVersion {
-	fn parse_expr(init_expr: &Expr) -> Result<ParseRuntimeVersion> {
+	fn parse_expr(init_expr: &Expr) -> Result<(ParseRuntimeVersion, Vec<Warning>)> {
 		let init_expr = match init_expr {
 			Expr::Struct(ref e) => e,
 			_ =>
@@ -86,13 +93,14 @@ impl ParseRuntimeVersion {
 		};
 
 		let mut parsed = ParseRuntimeVersion::default();
+		let mut warnings = vec![];
 		for field_value in init_expr.fields.iter() {
-			parsed.parse_field_value(field_value)?;
+			warnings.append(&mut parsed.parse_field_value(field_value)?)
 		}
-		Ok(parsed)
+		Ok((parsed, warnings))
 	}
 
-	fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> {
+	fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<Vec<Warning>> {
 		let field_name = match field_value.member {
 			syn::Member::Named(ref ident) => ident,
 			syn::Member::Unnamed(_) =>
@@ -112,6 +120,7 @@ impl ParseRuntimeVersion {
 			}
 		}
 
+		let mut warnings = vec![];
 		if field_name == "spec_name" {
 			parse_once(&mut self.spec_name, field_value, Self::parse_str_literal)?;
 		} else if field_name == "impl_name" {
@@ -125,7 +134,16 @@ impl ParseRuntimeVersion {
 		} else if field_name == "transaction_version" {
 			parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?;
 		} else if field_name == "state_version" {
-			parse_once(&mut self.state_version, field_value, Self::parse_num_literal_u8)?;
+			let warning = Warning::new_deprecated("RuntimeVersion")
+				.old("state_version")
+				.new("system_version")
+				.help_link("https://github.com/paritytech/polkadot-sdk/pull/4257")
+				.span(field_name.span())
+				.build_or_panic();
+			warnings.push(warning);
+			parse_once(&mut self.system_version, field_value, Self::parse_num_literal_u8)?;
+		} else if field_name == "system_version" {
+			parse_once(&mut self.system_version, field_value, Self::parse_num_literal_u8)?;
 		} else if field_name == "apis" {
 			// Intentionally ignored
 			//
@@ -136,7 +154,7 @@ impl ParseRuntimeVersion {
 			return Err(Error::new(field_name.span(), "unknown field"))
 		}
 
-		Ok(())
+		Ok(warnings)
 	}
 
 	fn parse_num_literal(expr: &Expr) -> Result<u32> {
@@ -198,7 +216,7 @@ impl ParseRuntimeVersion {
 			spec_version,
 			impl_version,
 			transaction_version,
-			state_version,
+			system_version,
 		} = self;
 
 		Ok(RuntimeVersion {
@@ -208,7 +226,7 @@ impl ParseRuntimeVersion {
 			spec_version: required!(spec_version),
 			impl_version: required!(impl_version),
 			transaction_version: required!(transaction_version),
-			state_version: required!(state_version),
+			system_version: required!(system_version),
 			apis: 0,
 		})
 	}
@@ -240,7 +258,7 @@ mod tests {
 			impl_version: 1,
 			apis: 0,
 			transaction_version: 2,
-			state_version: 1,
+			system_version: 1,
 		}
 		.encode();
 
@@ -255,7 +273,7 @@ mod tests {
 				impl_version: 1,
 				apis: Cow::Owned(vec![]),
 				transaction_version: 2,
-				state_version: 1,
+				system_version: 1,
 			},
 		);
 	}
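From a runtime author's perspective, both spellings are accepted during the migration window; only the old one now triggers a compile-time deprecation warning pointing at the PR linked above. A sketch of the declaration (the surrounding `RUNTIME_API_VERSIONS` constant and imports are assumed, as in the doc examples elsewhere in this diff):

```rust
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("example"),
	impl_name: create_runtime_str!("example"),
	authoring_version: 1,
	spec_version: 1,
	impl_version: 1,
	apis: RUNTIME_API_VERSIONS,
	transaction_version: 1,
	// New spelling. Writing `state_version: 1` here still compiles, but
	// emits the deprecation warning constructed in `parse_field_value`.
	system_version: 1,
};
```

The warnings are emitted inside an anonymous `const _: () = { ... };` block so they surface at the use site without affecting the generated `runtime_version` link section.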
#[cfg(feature = "std")] use std::collections::HashSet; -#[cfg(feature = "std")] -use std::fmt; #[doc(hidden)] pub use alloc::borrow::Cow; @@ -83,7 +83,7 @@ pub mod embed; /// impl_version: 1, /// apis: RUNTIME_API_VERSIONS, /// transaction_version: 2, -/// state_version: 1, +/// system_version: 1, /// }; /// /// # const RUNTIME_API_VERSIONS: sp_version::ApisVec = sp_version::create_apis_vec!([]); @@ -160,8 +160,6 @@ macro_rules! create_apis_vec { /// `authoring_version`, absolutely not `impl_version` since they change the semantics of the /// runtime. #[derive(Clone, PartialEq, Eq, Encode, Default, sp_runtime::RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] pub struct RuntimeVersion { /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. /// A different on-chain spec_name to that of the native runtime would normally result @@ -200,13 +198,6 @@ pub struct RuntimeVersion { pub impl_version: u32, /// List of supported API "features" along with their versions. - #[cfg_attr( - feature = "serde", - serde( - serialize_with = "apis_serialize::serialize", - deserialize_with = "apis_serialize::deserialize", - ) - )] pub apis: ApisVec, /// All existing calls (dispatchables) are fully compatible when this number doesn't change. If @@ -230,9 +221,406 @@ pub struct RuntimeVersion { /// This number should never decrease. pub transaction_version: u32, - /// Version of the state implementation used by this runtime. + /// Version of the system implementation used by this runtime. /// Use of an incorrect version is consensus breaking. - pub state_version: u8, + pub system_version: u8, +} + +// Manual implementation in order to sprinkle `stateVersion` at the end for migration purposes +// after the field was renamed from `state_version` to `system_version` +#[cfg(feature = "serde")] +impl serde::Serialize for RuntimeVersion { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + + let mut state = serializer.serialize_struct("RuntimeVersion", 9)?; + state.serialize_field("specName", &self.spec_name)?; + state.serialize_field("implName", &self.impl_name)?; + state.serialize_field("authoringVersion", &self.authoring_version)?; + state.serialize_field("specVersion", &self.spec_version)?; + state.serialize_field("implVersion", &self.impl_version)?; + state.serialize_field("apis", { + struct SerializeWith<'a>(&'a ApisVec); + + impl<'a> serde::Serialize for SerializeWith<'a> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + apis_serialize::serialize(self.0, serializer) + } + } + + &SerializeWith(&self.apis) + })?; + state.serialize_field("transactionVersion", &self.transaction_version)?; + state.serialize_field("systemVersion", &self.system_version)?; + state.serialize_field("stateVersion", &self.system_version)?; + state.end() + } +} + +// Manual implementation in order to allow both old `stateVersion` and new `systemVersion` to be +// present at the same time +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for RuntimeVersion { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use core::marker::PhantomData; + + enum Field { + SpecName, + ImplName, + AuthoringVersion, + SpecVersion, + ImplVersion, + Apis, + TransactionVersion, + SystemVersion, + Ignore, + } + + struct FieldVisitor; + + impl<'de> serde::de::Visitor<'de> for 
+		impl<'de> serde::de::Visitor<'de> for FieldVisitor {
+			type Value = Field;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				formatter.write_str("field identifier")
+			}
+
+			fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
+			where
+				E: serde::de::Error,
+			{
+				match value {
+					0 => Ok(Field::SpecName),
+					1 => Ok(Field::ImplName),
+					2 => Ok(Field::AuthoringVersion),
+					3 => Ok(Field::SpecVersion),
+					4 => Ok(Field::ImplVersion),
+					5 => Ok(Field::Apis),
+					6 => Ok(Field::TransactionVersion),
+					7 => Ok(Field::SystemVersion),
+					_ => Ok(Field::Ignore),
+				}
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+			where
+				E: serde::de::Error,
+			{
+				match value {
+					"specName" => Ok(Field::SpecName),
+					"implName" => Ok(Field::ImplName),
+					"authoringVersion" => Ok(Field::AuthoringVersion),
+					"specVersion" => Ok(Field::SpecVersion),
+					"implVersion" => Ok(Field::ImplVersion),
+					"apis" => Ok(Field::Apis),
+					"transactionVersion" => Ok(Field::TransactionVersion),
+					"systemVersion" | "stateVersion" => Ok(Field::SystemVersion),
+					_ => Ok(Field::Ignore),
+				}
+			}
+
+			fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
+			where
+				E: serde::de::Error,
+			{
+				match value {
+					b"specName" => Ok(Field::SpecName),
+					b"implName" => Ok(Field::ImplName),
+					b"authoringVersion" => Ok(Field::AuthoringVersion),
+					b"specVersion" => Ok(Field::SpecVersion),
+					b"implVersion" => Ok(Field::ImplVersion),
+					b"apis" => Ok(Field::Apis),
+					b"transactionVersion" => Ok(Field::TransactionVersion),
+					b"systemVersion" | b"stateVersion" => Ok(Field::SystemVersion),
+					_ => Ok(Field::Ignore),
+				}
+			}
+		}
+
+		impl<'de> serde::Deserialize<'de> for Field {
+			#[inline]
+			fn deserialize<E>(deserializer: E) -> Result<Self, E::Error>
+			where
+				E: serde::Deserializer<'de>,
+			{
+				deserializer.deserialize_identifier(FieldVisitor)
+			}
+		}
+
+		struct Visitor<'de> {
+			lifetime: PhantomData<&'de ()>,
+		}
+		impl<'de> serde::de::Visitor<'de> for Visitor<'de> {
+			type Value = RuntimeVersion;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				formatter.write_str("struct RuntimeVersion")
+			}
+
+			#[inline]
+			fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+			where
+				A: serde::de::SeqAccess<'de>,
+			{
+				let spec_name = match seq.next_element()? {
+					Some(spec_name) => spec_name,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							0usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let impl_name = match seq.next_element()? {
+					Some(impl_name) => impl_name,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							1usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let authoring_version = match seq.next_element()? {
+					Some(authoring_version) => authoring_version,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							2usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let spec_version = match seq.next_element()? {
+					Some(spec_version) => spec_version,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							3usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let impl_version = match seq.next_element()? {
+					Some(impl_version) => impl_version,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							4usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let apis = match {
+					struct DeserializeWith<'de> {
+						value: ApisVec,
+
+						phantom: PhantomData<RuntimeVersion>,
+						lifetime: PhantomData<&'de ()>,
+					}
+					impl<'de> serde::Deserialize<'de> for DeserializeWith<'de> {
+						fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+						where
+							D: serde::Deserializer<'de>,
+						{
+							Ok(DeserializeWith {
+								value: apis_serialize::deserialize(deserializer)?,
+								phantom: PhantomData,
+								lifetime: PhantomData,
+							})
+						}
+					}
+					seq.next_element::<DeserializeWith<'de>>()?.map(|wrap| wrap.value)
+				} {
+					Some(apis) => apis,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							5usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let transaction_version = match seq.next_element()? {
+					Some(transaction_version) => transaction_version,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							6usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				let system_version = match seq.next_element()? {
+					Some(system_version) => system_version,
+					None =>
+						return Err(serde::de::Error::invalid_length(
+							7usize,
+							&"struct RuntimeVersion with 8 elements",
+						)),
+				};
+				Ok(RuntimeVersion {
+					spec_name,
+					impl_name,
+					authoring_version,
+					spec_version,
+					impl_version,
+					apis,
+					transaction_version,
+					system_version,
+				})
+			}
+
+			#[inline]
+			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+			where
+				A: serde::de::MapAccess<'de>,
+			{
+				let mut spec_name: Option<RuntimeString> = None;
+				let mut impl_name: Option<RuntimeString> = None;
+				let mut authoring_version: Option<u32> = None;
+				let mut spec_version: Option<u32> = None;
+				let mut impl_version: Option<u32> = None;
+				let mut apis: Option<ApisVec> = None;
+				let mut transaction_version: Option<u32> = None;
+				let mut system_version: Option<u8> = None;
+
+				while let Some(key) = map.next_key()? {
+					match key {
+						Field::SpecName => {
+							if spec_name.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"specName",
+								));
+							}
+							spec_name = Some(map.next_value()?);
+						},
+						Field::ImplName => {
+							if impl_name.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"implName",
+								));
+							}
+							impl_name = Some(map.next_value()?);
+						},
+						Field::AuthoringVersion => {
+							if authoring_version.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"authoringVersion",
+								));
+							}
+							authoring_version = Some(map.next_value()?);
+						},
+						Field::SpecVersion => {
+							if spec_version.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"specVersion",
+								));
+							}
+							spec_version = Some(map.next_value()?);
+						},
+						Field::ImplVersion => {
+							if impl_version.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"implVersion",
+								));
+							}
+							impl_version = Some(map.next_value()?);
+						},
+						Field::Apis => {
+							if apis.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field("apis"));
+							}
+							apis = Some({
+								struct DeserializeWith<'de> {
+									value: ApisVec,
+									lifetime: PhantomData<&'de ()>,
+								}
+								impl<'de> serde::Deserialize<'de> for DeserializeWith<'de> {
+									fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+									where
+										D: serde::Deserializer<'de>,
+									{
+										Ok(DeserializeWith {
+											value: apis_serialize::deserialize(deserializer)?,
+											lifetime: PhantomData,
+										})
+									}
+								}
+
+								map.next_value::<DeserializeWith<'de>>()?.value
+							});
+						},
+						Field::TransactionVersion => {
+							if transaction_version.is_some() {
+								return Err(<A::Error as serde::de::Error>::duplicate_field(
+									"transactionVersion",
+								));
+							}
+							transaction_version = Some(map.next_value()?);
+						},
+						Field::SystemVersion =>
+							if let Some(system_version) = system_version {
+								let new_value = map.next_value::<u8>()?;
+								if system_version != new_value {
+									return Err(<A::Error as serde::de::Error>::custom(
+										alloc::format!(
+											"Duplicated \"stateVersion\" and \"systemVersion\" \
+											fields must have the same value, but different values \
+											were provided: {system_version} vs {new_value}"
+										),
+									));
+								}
+							} else {
+								system_version = Some(map.next_value()?);
+							},
+						_ => {
+							let _ = map.next_value::<serde::de::IgnoredAny>()?;
+						},
+					}
+				}
+
+				let spec_name = spec_name
+					.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("specName"))?;
+				let impl_name = impl_name
+					.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("implName"))?;
+				let authoring_version = authoring_version.ok_or_else(|| {
+					<A::Error as serde::de::Error>::missing_field("authoringVersion")
+				})?;
+				let spec_version = spec_version
+					.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("specVersion"))?;
+				let impl_version = impl_version
+					.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("implVersion"))?;
+				let apis =
+					apis.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("apis"))?;
+				let transaction_version = transaction_version.ok_or_else(|| {
+					<A::Error as serde::de::Error>::missing_field("transactionVersion")
+				})?;
+				let system_version = system_version.ok_or_else(|| {
+					<A::Error as serde::de::Error>::missing_field("systemVersion")
+				})?;
+				Ok(RuntimeVersion {
+					spec_name,
+					impl_name,
+					authoring_version,
+					spec_version,
+					impl_version,
+					apis,
+					transaction_version,
+					system_version,
+				})
+			}
+		}
+
+		const FIELDS: &[&str] = &[
+			"specName",
+			"implName",
+			"authoringVersion",
+			"specVersion",
+			"implVersion",
+			"apis",
+			"transactionVersion",
+			"stateVersion",
+			"systemVersion",
+		];
+
+		deserializer.deserialize_struct("RuntimeVersion", FIELDS, Visitor { lifetime: PhantomData })
+	}
 }
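The net effect of the two hand-rolled impls, sketched with `serde_json` (an assumption here; any serde format works, and the `serde` feature of `sp-version` must be enabled): legacy JSON carrying only `stateVersion` still deserializes, serialization emits both spellings, and conflicting duplicate values are rejected.

```rust
fn main() {
	// Old-style document: only the legacy `stateVersion` key is present.
	let legacy = r#"{
		"specName": "example",
		"implName": "example",
		"authoringVersion": 1,
		"specVersion": 1,
		"implVersion": 1,
		"apis": [],
		"transactionVersion": 1,
		"stateVersion": 1
	}"#;
	let version: sp_version::RuntimeVersion = serde_json::from_str(legacy).unwrap();
	assert_eq!(version.system_version, 1);

	// Re-serializing emits `systemVersion` plus a trailing `stateVersion`
	// for consumers that have not migrated yet.
	let out = serde_json::to_string(&version).unwrap();
	assert!(out.contains("\"systemVersion\":1"));
	assert!(out.contains("\"stateVersion\":1"));
}
```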
@@ -257,7 +645,7 @@ impl RuntimeVersion {
 			if core_version.is_some() { core_version } else { core_version_from_apis(&apis) };
 		let transaction_version =
 			if core_version.map(|v| v >= 3).unwrap_or(false) { Decode::decode(input)? } else { 1 };
-		let state_version =
+		let system_version =
 			if core_version.map(|v| v >= 4).unwrap_or(false) { Decode::decode(input)? } else { 0 };
 		Ok(RuntimeVersion {
 			spec_name,
@@ -267,7 +655,7 @@ impl RuntimeVersion {
 			impl_version,
 			apis,
 			transaction_version,
-			state_version,
+			system_version,
 		})
 	}
 }
@@ -334,7 +722,17 @@ impl RuntimeVersion {
 	/// Otherwise, the V1 trie version will be used.
 	pub fn state_version(&self) -> StateVersion {
 		// If the version is greater than 1, keep using the latest version.
-		self.state_version.try_into().unwrap_or(StateVersion::V1)
+		self.system_version.try_into().unwrap_or(StateVersion::V1)
+	}
+
+	/// Returns the state version to use for the extrinsics root.
+	pub fn extrinsics_root_state_version(&self) -> StateVersion {
+		match self.system_version {
+			// For system versions 0 and 1, return V0.
+			0 | 1 => StateVersion::V0,
+			// For anything above 1, return V1.
+			_ => StateVersion::V1,
+		}
 	}
 }
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 514f3bcba204..840081003b84 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -119,7 +119,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 2,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 fn version() -> RuntimeVersion {
diff --git a/templates/minimal/README.md b/templates/minimal/README.md
index 180c229e744e..fe1317a033c7 100644
--- a/templates/minimal/README.md
+++ b/templates/minimal/README.md
@@ -37,6 +37,14 @@ A Polkadot SDK based project such as this one consists of:
 * 🛠ī¸ Depending on your operating system and Rust version, there might be additional
   packages required to compile this template - please take note of the Rust compiler output.
 
+Fetch the minimal template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-minimal-template.git minimal-template
+
+cd minimal-template
+```
+
 ### Build 🔨
 
 Use the following command to build the node without launching it:
diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs
index 474d9ddfb9e8..cce13c48af71 100644
--- a/templates/minimal/runtime/src/lib.rs
+++ b/templates/minimal/runtime/src/lib.rs
@@ -46,7 +46,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 1,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 /// The version information used to identify this runtime when compiled natively.
diff --git a/templates/parachain/README.md b/templates/parachain/README.md
index b912d8e005c7..3de85cbeb4dc 100644
--- a/templates/parachain/README.md
+++ b/templates/parachain/README.md
@@ -39,6 +39,14 @@ A Polkadot SDK based project such as this one consists of:
 * 🛠ī¸ Depending on your operating system and Rust version, there might be additional
   packages required to compile this template - please take note of the Rust compiler output.
 
+Fetch the parachain template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-parachain-template.git parachain-template
+
+cd parachain-template
+```
+
 ### Build 🔨
 
 Use the following command to build the node without launching it:
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs
index 83ae15700a94..ccec648ce4c1 100644
--- a/templates/parachain/runtime/src/lib.rs
+++ b/templates/parachain/runtime/src/lib.rs
@@ -172,7 +172,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 0,
 	apis: apis::RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 #[docify::export]
diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 6a5a7853f9c0..c4ce5c7f3fbb 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -23,9 +23,17 @@ packages required to compile this template. Check the
 the most common dependencies. Alternatively, you can use one of the
 [alternative installation](#alternatives-installations) options.
 
+Fetch the solochain template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-solochain-template.git solochain-template
+
+cd solochain-template
+```
+
 ### Build
 
-Use the following command to build the node without launching it:
+🔨 Use the following command to build the node without launching it:
 
 ```sh
 cargo build --release
diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs
index 6cbfbb879602..ce38c65479e5 100644
--- a/templates/solochain/runtime/src/lib.rs
+++ b/templates/solochain/runtime/src/lib.rs
@@ -71,7 +71,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	impl_version: 1,
 	apis: apis::RUNTIME_API_VERSIONS,
 	transaction_version: 1,
-	state_version: 1,
+	system_version: 1,
 };
 
 mod block_times {