diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3353780f4..bee5257e1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,7 +34,7 @@ jobs: if: github.event.pull_request.draft == false strategy: matrix: - crate: [messier-runtime, gargantua-runtime, nexus-runtime] + crate: [gargantua-runtime, nexus-runtime] steps: - name: Get User Permission diff --git a/Cargo.lock b/Cargo.lock index b5a6fc80c..b1c698efd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -997,6 +997,17 @@ dependencies = [ "url", ] +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + [[package]] name = "auto_impl" version = "1.2.0" @@ -4325,6 +4336,19 @@ dependencies = [ "regex", ] +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "env_logger" version = "0.10.2" @@ -6062,6 +6086,7 @@ dependencies = [ "hyperbridge-client-machine", "ismp", "ismp-bsc", + "ismp-grandpa", "ismp-parachain", "ismp-parachain-runtime-api", "ismp-sync-committee", @@ -6525,6 +6550,83 @@ dependencies = [ "serde_json", ] +[[package]] +name = "grandpa-prover" +version = "0.1.0" +dependencies = [ + "anyhow", + "derive_more", + "downcast-rs", + "finality-grandpa", + "grandpa-verifier-primitives", + "hex", + "ismp", + "jsonrpsee 0.23.2", + "jsonrpsee-ws-client 0.23.2", + "parity-scale-codec", + "sc-consensus-grandpa-rpc", + "serde", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.0", + "sp-state-machine 0.43.0", + "sp-trie 37.0.0", + "subxt", + "subxt-utils", +] + +[[package]] +name = "grandpa-verifier" +version = "0.1.0" +dependencies = [ + "anyhow", + "derive_more", + "env_logger 0.9.3", + "finality-grandpa", + "frame-support 37.0.0", + "futures", + "grandpa-prover", + "grandpa-verifier-primitives", + "hex", + "hex-literal 0.3.4", + "ismp", + "log", + "parity-scale-codec", + "polkadot-core-primitives", + "sc-consensus-grandpa-rpc", + "serde", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.0", + "sp-std 14.0.0", + "sp-storage 21.0.0", + "sp-trie 37.0.0", + "substrate-state-machine", + "subxt", + "subxt-utils", + "tokio", +] + +[[package]] +name = "grandpa-verifier-primitives" +version = "0.1.0" +dependencies = [ + "anyhow", + "finality-grandpa", + "frame-support 37.0.0", + "ismp", + "log", + "parity-scale-codec", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.0", + "sp-std 14.0.0", + "sp-storage 21.0.0", + "sp-trie 37.0.0", +] + [[package]] name = "graphql-parser" version = "0.3.0" @@ -6713,6 +6815,15 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + [[package]] name = "hermit-abi" version = "0.3.9" @@ -7074,7 +7185,6 @@ dependencies = [ "ismp-parachain-runtime-api", "jsonrpsee 0.23.2", "log", - "messier-runtime", 
"mmr-gadget 29.0.1", "nexus-runtime", "pallet-ismp", @@ -7168,7 +7278,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "serde", - "serde-utils", + "serde-hex-utils", "serde-wasm-bindgen 0.6.5", "serde_json", "sp-core 34.0.0", @@ -7552,7 +7662,7 @@ dependencies = [ "primitive-types", "scale-info", "serde", - "serde-utils", + "serde-hex-utils", "serde_json", ] @@ -7577,6 +7687,30 @@ dependencies = [ name = "ismp-casper-ffg" version = "0.1.1" +[[package]] +name = "ismp-grandpa" +version = "1.15.0" +dependencies = [ + "ckb-merkle-mountain-range", + "cumulus-primitives-core", + "finality-grandpa", + "frame-support 37.0.0", + "frame-system", + "grandpa-verifier", + "grandpa-verifier-primitives", + "ismp", + "pallet-ismp", + "parity-scale-codec", + "primitive-types", + "scale-info", + "sp-consensus-aura 0.40.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.0", + "sp-trie 37.0.0", + "substrate-state-machine", +] + [[package]] name = "ismp-parachain" version = "1.15.0" @@ -9892,91 +10026,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "messier-runtime" -version = "0.1.1" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-timestamp", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support 37.0.0", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime 0.43.0", - "hex-literal 0.4.1", - "hyperbridge-client-machine", - "ismp", - "ismp-bsc", - "ismp-parachain", - "ismp-parachain-runtime-api", - "ismp-sync-committee", - "log", - "orml-traits", - "orml-xcm-support", - "pallet-asset-gateway", - "pallet-asset-rate", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-call-decompressor", - "pallet-collator-selection", - "pallet-collective", - "pallet-ismp", - "pallet-ismp-demo", - "pallet-ismp-host-executive", - "pallet-ismp-relayer", - "pallet-ismp-runtime-api", - "pallet-message-queue", - "pallet-mmr 0.1.1", - "pallet-mmr-runtime-api", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-token-governor", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-treasury", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "simnode-runtime-api", - "smallvec", - "sp-api 34.0.0", - "sp-block-builder 34.0.0", - "sp-consensus-aura 0.40.0", - "sp-core 34.0.0", - "sp-genesis-builder 0.15.0", - "sp-inherents 34.0.0", - "sp-mmr-primitives", - "sp-offchain 34.0.0", - "sp-runtime 39.0.0", - "sp-session 35.0.0", - "sp-std 14.0.0", - "sp-storage 21.0.0", - "sp-transaction-pool 34.0.0", - "sp-version 37.0.0", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "metrics" version = "0.18.1" @@ -19819,7 +19868,7 @@ dependencies = [ ] [[package]] -name = "serde-utils" +name = "serde-hex-utils" version = "0.1.0" dependencies = [ "anyhow", @@ -23016,6 +23065,7 @@ dependencies = [ "scale-info", "serde", "sp-consensus-aura 0.40.0", + "sp-consensus-babe 0.40.0", "sp-runtime 39.0.0", "sp-trie 37.0.0", ] @@ -23313,7 +23363,7 @@ dependencies = [ "parity-scale-codec", 
"primitive-types", "serde", - "serde-utils", + "serde-hex-utils", "ssz-rs", ] @@ -23625,7 +23675,7 @@ dependencies = [ "reqwest-middleware", "reqwest-retry", "serde", - "serde-utils", + "serde-hex-utils", "serde_json", "sp-core 34.0.0", "sp-mmr-primitives", @@ -23749,7 +23799,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "serde", - "serde-utils", + "serde-hex-utils", "sp-core 34.0.0", "substrate-state-machine", "subxt", diff --git a/Cargo.toml b/Cargo.toml index 026f05473..27c9c2e1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ resolver = "2" members = [ # runtime "parachain/runtimes/gargantua", - "parachain/runtimes/messier", +# "parachain/runtimes/messier", "parachain/runtimes/nexus", # node @@ -36,6 +36,7 @@ members = [ "modules/ismp/clients/optimism", "modules/ismp/clients/sync-committee/evm-common", "modules/ismp/clients/bsc", + "modules/ismp/clients/grandpa", # cryptography "modules/consensus/sync-committee/prover", @@ -46,6 +47,9 @@ members = [ "modules/consensus/geth-primitives", "modules/consensus/bsc/verifier", "modules/consensus/bsc/prover", + "modules/consensus/grandpa/prover", + "modules/consensus/grandpa/primitives", + "modules/consensus/grandpa/verifier", "modules/trees/ethereum", "modules/trees/mmr/pallet", "modules/trees/mmr/primitives", @@ -133,7 +137,9 @@ sp-io = { version = "38.0.0", default-features = false } sp-trie = { version = "37.0.0", default-features = false } sp-block-builder = { version = "34.0.0", default-features = false } sp-consensus-aura = { version = "0.40.0", default-features = false } +sp-consensus-babe = { version = "0.40.0", default-features = false } sp-consensus-beefy = { version = "22.0.0", default-features = false } +sp-consensus-grandpa = { version = "21.0.0", default-features = false } sp-core = { version = "34.0.0", default-features = false } sp-crypto-hashing = { version = "0.1.0", default-features = false } sp-inherents = { version = "34.0.0", default-features = false } @@ -169,9 +175,9 @@ sp-state-machine = { version = "0.43.0", default-features = false } sp-storage = { version = "21.0.0", default-features = false } pallet-beefy-mmr = { version = "38.0.0", default-features = false } pallet-assets = { version = "39.0.0", default-features = false } -pallet-treasury = { version = "36.0.0", default-features = false} -pallet-asset-rate = {version = "16.0.0", default-features = false} -pallet-collective = {version = "37.0.0", default-features = false} +pallet-treasury = { version = "36.0.0", default-features = false } +pallet-asset-rate = { version = "16.0.0", default-features = false } +pallet-collective = { version = "37.0.0", default-features = false } frame-metadata-hash-extension = { default-features = false, version = "0.5.0" } sp-keyring = "39.0.0" @@ -217,6 +223,8 @@ mmr-rpc = { version = "37.0.0" } xcm-simulator-example = "16.0.0" xcm-simulator = "16.0.0" polkadot-runtime-parachains = "16.0.0" +polkadot-core-primitives = "15.0.0" +sc-consensus-grandpa-rpc = "0.29.0" # crates.io serde = { version = "1", default-features = false } @@ -225,12 +233,15 @@ hash-db = { version = "0.16.0", default-features = false } memory-db = { version = "0.32.0", default-features = false } codec = { version = "3.1.3", package = "parity-scale-codec", default-features = false } log = { version = "0.4.21", default-features = false } -anyhow = {version = "1.0.71", default-features = false } +anyhow = { version = "1.0.71", default-features = false } alloy-rlp = { version = "0.3.2", default-features = false } alloy-rlp-derive = 
"0.3.2" alloy-sol-macro = "0.7.4" alloy-sol-types = { version = "0.7.4", default-features = false } -ethabi = { version = "18.0.0", features = ["rlp", "parity-codec"], default-features = false } +ethabi = { version = "18.0.0", features = [ + "rlp", + "parity-codec", +], default-features = false } orml-xcm-support = { version = "=1.0.0", default-features = false } orml-traits = { version = "=1.0.0", default-features = false } primitive-types = { version = "0.12.1", default-features = false } @@ -249,7 +260,7 @@ ismp-solidity-abi = { path = "./evm/abi", default-features = false } simnode-tests = { path = "parachain/simtests" } hyperclient = { path = "modules/hyperclient", default-features = false } subxt-utils = { path = "modules/utils/subxt", default-features = false } -serde-utils = { path = "modules/utils/serde", default-features = false } +serde-hex-utils = { version = "0.1.0", path = "modules/utils/serde", default-features = false } # consensus provers & verifiers beefy-verifier-primitives = { path = "./modules/consensus/beefy/primitives", default-features = false } @@ -260,9 +271,13 @@ geth-primitives = { path = "./modules/consensus/geth-primitives", default-featur sync-committee-primitives = { path = "./modules/consensus/sync-committee/primitives", default-features = false } sync-committee-prover = { path = "./modules/consensus/sync-committee/prover" } sync-committee-verifier = { path = "./modules/consensus/sync-committee/verifier", default-features = false } +grandpa-verifier-primitives = { version = "0.1.0", path = "./modules/consensus/grandpa/primitives", default-features = false } +grandpa-verifier = { version = "0.1.0", path = "./modules/consensus/grandpa/verifier", default-features = false } +grandpa-prover = { path = "./modules/consensus/grandpa/prover" } # consensus clients ismp-bsc = { path = "./modules/ismp/clients/bsc", default-features = false } +ismp-grandpa = { version = "1.15.0", path = "./modules/ismp/clients/grandpa", default-features = false } ismp-parachain = { version = "1.15.0", path = "./modules/ismp/clients/parachain/client", default-features = false } ismp-parachain-inherent = { version = "1.15.0", path = "./modules/ismp/clients/parachain/inherent" } ismp-parachain-runtime-api = { version = "1.15.0", path = "./modules/ismp/clients/parachain/runtime-api", default-features = false } @@ -298,7 +313,7 @@ mmr-primitives = { version = "1.15.0", path = "modules/trees/mmr/primitives", de # runtimes gargantua-runtime = { path = "./parachain/runtimes/gargantua", default-features = false } -messier-runtime = { path = "./parachain/runtimes/messier", default-features = false } +#messier-runtime = { path = "./parachain/runtimes/messier", default-features = false } nexus-runtime = { path = "./parachain/runtimes/nexus", default-features = false } # tesseract @@ -312,7 +327,7 @@ tesseract = { path = "tesseract/relayer" } transaction-fees = { path = "tesseract/fees" } telemetry-server = { path = "tesseract/telemetry" } tesseract-config = { path = "tesseract/config" } -cumulus-pallet-parachain-system= { version = "0.16.0", default-features = false } +cumulus-pallet-parachain-system = { version = "0.16.0", default-features = false } [workspace.dependencies.ethers] git = "https://github.com/polytope-labs/ethers-rs" diff --git a/modules/consensus/grandpa/Cargo.toml b/modules/consensus/grandpa/Cargo.toml deleted file mode 100644 index 2820f5236..000000000 --- a/modules/consensus/grandpa/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "ismp-grandpa" -version = "0.1.0" 
-edition = "2021" -publish = false - - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ - "derive" -] } -primitive-types = { workspace = true } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -merkle-mountain-range = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -finality-grandpa = { version = "0.16.0", features = ["derive-codec"], default-features = false } - -# polytope labs -ismp = { git = "https://github.com/polytope-labs/ismp-rs", branch = "main", default-features = false } -primitives = { package = "ismp-grandpa-primitives", path = "primitives", default-features = false } -verifier = { package = "ismp-grandpa-verifier", path = "verifier", default-features = false} -pallet-ismp = { path = "../pallet-ismp", default-features = false } - - -# substrate -frame-support = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -frame-system = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -sp-trie = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -sp-io = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -sp-runtime = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -sp-consensus-aura = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } - -# cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "release-v0.9.420", default-features = false } - -ismp-primitives = { path = "../pallet-ismp/primitives", default-features = false } -substrate-state-machine = { path = "../pallet-ismp/primitives/state-machine", default-features = false } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "cumulus-primitives-core/std", - "ismp/std", - "sp-trie/std", - "sp-consensus-aura/std", - "sp-runtime/std", - "sp-io/std", - "primitive-types/std", - "pallet-ismp/std", - "sp-core/std", - "primitives/std", - "verifier/std", - "merkle-mountain-range/std", - "ismp-primitives/std", - "substrate-state-machine/std", - "finality-grandpa/std", -] diff --git a/modules/consensus/grandpa/primitives/Cargo.toml b/modules/consensus/grandpa/primitives/Cargo.toml index 2bad5241a..cd1e051d9 100644 --- a/modules/consensus/grandpa/primitives/Cargo.toml +++ b/modules/consensus/grandpa/primitives/Cargo.toml @@ -1,27 +1,33 @@ [package] -name = "ismp-grandpa-primitives" +name = "grandpa-verifier-primitives" version = "0.1.0" edition = "2021" -publish = false - +authors = ["Polytope Labs "] +license = "Apache-2.0" +repository = "https://github.com/polytope-labs/hyperbridge" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" +documentation = "https://docs.rs/grandpa-verifier-primitives" +description = "Primitives for verifying GRANDPA consensus proofs" +keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability", "GRANDPA"] [dependencies] # crates.io -anyhow = { version = "1.0.64", default-features = false } +anyhow = { workspace = true } finality-grandpa = { version = 
"0.16.0", features = ["derive-codec"], default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +codec = { workspace = true } +log = { workspace = true } # substrate -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +frame-support = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +sp-storage = { workspace = true } +sp-consensus-grandpa = { workspace = true } # polytope -ismp = { git = "https://github.com/polytope-labs/ismp-rs", branch = "main", default-features = false } +ismp = { workspace = true } [features] default = ["std"] diff --git a/modules/consensus/grandpa/primitives/src/justification.rs b/modules/consensus/grandpa/primitives/src/justification.rs index ae1e4d33b..12b3bb9bc 100644 --- a/modules/consensus/grandpa/primitives/src/justification.rs +++ b/modules/consensus/grandpa/primitives/src/justification.rs @@ -18,10 +18,9 @@ use alloc::collections::{BTreeMap, BTreeSet}; use anyhow::anyhow; use codec::{Decode, Encode}; use finality_grandpa::voter_set::VoterSet; -use frame_support::log; use sp_consensus_grandpa::{ - AuthorityId, AuthorityList, AuthoritySignature, ConsensusLog, Equivocation, RoundNumber, - ScheduledChange, SetId, GRANDPA_ENGINE_ID, + AuthorityId, AuthorityList, AuthoritySignature, ConsensusLog, Equivocation, RoundNumber, + ScheduledChange, SetId, GRANDPA_ENGINE_ID, }; use sp_core::ed25519; use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT}; @@ -38,310 +37,310 @@ use sp_std::prelude::*; #[cfg_attr(any(feature = "std", test), derive(Debug))] #[derive(Clone, Encode, Decode, PartialEq, Eq)] pub struct GrandpaJustification { - /// Current voting round number, monotonically increasing - pub round: u64, - /// Contains block hash & number that's being finalized and the signatures. - pub commit: Commit, - /// Contains the path from a [`PreCommit`]'s target hash to the GHOST finalized block. - pub votes_ancestries: Vec, + /// Current voting round number, monotonically increasing + pub round: u64, + /// Contains block hash & number that's being finalized and the signatures. + pub commit: Commit, + /// Contains the path from a [`PreCommit`]'s target hash to the GHOST finalized block. + pub votes_ancestries: Vec, } impl GrandpaJustification where - H: HeaderT, - H::Number: finality_grandpa::BlockNumberOps, + H: HeaderT, + H::Number: finality_grandpa::BlockNumberOps, { - /// Validate the commit and the votes' ancestry proofs. 
- pub fn verify(&self, set_id: u64, authorities: &AuthorityList) -> Result<(), anyhow::Error> { - // It's safe to assume that the authority list will not contain duplicates, - // since this list is extracted from a verified relaychain header. - let voters = - VoterSet::new(authorities.iter().cloned()).ok_or(anyhow!("Invalid AuthoritiesSet"))?; - - self.verify_with_voter_set(set_id, &voters) - } - - /// Validate the commit and the votes' ancestry proofs. - pub fn verify_with_voter_set( - &self, - set_id: u64, - voters: &VoterSet, - ) -> Result<(), anyhow::Error> { - use finality_grandpa::Chain; - - let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); - - match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { - Ok(ref result) if result.is_valid() => { - if result.num_duplicated_precommits() > 0 || - result.num_invalid_voters() > 0 || - result.num_equivocations() > 0 - { - Err(anyhow!("Invalid commit, found one of `duplicate precommits`, `invalid voters`, or `equivocations` {result:?}"))? - } - } - err => { - let result = err.map_err(|_| { - anyhow!("[verify_with_voter_set] Invalid ancestry while validating commit!") - })?; - Err(anyhow!("invalid commit in grandpa justification: {result:?}"))? - } - } - - // we pick the precommit for the lowest block as the base that - // should serve as the root block for populating ancestry (i.e. - // collect all headers from all precommit blocks to the base) - let base_hash = self - .commit - .precommits - .iter() - .map(|signed| &signed.precommit) - .min_by_key(|precommit| precommit.target_number) - .map(|precommit| precommit.target_hash.clone()) - .expect( - "can only fail if precommits is empty; \ + /// Validate the commit and the votes' ancestry proofs. + pub fn verify(&self, set_id: u64, authorities: &AuthorityList) -> Result<(), anyhow::Error> { + // It's safe to assume that the authority list will not contain duplicates, + // since this list is extracted from a verified relaychain header. + let voters = + VoterSet::new(authorities.iter().cloned()).ok_or(anyhow!("Invalid AuthoritiesSet"))?; + + self.verify_with_voter_set(set_id, &voters) + } + + /// Validate the commit and the votes' ancestry proofs. + pub fn verify_with_voter_set( + &self, + set_id: u64, + voters: &VoterSet, + ) -> Result<(), anyhow::Error> { + use finality_grandpa::Chain; + + let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); + + match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { + Ok(ref result) if result.is_valid() => { + if result.num_duplicated_precommits() > 0 || + result.num_invalid_voters() > 0 || + result.num_equivocations() > 0 + { + Err(anyhow!("Invalid commit, found one of `duplicate precommits`, `invalid voters`, or `equivocations` {result:?}"))? + } + }, + err => { + let result = err.map_err(|_| { + anyhow!("[verify_with_voter_set] Invalid ancestry while validating commit!") + })?; + Err(anyhow!("invalid commit in grandpa justification: {result:?}"))? + }, + } + + // we pick the precommit for the lowest block as the base that + // should serve as the root block for populating ancestry (i.e. 
+ // collect all headers from all precommit blocks to the base) + let base_hash = self + .commit + .precommits + .iter() + .map(|signed| &signed.precommit) + .min_by_key(|precommit| precommit.target_number) + .map(|precommit| precommit.target_hash.clone()) + .expect( + "can only fail if precommits is empty; \ commit has been validated above; \ valid commits must include precommits; \ qed.", - ); - - let mut visited_hashes = BTreeSet::new(); - for signed in self.commit.precommits.iter() { - let message = finality_grandpa::Message::Precommit(signed.precommit.clone()); - - check_message_signature::<_, _>( - &message, - &signed.id, - &signed.signature, - self.round, - set_id, - )?; - - if base_hash == signed.precommit.target_hash { - continue - } - - let route = ancestry_chain - .ancestry(base_hash, signed.precommit.target_hash) - .map_err(|_| anyhow!("[verify_with_voter_set] Invalid ancestry!"))?; - // ancestry starts from parent hash but the precommit target hash has been - // visited - visited_hashes.insert(signed.precommit.target_hash); - for hash in route { - visited_hashes.insert(hash); - } - } - - let ancestry_hashes: BTreeSet<_> = - self.votes_ancestries.iter().map(|h: &H| h.hash()).collect(); - - if visited_hashes != ancestry_hashes { - Err(anyhow!( - "invalid precommit ancestries in grandpa justification with unused headers", - ))? - } - - Ok(()) - } - - /// The target block number and hash that this justifications proves finality for. - pub fn target(&self) -> (H::Number, H::Hash) { - (self.commit.target_number, self.commit.target_hash) - } + ); + + let mut visited_hashes = BTreeSet::new(); + for signed in self.commit.precommits.iter() { + let message = finality_grandpa::Message::Precommit(signed.precommit.clone()); + + check_message_signature::<_, _>( + &message, + &signed.id, + &signed.signature, + self.round, + set_id, + )?; + + if base_hash == signed.precommit.target_hash { + continue + } + + let route = ancestry_chain + .ancestry(base_hash, signed.precommit.target_hash) + .map_err(|_| anyhow!("[verify_with_voter_set] Invalid ancestry!"))?; + // ancestry starts from parent hash but the precommit target hash has been + // visited + visited_hashes.insert(signed.precommit.target_hash); + for hash in route { + visited_hashes.insert(hash); + } + } + + let ancestry_hashes: BTreeSet<_> = + self.votes_ancestries.iter().map(|h: &H| h.hash()).collect(); + + if visited_hashes != ancestry_hashes { + Err(anyhow!( + "invalid precommit ancestries in grandpa justification with unused headers", + ))? + } + + Ok(()) + } + + /// The target block number and hash that this justifications proves finality for. + pub fn target(&self) -> (H::Number, H::Hash) { + (self.commit.target_number, self.commit.target_hash) + } } /// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. /// This is useful when validating commits, using the given set of headers to /// verify a valid ancestry route to the target commit block. pub struct AncestryChain { - ancestry: BTreeMap, + ancestry: BTreeMap, } impl AncestryChain { - /// Initialize the ancestry chain given a set of relay chain headers. - pub fn new(ancestry: &[H]) -> AncestryChain { - let ancestry: BTreeMap<_, _> = ancestry.iter().cloned().map(|h: H| (h.hash(), h)).collect(); - - AncestryChain { ancestry } - } - - /// Fetch a header from the ancestry chain, given it's hash. Returns [`None`] if it doesn't - /// exist. 
- pub fn header(&self, hash: &H::Hash) -> Option<&H> { - self.ancestry.get(hash) - } + /// Initialize the ancestry chain given a set of relay chain headers. + pub fn new(ancestry: &[H]) -> AncestryChain { + let ancestry: BTreeMap<_, _> = ancestry.iter().cloned().map(|h: H| (h.hash(), h)).collect(); + + AncestryChain { ancestry } + } + + /// Fetch a header from the ancestry chain, given it's hash. Returns [`None`] if it doesn't + /// exist. + pub fn header(&self, hash: &H::Hash) -> Option<&H> { + self.ancestry.get(hash) + } } impl<H: HeaderT> finality_grandpa::Chain<H::Hash, H::Number> for AncestryChain<H> where - H::Number: finality_grandpa::BlockNumberOps, + H::Number: finality_grandpa::BlockNumberOps, { - fn ancestry( - &self, - base: H::Hash, - block: H::Hash, - ) -> Result<Vec<H::Hash>, finality_grandpa::Error> { - let mut route = vec![block]; - let mut current_hash = block; - while current_hash != base { - match self.ancestry.get(&current_hash) { - Some(current_header) => { - current_hash = *current_header.parent_hash(); - route.push(current_hash); - } - _ => return Err(finality_grandpa::Error::NotDescendent), - }; - } - Ok(route) - } + fn ancestry( + &self, + base: H::Hash, + block: H::Hash, + ) -> Result<Vec<H::Hash>, finality_grandpa::Error> { + let mut route = vec![block]; + let mut current_hash = block; + while current_hash != base { + match self.ancestry.get(&current_hash) { + Some(current_header) => { + current_hash = *current_header.parent_hash(); + route.push(current_hash); + }, + _ => return Err(finality_grandpa::Error::NotDescendent), + }; + } + Ok(route) + } } /// Checks the given header for a consensus digest signalling a **standard** scheduled change and /// extracts it. pub fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<ScheduledChange<H::Number>> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - let filter_log = |log: ConsensusLog<H::Number>| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; + let filter_log = |log: ConsensusLog<H::Number>| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } /// Checks the given header for a consensus digest signalling a **forced** scheduled change and /// extracts it. pub fn find_forced_change<H: HeaderT>( - header: &H, + header: &H, ) -> Option<(H::Number, ScheduledChange<H::Number>)> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - let filter_log = |log: ConsensusLog<H::Number>| match log { - ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), - _ => None, - }; + let filter_log = |log: ConsensusLog<H::Number>| match log { + ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + _ => None, + }; - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } /// Check a message signature by encoding the message and verifying the provided signature using the /// expected authority id.
pub fn check_message_signature<H, N>( - message: &finality_grandpa::Message<H, N>, - id: &AuthorityId, - signature: &AuthoritySignature, - round: RoundNumber, - set_id: SetId, + message: &finality_grandpa::Message<H, N>, + id: &AuthorityId, + signature: &AuthoritySignature, + round: RoundNumber, + set_id: SetId, ) -> Result<(), anyhow::Error> where - H: Encode, - N: Encode, + H: Encode, + N: Encode, { - log::trace!(target: "pallet_grandpa", "Justification Message {:?}", (round, set_id)); - let buf = (message, round, set_id).encode(); + log::trace!(target: "pallet_grandpa", "Justification Message {:?}", (round, set_id)); + let buf = (message, round, set_id).encode(); - let signature_bytes: &[u8] = signature.as_ref(); - let sp_finality_signature: ed25519::Signature = - signature_bytes.try_into().map_err(|_| anyhow!("Could not fetch signature"))?; + let signature_bytes: &[u8] = signature.as_ref(); + let sp_finality_signature: ed25519::Signature = + signature_bytes.try_into().map_err(|_| anyhow!("Could not fetch signature"))?; - let id_bytes: &[u8] = id.as_ref(); - let pub_key: ed25519::Public = - id_bytes.try_into().map_err(|_| anyhow!("Could not fetch public key"))?; + let id_bytes: &[u8] = id.as_ref(); + let pub_key: ed25519::Public = + id_bytes.try_into().map_err(|_| anyhow!("Could not fetch public key"))?; - if sp_io::crypto::ed25519_verify(&sp_finality_signature, &buf, &pub_key) { - Err(anyhow!("invalid signature for precommit in grandpa justification"))? - } + if sp_io::crypto::ed25519_verify(&sp_finality_signature, &buf, &pub_key) { + Err(anyhow!("invalid signature for precommit in grandpa justification"))? + } - Ok(()) + Ok(()) } /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. pub fn check_equivocation_proof<H, N>( - set_id: u64, - equivocation: Equivocation<H, N>, + set_id: u64, + equivocation: Equivocation<H, N>, ) -> Result<(), anyhow::Error> where - H: Clone + Encode + PartialEq, - N: Clone + Encode + PartialEq, + H: Clone + Encode + PartialEq, + N: Clone + Encode + PartialEq, { - // NOTE: the bare `Prevote` and `Precommit` types don't share any trait, - // this is implemented as a macro to avoid duplication. - macro_rules! check { - ( $equivocation:expr, $message:expr ) => { - // if both votes have the same target the equivocation is invalid.
+ if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && + $equivocation.first.0.target_number == $equivocation.second.0.target_number + { + return Err(anyhow!("both votes have the same target!")) + } + + // check signatures on both votes are valid + check_message_signature::<_, _>( + &$message($equivocation.first.0), + &$equivocation.identity, + &$equivocation.first.1, + $equivocation.round_number, + set_id, + )?; + + check_message_signature::<_, _>( + &$message($equivocation.second.0), + &$equivocation.identity, + &$equivocation.second.1, + $equivocation.round_number, + set_id, + )?; + + return Ok(()) + }; + } + + match equivocation { + Equivocation::Prevote(equivocation) => { + check!(equivocation, finality_grandpa::Message::Prevote); + }, + Equivocation::Precommit(equivocation) => { + check!(equivocation, finality_grandpa::Message::Precommit); + }, + } } #[cfg(test)] mod tests { - use super::*; - use finality_grandpa::Chain; - use sp_runtime::{generic::Header, traits::BlakeTwo256}; - - #[test] - fn test_ancestry_route() { - let mut headers: Vec> = vec![]; - for (i, h) in (40u32..=50).enumerate() { - let mut header = Header::new( - h, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - if i != 0 { - header.parent_hash = headers[i - 1].hash(); - } - headers.push(header); - } - - let slice = &headers[3..=6]; - let ancestry = AncestryChain::new(&headers); - - let mut route = ancestry.ancestry(slice[0].hash(), slice[3].hash()).unwrap(); - route.sort(); - let mut expected = slice.iter().map(|h| h.hash()).collect::>(); - expected.sort(); - - assert_eq!(route, expected); - } + use super::*; + use finality_grandpa::Chain; + use sp_runtime::{generic::Header, traits::BlakeTwo256}; + + #[test] + fn test_ancestry_route() { + let mut headers: Vec> = vec![]; + for (i, h) in (40u32..=50).enumerate() { + let mut header = Header::new( + h, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); + if i != 0 { + header.parent_hash = headers[i - 1].hash(); + } + headers.push(header); + } + + let slice = &headers[3..=6]; + let ancestry = AncestryChain::new(&headers); + + let mut route = ancestry.ancestry(slice[0].hash(), slice[3].hash()).unwrap(); + route.sort(); + let mut expected = slice.iter().map(|h| h.hash()).collect::>(); + expected.sort(); + + assert_eq!(route, expected); + } } diff --git a/modules/consensus/grandpa/primitives/src/lib.rs b/modules/consensus/grandpa/primitives/src/lib.rs index c4e8f7836..6e9f519b9 100644 --- a/modules/consensus/grandpa/primitives/src/lib.rs +++ b/modules/consensus/grandpa/primitives/src/lib.rs @@ -38,10 +38,10 @@ pub mod justification; pub type Hash = H256; /// A commit message for this chain's block type. pub type Commit = finality_grandpa::Commit< - ::Hash, - ::Number, - AuthoritySignature, - AuthorityId, + ::Hash, + ::Number, + AuthoritySignature, + AuthorityId, >; /// Finality for block B is proved by providing: @@ -49,61 +49,57 @@ pub type Commit = finality_grandpa::Commit< /// 2) headers sub-chain (B; F] if B != F; #[derive(Debug, PartialEq, Encode, Decode, Clone)] pub struct FinalityProof { - /// The hash of block F for which justification is provided. - pub block: Hash, - /// Justification of the block F. - pub justification: Vec, - /// The set of headers in the range (B; F] that we believe are unknown to the caller. Ordered. - pub unknown_headers: Vec, + /// The hash of block F for which justification is provided. 
+ pub block: Hash, + /// Justification of the block F. + pub justification: Vec, + /// The set of headers in the range (B; F] that we believe are unknown to the caller. Ordered. + pub unknown_headers: Vec, } /// Previous light client state. #[derive(Debug, PartialEq, Encode, Decode, Clone)] pub struct ConsensusState { - /// Current authority set - pub current_authorities: AuthorityList, - /// Id of the current authority set. - pub current_set_id: u64, - /// latest finalized height on relay chain or standalone chain - pub latest_height: u32, - /// State machine id StateMachine::Polkadot(0) or StateMachine::Kusama(0) or - ///StateMachine::Grandpa(ConsensusStateId) - pub state_machine: StateMachine, - /// latest finalized height on the parachains, this map will be empty for Standalone chains - /// Map of para_ids - pub para_ids: BTreeMap, - /// latest finalized hash on relay chain or standalone chain. - pub latest_hash: Hash, - /// slot duration for the chain - pub slot_duration: u64, + /// Current authority set + pub current_authorities: AuthorityList, + /// Id of the current authority set. + pub current_set_id: u64, + /// latest finalized height on relay chain or standalone chain + pub latest_height: u32, + /// latest finalized hash on relay chain or standalone chain. + pub latest_hash: Hash, + /// slot duration for the standalone chain + pub slot_duration: u64, + /// State machine for this consensus state + pub state_machine: StateMachine, } /// Holds relavant parachain proofs for both header and timestamp extrinsic. #[derive(Clone, Debug, Encode, Decode)] pub struct ParachainHeaderProofs { - /// State proofs that prove a parachain headers exists at a given relay chain height - pub state_proof: Vec>, - /// The parachain ids - pub para_ids: Vec, + /// State proofs that prove a parachain headers exists at a given relay chain height + pub state_proof: Vec>, + /// The parachain ids + pub para_ids: Vec, } /// Parachain headers with a Grandpa finality proof. #[derive(Clone, Encode, Decode)] pub struct ParachainHeadersWithFinalityProof { - /// The grandpa finality proof: contains relay chain headers from the - /// last known finalized grandpa block. - pub finality_proof: FinalityProof, - /// Contains a map of relay chain header hashes to parachain headers - /// finalzed at the relay chain height. We check for this parachain header finalization - /// via state proofs. Also contains extrinsic proof for timestamp. - pub parachain_headers: BTreeMap, + /// The grandpa finality proof: contains relay chain headers from the + /// last known finalized grandpa block. + pub finality_proof: FinalityProof, + /// Contains a map of relay chain header hashes to parachain headers + /// finalized at the relay chain height. We check for this parachain header finalization + /// via state proofs. Also contains extrinsic proof for timestamp. + pub parachain_headers: BTreeMap, } /// This returns the storage key for a parachain header on the relay chain. 
pub fn parachain_header_storage_key(para_id: u32) -> StorageKey { - let mut storage_key = frame_support::storage::storage_prefix(b"Paras", b"Heads").to_vec(); - let encoded_para_id = para_id.encode(); - storage_key.extend_from_slice(sp_io::hashing::twox_64(&encoded_para_id).as_slice()); - storage_key.extend_from_slice(&encoded_para_id); - StorageKey(storage_key) + let mut storage_key = frame_support::storage::storage_prefix(b"Paras", b"Heads").to_vec(); + let encoded_para_id = para_id.encode(); + storage_key.extend_from_slice(sp_io::hashing::twox_64(&encoded_para_id).as_slice()); + storage_key.extend_from_slice(&encoded_para_id); + StorageKey(storage_key) } diff --git a/modules/consensus/grandpa/prover/Cargo.toml b/modules/consensus/grandpa/prover/Cargo.toml index 3917e6380..6593355de 100644 --- a/modules/consensus/grandpa/prover/Cargo.toml +++ b/modules/consensus/grandpa/prover/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "ismp-grandpa-prover" +name = "grandpa-prover" version = "0.1.0" edition = "2021" authors = ["Polytope Labs "] @@ -10,22 +10,23 @@ publish = false [dependencies] hex = "0.4.3" anyhow.workspace = true -serde = "1.0.144" +serde = { workspace = true, default-features = true} subxt = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { workspace = true, default-features = true, features = ["derive"] } derive_more = "0.99.17" downcast-rs = "1.2.0" -jsonrpsee = { version = "0.16.2", features = ["async-client", "jsonrpsee-ws-client"] } -jsonrpsee-ws-client = "0.16.2" +jsonrpsee = { version = "0.23", features = ["async-client", "jsonrpsee-ws-client"] } +jsonrpsee-ws-client = "0.23" finality-grandpa = "0.16.0" -sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } -sp-consensus-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42" } +sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } -primitives = { package = "ismp-grandpa-primitives", path = "../primitives" } -ismp = { git = "https://github.com/polytope-labs/ismp-rs", branch = "main" } +grandpa-verifier-primitives = { workspace = true, default-features = true } +ismp = { workspace = true, default-features = true } +subxt-utils = { workspace = true, default-features = true } diff --git a/modules/consensus/grandpa/prover/src/lib.rs b/modules/consensus/grandpa/prover/src/lib.rs index e68501264..58b44249b 100644 --- a/modules/consensus/grandpa/prover/src/lib.rs +++ b/modules/consensus/grandpa/prover/src/lib.rs @@ -19,22 +19,17 @@ use anyhow::anyhow; use codec::{Decode, Encode}; -use ismp::host::StateMachine; -use jsonrpsee::{async_client::Client, ws_client::WsClientBuilder}; -use primitives::{ - parachain_header_storage_key, ConsensusState, FinalityProof, 
ParachainHeaderProofs, - ParachainHeadersWithFinalityProof, +use grandpa_verifier_primitives::{ + parachain_header_storage_key, ConsensusState, FinalityProof, ParachainHeaderProofs, + ParachainHeadersWithFinalityProof, }; -use sc_consensus_grandpa_rpc::GrandpaApiClient; +use ismp::host::StateMachine; use serde::{Deserialize, Serialize}; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature}; use sp_core::H256; use sp_runtime::traits::{One, Zero}; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - sync::Arc, -}; -use subxt::{config::Header, Config, OnlineClient}; +use std::collections::{BTreeMap, BTreeSet}; +use subxt::{config::Header, rpc_params, Config, OnlineClient}; /// Head data for parachain #[derive(Decode, Encode)] @@ -43,18 +38,16 @@ pub struct HeadData(pub Vec); /// Contains methods useful for proving parachain and standalone-chain header finality using GRANDPA #[derive(Clone)] pub struct GrandpaProver { - /// Subxt client for the chain - pub client: OnlineClient, - /// Chain jsonrpsee client for typed rpc requests, which subxt lacks support for. - pub ws_client: Arc, - /// ParaId of the associated parachains - pub para_ids: Vec, - /// State machine identifier for the chain - pub state_machine: StateMachine, - /// Storage for babe epoch start - pub babe_epoch_start: Vec, - /// Storage key for current set id - pub current_set_id: Vec, + /// Subxt client for the chain + pub client: OnlineClient, + /// ParaId of the associated parachains + pub para_ids: Vec, + /// State machine identifier for the chain + pub state_machine: StateMachine, + /// Storage for babe epoch start + pub babe_epoch_start: Vec, + /// Storage key for current set id + pub current_set_id: Vec, } // We redefine these here because we want the header to be bounded by subxt::config::Header in the @@ -63,15 +56,15 @@ pub struct GrandpaProver { pub type Commit = finality_grandpa::Commit; /// Justification -#[cfg_attr(any(feature = "std", test), derive(Debug))] +#[cfg_attr(test, derive(Debug))] #[derive(Clone, Encode, Decode)] pub struct GrandpaJustification { - /// Current voting round number, monotonically increasing - pub round: u64, - /// Contains block hash & number that's being finalized and the signatures. - pub commit: Commit, - /// Contains the path from a [`PreCommit`]'s target hash to the GHOST finalized block. - pub votes_ancestries: Vec, + /// Current voting round number, monotonically increasing + pub round: u64, + /// Contains block hash & number that's being finalized and the signatures. + pub commit: Commit, + /// Contains the path from a [`PreCommit`]'s target hash to the GHOST finalized block. + pub votes_ancestries: Vec, } /// An encoded justification proving that the given header has been finalized @@ -80,276 +73,233 @@ pub struct JustificationNotification(pub sp_core::Bytes); impl GrandpaProver where - T: Config, - ::Number: Ord + Zero, - u32: From<::Number>, - sp_core::H256: From, - T::Header: codec::Decode, + T: Config, + ::Number: Ord + Zero, + u32: From<::Number>, + sp_core::H256: From, + T::Header: codec::Decode, { - /// Initializes the parachain and relay chain clients given the ws urls. 
- pub async fn new( - ws_url: &str, - para_ids: Vec, - state_machine: StateMachine, - babe_epoch_start: Vec, - current_set_id: Vec, - ) -> Result { - let ws_client = Arc::new(WsClientBuilder::default().build(ws_url).await?); - let client = OnlineClient::::from_rpc_client(ws_client.clone()).await?; - - Ok(Self { ws_client, client, para_ids, state_machine, babe_epoch_start, current_set_id }) - } - - /// Construct the initial consensus state. - pub async fn initialize_consensus_state( - &self, - slot_duration: u64, - ) -> Result { - use sp_consensus_grandpa::AuthorityList; - let latest_hash = self.client.rpc().finalized_head().await?; - let header = self - .client - .rpc() - .header(Some(latest_hash)) - .await? - .ok_or_else(|| anyhow!("Header not found for hash: {latest_hash:?}"))?; - - let current_set_id: u64 = { - let raw_id = self - .client - .storage() - .at(latest_hash) - .fetch_raw(&self.current_set_id[..]) - .await - .ok() - .flatten() - .expect("Failed to fetch current set id"); - codec::Decode::decode(&mut &*raw_id)? - }; - - let current_authorities = { - let bytes = self - .client - .rpc() - .request::( - "state_call", - subxt::rpc_params!( - "GrandpaApi_grandpa_authorities", - "0x", - Some(format!("{:?}", latest_hash)) - ), - ) - .await - .map(|res| hex::decode(&res[2..]))??; - - AuthorityList::decode(&mut &bytes[..])? - }; - - // Ensure there are no duplicates in authority list - let mut set = BTreeSet::new(); - for (id, ..) in ¤t_authorities { - if !set.insert(id) { - Err(anyhow!("Duplicate entries found in current authority set"))? - } - } - - let latest_height = u32::from(header.number()); - - Ok(ConsensusState { - current_authorities, - current_set_id: current_set_id + 1, - latest_height, - latest_hash: latest_hash.into(), - para_ids: self.para_ids.iter().map(|id| (*id, true)).collect(), - state_machine: self.state_machine, - slot_duration, - }) - } - - /// Returns the grandpa finality proof - pub async fn query_finality_proof( - &self, - previous_finalized_height: u32, - mut latest_finalized_height: u32, - ) -> Result, anyhow::Error> - where - H: Header + codec::Decode, - u32: From<::Number>, - ::Output: From, - T::Hash: From<::Output>, - H::Number: finality_grandpa::BlockNumberOps + One, - { - let encoded = GrandpaApiClient::::prove_finality( - &*self.ws_client, - latest_finalized_height, - ) - .await? - .ok_or_else(|| anyhow!("No justification found for block: {:?}", latest_finalized_height))? - .0; - - let mut finality_proof = FinalityProof::::decode(&mut &encoded[..])?; - - let justification = - GrandpaJustification::::decode(&mut &finality_proof.justification[..])?; - - finality_proof.block = justification.commit.target_hash; - - latest_finalized_height = u32::from(justification.commit.target_number); - - let mut unknown_headers = vec![]; - for height in previous_finalized_height..=latest_finalized_height { - let hash = self - .client - .rpc() - .block_hash(Some(height.into())) - .await? - .ok_or_else(|| anyhow!("Failed to fetch block has for height {height}"))?; - - let header = self - .client - .rpc() - .header(Some(hash)) - .await? 
- .ok_or_else(|| anyhow!("Header with hash: {hash:?} not found!"))?; - - unknown_headers.push(H::decode(&mut &header.encode()[..])?); - } - - // overwrite unknown headers - finality_proof.unknown_headers = unknown_headers; - Ok(finality_proof) - } - - /// Returns the proof for parachain headers finalized by the provided finality proof - pub async fn query_finalized_parachain_headers_with_proof( - &self, - previous_finalized_height: u32, - latest_finalized_height: u32, - finality_proof: FinalityProof, - ) -> Result, anyhow::Error> - where - H: Header + codec::Decode, - u32: From<::Number>, - ::Output: From, - T::Hash: From<::Output>, - H::Number: finality_grandpa::BlockNumberOps + One, - { - // we are interested only in the blocks where our parachain header changes. - let para_keys: Vec<_> = - self.para_ids.iter().map(|para_id| parachain_header_storage_key(*para_id)).collect(); - let keys = para_keys.iter().map(|key| key.as_ref()).collect::>(); - let mut parachain_headers_with_proof = BTreeMap::::default(); - - let start = self - .client - .rpc() - .block_hash(Some(previous_finalized_height.into())) - .await? - .ok_or_else(|| anyhow!("Failed to fetch previous finalized hash + 1"))?; - - let latest_finalized_hash = self - .client - .rpc() - .block_hash(Some(latest_finalized_height.into())) - .await? - .ok_or_else(|| anyhow!("Failed to fetch previous finalized hash + 1"))?; - - let change_set = - self.client.rpc().query_storage(keys, start, Some(latest_finalized_hash)).await?; - - for changes in change_set { - let header = self - .client - .rpc() - .header(Some(changes.block)) - .await? - .ok_or_else(|| anyhow!("block not found {:?}", changes.block))?; - let mut changed_keys = HashMap::new(); - for para_id in self.para_ids.clone() { - let (key, parachain_header_bytes) = { - let key = parachain_header_storage_key(para_id); - if let Some(raw) = - self.client.storage().at(header.hash()).fetch_raw(key.as_ref()).await? - { - let head_data: HeadData = codec::Decode::decode(&mut &*raw)?; - (key, head_data.0) - } else { - continue - } - }; - - let para_header: H = Decode::decode(&mut ¶chain_header_bytes[..])?; - let para_block_number = para_header.number(); - // skip genesis header or any unknown headers - if para_block_number == Zero::zero() { - continue - } - - changed_keys.insert(key, para_id); - } - - if !changed_keys.is_empty() { - let state_proof = self - .client - .rpc() - .read_proof( - changed_keys.keys().into_iter().map(|key| key.as_ref()), - Some(header.hash()), - ) - .await? - .proof - .into_iter() - .map(|p| p.0) - .collect(); - - let proofs = ParachainHeaderProofs { - state_proof, - para_ids: changed_keys.values().into_iter().map(|id| *id).collect(), - }; - parachain_headers_with_proof.insert(header.hash().into(), proofs); - } - } - - Ok(ParachainHeadersWithFinalityProof { - finality_proof, - parachain_headers: parachain_headers_with_proof, - }) - } - - /// Queries the block at which the epoch for the given block belongs to ends. - pub async fn session_start_and_end_for_block( - &self, - block: u32, - ) -> Result<(u32, u32), anyhow::Error> { - let block_hash = self - .client - .rpc() - .block_hash(Some(block.into())) - .await? - .ok_or(anyhow!("Failed to fetch block hash"))?; - let bytes = self - .client - .storage() - .at(block_hash) - .fetch_raw(&self.babe_epoch_start[..]) - .await? 
- .ok_or_else(|| anyhow!("Failed to fetch epoch information"))?; - - let (previous_epoch_start, current_epoch_start): (u32, u32) = - codec::Decode::decode(&mut &*bytes)?; - Ok(( - current_epoch_start, - current_epoch_start + (current_epoch_start - previous_epoch_start), - )) - } - - /// Returns the session length in blocks - pub async fn session_length(&self) -> Result { - let metadata = self.client.rpc().metadata().await?; - let metadata = metadata - .pallet_by_name_err("Babe")? - .constant_by_name("EpochDuration") - .ok_or(anyhow!("Failed to fetch constant"))?; - Ok(Decode::decode(&mut metadata.value())?) - } + /// Initializes the parachain and relay chain clients given the ws urls. + pub async fn new( + ws_url: &str, + para_ids: Vec, + state_machine: StateMachine, + babe_epoch_start: Vec, + current_set_id: Vec, + ) -> Result { + let max_rpc_payload_size = 15 * 1024 * 1024; + let client = subxt_utils::client::ws_client(ws_url, max_rpc_payload_size).await?; + + Ok(Self { client, para_ids, state_machine, babe_epoch_start, current_set_id }) + } + + /// Construct the initial consensus state. + pub async fn initialize_consensus_state( + &self, + slot_duration: u64, + hash: T::Hash, + ) -> Result { + use sp_consensus_grandpa::AuthorityList; + let header = self + .client + .rpc() + .header(Some(hash)) + .await? + .ok_or_else(|| anyhow!("Header not found for hash: {hash:?}"))?; + + let current_set_id: u64 = { + let raw_id = self + .client + .storage() + .at(hash) + .fetch_raw(&self.current_set_id[..]) + .await + .ok() + .flatten() + .expect("Failed to fetch current set id"); + codec::Decode::decode(&mut &*raw_id)? + }; + + let current_authorities = { + let bytes = self + .client + .rpc() + .request::( + "state_call", + subxt::rpc_params!( + "GrandpaApi_grandpa_authorities", + "0x", + Some(format!("{:?}", hash)) + ), + ) + .await + .map(|res| hex::decode(&res[2..]))??; + + AuthorityList::decode(&mut &bytes[..])? + }; + + // Ensure there are no duplicates in authority list + let mut set = BTreeSet::new(); + for (id, ..) in ¤t_authorities { + if !set.insert(id) { + Err(anyhow!("Duplicate entries found in current authority set"))? + } + } + + let latest_height = u32::from(header.number()); + + Ok(ConsensusState { + current_authorities, + current_set_id: current_set_id + 1, + latest_height, + latest_hash: hash.into(), + slot_duration, + state_machine: self.state_machine, + }) + } + + /// Returns the grandpa finality proof + pub async fn query_finality_proof( + &self, + previous_finalized_height: u32, + mut latest_finalized_height: u32, + ) -> Result, anyhow::Error> + where + H: Header + codec::Decode, + u32: From<::Number>, + ::Output: From, + T::Hash: From<::Output>, + H::Number: finality_grandpa::BlockNumberOps + One, + { + let encoded = self + .client + .rpc() + .request::>( + "grandpa_proveFinality", + rpc_params![latest_finalized_height], + ) + .await? + .ok_or_else(|| { + anyhow!("No justification found for block: {:?}", latest_finalized_height) + })? + .0; + + let mut finality_proof = FinalityProof::::decode(&mut &encoded[..])?; + + let justification = + GrandpaJustification::::decode(&mut &finality_proof.justification[..])?; + + finality_proof.block = justification.commit.target_hash; + + latest_finalized_height = u32::from(justification.commit.target_number); + + let mut unknown_headers = vec![]; + for height in previous_finalized_height..=latest_finalized_height { + let hash = self + .client + .rpc() + .block_hash(Some(height.into())) + .await? 
+ .ok_or_else(|| anyhow!("Failed to fetch block has for height {height}"))?; + + let header = self + .client + .rpc() + .header(Some(hash)) + .await? + .ok_or_else(|| anyhow!("Header with hash: {hash:?} not found!"))?; + + unknown_headers.push(H::decode(&mut &header.encode()[..])?); + } + + // overwrite unknown headers + finality_proof.unknown_headers = unknown_headers; + Ok(finality_proof) + } + + /// Returns the proof for parachain headers finalized by the provided finality proof + pub async fn query_finalized_parachain_headers_with_proof( + &self, + _previous_finalized_height: u32, + latest_finalized_height: u32, + finality_proof: FinalityProof, + ) -> Result, anyhow::Error> + where + H: Header + codec::Decode, + u32: From<::Number>, + ::Output: From, + T::Hash: From<::Output>, + H::Number: finality_grandpa::BlockNumberOps + One, + { + // we are interested only in the blocks where our parachain header changes. + let para_keys: Vec<_> = self + .para_ids + .iter() + .map(|para_id| parachain_header_storage_key(*para_id)) + .collect(); + let keys = para_keys.iter().map(|key| key.as_ref()).collect::>(); + let mut parachain_headers_with_proof = BTreeMap::::default(); + + let latest_finalized_hash = self + .client + .rpc() + .block_hash(Some(latest_finalized_height.into())) + .await? + .ok_or_else(|| anyhow!("Failed to fetch previous finalized hash + 1"))?; + + let state_proof = self + .client + .rpc() + .read_proof(keys, Some(latest_finalized_hash)) + .await? + .proof + .into_iter() + .map(|bytes| bytes.0) + .collect::>(); + parachain_headers_with_proof.insert( + latest_finalized_hash.into(), + ParachainHeaderProofs { state_proof, para_ids: self.para_ids.clone() }, + ); + Ok(ParachainHeadersWithFinalityProof { + finality_proof, + parachain_headers: parachain_headers_with_proof, + }) + } + + /// Queries the block at which the epoch for the given block belongs to ends. + pub async fn session_start_and_end_for_block( + &self, + block: u32, + ) -> Result<(u32, u32), anyhow::Error> { + let block_hash = self + .client + .rpc() + .block_hash(Some(block.into())) + .await? + .ok_or(anyhow!("Failed to fetch block hash"))?; + let bytes = self + .client + .storage() + .at(block_hash) + .fetch_raw(&self.babe_epoch_start[..]) + .await? + .ok_or_else(|| anyhow!("Failed to fetch epoch information"))?; + + let (previous_epoch_start, current_epoch_start): (u32, u32) = + codec::Decode::decode(&mut &*bytes)?; + Ok(( + current_epoch_start, + current_epoch_start + (current_epoch_start - previous_epoch_start), + )) + } + + /// Returns the session length in blocks + pub async fn session_length(&self) -> Result { + let metadata = self.client.rpc().metadata().await?; + let metadata = metadata + .pallet_by_name_err("Babe")? + .constant_by_name("EpochDuration") + .ok_or(anyhow!("Failed to fetch constant"))?; + Ok(Decode::decode(&mut metadata.value())?) + } } diff --git a/modules/consensus/grandpa/src/consensus.rs b/modules/consensus/grandpa/src/consensus.rs deleted file mode 100644 index be96e20ce..000000000 --- a/modules/consensus/grandpa/src/consensus.rs +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2024 Polytope Labs. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific lang - -use crate::messages::ConsensusMessage; -use alloc::{boxed::Box, collections::BTreeMap, format, vec::Vec}; -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use finality_grandpa::Chain; -use ismp::{ - consensus::{ - ConsensusClient, ConsensusClientId, ConsensusStateId, StateCommitment, StateMachineClient, - VerifiedCommitments, - }, - error::Error, - host::{IsmpHost, StateMachine}, - messaging::StateCommitmentHeight, -}; -use primitive_types::H256; -use primitives::{ - justification::{AncestryChain, GrandpaJustification}, - ConsensusState, FinalityProof, ParachainHeadersWithFinalityProof, -}; -use sp_runtime::traits::Header; -use substrate_state_machine::SubstrateStateMachine; -use verifier::{ - verify_grandpa_finality_proof, verify_parachain_headers_with_grandpa_finality_proof, -}; - -/// [`ConsensusStateId`] for the polkadot relay chain -pub const POLKADOT_CONSENSUS_STATE_ID: ConsensusStateId = *b"polk"; - -/// [`ConsensusStateId`] for the kusama relay chain -pub const KUSAMA_CONSENSUS_STATE_ID: ConsensusStateId = *b"sama"; - -/// [`ConsensusClientId`] for GRANDPA consensus -pub const GRANDPA_CONSENSUS_ID: ConsensusClientId = *b"GRAN"; - -pub struct GrandpaConsensusClient(PhantomData); - -impl Default for GrandpaConsensusClient { - fn default() -> Self { - Self(PhantomData) - } -} - -impl ConsensusClient for GrandpaConsensusClient -where - T::Header: Header, - T: pallet_ismp::Config + super::Config, - T::BlockNumber: Into, -{ - fn verify_consensus( - &self, - _host: &dyn IsmpHost, - _consensus_state_id: ConsensusStateId, - trusted_consensus_state: Vec, - proof: Vec, - ) -> Result<(Vec, VerifiedCommitments), Error> { - // decode the proof into consensus message - let consensus_message: ConsensusMessage = - codec::Decode::decode(&mut &proof[..]).map_err(|e| { - Error::Custom(format!( - "Cannot decode consensus message from proof: {e:?}", - )) - })?; - - // decode the consensus state - let consensus_state: ConsensusState = - codec::Decode::decode(&mut &trusted_consensus_state[..]).map_err(|e| { - Error::Custom(format!( - "Cannot decode consensus state from trusted consensus state bytes: {e:?}", - )) - })?; - - let mut intermediates = BTreeMap::new(); - - // match over the message - match consensus_message { - ConsensusMessage::RelayChainMessage(relay_chain_message) => { - let headers_with_finality_proof = ParachainHeadersWithFinalityProof { - finality_proof: relay_chain_message.finality_proof, - parachain_headers: relay_chain_message.parachain_headers, - }; - - let (consensus_state, parachain_headers) = - verify_parachain_headers_with_grandpa_finality_proof( - consensus_state, - headers_with_finality_proof, - ) - .map_err(|_| { - Error::Custom(format!("Error verifying parachain headers")) - })?; - - for (para_id, header_vec) in parachain_headers { - let mut state_commitments_vec = Vec::new(); - - let state_id: StateMachine = match consensus_state.state_machine { - StateMachine::Polkadot(_) => StateMachine::Polkadot(para_id), - StateMachine::Kusama(_) => StateMachine::Kusama(para_id), - _ => Err(Error::Custom( - "Host state machine should be a parachain".into(), - ))?, - }; - - for header in header_vec { - let (timestamp, 
overlay_root) = fetch_overlay_root_and_timestamp( - header.digest(), - consensus_state.slot_duration, - )?; - - if timestamp == 0 { - Err(Error::Custom( - "Timestamp or ismp root not found".into(), - ))? - } - - let height: u32 = (*header.number()).into(); - - let intermediate = StateCommitmentHeight { - commitment: StateCommitment { - timestamp, - overlay_root: Some(overlay_root), - state_root: header.state_root, - }, - height: height.into(), - }; - - state_commitments_vec.push(intermediate); - } - - intermediates.insert(state_id, state_commitments_vec); - } - - Ok((consensus_state.encode(), intermediates)) - } - - ConsensusMessage::StandaloneChainMessage(standalone_chain_message) => { - let (consensus_state, header, _, _) = verify_grandpa_finality_proof( - consensus_state, - standalone_chain_message.finality_proof, - ) - .map_err(|_| { - Error::Custom( - "Error verifying parachain headers".parse().unwrap(), - ) - })?; - let (timestamp, overlay_root) = fetch_overlay_root_and_timestamp( - header.digest(), - consensus_state.slot_duration, - )?; - - if timestamp == 0 { - Err(Error::Custom("Timestamp or ismp root not found".into()))? - } - - let height: u32 = (*header.number()).into(); - - let state_id = consensus_state.state_machine; - - let intermediate = StateCommitmentHeight { - commitment: StateCommitment { - timestamp, - overlay_root: Some(overlay_root), - state_root: header.state_root, - }, - height: height.into(), - }; - - let mut state_commitments_vec = Vec::new(); - state_commitments_vec.push(intermediate); - - intermediates.insert(state_id, state_commitments_vec); - - Ok((consensus_state.encode(), intermediates)) - } - } - } - - fn verify_fraud_proof( - &self, - _host: &dyn IsmpHost, - trusted_consensus_state: Vec, - proof_1: Vec, - proof_2: Vec, - ) -> Result<(), Error> { - // decode the consensus state - let consensus_state: ConsensusState = - codec::Decode::decode(&mut &trusted_consensus_state[..]).map_err(|e| { - Error::Custom(format!( - "Cannot decode consensus state from trusted consensus state bytes: {e:?}", - )) - })?; - - let first_proof: FinalityProof = codec::Decode::decode(&mut &proof_1[..]) - .map_err(|e| { - Error::Custom(format!( - "Cannot decode first finality proof from proof_1 bytes: {e:?}", - )) - })?; - - let second_proof: FinalityProof = codec::Decode::decode(&mut &proof_2[..]) - .map_err(|e| { - Error::Custom(format!( - "Cannot decode second finality proof from proof_2 bytes: {e:?}", - )) - })?; - - if first_proof.block == second_proof.block { - return Err(Error::Custom(format!( - "Fraud proofs are for the same block", - ))) - } - - let first_headers = AncestryChain::::new(&first_proof.unknown_headers); - let first_target = - first_proof.unknown_headers.iter().max_by_key(|h| *h.number()).ok_or_else(|| { - Error::Custom(format!("Unknown headers can't be empty!")) - })?; - - let second_headers = AncestryChain::::new(&second_proof.unknown_headers); - let second_target = - second_proof.unknown_headers.iter().max_by_key(|h| *h.number()).ok_or_else(|| { - Error::Custom(format!("Unknown headers can't be empty!")) - })?; - - if first_target.hash() != first_proof.block || second_target.hash() != second_proof.block { - return Err(Error::Custom(format!( - "Fraud proofs are not for the same chain" - ))) - } - - let first_base = - first_proof.unknown_headers.iter().min_by_key(|h| *h.number()).ok_or_else(|| { - Error::Custom(format!("Unknown headers can't be empty!")) - })?; - first_headers - .ancestry(first_base.hash(), first_target.hash()) - .map_err(|_| 
Error::Custom(format!("Invalid ancestry!")))?; - - let second_base = - second_proof.unknown_headers.iter().min_by_key(|h| *h.number()).ok_or_else(|| { - Error::Custom(format!("Unknown headers can't be empty!")) - })?; - second_headers - .ancestry(second_base.hash(), second_target.hash()) - .map_err(|_| Error::Custom(format!("Invalid ancestry!")))?; - - let first_parent = first_base.parent_hash(); - let second_parent = second_base.parent_hash(); - - if first_parent != second_parent { - return Err(Error::Custom(format!( - "Fraud proofs are not for the same ancestor" - ))) - } - - let first_justification = - GrandpaJustification::::decode(&mut &first_proof.justification[..]) - .map_err(|_| { - Error::Custom(format!("Could not decode first justification")) - })?; - - let second_justification = - GrandpaJustification::::decode(&mut &second_proof.justification[..]) - .map_err(|_| { - Error::Custom(format!("Could not decode second justification")) - })?; - - if first_proof.block != first_justification.commit.target_hash || - second_proof.block != second_justification.commit.target_hash - { - Err(Error::Custom( - format!("First or second finality proof block hash does not match justification target hash") - ))? - } - - if first_justification.commit.target_hash != consensus_state.latest_hash && - second_justification.commit.target_hash != consensus_state.latest_hash - { - Err(Error::Custom(format!( - "First or second justification does not match consensus latest hash" - )))? - } - - let first_valid = first_justification - .verify(consensus_state.current_set_id, &consensus_state.current_authorities) - .is_ok(); - let second_valid = second_justification - .verify(consensus_state.current_set_id, &consensus_state.current_authorities) - .is_ok(); - - if !first_valid || !second_valid { - Err(Error::Custom(format!("Invalid justification")))? - } - - Ok(()) - } - - fn state_machine(&self, _id: StateMachine) -> Result, Error> { - Ok(Box::new(SubstrateStateMachine::::default())) - } -} diff --git a/modules/consensus/grandpa/src/lib.rs b/modules/consensus/grandpa/src/lib.rs deleted file mode 100644 index 7c1c059bd..000000000 --- a/modules/consensus/grandpa/src/lib.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2024 Polytope Labs. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific lang - -#![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; - -pub mod consensus; -pub mod messages; - -use alloc::vec::Vec; -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use ismp::host::IsmpHost; - use primitives::ConsensusState; - - #[pallet::pallet] - pub struct Pallet(_); - - /// The config trait - #[pallet::config] - pub trait Config: frame_system::Config + pallet_ismp::Config { - /// The overarching event type - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - } - - /// Events emitted by this pallet - #[pallet::event] - pub enum Event {} - - #[pallet::error] - pub enum Error { - /// Standalone Consensus State Already Exists - StandaloneConsensusStateAlreadyExists, - /// Standalone Consensus Does not Exist - StandaloneConsensusStateDontExists, - /// Error fetching consensus state - ErrorFetchingConsensusState, - /// Error decoding consensus state - ErrorDecodingConsensusState, - /// Incorrect consensus state id length - IncorrectConsensusStateIdLength, - /// Error storing consensus state - ErrorStoringConsensusState, - } - - #[pallet::call] - impl Pallet { - /// Add some new parachains to the list of parachains in the relay chain consensus state - #[pallet::call_index(0)] - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn add_parachains( - origin: OriginFor, - consensus_state_id_vec: Vec, - para_ids: Vec, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - - let ismp_host = Host::::default(); - let consensus_state_id = consensus_state_id_vec - .as_slice() - .try_into() - .map_err(|_| Error::::IncorrectConsensusStateIdLength)?; - - let encoded_consensus_state = ismp_host - .consensus_state(consensus_state_id) - .map_err(|_| Error::::ErrorFetchingConsensusState)?; - let mut consensus_state: ConsensusState = - codec::Decode::decode(&mut &encoded_consensus_state[..]) - .map_err(|_| Error::::ErrorDecodingConsensusState)?; - - let mut stored_para_ids = consensus_state.para_ids; - para_ids.iter().for_each(|para_id| { - stored_para_ids.entry(*para_id).or_insert(true); - }); - consensus_state.para_ids = stored_para_ids; - - let encoded_consensus_state = consensus_state.encode(); - ismp_host - .store_consensus_state(consensus_state_id, encoded_consensus_state) - .map_err(|_| Error::::ErrorStoringConsensusState)?; - Ok(()) - } - - /// Remove some parachains from the list of parachains in the relay chain consensus state - #[pallet::call_index(1)] - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn remove_parachains( - origin: OriginFor, - consensus_state_id_vec: Vec, - para_ids: Vec, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - - let ismp_host = Host::::default(); - let consensus_state_id = consensus_state_id_vec - .as_slice() - .try_into() - .map_err(|_| Error::::IncorrectConsensusStateIdLength)?; - - let encoded_consensus_state = ismp_host - .consensus_state(consensus_state_id) - .map_err(|_| Error::::ErrorFetchingConsensusState)?; - let mut consensus_state: ConsensusState = - codec::Decode::decode(&mut &encoded_consensus_state[..]) - .map_err(|_| Error::::ErrorDecodingConsensusState)?; - - let mut stored_para_ids = consensus_state.para_ids; - stored_para_ids.retain(|&key, _| !para_ids.contains(&key)); - consensus_state.para_ids = stored_para_ids; - - let encoded_consensus_state = consensus_state.encode(); - ismp_host - .store_consensus_state(consensus_state_id, 
encoded_consensus_state) - .map_err(|_| Error::::ErrorStoringConsensusState)?; - Ok(()) - } - } -} diff --git a/modules/consensus/grandpa/verifier/Cargo.toml b/modules/consensus/grandpa/verifier/Cargo.toml index 7ad672d13..8df36c99a 100644 --- a/modules/consensus/grandpa/verifier/Cargo.toml +++ b/modules/consensus/grandpa/verifier/Cargo.toml @@ -1,47 +1,50 @@ [package] -name = "ismp-grandpa-verifier" +name = "grandpa-verifier" version = "0.1.0" edition = "2021" -publish = false - +authors = ["Polytope Labs "] +license = "Apache-2.0" +repository = "https://github.com/polytope-labs/hyperbridge" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" +documentation = "https://docs.rs/grandpa-verifier" +description = "GRANDPA consensus proof verifier" +keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability", "GRANDPA"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -anyhow = { version = "1.0.64", default-features = false } +codec = { workspace = true, features = ["derive"]} +anyhow = { workspace = true } finality-grandpa = { version = "0.16.0", features = ["derive-codec"], default-features = false } -serde = { version = "1.0.144", default-features = false, features = ["derive"] } +serde = { workspace = true, features = ["derive"] } derive_more = { version = "0.99.17", default-features = false, features = ["from"] } +sp-consensus-grandpa = { workspace = true } +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-storage = { workspace = true } -sp-consensus-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } -sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.42", default-features = false } - -primitives = { package = "ismp-grandpa-primitives", path = "../primitives", default-features = false } -substrate-state-machine = { path = "../../pallet-ismp/primitives/state-machine", default-features = false } +grandpa-verifier-primitives = { workspace = true } +substrate-state-machine = { workspace = true } [dev-dependencies] -polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.42" } +polkadot-core-primitives = { workspace = true } subxt = { workspace = true, features = ["substrate-compat"], default-features = true } futures = "0.3.24" hex = "0.4.3" env_logger = "0.9.0" log = "0.4.17" -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } hex-literal = "0.3.4" 
-grandpa-prover = { package = "ismp-grandpa-prover", path = "../prover" } -ismp = { git = "https://github.com/polytope-labs/ismp-rs", branch = "main"} -sc-finality-grandpa-rpc = "0.25.0" - - +grandpa-prover = { workspace = true} +ismp = { workspace = true } +sc-consensus-grandpa-rpc = "0.29.0" +subxt-utils = { workspace = true, default-features = true } [features] default = ["std"] @@ -55,7 +58,7 @@ std = [ "sp-trie/std", "sp-consensus-grandpa/std", "sp-io/std", - "primitives/std", + "grandpa-verifier-primitives/std", "serde/std", "sp-storage/std", "substrate-state-machine/std" diff --git a/modules/consensus/grandpa/verifier/src/lib.rs b/modules/consensus/grandpa/verifier/src/lib.rs index e52ec71ad..c471f776e 100644 --- a/modules/consensus/grandpa/verifier/src/lib.rs +++ b/modules/consensus/grandpa/verifier/src/lib.rs @@ -28,9 +28,9 @@ use alloc::collections::BTreeMap; use anyhow::anyhow; use codec::Decode; use finality_grandpa::Chain; -use primitives::{ - justification::{find_scheduled_change, AncestryChain, GrandpaJustification}, - parachain_header_storage_key, ConsensusState, FinalityProof, ParachainHeadersWithFinalityProof, +use grandpa_verifier_primitives::{ + justification::{find_scheduled_change, AncestryChain, GrandpaJustification}, + parachain_header_storage_key, ConsensusState, FinalityProof, ParachainHeadersWithFinalityProof, }; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Header}; @@ -41,67 +41,66 @@ use substrate_state_machine::read_proof_check; /// This function verifies the GRANDPA finality proof for both standalone chain and parachain /// headers. pub fn verify_grandpa_finality_proof( - mut consensus_state: ConsensusState, - finality_proof: FinalityProof, + mut consensus_state: ConsensusState, + finality_proof: FinalityProof, ) -> Result<(ConsensusState, H, Vec, AncestryChain), anyhow::Error> where - H: Header, - H::Number: finality_grandpa::BlockNumberOps + Into, + H: Header, + H::Number: finality_grandpa::BlockNumberOps + Into, { - // First validate unknown headers. - let headers = AncestryChain::::new(&finality_proof.unknown_headers); - - let target = finality_proof - .unknown_headers - .iter() - .max_by_key(|h| *h.number()) - .ok_or_else(|| anyhow!("Unknown headers can't be empty!"))?; - - // this is illegal - if target.hash() != finality_proof.block { - Err(anyhow!("Latest finalized block should be highest block in unknown_headers"))?; - } - - let justification = GrandpaJustification::::decode(&mut &finality_proof.justification[..]) - .map_err(|e| anyhow!("Failed to decode justificatio {:?}", e))?; - - if justification.commit.target_hash != finality_proof.block { - Err(anyhow!("Justification target hash and finality proof block hash mismatch"))?; - } - - let from = consensus_state.latest_hash; - - let base = finality_proof - .unknown_headers - .iter() - .min_by_key(|h| *h.number()) - .ok_or_else(|| anyhow!("Unknown headers can't be empty!"))?; - - if base.number() < &consensus_state.latest_height { - headers.ancestry(base.hash(), consensus_state.latest_hash).map_err(|_| { - anyhow!( - "[verify_grandpa_finality_proof] Invalid ancestry (base -> latest relay block)!" - ) - })?; - } - - let mut finalized = headers - .ancestry(from, target.hash()) - .map_err(|_| anyhow!("[verify_grandpa_finality_proof] Invalid ancestry!"))?; - finalized.sort(); - - // 2. verify justification. 
- justification.verify(consensus_state.current_set_id, &consensus_state.current_authorities)?; - - // Sets new consensus state, optionally rotating authorities - consensus_state.latest_hash = target.hash(); - consensus_state.latest_height = (*target.number()).into(); - if let Some(scheduled_change) = find_scheduled_change::(&target) { - consensus_state.current_set_id += 1; - consensus_state.current_authorities = scheduled_change.next_authorities; - } - - Ok((consensus_state, target.clone(), finalized, headers)) + // First validate unknown headers. + let headers = AncestryChain::::new(&finality_proof.unknown_headers); + + let target = finality_proof + .unknown_headers + .iter() + .max_by_key(|h| *h.number()) + .ok_or_else(|| anyhow!("Unknown headers can't be empty!"))?; + + // this is illegal + if target.hash() != finality_proof.block { + Err(anyhow!("Latest finalized block should be highest block in unknown_headers"))?; + } + + let justification = GrandpaJustification::::decode(&mut &finality_proof.justification[..]) + .map_err(|e| anyhow!("Failed to decode justificatio {:?}", e))?; + + if justification.commit.target_hash != finality_proof.block { + Err(anyhow!("Justification target hash and finality proof block hash mismatch"))?; + } + + let from = consensus_state.latest_hash; + + let base = finality_proof + .unknown_headers + .iter() + .min_by_key(|h| *h.number()) + .ok_or_else(|| anyhow!("Unknown headers can't be empty!"))?; + + if base.number() < &consensus_state.latest_height { + headers.ancestry(base.hash(), consensus_state.latest_hash).map_err(|_| { + anyhow!( + "[verify_grandpa_finality_proof] Invalid ancestry (base -> latest relay block)!" + ) + })?; + } + + let finalized = headers + .ancestry(from, target.hash()) + .map_err(|_| anyhow!("[verify_grandpa_finality_proof] Invalid ancestry!"))?; + + // 2. verify justification. + justification.verify(consensus_state.current_set_id, &consensus_state.current_authorities)?; + + // Sets new consensus state, optionally rotating authorities + consensus_state.latest_hash = target.hash(); + consensus_state.latest_height = (*target.number()).into(); + if let Some(scheduled_change) = find_scheduled_change::(&target) { + consensus_state.current_set_id += 1; + consensus_state.current_authorities = scheduled_change.next_authorities; + } + + Ok((consensus_state, target.clone(), finalized, headers)) } /// This function verifies the GRANDPA finality proof for relay chain headers. /// @@ -110,58 +109,54 @@ where /// Returns the new Consensus state alongside a map of para id to a vector that contains a tuple of /// finalized parachain header and timestamp pub fn verify_parachain_headers_with_grandpa_finality_proof( - consensus_state: ConsensusState, - proof: ParachainHeadersWithFinalityProof, + consensus_state: ConsensusState, + proof: ParachainHeadersWithFinalityProof, ) -> Result<(ConsensusState, BTreeMap>), anyhow::Error> where - H: Header, - H::Number: finality_grandpa::BlockNumberOps + Into, + H: Header, + H::Number: finality_grandpa::BlockNumberOps + Into, { - let ParachainHeadersWithFinalityProof { finality_proof, parachain_headers } = proof; - - let (consensus_state, _, finalized_hashes, headers) = - verify_grandpa_finality_proof(consensus_state, finality_proof)?; - // verifies state proofs of parachain headers in finalized relay chain headers. 
- let mut verified_parachain_headers: BTreeMap> = BTreeMap::new(); - for (hash, proof) in parachain_headers { - if finalized_hashes.binary_search(&hash).is_err() { - // seems relay hash isn't in the finalized chain. - continue - } - let relay_chain_header = - headers.header(&hash).expect("Headers have been checked by AncestryChain; qed"); - let state_proof = proof.state_proof; - let mut keys = BTreeMap::new(); - for para_id in proof.para_ids { - // ensure the para id is in the consensus state before proof verification - if !consensus_state.para_ids.contains_key(¶_id) { - continue - } - - let key = parachain_header_storage_key(para_id); - - keys.insert(key.0, para_id); - } - - let proof = StorageProof::new(state_proof); - - // verify patricia-merkle state proofs - let mut result = read_proof_check::( - relay_chain_header.state_root(), - proof, - keys.keys().map(|key| key.as_slice()), - ) - .map_err(|err| anyhow!("error verifying parachain header state proof: {err:?}"))?; - for (key, para_id) in keys { - let header = result - .remove(&key) - .flatten() - .ok_or_else(|| anyhow!("Invalid proof, parachain header not found"))?; - let parachain_header = - H::decode(&mut &header[..]).map_err(|e| anyhow!("error decoding header: {e:?}"))?; - verified_parachain_headers.entry(para_id).or_default().push(parachain_header); - } - } - - Ok((consensus_state, verified_parachain_headers)) + let ParachainHeadersWithFinalityProof { finality_proof, parachain_headers } = proof; + + let (consensus_state, _, mut finalized_hashes, headers) = + verify_grandpa_finality_proof(consensus_state, finality_proof)?; + finalized_hashes.sort(); + // verifies state proofs of parachain headers in finalized relay chain headers. + let mut verified_parachain_headers: BTreeMap> = BTreeMap::new(); + for (hash, proof) in parachain_headers { + if finalized_hashes.binary_search(&hash).is_err() { + // seems relay hash isn't in the finalized chain. 
+ continue + } + let relay_chain_header = + headers.header(&hash).expect("Headers have been checked by AncestryChain; qed"); + let state_proof = proof.state_proof; + let mut keys = BTreeMap::new(); + for para_id in proof.para_ids { + let key = parachain_header_storage_key(para_id); + + keys.insert(key.0, para_id); + } + + let proof = StorageProof::new(state_proof); + + // verify patricia-merkle state proofs + let mut result = read_proof_check::( + relay_chain_header.state_root(), + proof, + keys.keys().map(|key| key.as_slice()), + ) + .map_err(|err| anyhow!("error verifying parachain header state proof: {err:?}"))?; + for (key, para_id) in keys { + let header = result + .remove(&key) + .flatten() + .ok_or_else(|| anyhow!("Invalid proof, parachain header not found"))?; + let parachain_header = + H::decode(&mut &header[..]).map_err(|e| anyhow!("error decoding header: {e:?}"))?; + verified_parachain_headers.entry(para_id).or_default().push(parachain_header); + } + } + + Ok((consensus_state, verified_parachain_headers)) } diff --git a/modules/consensus/grandpa/verifier/src/tests.rs b/modules/consensus/grandpa/verifier/src/tests.rs index c281cd028..b560ffb44 100644 --- a/modules/consensus/grandpa/verifier/src/tests.rs +++ b/modules/consensus/grandpa/verifier/src/tests.rs @@ -2,33 +2,16 @@ use crate::verify_parachain_headers_with_grandpa_finality_proof; use codec::{Decode, Encode}; use futures::StreamExt; use grandpa_prover::GrandpaProver; +use grandpa_verifier_primitives::{ + justification::GrandpaJustification, ParachainHeadersWithFinalityProof, +}; use ismp::host::StateMachine; use polkadot_core_primitives::Header; -use primitives::{justification::GrandpaJustification, ParachainHeadersWithFinalityProof}; use serde::{Deserialize, Serialize}; -use sp_core::{crypto::AccountId32, H256}; use subxt::{ - config::{ - polkadot::PolkadotExtrinsicParams as ParachainExtrinsicParams, - substrate::{BlakeTwo256, SubstrateHeader}, - }, - rpc_params, + config::substrate::{BlakeTwo256, SubstrateHeader}, + rpc_params, }; - -pub struct DefaultConfig; - -impl subxt::config::Config for DefaultConfig { - type Index = u32; - type Hash = H256; - type AccountId = AccountId32; - type Address = sp_runtime::MultiAddress; - type Signature = sp_runtime::MultiSignature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = - subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = ParachainExtrinsicParams; -} - pub type Justification = GrandpaJustification
; /// An encoded justification proving that the given header has been finalized @@ -38,109 +21,111 @@ pub struct JustificationNotification(sp_core::Bytes); #[ignore] #[tokio::test] async fn follow_grandpa_justifications() { - env_logger::builder() - .filter_module("grandpa", log::LevelFilter::Trace) - .format_module_path(false) - .init(); - - let relay = std::env::var("RELAY_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); - - let relay_ws_url = format!("ws://{relay}:9944"); - - let para_ids = vec![2000, 2001]; - let babe_epoch_start_key = - hex::decode("1cb6f36e027abb2091cfb5110ab5087fe90e2fbf2d792cb324bffa9427fe1f0e").unwrap(); - let current_set_id_key = - hex::decode("5f9cc45b7a00c5899361e1c6099678dc8a2d09463effcc78a22d75b9cb87dffc").unwrap(); - - let prover = GrandpaProver::::new( - &relay_ws_url, - para_ids, - StateMachine::Polkadot(0), - babe_epoch_start_key, - current_set_id_key, - ) - .await - .unwrap(); - - println!("Waiting for grandpa proofs to become available"); - let session_length = prover.session_length().await.unwrap(); - prover - .client - .blocks() - .subscribe_finalized() - .await - .unwrap() - .filter_map(|result| futures::future::ready(result.ok())) - .skip_while(|h| futures::future::ready(h.number() < (session_length * 2) + 10)) - .take(1) - .collect::>() - .await; - - let mut subscription = prover - .client - .rpc() - .subscribe::( - "grandpa_subscribeJustifications", - rpc_params![], - "grandpa_unsubscribeJustifications", - ) - .await - .unwrap() - .take(100); - - // slot duration in milliseconds for parachains - let slot_duration = 12_000; - - let mut consensus_state = prover.initialize_consensus_state(slot_duration).await.unwrap(); - - println!("Grandpa proofs are now available"); - while let Some(Ok(_)) = subscription.next().await { - let next_relay_height = consensus_state.latest_height + 1; - - // prove finality should give us the justification for the highest finalized block of the - // authority set the block provided to it belongs - let finality_proof = prover - .query_finality_proof::>( - consensus_state.latest_height, - next_relay_height, - ) - .await - .unwrap(); - - let justification = Justification::decode(&mut &finality_proof.justification[..]).unwrap(); - - println!("current_set_id: {}", consensus_state.current_set_id); - println!("latest_relay_height: {}", consensus_state.latest_height); - println!( - "For relay chain header: Hash({:?}), Number({})", - justification.commit.target_hash, justification.commit.target_number - ); - - let proof = prover - .query_finalized_parachain_headers_with_proof::>( - consensus_state.latest_height, - justification.commit.target_number, - finality_proof.clone(), - ) - .await - .expect("Failed to fetch finalized parachain headers with proof"); - - let proof = proof.encode(); - let proof = ParachainHeadersWithFinalityProof::
::decode(&mut &*proof).unwrap(); - - let (new_consensus_state, _parachain_headers) = - verify_parachain_headers_with_grandpa_finality_proof::
( - consensus_state.clone(), - proof.clone(), - ) - .expect("Failed to verify parachain headers with grandpa finality_proof"); - - if !proof.parachain_headers.is_empty() { - assert!(new_consensus_state.latest_height > consensus_state.latest_height); - } - - consensus_state = new_consensus_state; - println!("========= Successfully verified grandpa justification ========="); - } + env_logger::builder() + .filter_module("grandpa", log::LevelFilter::Trace) + .format_module_path(false) + .init(); + + let relay_ws_url = std::env::var("RELAY_HOST") + .unwrap_or_else(|_| "wss://hyperbridge-paseo-relay.blockops.network:443".to_string()); + + // let relay_ws_url = format!("ws://{relay}:9944"); + + let para_ids = vec![2000]; + let babe_epoch_start_key = + hex::decode("1cb6f36e027abb2091cfb5110ab5087fe90e2fbf2d792cb324bffa9427fe1f0e").unwrap(); + let current_set_id_key = + hex::decode("5f9cc45b7a00c5899361e1c6099678dc8a2d09463effcc78a22d75b9cb87dffc").unwrap(); + + println!("Connecting to relay chain {relay_ws_url}"); + let prover = GrandpaProver::::new( + &relay_ws_url, + para_ids, + StateMachine::Polkadot(0), + babe_epoch_start_key, + current_set_id_key, + ) + .await + .unwrap(); + + println!("Connected to relay chain"); + + println!("Waiting for grandpa proofs to become available"); + let session_length = prover.session_length().await.unwrap(); + prover + .client + .blocks() + .subscribe_finalized() + .await + .unwrap() + .filter_map(|result| futures::future::ready(result.ok())) + .skip_while(|h| futures::future::ready(h.number() < (session_length * 2) + 10)) + .take(1) + .collect::>() + .await; + + let mut subscription = prover + .client + .rpc() + .subscribe::( + "grandpa_subscribeJustifications", + rpc_params![], + "grandpa_unsubscribeJustifications", + ) + .await + .unwrap(); + + // slot duration in milliseconds for parachains + let slot_duration = 6000; + let hash = prover.client.rpc().block_hash(Some(10u64.into())).await.unwrap().unwrap(); + let mut consensus_state = prover.initialize_consensus_state(slot_duration, hash).await.unwrap(); + println!("Grandpa proofs are now available"); + while let Some(Ok(_)) = subscription.next().await { + let next_relay_height = consensus_state.latest_height + 1; + + // prove finality should give us the justification for the highest finalized block of the + // authority set the block provided to it belongs + let finality_proof = prover + .query_finality_proof::>( + consensus_state.latest_height, + next_relay_height, + ) + .await + .unwrap(); + + let justification = Justification::decode(&mut &finality_proof.justification[..]).unwrap(); + + println!("current_set_id: {}", consensus_state.current_set_id); + println!("latest_relay_height: {}", consensus_state.latest_height); + println!( + "For relay chain header: Hash({:?}), Number({})", + justification.commit.target_hash, justification.commit.target_number + ); + + let proof = prover + .query_finalized_parachain_headers_with_proof::>( + consensus_state.latest_height, + justification.commit.target_number, + finality_proof.clone(), + ) + .await + .expect("Failed to fetch finalized parachain headers with proof"); + + let proof = proof.encode(); + let proof = ParachainHeadersWithFinalityProof::
::decode(&mut &*proof).unwrap(); + + let (new_consensus_state, _parachain_headers) = + verify_parachain_headers_with_grandpa_finality_proof::
( + consensus_state.clone(), + proof.clone(), + ) + .expect("Failed to verify parachain headers with grandpa finality_proof"); + + if !proof.parachain_headers.is_empty() { + assert!(new_consensus_state.latest_height > consensus_state.latest_height); + } + + consensus_state = new_consensus_state; + println!("========= Successfully verified grandpa justification ========="); + } } diff --git a/modules/consensus/sync-committee/primitives/Cargo.toml b/modules/consensus/sync-committee/primitives/Cargo.toml index 8de4fb467..3c08e1c3a 100644 --- a/modules/consensus/sync-committee/primitives/Cargo.toml +++ b/modules/consensus/sync-committee/primitives/Cargo.toml @@ -20,7 +20,7 @@ anyhow = {workspace = true, default-features = false} ark-ec = { version = "0.4.2", default-features = false } ark-bls12-381 = { version = "0.4.0", default-features = false } bls_on_arkworks = { version = "0.2.2", default-features = false } -serde-utils = { workspace = true, default-features = false } +serde-hex-utils = { workspace = true, default-features = false } [features] default = ["std"] @@ -35,6 +35,6 @@ std = [ "ark-bls12-381/std", "primitive-types/std", "serde", - "serde-utils/std" + "serde-hex-utils/std" ] diff --git a/modules/consensus/sync-committee/primitives/src/consensus_types.rs b/modules/consensus/sync-committee/primitives/src/consensus_types.rs index 06edd6090..85794fedb 100644 --- a/modules/consensus/sync-committee/primitives/src/consensus_types.rs +++ b/modules/consensus/sync-committee/primitives/src/consensus_types.rs @@ -13,9 +13,9 @@ use ssz_rs::{prelude::*, Deserialize, List, Vector}; #[derive(Default, Debug, SimpleSerialize, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct BeaconBlockHeader { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub slot: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub proposer_index: u64, pub parent_root: Root, pub state_root: Root, @@ -25,7 +25,7 @@ pub struct BeaconBlockHeader { #[derive(Default, Clone, Debug, SimpleSerialize, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct Checkpoint { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub epoch: u64, pub root: Root, } @@ -34,7 +34,7 @@ pub struct Checkpoint { #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct Eth1Data { pub deposit_root: Root, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub deposit_count: u64, pub block_hash: Hash32, } @@ -45,17 +45,17 @@ pub struct Validator { #[cfg_attr(feature = "std", serde(rename = "pubkey"))] pub public_key: BlsPublicKey, pub withdrawal_credentials: Bytes32, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub effective_balance: Gwei, pub slashed: bool, // Status epochs - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub activation_eligibility_epoch: Epoch, - #[cfg_attr(feature = 
"std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub activation_epoch: Epoch, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub exit_epoch: Epoch, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub withdrawable_epoch: Epoch, } @@ -76,7 +76,7 @@ pub struct SignedBeaconBlockHeader { #[derive(Default, Debug, SimpleSerialize, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct IndexedAttestation { - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub attesting_indices: List, pub data: AttestationData, pub signature: BlsSignature, @@ -85,9 +85,9 @@ pub struct IndexedAttestation { #[derive(Default, Clone, Debug, SimpleSerialize, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct AttestationData { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub slot: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub index: u64, pub beacon_block_root: Root, pub source: Checkpoint, @@ -122,7 +122,7 @@ pub struct DepositData { #[cfg_attr(feature = "std", serde(rename = "pubkey"))] pub public_key: BlsPublicKey, pub withdrawal_credentials: Hash32, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub amount: u64, pub signature: BlsSignature, } @@ -130,9 +130,9 @@ pub struct DepositData { #[derive(Default, Debug, SimpleSerialize, codec::Encode, codec::Decode, Clone, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct VoluntaryExit { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub epoch: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub validator_index: u64, } @@ -162,19 +162,19 @@ pub struct SyncCommittee { #[derive(Default, Debug, Clone, SimpleSerialize, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct Withdrawal { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub index: WithdrawalIndex, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub validator_index: ValidatorIndex, pub address: ExecutionAddress, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub amount: Gwei, } #[derive(Default, Debug, Clone, SimpleSerialize, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct BlsToExecutionChange { 
- #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub validator_index: ValidatorIndex, #[cfg_attr(feature = "std", serde(rename = "from_bls_pubkey"))] pub from_bls_public_key: BlsPublicKey, @@ -205,22 +205,22 @@ pub struct ExecutionPayload< pub receipts_root: Bytes32, pub logs_bloom: ByteVector, pub prev_randao: Bytes32, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub block_number: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub gas_limit: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub gas_used: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub timestamp: u64, pub extra_data: ByteList, pub base_fee_per_gas: U256, pub block_hash: Hash32, pub transactions: List, MAX_TRANSACTIONS_PER_PAYLOAD>, pub withdrawals: List, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub blob_gas_used: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub excess_blob_gas: u64, } @@ -236,22 +236,22 @@ pub struct ExecutionPayloadHeader< pub receipts_root: Bytes32, pub logs_bloom: ByteVector, pub prev_randao: Bytes32, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub block_number: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub gas_limit: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub gas_used: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub timestamp: u64, pub extra_data: ByteList, pub base_fee_per_gas: U256, pub block_hash: Hash32, pub transactions_root: Root, pub withdrawals_root: Root, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub blob_gas_used: u64, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub excess_blob_gas: u64, } @@ -312,9 +312,9 @@ pub struct BeaconBlock< const MAX_BLS_TO_EXECUTION_CHANGES: usize, const MAX_BLOB_COMMITMENTS_PER_BLOCK: usize, > { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub slot: Slot, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub proposer_index: ValidatorIndex, pub parent_root: Root, pub state_root: Root, @@ -338,18 +338,18 @@ pub struct BeaconBlock< #[derive(Default, Debug, SimpleSerialize, Clone, PartialEq, Eq, codec::Encode, codec::Decode)] #[cfg_attr(feature 
= "std", derive(serde::Serialize, serde::Deserialize))] pub struct Fork { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_hex"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_hex"))] pub previous_version: Version, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_hex"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_hex"))] pub current_version: Version, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub epoch: Epoch, } #[derive(Default, Debug, SimpleSerialize, Clone, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct ForkData { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_hex"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_hex"))] pub current_version: Version, pub genesis_validators_root: Root, } @@ -375,10 +375,10 @@ pub struct BeaconState< const BYTES_PER_LOGS_BLOOM: usize, const MAX_EXTRA_DATA_BYTES: usize, > { - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub genesis_time: u64, pub genesis_validators_root: Root, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub slot: Slot, pub fork: Fork, pub latest_block_header: BeaconBlockHeader, @@ -387,31 +387,31 @@ pub struct BeaconState< pub historical_roots: List, pub eth1_data: Eth1Data, pub eth1_data_votes: List, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub eth1_deposit_index: u64, pub validators: List, - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub balances: List, pub randao_mixes: Vector, - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub slashings: Vector, - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub previous_epoch_participation: List, - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub current_epoch_participation: List, pub justification_bits: Bitvector, pub previous_justified_checkpoint: Checkpoint, pub current_justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, - #[cfg_attr(feature = "std", serde(with = "serde_utils::seq_of_str"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::seq_of_str"))] pub inactivity_scores: List, pub current_sync_committee: SyncCommittee, pub next_sync_committee: SyncCommittee, pub latest_execution_payload_header: ExecutionPayloadHeader, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub next_withdrawal_index: WithdrawalIndex, - #[cfg_attr(feature = "std", serde(with = "serde_utils::as_string"))] + #[cfg_attr(feature = "std", serde(with = "serde_hex_utils::as_string"))] pub next_withdrawal_validator_index: ValidatorIndex, pub historical_summaries: List, } diff --git 
a/modules/consensus/sync-committee/primitives/src/ssz/byte_list.rs b/modules/consensus/sync-committee/primitives/src/ssz/byte_list.rs index 6ba165723..9fa982bb1 100644 --- a/modules/consensus/sync-committee/primitives/src/ssz/byte_list.rs +++ b/modules/consensus/sync-committee/primitives/src/ssz/byte_list.rs @@ -10,7 +10,7 @@ use ssz_rs::prelude::*; #[derive(Default, Clone, Eq, SimpleSerialize, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct ByteList( - #[cfg_attr(feature = "serde", serde(with = "serde_utils::as_hex"))] List, + #[cfg_attr(feature = "serde", serde(with = "serde_hex_utils::as_hex"))] List, ); impl TryFrom<&[u8]> for ByteList { diff --git a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs b/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs index bb2dcfa5e..56903d721 100644 --- a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs +++ b/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs @@ -10,7 +10,7 @@ use ssz_rs::prelude::*; #[derive(Default, Clone, Eq, SimpleSerialize, codec::Encode, codec::Decode)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct ByteVector( - #[cfg_attr(feature = "serde", serde(with = "serde_utils::as_hex"))] Vector, + #[cfg_attr(feature = "serde", serde(with = "serde_hex_utils::as_hex"))] Vector, ); impl TryFrom<&[u8]> for ByteVector { diff --git a/modules/hyperclient/Cargo.toml b/modules/hyperclient/Cargo.toml index 272ef6fc8..bab0ac657 100644 --- a/modules/hyperclient/Cargo.toml +++ b/modules/hyperclient/Cargo.toml @@ -29,7 +29,7 @@ ethereum-triedb = { workspace = true } subxt-utils = { workspace = true } mmr-primitives = { workspace = true } sp-mmr-primitives = { workspace = true } -serde-utils = { workspace = true } +serde-hex-utils = { workspace = true } # crates.io getrandom = { version = "0.2", default-features = false, features = ["js"] } @@ -61,7 +61,7 @@ std = [ "subxt/jsonrpsee", "subxt-utils/std", "sp-core/std", - "serde-utils/std", + "serde-hex-utils/std", "substrate-state-machine/std", ] testing = [] diff --git a/modules/hyperclient/src/interfaces.rs b/modules/hyperclient/src/interfaces.rs index c59d87170..a1e8072c4 100644 --- a/modules/hyperclient/src/interfaces.rs +++ b/modules/hyperclient/src/interfaces.rs @@ -252,7 +252,7 @@ pub struct JsPostResponse { /// The request that triggered this response. pub post: JsPost, /// The response message. - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub response: Vec, /// Timestamp at which this response expires in seconds. #[serde(rename = "timeoutTimestamp")] diff --git a/modules/hyperclient/src/types.rs b/modules/hyperclient/src/types.rs index 905858216..12c8942a7 100644 --- a/modules/hyperclient/src/types.rs +++ b/modules/hyperclient/src/types.rs @@ -143,7 +143,7 @@ pub enum MessageStatusWithMetadata { #[serde(flatten)] meta: EventMetadata, /// Calldata that encodes the proof for the message to be sent to the destination. - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] calldata: Bytes, }, /// Delivered to destination @@ -236,7 +236,7 @@ pub enum TimeoutStatus { /// Encoded call data to be submitted to source chain TimeoutMessage { /// Calldata that encodes the proof for the timeout message on the source. 
- #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] calldata: Bytes, }, } diff --git a/modules/ismp/clients/grandpa/Cargo.toml b/modules/ismp/clients/grandpa/Cargo.toml new file mode 100644 index 000000000..d06e98dce --- /dev/null +++ b/modules/ismp/clients/grandpa/Cargo.toml @@ -0,0 +1,63 @@ +[package] +name = "ismp-grandpa" +version = "1.15.0" +edition = "2021" +authors = ["Polytope Labs "] +license = "Apache-2.0" +repository = "https://github.com/polytope-labs/hyperbridge" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" +documentation = "https://docs.rs/ismp-grandpa" +description = "GRANDPA consensus client for pallet-ismp" +keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability", "GRANDPA"] +readme = "./README.md" + +[dependencies] +codec = { workspace = true, features = ["derive"] } +primitive-types = { workspace = true } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +merkle-mountain-range = { workspace = true } +finality-grandpa = { version = "0.16.0", features = ["derive-codec"], default-features = false } + +# polytope labs +ismp = { workspace = true } +grandpa-verifier-primitives = { workspace = true } +grandpa-verifier = { workspace = true } +pallet-ismp = { workspace = true } + + +# substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-trie = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } + +# cumulus +cumulus-primitives-core = { workspace = true } +substrate-state-machine = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "cumulus-primitives-core/std", + "ismp/std", + "sp-trie/std", + "sp-consensus-aura/std", + "sp-runtime/std", + "sp-io/std", + "primitive-types/std", + "pallet-ismp/std", + "sp-core/std", + "grandpa-verifier-primitives/std", + "grandpa-verifier/std", + "merkle-mountain-range/std", + "substrate-state-machine/std", + "finality-grandpa/std", +] +try-runtime = [] diff --git a/modules/ismp/clients/grandpa/README.md b/modules/ismp/clients/grandpa/README.md new file mode 100644 index 000000000..aaf7d2dd3 --- /dev/null +++ b/modules/ismp/clients/grandpa/README.md @@ -0,0 +1,30 @@ +# ISMP GRANDPA Consensus Client + +This allows standalone chains communicate with Hyperbridge over ISMP. + +The consensus client relies on a pallet which stores a list of parachains and State machine identifiers authorized to use this client. + +## Overview + +The Pallet allows the [`AdminOrigin`](https://docs.rs/pallet-ismp/latest/pallet_ismp/pallet/trait.Config.html#associatedtype.AdminOrigin) configured in [`pallet-ismp`](https://docs.rs/pallet-ismp/latest/pallet_ismp) to dispatch calls for adding and removing substrate-based chains from the pallet whitelist. + +## Setting up + +When using this consensus client the following should be done in order: + +- Create the consensus state using [`create_consensus_client`](https://docs.rs/pallet-ismp/latest/pallet_ismp/pallet/dispatchables/fn.create_consensus_client.html) + +- The supported state machine identifiers should be whitelisted in the pallet by calling `add_state_machines` from the [`AdminOrigin`].
+ +## Note + +If a state machine identifier is not found in the whitelist, ismp datagrams from that chain will be rejected. + +## Dispatchable Functions + +- `add_state_machines` - Adds some standalone chain state machine identifiers to the whitelist. +- `remove_state_machines` - Removes some standalone chain state machine identifiers from the whitelist. + +## License + +This library is licensed under the Apache 2.0 License, Copyright (c) 2024 Polytope Labs. diff --git a/modules/ismp/clients/grandpa/src/consensus.rs b/modules/ismp/clients/grandpa/src/consensus.rs new file mode 100644 index 000000000..dbd455ce2 --- /dev/null +++ b/modules/ismp/clients/grandpa/src/consensus.rs @@ -0,0 +1,331 @@ +// Copyright (c) 2024 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific lang + +use crate::{ + messages::{ConsensusMessage, SubstrateHeader}, + SupportedStateMachines, +}; +use alloc::{boxed::Box, collections::BTreeMap, format, vec::Vec}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use finality_grandpa::Chain; +use ismp::{ + consensus::{ + ConsensusClient, ConsensusClientId, ConsensusStateId, StateCommitment, StateMachineClient, + VerifiedCommitments, + }, + error::Error, + host::{IsmpHost, StateMachine}, + messaging::StateCommitmentHeight, +}; + +use grandpa_verifier::{ + verify_grandpa_finality_proof, verify_parachain_headers_with_grandpa_finality_proof, +}; +use grandpa_verifier_primitives::{ + justification::{AncestryChain, GrandpaJustification}, + ConsensusState, FinalityProof, ParachainHeadersWithFinalityProof, +}; +use sp_core::Get; +use sp_runtime::traits::Header; +use substrate_state_machine::{fetch_overlay_root_and_timestamp, SubstrateStateMachine}; + +/// [`ConsensusStateId`] for the polkadot relay chain +pub const POLKADOT_CONSENSUS_STATE_ID: ConsensusStateId = *b"polk"; + +/// [`ConsensusStateId`] for the kusama relay chain +pub const KUSAMA_CONSENSUS_STATE_ID: ConsensusStateId = *b"ksma"; + +/// [`ConsensusClientId`] for GRANDPA consensus +pub const GRANDPA_CONSENSUS_ID: ConsensusClientId = *b"GRNP"; + +pub struct GrandpaConsensusClient(PhantomData); + +impl Default for GrandpaConsensusClient { + fn default() -> Self { + Self(PhantomData) + } +} + +impl ConsensusClient for GrandpaConsensusClient +where + T: pallet_ismp::Config + super::Config, +{ + fn verify_consensus( + &self, + _host: &dyn IsmpHost, + _consensus_state_id: ConsensusStateId, + trusted_consensus_state: Vec, + proof: Vec, + ) -> Result<(Vec, VerifiedCommitments), Error> { + // decode the proof into consensus message + let consensus_message: ConsensusMessage = + codec::Decode::decode(&mut &proof[..]).map_err(|e| { + Error::Custom(format!("Cannot decode consensus message from proof: {e:?}",)) + })?; + + // decode the consensus state + let consensus_state: ConsensusState = + codec::Decode::decode(&mut &trusted_consensus_state[..]).map_err(|e| { + Error::Custom(format!( + "Cannot decode consensus state from trusted consensus state bytes: {e:?}", + )) + })?; + + let mut intermediates = BTreeMap::new(); + + // match 
over the message + match consensus_message { + ConsensusMessage::RelayChainMessage(relay_chain_message) => { + let headers_with_finality_proof = ParachainHeadersWithFinalityProof { + finality_proof: relay_chain_message.finality_proof, + parachain_headers: relay_chain_message.parachain_headers, + }; + + let (consensus_state, parachain_headers) = + verify_parachain_headers_with_grandpa_finality_proof( + consensus_state, + headers_with_finality_proof, + ) + .map_err(|_| Error::Custom("Error verifying parachain headers".into()))?; + + let parachain_headers = parachain_headers + .into_iter() + // filter out unknown para ids + .filter_map(|(para_id, header)| { + if let Some(slot_duration) = + SupportedStateMachines::::get(StateMachine::Polkadot(para_id)) + .or(SupportedStateMachines::::get(StateMachine::Kusama(para_id))) + { + Some((para_id, header, slot_duration)) + } else { + None + } + }) + .collect::>(); + + for (para_id, header_vec, slot_duration) in parachain_headers { + let mut state_commitments_vec = Vec::new(); + + let state_id: StateMachine = match T::Coprocessor::get() { + Some(StateMachine::Polkadot(_)) => StateMachine::Polkadot(para_id), + Some(StateMachine::Kusama(_)) => StateMachine::Kusama(para_id), + _ => Err(Error::Custom( + "Coprocessor was not set, cannot determine para id state machine id" + .into(), + ))?, + }; + + for header in header_vec { + let digest_result = + fetch_overlay_root_and_timestamp(header.digest(), slot_duration)?; + + if digest_result.timestamp == 0 { + Err(Error::Custom("Timestamp or ismp root not found".into()))? + } + + let height: u32 = (*header.number()).into(); + + let intermediate = match T::Coprocessor::get() { + Some(id) if id == state_id => StateCommitmentHeight { + // for the coprocessor, we only care about the child root & mmr root + commitment: StateCommitment { + timestamp: digest_result.timestamp, + overlay_root: Some(digest_result.ismp_digest.mmr_root), + state_root: digest_result.ismp_digest.child_trie_root, /* child root */ + }, + height: height.into(), + }, + _ => StateCommitmentHeight { + commitment: StateCommitment { + timestamp: digest_result.timestamp, + overlay_root: Some(digest_result.ismp_digest.child_trie_root), + state_root: header.state_root, + }, + height: height.into(), + }, + }; + + state_commitments_vec.push(intermediate); + } + + intermediates.insert(state_id, state_commitments_vec); + } + + Ok((consensus_state.encode(), intermediates)) + }, + ConsensusMessage::StandaloneChainMessage(standalone_chain_message) => { + let (consensus_state, header, _, _) = verify_grandpa_finality_proof( + consensus_state, + standalone_chain_message.finality_proof, + ) + .map_err(|_| Error::Custom("Error verifying grandpa header".parse().unwrap()))?; + let digest_result = fetch_overlay_root_and_timestamp( + header.digest(), + consensus_state.slot_duration, + )?; + + if digest_result.timestamp == 0 { + Err(Error::Custom("Timestamp or ismp root not found".into()))? 
+ } + + let height: u32 = (*header.number()).into(); + + let state_id = consensus_state.state_machine; + + let intermediate = StateCommitmentHeight { + commitment: StateCommitment { + timestamp: digest_result.timestamp, + overlay_root: Some(digest_result.ismp_digest.child_trie_root), + state_root: header.state_root, + }, + height: height.into(), + }; + + let mut state_commitments_vec = Vec::new(); + state_commitments_vec.push(intermediate); + + intermediates.insert(state_id, state_commitments_vec); + + Ok((consensus_state.encode(), intermediates)) + }, + } + } + + fn verify_fraud_proof( + &self, + _host: &dyn IsmpHost, + trusted_consensus_state: Vec, + proof_1: Vec, + proof_2: Vec, + ) -> Result<(), Error> { + // decode the consensus state + let consensus_state: ConsensusState = + codec::Decode::decode(&mut &trusted_consensus_state[..]).map_err(|e| { + Error::Custom(format!( + "Cannot decode consensus state from trusted consensus state bytes: {e:?}", + )) + })?; + + let first_proof: FinalityProof = codec::Decode::decode(&mut &proof_1[..]) + .map_err(|e| { + Error::Custom(format!( + "Cannot decode first finality proof from proof_1 bytes: {e:?}", + )) + })?; + + let second_proof: FinalityProof = codec::Decode::decode(&mut &proof_2[..]) + .map_err(|e| { + Error::Custom(format!( + "Cannot decode second finality proof from proof_2 bytes: {e:?}", + )) + })?; + + if first_proof.block == second_proof.block { + return Err(Error::Custom(format!("Fraud proofs are for the same block",))); + } + + let first_headers = AncestryChain::::new(&first_proof.unknown_headers); + let first_target = first_proof + .unknown_headers + .iter() + .max_by_key(|h| *h.number()) + .ok_or_else(|| Error::Custom(format!("Unknown headers can't be empty!")))?; + + let second_headers = AncestryChain::::new(&second_proof.unknown_headers); + let second_target = second_proof + .unknown_headers + .iter() + .max_by_key(|h| *h.number()) + .ok_or_else(|| Error::Custom(format!("Unknown headers can't be empty!")))?; + + if first_target.hash() != first_proof.block || second_target.hash() != second_proof.block { + return Err(Error::Custom(format!("Fraud proofs are not for the same chain"))); + } + + let first_base = first_proof + .unknown_headers + .iter() + .min_by_key(|h| *h.number()) + .ok_or_else(|| Error::Custom(format!("Unknown headers can't be empty!")))?; + first_headers + .ancestry(first_base.hash(), first_target.hash()) + .map_err(|_| Error::Custom(format!("Invalid ancestry!")))?; + + let second_base = second_proof + .unknown_headers + .iter() + .min_by_key(|h| *h.number()) + .ok_or_else(|| Error::Custom(format!("Unknown headers can't be empty!")))?; + second_headers + .ancestry(second_base.hash(), second_target.hash()) + .map_err(|_| Error::Custom(format!("Invalid ancestry!")))?; + + let first_parent = first_base.parent_hash(); + let second_parent = second_base.parent_hash(); + + if first_parent != second_parent { + return Err(Error::Custom(format!("Fraud proofs are not for the same ancestor"))); + } + + let first_justification = + GrandpaJustification::::decode(&mut &first_proof.justification[..]) + .map_err(|_| Error::Custom(format!("Could not decode first justification")))?; + + let second_justification = + GrandpaJustification::::decode(&mut &second_proof.justification[..]) + .map_err(|_| Error::Custom(format!("Could not decode second justification")))?; + + if first_proof.block != first_justification.commit.target_hash || + second_proof.block != second_justification.commit.target_hash + { + Err(Error::Custom( + 
format!("First or second finality proof block hash does not match justification target hash") + ))? + } + + if first_justification.commit.target_hash != consensus_state.latest_hash && + second_justification.commit.target_hash != consensus_state.latest_hash + { + Err(Error::Custom(format!( + "First or second justification does not match consensus latest hash" + )))? + } + + let first_valid = first_justification + .verify(consensus_state.current_set_id, &consensus_state.current_authorities) + .is_ok(); + let second_valid = second_justification + .verify(consensus_state.current_set_id, &consensus_state.current_authorities) + .is_ok(); + + if !first_valid || !second_valid { + Err(Error::Custom(format!("Invalid justification")))? + } + + Ok(()) + } + + fn state_machine(&self, id: StateMachine) -> Result, Error> { + if SupportedStateMachines::::contains_key(id) { + Ok(Box::new(SubstrateStateMachine::::default())) + } else { + Err(Error::Custom(format!("Unsupported State Machine {id:?}"))) + } + } + + fn consensus_client_id(&self) -> ConsensusClientId { + GRANDPA_CONSENSUS_ID + } +} diff --git a/modules/ismp/clients/grandpa/src/lib.rs b/modules/ismp/clients/grandpa/src/lib.rs new file mode 100644 index 000000000..b84335c30 --- /dev/null +++ b/modules/ismp/clients/grandpa/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright (c) 2024 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific lang + +#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +pub mod consensus; +pub mod messages; + +use alloc::vec::Vec; +use ismp::host::StateMachine; +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use ismp::host::IsmpHost; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + /// The config trait + #[pallet::config] + pub trait Config: frame_system::Config + pallet_ismp::Config { + /// The overarching event type + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// IsmpHost implementation + type IsmpHost: IsmpHost + Default; + } + + /// Events emitted by this pallet + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// State machines have been added to whitelist + StateMachineAdded { + /// The state machines in question + state_machines: Vec, + }, + /// State machines have been removed from the whitelist + StateMachineRemoved { + /// The state machines in question + state_machines: Vec, + }, + } + + /// Registered state machines for the grandpa consensus client + #[pallet::storage] + #[pallet::getter(fn state_machines)] + pub type SupportedStateMachines = + StorageMap<_, Twox64Concat, StateMachine, u64, OptionQuery>; + + #[pallet::call] + impl Pallet { + /// Add some a state machine to the list of supported state machines + #[pallet::call_index(0)] + #[pallet::weight(T::DbWeight::get().writes(new_state_machines.len() as u64))] + pub fn add_state_machines( + origin: OriginFor, + new_state_machines: Vec, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + + let state_machines = + new_state_machines.iter().map(|a| a.state_machine.clone()).collect(); + for AddStateMachine { state_machine, slot_duration } in new_state_machines { + SupportedStateMachines::::insert(state_machine, slot_duration); + } + + Self::deposit_event(Event::StateMachineAdded { state_machines }); + + Ok(()) + } + + /// Remove a state machine from the list of supported state machines + #[pallet::call_index(1)] + #[pallet::weight(T::DbWeight::get().writes(state_machines.len() as u64))] + pub fn remove_state_machines( + origin: OriginFor, + state_machines: Vec, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + + for state_machine in state_machines.clone() { + SupportedStateMachines::::remove(state_machine) + } + + Self::deposit_event(Event::StateMachineRemoved { state_machines }); + + Ok(()) + } + } +} + +/// Update the state machine whitelist +#[derive(Clone, codec::Encode, codec::Decode, scale_info::TypeInfo, Debug, PartialEq, Eq)] +pub struct AddStateMachine { + /// State machine to add + pub state_machine: StateMachine, + /// It's slot duration + pub slot_duration: u64, +} diff --git a/modules/consensus/grandpa/src/messages.rs b/modules/ismp/clients/grandpa/src/messages.rs similarity index 67% rename from modules/consensus/grandpa/src/messages.rs rename to modules/ismp/clients/grandpa/src/messages.rs index 6ce376132..33ff5a586 100644 --- a/modules/consensus/grandpa/src/messages.rs +++ b/modules/ismp/clients/grandpa/src/messages.rs @@ -13,7 +13,7 @@ // See the License for the specific lang use alloc::collections::BTreeMap; use codec::{Decode, Encode}; -use primitives::{FinalityProof, ParachainHeaderProofs}; +use grandpa_verifier_primitives::{FinalityProof, ParachainHeaderProofs}; use sp_core::H256; use sp_runtime::traits::BlakeTwo256; @@ 
-23,22 +23,22 @@ pub type SubstrateHeader = sp_runtime::generic::Header; /// [`ClientMessage`] definition #[derive(Clone, Debug, Encode, Decode)] pub enum ConsensusMessage { - /// This is the variant representing the standalone chain - StandaloneChainMessage(StandaloneChainMessage), - /// This is the variant representing the relay chain - RelayChainMessage(RelayChainMessage), + /// This is the variant representing the standalone chain + StandaloneChainMessage(StandaloneChainMessage), + /// This is the variant representing the relay chain + RelayChainMessage(RelayChainMessage), } #[derive(Clone, Debug, Encode, Decode)] pub struct StandaloneChainMessage { - /// finality proof - pub finality_proof: FinalityProof, + /// finality proof + pub finality_proof: FinalityProof, } #[derive(Clone, Debug, Encode, Decode)] pub struct RelayChainMessage { - /// finality proof - pub finality_proof: FinalityProof, - /// parachain headers - pub parachain_headers: BTreeMap, + /// finality proof + pub finality_proof: FinalityProof, + /// parachain headers + pub parachain_headers: BTreeMap, } diff --git a/modules/ismp/clients/parachain/client/Cargo.toml b/modules/ismp/clients/parachain/client/Cargo.toml index 6a82e0bb2..d28467604 100644 --- a/modules/ismp/clients/parachain/client/Cargo.toml +++ b/modules/ismp/clients/parachain/client/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/ismp-parachain" description = "Parachain consensus client for pallet-ismp" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/clients/parachain/inherent/Cargo.toml b/modules/ismp/clients/parachain/inherent/Cargo.toml index 7ffb5cc74..b045ecb0b 100644 --- a/modules/ismp/clients/parachain/inherent/Cargo.toml +++ b/modules/ismp/clients/parachain/inherent/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/ismp-parachain-inherent" description = "ISMP parachain consensus client inherent provider" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/clients/parachain/runtime-api/Cargo.toml b/modules/ismp/clients/parachain/runtime-api/Cargo.toml index 73aa37b55..a39ac25b5 100644 --- a/modules/ismp/clients/parachain/runtime-api/Cargo.toml +++ b/modules/ismp/clients/parachain/runtime-api/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/ismp-parachain" description = "Substrate runtime API for the ISMP Parachain consensus client" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/core/Cargo.toml b/modules/ismp/core/Cargo.toml index 8b69f2444..98b6c5176 100644 --- a/modules/ismp/core/Cargo.toml +++ b/modules/ismp/core/Cargo.toml @@ -21,7 +21,7 @@ derive_more = { 
version = "0.99.17", default-features = false, features = ["from serde_json = { version = "1.0.99", default-features = false, features = ["alloc"] } hex = { version = "0.4.3", features = ["alloc"], default-features = false } anyhow = {workspace = true, default-features = false} -serde-utils = { workspace = true, default-features = false } +serde-hex-utils = { workspace = true, default-features = false } [features] default = ["std"] @@ -34,5 +34,5 @@ std = [ "anyhow/std", "hex/std", "serde_json/std", - "serde-utils/std" + "serde-hex-utils/std" ] diff --git a/modules/ismp/core/src/consensus.rs b/modules/ismp/core/src/consensus.rs index ce3e0429f..603567d84 100644 --- a/modules/ismp/core/src/consensus.rs +++ b/modules/ismp/core/src/consensus.rs @@ -94,10 +94,10 @@ pub struct IntermediateState { )] pub struct StateMachineId { /// The state machine identifier - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub state_id: StateMachine, /// It's consensus state identifier - #[serde(with = "serde_utils::as_utf8_string")] + #[serde(with = "serde_hex_utils::as_utf8_string")] pub consensus_state_id: ConsensusStateId, } diff --git a/modules/ismp/core/src/events.rs b/modules/ismp/core/src/events.rs index f9496d341..36a388064 100644 --- a/modules/ismp/core/src/events.rs +++ b/modules/ismp/core/src/events.rs @@ -41,7 +41,7 @@ pub struct StateCommitmentVetoed { /// The state commitment identifier pub height: StateMachineHeight, /// The account responsible - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub fisherman: Vec, } @@ -53,7 +53,7 @@ pub struct RequestResponseHandled { /// The commitment to the request or response pub commitment: H256, /// The address of the relayer responsible for relaying the request - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub relayer: Vec, } @@ -65,10 +65,10 @@ pub struct TimeoutHandled { /// The commitment to the request or response pub commitment: H256, /// The source chain of the message - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub source: StateMachine, /// The destination chain of the message - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub dest: StateMachine, } diff --git a/modules/ismp/core/src/messaging.rs b/modules/ismp/core/src/messaging.rs index 433579d87..92746e1cb 100644 --- a/modules/ismp/core/src/messaging.rs +++ b/modules/ismp/core/src/messaging.rs @@ -88,13 +88,13 @@ pub struct StateCommitmentHeight { )] pub struct CreateConsensusState { /// Scale encoded consensus state - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub consensus_state: Vec, /// Consensus client id - #[serde(with = "serde_utils::as_utf8_string")] + #[serde(with = "serde_hex_utils::as_utf8_string")] pub consensus_client_id: ConsensusClientId, /// The consensus state Id - #[serde(with = "serde_utils::as_utf8_string")] + #[serde(with = "serde_hex_utils::as_utf8_string")] pub consensus_state_id: ConsensusStateId, /// Unbonding period for this consensus state. pub unbonding_period: u64, diff --git a/modules/ismp/core/src/router.rs b/modules/ismp/core/src/router.rs index 40a454f52..ef00b336e 100644 --- a/modules/ismp/core/src/router.rs +++ b/modules/ismp/core/src/router.rs @@ -34,23 +34,23 @@ use core::{fmt::Formatter, time::Duration}; )] pub struct PostRequest { /// The source state machine of this request. 
- #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub source: StateMachine, /// The destination state machine of this request. - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub dest: StateMachine, /// The nonce of this request on the source chain pub nonce: u64, /// Module identifier of the sending module - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub from: Vec, /// Module identifier of the receiving module - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub to: Vec, /// Timestamp which this request expires in seconds. pub timeout_timestamp: u64, /// Encoded request body - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub body: Vec, } @@ -95,15 +95,15 @@ impl core::fmt::Display for PostRequest { )] pub struct GetRequest { /// The source state machine of this request. - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub source: StateMachine, /// The destination state machine of this request. - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub dest: StateMachine, /// The nonce of this request on the source chain pub nonce: u64, /// Module identifier of the sending module - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub from: Vec, /// Raw Storage keys that would be used to fetch the values from the counterparty /// For deriving storage keys for ink contract fields follow the guide in the link below @@ -119,12 +119,12 @@ pub struct GetRequest { /// For fetching keys from EVM contracts each key should either be 52 bytes or 20 bytes /// For 52 byte keys we expect it to be a concatenation of contract address and slot hash /// For 20 bytes we expect it to be a contract or account address - #[serde(with = "serde_utils::seq_of_hex")] + #[serde(with = "serde_hex_utils::seq_of_hex")] pub keys: Vec>, /// Height at which to read the state machine. pub height: u64, /// Some application-specific metadata relating to this request - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub context: Vec, /// Host timestamp at which this request expires in seconds pub timeout_timestamp: u64, @@ -294,7 +294,7 @@ pub struct PostResponse { /// The request that triggered this response. pub post: PostRequest, /// The response message. - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub response: Vec, /// Timestamp at which this response expires in seconds. 
pub timeout_timestamp: u64, @@ -408,11 +408,11 @@ impl GetResponse { )] pub struct StorageValue { /// The request storage keys - #[serde(with = "serde_utils::as_hex")] + #[serde(with = "serde_hex_utils::as_hex")] pub key: Vec, /// The verified value - #[serde(serialize_with = "serde_utils::as_hex::serialize_option")] - #[serde(deserialize_with = "serde_utils::as_hex::deserialize_option")] + #[serde(serialize_with = "serde_hex_utils::as_hex::serialize_option")] + #[serde(deserialize_with = "serde_hex_utils::as_hex::deserialize_option")] pub value: Option>, } diff --git a/modules/ismp/pallets/hyperbridge/Cargo.toml b/modules/ismp/pallets/hyperbridge/Cargo.toml index dbf441d94..72606fbaa 100644 --- a/modules/ismp/pallets/hyperbridge/Cargo.toml +++ b/modules/ismp/pallets/hyperbridge/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/pallet-hyperbridge" description = "Pallet hyperbridge mediates the connection between hyperbridge and substrate-based chains." keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/pallets/pallet/Cargo.toml b/modules/ismp/pallets/pallet/Cargo.toml index 4fe61ef58..07a17d77a 100644 --- a/modules/ismp/pallets/pallet/Cargo.toml +++ b/modules/ismp/pallets/pallet/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/pallet-ismp" description = "The substrate runtime implementation of the Interoperable State Machine Protocol" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/pallets/pallet/src/utils.rs b/modules/ismp/pallets/pallet/src/utils.rs index 8f5c7d3ee..e26d04377 100644 --- a/modules/ismp/pallets/pallet/src/utils.rs +++ b/modules/ismp/pallets/pallet/src/utils.rs @@ -130,7 +130,7 @@ impl ModuleId { pub const ISMP_ID: sp_runtime::ConsensusEngineId = *b"ISMP"; /// Consensus log digest for pallet ismp -#[derive(Encode, Decode, Clone, scale_info::TypeInfo)] +#[derive(Encode, Decode, Clone, scale_info::TypeInfo, Default)] pub struct ConsensusDigest { /// Mmr root hash pub mmr_root: H256, diff --git a/modules/ismp/pallets/rpc/Cargo.toml b/modules/ismp/pallets/rpc/Cargo.toml index 9cd3f534c..08f170344 100644 --- a/modules/ismp/pallets/rpc/Cargo.toml +++ b/modules/ismp/pallets/rpc/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/pallet-ismp-rpc" description = "RPC apis for pallet-ismp" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/pallets/runtime-api/Cargo.toml b/modules/ismp/pallets/runtime-api/Cargo.toml index 06db062d2..bfe7bf3f5 100644 --- a/modules/ismp/pallets/runtime-api/Cargo.toml +++ b/modules/ismp/pallets/runtime-api/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = 
["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/pallet-ismp" description = "The substrate runtime API for pallet-ismp" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/ismp/state-machines/substrate/Cargo.toml b/modules/ismp/state-machines/substrate/Cargo.toml index 2a83bd9f5..7cc4a4dd6 100644 --- a/modules/ismp/state-machines/substrate/Cargo.toml +++ b/modules/ismp/state-machines/substrate/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/substrate-state-machine" description = "ISMP state machine client implementation for substrate-based chains " keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] @@ -16,6 +16,7 @@ readme = "./README.md" frame-support = { workspace = true } sp-runtime = { workspace = true } sp-consensus-aura = { workspace = true } +sp-consensus-babe = { workspace = true } sp-trie = { workspace = true } # polytope labs @@ -25,7 +26,7 @@ pallet-ismp = { workspace = true } # crates.io codec = { workspace = true } primitive-types = { workspace = true } -serde = { workspace = true, features = ["derive"]} +serde = { workspace = true, features = ["derive"] } scale-info = { workspace = true, features = ["derive"] } hash-db = { workspace = true } @@ -33,6 +34,7 @@ hash-db = { workspace = true } default = ["std"] std = [ "sp-consensus-aura/std", + "sp-consensus-babe/std", "ismp/std", "codec/std", "sp-runtime/std", @@ -42,5 +44,5 @@ std = [ "frame-support/std", "pallet-ismp/std", "sp-trie/std", - "hash-db/std" + "hash-db/std", ] diff --git a/modules/ismp/state-machines/substrate/src/lib.rs b/modules/ismp/state-machines/substrate/src/lib.rs index 44f9473ae..7bbb3a237 100644 --- a/modules/ismp/state-machines/substrate/src/lib.rs +++ b/modules/ismp/state-machines/substrate/src/lib.rs @@ -33,10 +33,10 @@ use ismp::{ }; use pallet_ismp::{ child_trie::{RequestCommitments, RequestReceipts, ResponseCommitments, ResponseReceipts}, - ISMP_ID, + ConsensusDigest, ISMP_ID, }; -use primitive_types::H256; use sp_consensus_aura::{Slot, AURA_ENGINE_ID}; +use sp_consensus_babe::{digests::PreDigest, BABE_ENGINE_ID}; use sp_runtime::{ traits::{BlakeTwo256, Keccak256}, Digest, DigestItem, @@ -234,9 +234,8 @@ where let root = match &state_proof { SubstrateStateProof::OverlayProof { .. 
} => { match T::Coprocessor::get() { - Some(id) if id == proof.height.id.state_id => root.state_root, /* child root - * on */ - // hyperbridge + Some(id) if id == proof.height.id.state_id => root.state_root, + // child root on hyperbridge _ => root.overlay_root.ok_or_else(|| { Error::Custom( "Child trie root is not available for provided state commitment".into(), @@ -312,12 +311,21 @@ where Ok(result) } +/// Result for processing consensus digest logs +#[derive(Default)] +pub struct DigestResult { + /// Timestamp + pub timestamp: u64, + /// Ismp digest + pub ismp_digest: ConsensusDigest, +} + /// Fetches the overlay (ismp) root and timestamp from the header digest pub fn fetch_overlay_root_and_timestamp( digest: &Digest, slot_duration: u64, -) -> Result<(u64, H256), Error> { - let (mut timestamp, mut overlay_root) = (0, H256::default()); +) -> Result { + let mut digest_result = DigestResult::default(); for digest in digest.logs.iter() { match digest { @@ -326,21 +334,28 @@ pub fn fetch_overlay_root_and_timestamp( { let slot = Slot::decode(&mut &value[..]) .map_err(|e| Error::Custom(format!("Cannot slot: {e:?}")))?; - timestamp = Duration::from_millis(*slot * slot_duration).as_secs(); + digest_result.timestamp = Duration::from_millis(*slot * slot_duration).as_secs(); + }, + DigestItem::PreRuntime(consensus_engine_id, value) + if *consensus_engine_id == BABE_ENGINE_ID => + { + let slot = PreDigest::decode(&mut &value[..]) + .map_err(|e| Error::Custom(format!("Cannot slot: {e:?}")))? + .slot(); + digest_result.timestamp = Duration::from_millis(*slot * slot_duration).as_secs(); }, DigestItem::Consensus(consensus_engine_id, value) if *consensus_engine_id == ISMP_ID => { - if value.len() != 32 { - Err(Error::Custom("Header contains an invalid ismp root".into()))? 
- } + let digest = ConsensusDigest::decode(&mut &value[..]) + .map_err(|e| Error::Custom(format!("Failed to decode digest: {e:?}")))?; - overlay_root = H256::from_slice(&value); + digest_result.ismp_digest = digest }, // don't really care about the rest _ => {}, }; } - Ok((timestamp, overlay_root)) + Ok(digest_result) } diff --git a/modules/trees/mmr/primitives/Cargo.toml b/modules/trees/mmr/primitives/Cargo.toml index cf87d8cad..901fe1a6e 100644 --- a/modules/trees/mmr/primitives/Cargo.toml +++ b/modules/trees/mmr/primitives/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" authors = ["Polytope Labs "] license = "Apache-2.0" repository = "https://github.com/polytope-labs/hyperbridge" -homepage = "https://docs.hyperbridge.network/integrations/polkadot-sdk" +homepage = "https://docs.hyperbridge.network/developers/polkadot/integration" documentation = "https://docs.rs/ismp-parachain" description = "Merkle mountain range primitives for pallet-ismp" keywords = ["substrate", "polkadot-sdk", "ISMP", "interoperability"] diff --git a/modules/utils/serde/Cargo.toml b/modules/utils/serde/Cargo.toml index 3c8d439f2..227c7bd16 100644 --- a/modules/utils/serde/Cargo.toml +++ b/modules/utils/serde/Cargo.toml @@ -1,13 +1,20 @@ [package] -name = "serde-utils" +name = "serde-hex-utils" version = "0.1.0" edition = "2021" +authors = ["Polytope Labs "] +license = "Apache-2.0" +repository = "https://github.com/polytope-labs/hyperbridge" +documentation = "https://docs.rs/serde-hex-utils" +description = "Collection of hexadecimal (De)serialization utilities for serde" +keywords = ["serde", "utilities"] +readme = "./README.md" [dependencies] # crates.io serde = { workspace = true, features = ["derive"] } hex = { version = "0.4.3", features = ["alloc"], default-features = false } -anyhow = {workspace = true, default-features = false} +anyhow = {workspace = true, default-features = false } [features] default = ["std"] diff --git a/modules/utils/serde/README.md b/modules/utils/serde/README.md new file mode 100644 index 000000000..93573a19e --- /dev/null +++ b/modules/utils/serde/README.md @@ -0,0 +1,3 @@ +# serde utilities + +Collection of useful (De)serialization utilities for serde \ No newline at end of file diff --git a/parachain/node/Cargo.toml b/parachain/node/Cargo.toml index 6d69492c3..6c35248f9 100644 --- a/parachain/node/Cargo.toml +++ b/parachain/node/Cargo.toml @@ -23,7 +23,7 @@ serde_json = "1.0.108" # local gargantua-runtime = { path = "../runtimes/gargantua" } -messier-runtime = { path = "../runtimes/messier" } +#messier-runtime = { path = "../runtimes/messier" } nexus-runtime = { path = "../runtimes/nexus" } pallet-ismp-rpc = { workspace = true } pallet-ismp-runtime-api = { workspace = true } @@ -106,12 +106,12 @@ default = [] runtime-benchmarks = [ "try-runtime-cli/try-runtime", "gargantua-runtime/runtime-benchmarks", - "messier-runtime/runtime-benchmarks", +# "messier-runtime/runtime-benchmarks", "nexus-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", ] try-runtime = [ "try-runtime-cli/try-runtime", "gargantua-runtime/try-runtime", - "messier-runtime/try-runtime" +# "messier-runtime/try-runtime" ] diff --git a/parachain/node/src/chain_spec.rs b/parachain/node/src/chain_spec.rs index 821442981..fcabd0118 100644 --- a/parachain/node/src/chain_spec.rs +++ b/parachain/node/src/chain_spec.rs @@ -127,56 +127,6 @@ pub fn gargantua_development_config(id: u32) -> ChainSpec { .build() } -pub fn messier_development_config(id: u32) -> ChainSpec { - // Give your base currency a unit name and decimal 
places - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("tokenSymbol".into(), "DEV".into()); - properties.insert("tokenDecimals".into(), 12.into()); - properties.insert("ss58Format".into(), 42.into()); - - ChainSpec::builder( - messier_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { - relay_chain: "rococo-local".into(), - // You MUST set this to the correct network! - para_id: id, - }, - ) - .with_name("messier-dev") - .with_id("messier") - .with_chain_type(ChainType::Development) - .with_genesis_config_patch(testnet_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - id, - )) - .build() -} - pub fn nexus_development_config(id: u32) -> ChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); diff --git a/parachain/node/src/command.rs b/parachain/node/src/command.rs index d3b331e4e..f44b3ec5c 100644 --- a/parachain/node/src/command.rs +++ b/parachain/node/src/command.rs @@ -40,9 +40,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { "dev" | "gargantua" => Box::new(chain_spec::ChainSpec::from_json_bytes( include_bytes!("../../chainspec/gargantua.paseo.json").to_vec(), )?), - "messier" => Box::new(chain_spec::ChainSpec::from_json_bytes( - include_bytes!("../../chainspec/messier.json").to_vec(), - )?), "" | "nexus" => Box::new(chain_spec::ChainSpec::from_json_bytes( include_bytes!("../../chainspec/nexus.json").to_vec(), )?), @@ -51,11 +48,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { let id = u32::from_str(id).expect("can't parse Id into u32"); Box::new(chain_spec::gargantua_development_config(id)) }, - name if name.starts_with("messier-") => { - let id = name.split('-').last().expect("dev chainspec should have chain id"); - let id = u32::from_str(id).expect("can't parse Id into u32"); - Box::new(chain_spec::messier_development_config(id)) - }, name if name.starts_with("nexus-") => { let id = name.split('-').last().expect("dev chainspec should have chain id"); let id = u32::from_str(id).expect("can't parse Id into u32"); @@ -157,13 +149,6 @@ macro_rules! 
construct_async_run { Ok::<_, sc_cli::Error>(( { $( $code )* }, $components.task_manager)) }) } - chain if chain.contains("messier") => { - runner.async_run(|$config| { - let executor = sc_service::new_wasm_executor::(&$config); - let $components = new_partial::(&$config, executor)?; - Ok::<_, sc_cli::Error>(( { $( $code )* }, $components.task_manager)) - }) - } chain if chain.contains("nexus") => { runner.async_run(|$config| { let executor = sc_service::new_wasm_executor::(&$config); @@ -249,12 +234,6 @@ pub fn run() -> Result<()> { cmd.run(components.client.clone()) }, - chain if chain.contains("messier") => { - let components = - new_partial::(&config, executor)?; - - cmd.run(components.client.clone()) - }, chain if chain.contains("nexus") => { let components = new_partial::(&config, executor)?; @@ -297,11 +276,6 @@ pub fn run() -> Result<()> { new_partial::(&config, executor)?; cmd.run(components.client) }, - chain if chain.contains("messier") => { - let components = - new_partial::(&config, executor)?; - cmd.run(components.client) - }, chain if chain.contains("nexus") => { let components = new_partial::(&config, executor)?; @@ -329,13 +303,6 @@ pub fn run() -> Result<()> { let storage = components.backend.expose_storage(); cmd.run(config, components.client.clone(), db, storage) }, - chain if chain.contains("messier") => { - let components = - new_partial::(&config, executor)?; - let db = components.backend.expose_db(); - let storage = components.backend.expose_storage(); - cmd.run(config, components.client.clone(), db, storage) - }, chain if chain.contains("nexus") => { let components = new_partial::(&config, executor)?; diff --git a/parachain/node/src/service.rs b/parachain/node/src/service.rs index f6b93da9f..065e75f69 100644 --- a/parachain/node/src/service.rs +++ b/parachain/node/src/service.rs @@ -513,15 +513,6 @@ pub async fn start_parachain_node( hwbench, ) .await, - chain if chain.contains("messier") => - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - para_id, - hwbench, - ) - .await, chain if chain.contains("nexus") => start_node_impl::( parachain_config, diff --git a/parachain/runtimes/gargantua/Cargo.toml b/parachain/runtimes/gargantua/Cargo.toml index c26f0b52a..eff0caec2 100644 --- a/parachain/runtimes/gargantua/Cargo.toml +++ b/parachain/runtimes/gargantua/Cargo.toml @@ -87,6 +87,7 @@ pallet-ismp-runtime-api = { workspace = true } ismp-sync-committee = { workspace = true } ismp-bsc = { workspace = true } ismp-parachain = { workspace = true } +ismp-grandpa = { workspace = true } ismp-parachain-runtime-api = { workspace = true } pallet-ismp-relayer = { workspace = true } pallet-ismp-host-executive = { workspace = true } @@ -162,6 +163,7 @@ std = [ "parachains-common/std", "sp-genesis-builder/std", "ismp-bsc/std", + "ismp-grandpa/std", "ismp-parachain/std", "ismp-parachain-runtime-api/std", "pallet-ismp-relayer/std", diff --git a/parachain/runtimes/gargantua/src/ismp.rs b/parachain/runtimes/gargantua/src/ismp.rs index d7af767b8..585117706 100644 --- a/parachain/runtimes/gargantua/src/ismp.rs +++ b/parachain/runtimes/gargantua/src/ismp.rs @@ -72,6 +72,7 @@ impl Get> for Coprocessor { Some(HostStateMachine::get()) } } + impl pallet_ismp::Config for Runtime { type RuntimeEvent = RuntimeEvent; type AdminOrigin = EnsureRoot; @@ -90,11 +91,17 @@ impl pallet_ismp::Config for Runtime { IsmpParachain, HyperbridgeClientMachine, >, + ismp_grandpa::consensus::GrandpaConsensusClient, ); type Mmr = Mmr; type WeightProvider = (); } +impl 
ismp_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type IsmpHost = Ismp; +} + impl pallet_token_governor::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Dispatcher = Ismp; diff --git a/parachain/runtimes/gargantua/src/lib.rs b/parachain/runtimes/gargantua/src/lib.rs index 826230826..91b1e5326 100644 --- a/parachain/runtimes/gargantua/src/lib.rs +++ b/parachain/runtimes/gargantua/src/lib.rs @@ -709,7 +709,9 @@ construct_runtime!( StateCoprocessor: pallet_state_coprocessor = 60, // Governance - TechnicalCollective: pallet_collective = 80 + TechnicalCollective: pallet_collective = 80, + // consensus clients + IsmpGrandpa: ismp_grandpa = 255 } ); diff --git a/parachain/runtimes/messier/src/lib.rs b/parachain/runtimes/messier/src/lib.rs index a8e9601d9..2872c2987 100644 --- a/parachain/runtimes/messier/src/lib.rs +++ b/parachain/runtimes/messier/src/lib.rs @@ -27,7 +27,6 @@ mod ismp; mod weights; pub mod xcm; -use alloc::sync::Arc; use cumulus_primitives_core::AggregateMessageOrigin; use frame_support::traits::TransformOrigin; use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; @@ -314,9 +313,9 @@ use pallet_collective::PrimeDefaultVote; use pallet_ismp::mmr::Leaf; #[cfg(feature = "runtime-benchmarks")] use pallet_treasury::ArgumentsFactory; -use sp_core::crypto::{AccountId32, FromEntropy}; +use sp_core::crypto::AccountId32; use sp_runtime::traits::IdentityLookup; -use staging_xcm::latest::{Junction, Junctions::X1, Location}; +use staging_xcm::latest::Location; #[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { @@ -586,11 +585,16 @@ pub struct TreasuryAssetFactory {} impl ArgumentsFactory for TreasuryAssetFactory where A: From, - B: FromEntropy, + B: sp_core::crypto::FromEntropy, { fn create_asset_kind(seed: u32) -> A { - Location { parents: 0, interior: X1(Arc::new([Junction::GeneralIndex(seed as u128)])) } - .into() + Location { + parents: 0, + interior: staging_xcm::latest::Junctions::X1(alloc::sync::Arc::new([ + staging_xcm::latest::Junction::GeneralIndex(seed as u128), + ])), + } + .into() } fn create_beneficiary(seed: [u8; 32]) -> B { @@ -604,8 +608,13 @@ where A: From, { fn create_asset_kind(seed: u32) -> A { - Location { parents: 0, interior: X1(Arc::new([Junction::GeneralIndex(seed as u128)])) } - .into() + Location { + parents: 0, + interior: staging_xcm::latest::Location::X1(alloc::sync::Arc::new([ + staging_xcm::latest::Junctions::GeneralIndex(seed as u128), + ])), + } + .into() } } diff --git a/scripts/release-crates.sh b/scripts/release-crates.sh index c4e981fec..62f0f816f 100755 --- a/scripts/release-crates.sh +++ b/scripts/release-crates.sh @@ -1,6 +1,7 @@ #!/bin/bash cargo release \ +-p serde-hex-utils \ -p ismp \ -p mmr-primitives \ -p pallet-hyperbridge \ @@ -9,6 +10,9 @@ cargo release \ -p pallet-ismp-rpc \ -p substrate-state-machine \ -p ismp-parachain \ +-p grandpa-verifier-primitives \ +-p grandpa-verifier \ +-p ismp-grandpa \ -p ismp-parachain-runtime-api \ -p ismp-parachain-inherent \ --execute diff --git a/tesseract/evm/Cargo.toml b/tesseract/evm/Cargo.toml index c76eb47f7..56186eb40 100644 --- a/tesseract/evm/Cargo.toml +++ b/tesseract/evm/Cargo.toml @@ -53,7 +53,7 @@ ismp-testsuite = { workspace = true, default-features = true } reconnecting-jsonrpsee-ws-client = { workspace = true, default-features = true } jsonrpsee = { version = "0.21", features = ["ws-client"]} pallet-ismp-host-executive = { workspace = true, 
default-features = true } -serde-utils = { workspace = true, default-features = false } +serde-hex-utils = { workspace = true, default-features = false } [dev-dependencies] alloy-rlp = { workspace = true, default-features = true } diff --git a/tesseract/evm/src/lib.rs b/tesseract/evm/src/lib.rs index 7a498a674..c57d723d0 100644 --- a/tesseract/evm/src/lib.rs +++ b/tesseract/evm/src/lib.rs @@ -60,7 +60,7 @@ pub struct EvmConfig { /// RPC urls for the execution client pub rpc_urls: Vec, /// State machine Identifier for this client on it's counterparties. - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub state_machine: StateMachine, /// Consensus state id for the consensus client on counterparty chain pub consensus_state_id: String, diff --git a/tesseract/substrate/Cargo.toml b/tesseract/substrate/Cargo.toml index 75d43cf57..857db2a4a 100644 --- a/tesseract/substrate/Cargo.toml +++ b/tesseract/substrate/Cargo.toml @@ -31,7 +31,7 @@ subxt-utils = { workspace = true, default-features = true } pallet-ismp-host-executive = { workspace = true, default-features = true } substrate-state-machine = { workspace = true, default-features = true } pallet-hyperbridge = { workspace = true, default-features = true } -serde-utils = { workspace = true, default-features = false } +serde-hex-utils = { workspace = true, default-features = false } pallet-state-coprocessor = { workspace = true, default-features = true } [features] diff --git a/tesseract/substrate/src/lib.rs b/tesseract/substrate/src/lib.rs index f34517e69..68d0c460e 100644 --- a/tesseract/substrate/src/lib.rs +++ b/tesseract/substrate/src/lib.rs @@ -49,7 +49,7 @@ mod testing; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SubstrateConfig { /// Hyperbridge network - #[serde(with = "serde_utils::as_string")] + #[serde(with = "serde_hex_utils::as_string")] pub state_machine: StateMachine, /// The hashing algorithm that substrate chain uses. pub hashing: Option,