From a3ca47e0f41275abeb8583d84e1ac0e4d6a3b9b2 Mon Sep 17 00:00:00 2001 From: JesseAbram <33698952+JesseAbram@users.noreply.github.com> Date: Tue, 13 Aug 2024 12:57:11 -0400 Subject: [PATCH 1/6] Delete old keyshare if not in next_signers (#999) * Delete old keyshare if not in next_signers * test * rename function * changelog * Apply suggestions from code review Co-authored-by: Hernando Castano * fix * Update CHANGELOG.md Co-authored-by: Hernando Castano --------- Co-authored-by: Hernando Castano --- CHANGELOG.md | 2 +- .../src/validator/api.rs | 29 +++++++++++++++++-- .../src/validator/tests.rs | 22 +++++++++++++- 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6997357af..76bbd0b2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,7 @@ At the moment this project **does not** adhere to - Reshare confirmation ([#965](https://github.com/entropyxyz/entropy-core/pull/965)) - Set inital signers ([#971](https://github.com/entropyxyz/entropy-core/pull/971)) - Add parent key threshold dynamically ([#974](https://github.com/entropyxyz/entropy-core/pull/974)) -- TSS attestation endpoint ([#1001](https://github.com/entropyxyz/entropy-core/pull/1001) +- TSS attestation endpoint ([#1001](https://github.com/entropyxyz/entropy-core/pull/1001)) ### Changed - Fix TSS `AccountId` keys in chainspec ([#993](https://github.com/entropyxyz/entropy-core/pull/993)) diff --git a/crates/threshold-signature-server/src/validator/api.rs b/crates/threshold-signature-server/src/validator/api.rs index 7632ed87e..de912a277 100644 --- a/crates/threshold-signature-server/src/validator/api.rs +++ b/crates/threshold-signature-server/src/validator/api.rs @@ -99,9 +99,12 @@ pub async fn new_reshare( ) .map_err(|e| ValidatorErr::VerifyingKeyError(e.to_string()))?; - let is_proper_signer = &validators_info - .iter() - .any(|validator_info| validator_info.tss_account == *signer.account_id()); + let is_proper_signer = is_signer_or_delete_parent_key( + signer.account_id(), + validators_info.clone(), + &app_state.kv_store, + ) + .await?; if !is_proper_signer { return Ok(StatusCode::MISDIRECTED_REQUEST); @@ -360,3 +363,23 @@ pub async fn prune_old_holders( validators_info.clone() }) } + +/// Checks if TSS is a proper signer and if isn't deletes their parent key if they have one +pub async fn is_signer_or_delete_parent_key( + account_id: &AccountId32, + validators_info: Vec, + kv_manager: &KvManager, +) -> Result { + let is_proper_signer = + validators_info.iter().any(|validator_info| validator_info.tss_account == *account_id); + if is_proper_signer { + Ok(true) + } else { + // delete old keyshare if has it and not next_signer + let network_key = hex::encode(NETWORK_PARENT_KEY); + if kv_manager.kv().exists(&network_key).await? { + kv_manager.kv().delete(&network_key).await? 
+ } + Ok(false) + } +} diff --git a/crates/threshold-signature-server/src/validator/tests.rs b/crates/threshold-signature-server/src/validator/tests.rs index 83ca9edaa..abfb2a030 100644 --- a/crates/threshold-signature-server/src/validator/tests.rs +++ b/crates/threshold-signature-server/src/validator/tests.rs @@ -23,7 +23,7 @@ use crate::{ }, }, validator::{ - api::{prune_old_holders, validate_new_reshare}, + api::{is_signer_or_delete_parent_key, prune_old_holders, validate_new_reshare}, errors::ValidatorErr, }, }; @@ -222,3 +222,23 @@ async fn test_forbidden_keys() { let should_pass = check_forbidden_key("test"); assert_eq!(should_pass.unwrap(), ()); } + +#[tokio::test] +#[serial] +async fn test_deletes_key() { + initialize_test_logger().await; + clean_tests(); + + let dave = AccountKeyring::Dave; + let kv = setup_client().await; + let reservation = kv.kv().reserve_key(hex::encode(NETWORK_PARENT_KEY)).await.unwrap(); + kv.kv().put(reservation, vec![10]).await.unwrap(); + + let is_proper_signer_result = + is_signer_or_delete_parent_key(&dave.to_account_id().into(), vec![], &kv).await.unwrap(); + assert!(!is_proper_signer_result); + + let has_key = kv.kv().exists(&hex::encode(NETWORK_PARENT_KEY)).await.unwrap(); + assert!(!has_key); + clean_tests(); +} From 2fc472f6796b5c4d0355ed12f98c0ca264a6efa4 Mon Sep 17 00:00:00 2001 From: Zach <120055804+cooldracula@users.noreply.github.com> Date: Wed, 14 Aug 2024 09:01:05 +1200 Subject: [PATCH 2/6] Migrate circle-ci workflow to github actions (#991) * Add analogous gh jobs for .circleci/then.yml jobs * Update .github/actions/install-dependencies/action.yaml Co-authored-by: Hernando Castano * Update .github/actions/install-dependencies/action.yaml Co-authored-by: Hernando Castano * Update .github/workflows/build-and-run-node-test.yaml Co-authored-by: Hernando Castano * Update .github/workflows/build-and-run-node-test.yaml Co-authored-by: Hernando Castano * remove timeout for cargo test step This was originally added as an analog to the circle-ci's "no_output_timeout", but they are not actually analogs. circle-ci's step measures how long to wait without any output, while gh's timeout is how long to run the step in general before timing out, regardless of output. This could cause the job to timeout when it was still successfully running, and still within github's usage limits. * Remove circle-ci jobs All relevant workflows have been ported to github actions. 
* Run build-test on xl runner + small readme change * Update .github/workflows/build-and-run-node-test.yaml Co-authored-by: Hernando Castano --------- Co-authored-by: Hernando Castano Co-authored-by: Hernando Castano --- .circleci/config.yml | 16 -- .circleci/then.yml | 192 ------------------ .../actions/install-dependencies/action.yaml | 20 ++ .../workflows/build-and-run-node-test.yaml | 39 ++++ .github/workflows/check-chainspecs.yaml | 17 ++ .github/workflows/check-doc-build.yaml | 14 ++ .../workflows/lint-and-check-licenses.yaml | 27 +++ .../workflows/test-runtime-benchmarks.yaml | 14 ++ crates/README.md | 2 +- 9 files changed, 132 insertions(+), 209 deletions(-) delete mode 100644 .circleci/config.yml delete mode 100644 .circleci/then.yml create mode 100644 .github/actions/install-dependencies/action.yaml create mode 100644 .github/workflows/build-and-run-node-test.yaml create mode 100644 .github/workflows/check-chainspecs.yaml create mode 100644 .github/workflows/check-doc-build.yaml create mode 100644 .github/workflows/lint-and-check-licenses.yaml create mode 100644 .github/workflows/test-runtime-benchmarks.yaml diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 529de50f2..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,16 +0,0 @@ -orbs: - path-filtering: circleci/path-filtering@0.1.3 -setup: true -version: 2.1 -workflows: - check: - jobs: - - path-filtering/filter: - base-revision: master - config-path: .circleci/then.yml - mapping: | - node/.* node true - crates/.* crates true - pallets/.* pallets true - runtime/.* runtime true - name: check-updated-files diff --git a/.circleci/then.yml b/.circleci/then.yml deleted file mode 100644 index 7d0fd1f4a..000000000 --- a/.circleci/then.yml +++ /dev/null @@ -1,192 +0,0 @@ -commands: - fmt-lint: - steps: - - run: - name: Format and lint. - command: | - curl -LsSf https://github.com/tamasfe/taplo/releases/download/0.8.0/taplo-full-linux-x86_64.gz | gunzip -N -d - > ${CARGO_HOME:-~/.cargo}/bin/taplo && chmod +x ${CARGO_HOME:-~/.cargo}/bin/taplo - cargo fmt --check - taplo fmt --check - cargo clippy -- -D warnings - check-licenses: - steps: - - run: - name: Check licenses (`cargo-deny`). - command: | - cargo install --locked cargo-deny - cargo deny --all-features check license - increase-swap: - steps: - - run: - name: Increase swap. - command: | - sudo swapoff -a - sudo dd if=/dev/zero of=/swapfile bs=1G count=8 - sudo chmod 0600 /swapfile - sudo mkswap /swapfile - sudo swapon /swapfile - grep Swap /proc/meminfo - install-dependencies: - steps: - - run: - name: Install dependencies. - command: | - sudo sed -i "/#\$nrconf{restart} = 'i';/s/.*/\$nrconf{restart} = 'a';/" /etc/needrestart/needrestart.conf - sudo apt-get update - sudo apt install -y libssl-dev clang libclang-dev tor && sudo systemctl start tor && sudo systemctl enable tor - sudo apt install -y yarn - install-rust: - steps: - - run: - name: Install Rust. 
- command: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - source "$HOME/.cargo/env" - rustup update stable - rustup target add wasm32-unknown-unknown - rustup component add rust-src - install-wasm-pack: - steps: - - run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - install-dependencies-and-checkout: - steps: - - install-dependencies - - checkout - - install-dependencies - - install-rust - - install-wasm-pack - new-cmd: - steps: - - run: echo test - build: - steps: - - run: cargo build --release - comment-on-pr: - steps: - - run: | - sudo apt-get install jq - pr_response=$(curl --location --request GET "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls?head=$CIRCLE_PROJECT_USERNAME:$CIRCLE_BRANCH&state=open" \ - -u $GH_USER:$GH_TOKEN) - if [ $(echo $pr_response | jq length) -eq 0 ]; then - echo "No PR found to update" - else - pr_comment_url=$(echo $pr_response | jq -r ".[]._links.comments.href") - fi - echo curl --location --request POST "$pr_comment_url" \ - -u $GH_USER:$GH_TOKEN \ - --header 'Content-Type: application/json' \ - --data-raw "'{ - \"body\": \"$(cat pr-comment)\" - }'" - -jobs: - threshold-signature-server: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - install-dependencies-and-checkout - - run: cd crates/threshold-signature-server && cargo test --release - fmt-lint-all: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - install-dependencies-and-checkout - - fmt-lint - - check-licenses - node-benchmark: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - install-dependencies-and-checkout - - run: pushd node && cargo check --features=runtime-benchmarks - node-test: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - increase-swap - - install-dependencies-and-checkout - - run: - name: Build entropy-protocol nodejs package. - command: | - cd crates/protocol - make build-nodejs-testing - cd nodejs-test - yarn - cd ../../.. 
- - run: - command: | - pushd node - cargo build --all-targets --release -j $(nproc) - cargo test --all-targets --release - yarn --cwd ../crates/protocol/nodejs-test test - cargo test -p entropy-tss --release --features=test_helpers -F wasm_test test_wasm - no_output_timeout: 45m - build-and-release: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - increase-swap - - install-dependencies-and-checkout - - build - - release - check-doc-build: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - install-dependencies-and-checkout - - run: cargo doc --no-deps - check-chainspecs: - machine: - image: ubuntu-2204:2022.10.2 - resource_class: xlarge - steps: - - install-dependencies-and-checkout - - run: - command: | - cargo run -p entropy -- build-spec --raw --chain dev > chainspec-dev-raw.json - cargo run -p entropy -- build-spec --raw --chain integration-tests > chainspec-integration-raw.json - cargo run -p entropy -- build-spec --raw --chain testnet > chainspec-testnet-raw.json - - -parameters: - crates: - default: false - type: boolean - node: - default: false - type: boolean - pallets: - default: false - type: boolean - runtime: - default: false - type: boolean -version: 2.1 -workflows: - lint: - jobs: - - fmt-lint-all - test: - jobs: - - node-benchmark - build: - jobs: - - node-test - when: - or: - - << pipeline.parameters.node >> - - << pipeline.parameters.pallets >> - - << pipeline.parameters.runtime >> - - pipeline.parameters.crates - chainspecs: - jobs: - - check-chainspecs - documentation: - jobs: - - check-doc-build diff --git a/.github/actions/install-dependencies/action.yaml b/.github/actions/install-dependencies/action.yaml new file mode 100644 index 000000000..d20c73922 --- /dev/null +++ b/.github/actions/install-dependencies/action.yaml @@ -0,0 +1,20 @@ +--- +name: 'Install Dependencies' +description: 'Install the dependencies and Rust components used across jobs' +runs: + using: "composite" + steps: + - name: Install dependencies + run: | + sudo sed -i "/#\$nrconf{restart} = 'i';/s/.*/\$nrconf{restart} = 'a';/" /etc/needrestart/needrestart.conf + sudo apt-get update + sudo apt install -y libssl-dev clang libclang-dev protobuf-compiler + shell: bash + - name: Add Rust components + run: | + rustup target add wasm32-unknown-unknown + rustup component add rust-src + shell: bash + - name: Install wasm-pack + run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + shell: bash diff --git a/.github/workflows/build-and-run-node-test.yaml b/.github/workflows/build-and-run-node-test.yaml new file mode 100644 index 000000000..0c28340f6 --- /dev/null +++ b/.github/workflows/build-and-run-node-test.yaml @@ -0,0 +1,39 @@ +--- +name: "Build and test" +on: + push: + paths: + - "node/**" + - "crates/**" + - "pallets/**" + - "runtime/**" + +jobs: + node-test: + runs-on: core-build-runner + steps: + - uses: actions/checkout@v4 + - name: Increase swap + run: | + sudo swapoff -a + sudo dd if=/dev/zero of=/swapfile bs=1G count=8 + sudo chmod 0600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + grep Swap /proc/meminfo + - name: Install dependencies + uses: ./.github/actions/install-dependencies/ + - name: Build entropy-protocol nodejs package + run: | + cd crates/protocol + make build-nodejs-testing + cd nodejs-test + yarn + cd ../../.. 
+ - name: Run `cargo build && cargo test` + run: | + pushd node + cargo build --all-targets --release -j $(nproc) + cargo test --all-targets --release + yarn --cwd ../crates/protocol/nodejs-test test + cargo test -p entropy-tss --release --features=test_helpers -F wasm_test test_wasm diff --git a/.github/workflows/check-chainspecs.yaml b/.github/workflows/check-chainspecs.yaml new file mode 100644 index 000000000..998a127d3 --- /dev/null +++ b/.github/workflows/check-chainspecs.yaml @@ -0,0 +1,17 @@ +--- +name: "Check chainspecs" +on: ["push"] + +jobs: + check-chainspecs: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/install-dependencies/ + - name: Check chainspecs + run: | + cargo run -p entropy -- build-spec --raw --chain dev > chainspec-dev-raw.json + cargo run -p entropy -- build-spec --raw --chain integration-tests > chainspec-integration-raw.json + cargo run -p entropy -- build-spec --raw --chain testnet > chainspec-testnet-raw.json diff --git a/.github/workflows/check-doc-build.yaml b/.github/workflows/check-doc-build.yaml new file mode 100644 index 000000000..9dd989e75 --- /dev/null +++ b/.github/workflows/check-doc-build.yaml @@ -0,0 +1,14 @@ +--- +name: "Check documentation build" +on: ["push"] + +jobs: + check-doc-build: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/install-dependencies/ + - name: Build documentation + run: cargo doc --no-deps \ No newline at end of file diff --git a/.github/workflows/lint-and-check-licenses.yaml b/.github/workflows/lint-and-check-licenses.yaml new file mode 100644 index 000000000..1fc0144a8 --- /dev/null +++ b/.github/workflows/lint-and-check-licenses.yaml @@ -0,0 +1,27 @@ +--- +name: "Lint and check licenses" +on: ["push"] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/install-dependencies/ + - name: Format and lint + run: | + curl -LsSf https://github.com/tamasfe/taplo/releases/download/0.8.0/taplo-full-linux-x86_64.gz | gunzip -N -d - > ${CARGO_HOME:-~/.cargo}/bin/taplo && chmod +x ${CARGO_HOME:-~/.cargo}/bin/taplo + cargo fmt --check + taplo fmt --check + cargo clippy -- -D warnings + check-licenses: + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Check licenses with cargo deny + run: | + cargo install --locked cargo-deny + cargo deny --all-features check license diff --git a/.github/workflows/test-runtime-benchmarks.yaml b/.github/workflows/test-runtime-benchmarks.yaml new file mode 100644 index 000000000..c3978a559 --- /dev/null +++ b/.github/workflows/test-runtime-benchmarks.yaml @@ -0,0 +1,14 @@ +name: "Test: check runtime benchmarks" +on: ["push"] + +jobs: + test-runtime-benchmarks: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/install-dependencies/ + - name: Check runtime-benchmarks + run: | + pushd node && cargo check --features=runtime-benchmarks diff --git a/crates/README.md b/crates/README.md index e483180d7..217f848f9 100644 --- a/crates/README.md +++ b/crates/README.md @@ -2,7 +2,7 @@ This directory contains the [Threshold Signature Server](https://github.com/entropyxyz/entropy-core/tree/master/crates/threshold-signature-server) and some utility crates related to it: -- 
[`shared`](https://github.com/entropyxyz/entropy-core/tree/master/crates/shared) - Common types shared by both the chain node and TSS server +- [`shared`](https://github.com/entropyxyz/entropy-core/tree/master/crates/shared) - Common types shared by the chain node and TSS server - [`kvdb`](https://github.com/entropyxyz/entropy-core/tree/master/crates/kvdb) - An encrypted key-value datastore - [`protocol`](https://github.com/entropyxyz/entropy-core/tree/master/crates/protocol) - Transport logic for running the Entropy protocols - [`testing-utils`](https://github.com/entropyxyz/entropy-core/tree/master/crates/testing-utils) - Testing utility methods shared across the workspace From 3c3fd2e1d50f2019f7951550ed7ab434316e2a2b Mon Sep 17 00:00:00 2001 From: JesseAbram <33698952+JesseAbram@users.noreply.github.com> Date: Wed, 14 Aug 2024 10:19:56 -0400 Subject: [PATCH 3/6] Refactor reshare (#994) * Refactor reshare * clean * changelog * increase timeout * Apply suggestions from code review Co-authored-by: Hernando Castano * clean * clean --------- Co-authored-by: Hernando Castano --- crates/protocol/src/execute_protocol.rs | 67 ++++++------- crates/protocol/tests/helpers/mod.rs | 32 +++--- .../src/signing_client/api.rs | 97 ++++++++++++------- .../src/user/tests.rs | 2 +- .../src/validator/api.rs | 79 +++------------ 5 files changed, 127 insertions(+), 150 deletions(-) diff --git a/crates/protocol/src/execute_protocol.rs b/crates/protocol/src/execute_protocol.rs index f6042d21e..c80af4574 100644 --- a/crates/protocol/src/execute_protocol.rs +++ b/crates/protocol/src/execute_protocol.rs @@ -73,7 +73,7 @@ pub async fn execute_protocol_generic( mut chans: Channels, session: Session, session_id_hash: [u8; 32], -) -> Result<(Res::Success, mpsc::Receiver), GenericProtocolError> { +) -> Result<(Res::Success, Channels), GenericProtocolError> { let session_id = synedrion::SessionId::from_seed(&session_id_hash); let tx = &chans.0; let rx = &mut chans.1; @@ -143,7 +143,7 @@ pub async fn execute_protocol_generic( } match session.finalize_round(&mut OsRng, accum)? 
{ - FinalizeOutcome::Success(res) => break Ok((res, chans.1)), + FinalizeOutcome::Success(res) => break Ok((res, chans)), FinalizeOutcome::AnotherRound { session: new_session, cached_messages: new_cached_messages, @@ -208,7 +208,6 @@ pub async fn execute_dkg( threshold: usize, ) -> Result { tracing::debug!("Executing DKG"); - let broadcaster = chans.0.clone(); let party_ids: BTreeSet = threshold_accounts.iter().cloned().map(PartyId::new).collect(); @@ -231,11 +230,10 @@ pub async fn execute_dkg( ) .map_err(ProtocolExecutionErr::SessionCreation)?; - let (init_keyshare, rx) = execute_protocol_generic(chans, session, session_id_hash).await?; + let (init_keyshare, chans) = + execute_protocol_generic(chans, session, session_id_hash).await?; tracing::info!("Finished key init protocol"); - // Setup channels for the next session - let chans = Channels(broadcaster.clone(), rx); // Send verifying key let verifying_key = init_keyshare.verifying_key(); @@ -259,6 +257,7 @@ pub async fn execute_dkg( } else { // Wait to receive verifying_key let mut rx = chans.1; + let broadcaster = chans.0; let message = rx.recv().await.ok_or_else(|| { ProtocolExecutionErr::IncomingStream("Waiting for validating key".to_string()) })?; @@ -302,15 +301,12 @@ pub async fn execute_dkg( inputs, ) .map_err(ProtocolExecutionErr::SessionCreation)?; - let (new_key_share_option, rx) = + let (new_key_share_option, chans) = execute_protocol_generic(chans, session, session_id_hash).await?; let new_key_share = new_key_share_option.ok_or(ProtocolExecutionErr::NoOutputFromReshareProtocol)?; tracing::info!("Finished reshare protocol"); - // Setup channels for the next session - let chans = Channels(broadcaster.clone(), rx); - // Now run the aux gen protocol to get AuxInfo let session_id_hash = session_id.blake2(Some(Subsession::AuxGen))?; let session = make_aux_gen_session( @@ -327,51 +323,56 @@ pub async fn execute_dkg( } /// Execute proactive refresh. 
+#[allow(clippy::type_complexity)] #[tracing::instrument( skip_all, fields(threshold_accounts, my_idx), level = tracing::Level::DEBUG )] -pub async fn execute_proactive_refresh( +pub async fn execute_reshare( session_id: SessionId, chans: Channels, threshold_pair: &sr25519::Pair, - threshold_accounts: Vec, - old_key: ThresholdKeyShare, -) -> Result, ProtocolExecutionErr> { + inputs: KeyResharingInputs, + aux_info_option: Option>, +) -> Result< + (ThresholdKeyShare, AuxInfo), + ProtocolExecutionErr, +> { tracing::debug!("Executing proactive refresh"); tracing::debug!("Signing with {:?}", &threshold_pair.public()); - let party_ids: BTreeSet = - threshold_accounts.iter().cloned().map(PartyId::new).collect(); let pair = PairWrapper(threshold_pair.clone()); - let verifying_key = old_key.verifying_key(); - - let threshold = old_key.threshold(); let session_id_hash = session_id.blake2(None)?; - let inputs = KeyResharingInputs { - old_holder: Some(OldHolder { key_share: old_key }), - new_holder: Some(NewHolder { - verifying_key, - old_threshold: party_ids.len(), - old_holders: party_ids.clone(), - }), - new_holders: party_ids.clone(), - new_threshold: threshold, - }; + let session = make_key_resharing_session( &mut OsRng, SynedrionSessionId::from_seed(session_id_hash.as_slice()), pair, - &party_ids, - inputs, + &inputs.new_holders, + inputs.clone(), ) .map_err(ProtocolExecutionErr::SessionCreation)?; - let new_key_share = execute_protocol_generic(chans, session, session_id_hash).await?.0; + let (new_key_share, chans) = execute_protocol_generic(chans, session, session_id_hash).await?; + let aux_info = if let Some(aux_info) = aux_info_option { + aux_info + } else { + // Now run an aux gen session + let session_id_hash_aux_data = session_id.blake2(Some(Subsession::AuxGen))?; + let session = make_aux_gen_session( + &mut OsRng, + SynedrionSessionId::from_seed(session_id_hash_aux_data.as_slice()), + PairWrapper(threshold_pair.clone()), + &inputs.new_holders, + ) + .map_err(ProtocolExecutionErr::SessionCreation)?; + + execute_protocol_generic(chans, session, session_id_hash_aux_data).await?.0 + }; - new_key_share.ok_or(ProtocolExecutionErr::NoOutputFromReshareProtocol) + Ok((new_key_share.ok_or(ProtocolExecutionErr::NoOutputFromReshareProtocol)?, aux_info)) } /// Psuedo-randomly select a subset of the parties of size `threshold` diff --git a/crates/protocol/tests/helpers/mod.rs b/crates/protocol/tests/helpers/mod.rs index 627d1c912..a1a0f60e9 100644 --- a/crates/protocol/tests/helpers/mod.rs +++ b/crates/protocol/tests/helpers/mod.rs @@ -16,9 +16,7 @@ //! 
A simple protocol server, like a mini version of entropy-tss, for benchmarking use anyhow::{anyhow, ensure}; use entropy_protocol::{ - execute_protocol::{ - execute_dkg, execute_proactive_refresh, execute_signing_protocol, Channels, - }, + execute_protocol::{execute_dkg, execute_reshare, execute_signing_protocol, Channels}, protocol_transport::{ errors::WsError, noise::{noise_handshake_initiator, noise_handshake_responder}, @@ -31,12 +29,13 @@ use entropy_shared::X25519PublicKey; use futures::future; use sp_core::{sr25519, Pair}; use std::{ + collections::BTreeSet, fmt, sync::{Arc, Mutex}, time::Duration, }; use subxt::utils::AccountId32; -use synedrion::{AuxInfo, KeyShare, ThresholdKeyShare}; +use synedrion::{AuxInfo, KeyResharingInputs, KeyShare, NewHolder, OldHolder, ThresholdKeyShare}; use tokio::{ net::{TcpListener, TcpStream}, time::timeout, @@ -131,15 +130,22 @@ pub async fn server( Ok(ProtocolOutput::Sign(RecoverableSignature { signature, recovery_id })) }, SessionId::Reshare { .. } => { - let new_keyshare = execute_proactive_refresh( - session_id, - channels, - &pair, - tss_accounts, - threshold_keyshare.unwrap(), - ) - .await?; - Ok(ProtocolOutput::Reshare(new_keyshare)) + let old_key = threshold_keyshare.unwrap(); + let party_ids: BTreeSet = + tss_accounts.iter().cloned().map(PartyId::new).collect(); + let inputs = KeyResharingInputs { + old_holder: Some(OldHolder { key_share: old_key.clone() }), + new_holder: Some(NewHolder { + verifying_key: old_key.verifying_key(), + old_threshold: party_ids.len(), + old_holders: party_ids.clone(), + }), + new_holders: party_ids.clone(), + new_threshold: old_key.threshold(), + }; + + let new_keyshare = execute_reshare(session_id, channels, &pair, inputs, None).await?; + Ok(ProtocolOutput::Reshare(new_keyshare.0)) }, SessionId::Dkg { .. } => { let keyshare_and_aux_info = diff --git a/crates/threshold-signature-server/src/signing_client/api.rs b/crates/threshold-signature-server/src/signing_client/api.rs index bbea6738f..973499eec 100644 --- a/crates/threshold-signature-server/src/signing_client/api.rs +++ b/crates/threshold-signature-server/src/signing_client/api.rs @@ -13,8 +13,6 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-use std::time::Duration; - use axum::{ body::Bytes, extract::{ @@ -26,10 +24,11 @@ use axum::{ }; use blake2::{Blake2s256, Digest}; use entropy_protocol::{ - execute_protocol::{execute_proactive_refresh, Channels}, - KeyParams, KeyShareWithAuxInfo, Listener, PartyId, SessionId, ValidatorInfo, + execute_protocol::{execute_reshare, Channels}, + KeyParams, Listener, PartyId, SessionId, ValidatorInfo, }; use parity_scale_codec::Encode; +use std::{collections::BTreeSet, time::Duration}; use entropy_kvdb::kv_manager::{ helpers::{deserialize, serialize as key_serialize}, @@ -45,7 +44,7 @@ use subxt::{ utils::{AccountId32 as SubxtAccountId32, Static}, OnlineClient, }; -use synedrion::{AuxInfo, ThresholdKeyShare}; +use synedrion::{AuxInfo, KeyResharingInputs, NewHolder, OldHolder, ThresholdKeyShare}; use tokio::time::timeout; use x25519_dalek::StaticSecret; @@ -96,13 +95,13 @@ pub async fn proactive_refresh( let exists_result = app_state.kv_store.kv().exists(&key).await?; if exists_result { let old_key_share = app_state.kv_store.kv().get(&key).await?; - let (deserialized_old_key, _aux_info): ( + let (deserialized_old_key, aux_info): ( ThresholdKeyShare, AuxInfo, ) = deserialize(&old_key_share) .ok_or_else(|| ProtocolErr::Deserialization("Failed to load KeyShare".into()))?; - let new_key_share = do_proactive_refresh( + let (new_key_share, aux_info) = do_proactive_refresh( &ocw_data.validators_info, &signer, &x25519_secret_key, @@ -110,19 +109,10 @@ pub async fn proactive_refresh( encoded_key, deserialized_old_key, ocw_data.block_number, + aux_info, ) .await?; - // Get aux info from existing entry - let aux_info = { - let existing_entry = app_state.kv_store.kv().get(&key).await?; - let (_old_key_share, aux_info): KeyShareWithAuxInfo = deserialize(&existing_entry) - .ok_or_else(|| { - ProtocolErr::Deserialization("Failed to load KeyShare".into()) - })?; - aux_info - }; - // Since this is a refresh with the parties not changing, store the old aux_info let serialized_key_share = key_serialize(&(new_key_share, aux_info)) .map_err(|_| ProtocolErr::KvSerialize("Kv Serialize Error".to_string()))?; @@ -152,6 +142,7 @@ async fn handle_socket_result(socket: WebSocket, app_state: AppState) { }; } +#[allow(clippy::type_complexity, clippy::too_many_arguments)] #[tracing::instrument( skip_all, fields(validators_info, verifying_key, my_subgroup), @@ -165,7 +156,8 @@ pub async fn do_proactive_refresh( verifying_key: Vec, old_key: ThresholdKeyShare, block_number: u32, -) -> Result, ProtocolErr> { + aux_info: AuxInfo, +) -> Result<(ThresholdKeyShare, AuxInfo), ProtocolErr> { tracing::debug!("Preparing to perform proactive refresh"); tracing::debug!("Signing with {:?}", &signer.signer().public()); @@ -189,31 +181,31 @@ pub async fn do_proactive_refresh( tss_accounts.push(tss_account); } - // subscribe to all other participating parties. Listener waits for other subscribers. - let (rx_ready, rx_from_others, listener) = - Listener::new(converted_validator_info.clone(), &account_id); - state - .listeners - .lock() - .map_err(|_| ProtocolErr::SessionError("Error getting lock".to_string()))? 
- .insert(session_id.clone(), listener); + let party_ids: BTreeSet = tss_accounts.iter().cloned().map(PartyId::new).collect(); - open_protocol_connections( - &converted_validator_info, - &session_id, - signer.signer(), + let inputs = KeyResharingInputs { + old_holder: Some(OldHolder { key_share: old_key.clone() }), + new_holder: Some(NewHolder { + verifying_key: old_key.verifying_key(), + old_threshold: party_ids.len(), + old_holders: party_ids.clone(), + }), + new_holders: party_ids.clone(), + new_threshold: old_key.threshold(), + }; + + let channels = get_channels( state, + converted_validator_info, + account_id, + &session_id, + signer, x25519_secret_key, ) .await?; - let channels = { - let ready = timeout(Duration::from_secs(SETUP_TIMEOUT_SECONDS), rx_ready).await?; - let broadcast_out = ready??; - Channels(broadcast_out, rx_from_others) - }; + let result = - execute_proactive_refresh(session_id, channels, signer.signer(), tss_accounts, old_key) - .await?; + execute_reshare(session_id, channels, signer.signer(), inputs, Some(aux_info)).await?; Ok(result) } @@ -273,3 +265,34 @@ pub async fn validate_proactive_refresh( kv_manager.kv().put(reservation, latest_block_number.to_be_bytes().to_vec()).await?; Ok(()) } + +pub async fn get_channels( + state: &ListenerState, + converted_validator_info: Vec, + account_id: SubxtAccountId32, + session_id: &SessionId, + signer: &PairSigner, + x25519_secret_key: &StaticSecret, +) -> Result { + // subscribe to all other participating parties. Listener waits for other subscribers. + let (rx_ready, rx_from_others, listener) = + Listener::new(converted_validator_info.clone(), &account_id); + state + .listeners + .lock() + .map_err(|_| ProtocolErr::SessionError("Error getting lock".to_string()))? + .insert(session_id.clone(), listener); + + open_protocol_connections( + &converted_validator_info, + session_id, + signer.signer(), + state, + x25519_secret_key, + ) + .await?; + + let ready = timeout(Duration::from_secs(SETUP_TIMEOUT_SECONDS), rx_ready).await?; + let broadcast_out = ready??; + Ok(Channels(broadcast_out, rx_from_others)) +} diff --git a/crates/threshold-signature-server/src/user/tests.rs b/crates/threshold-signature-server/src/user/tests.rs index 71c2fa434..731a0558b 100644 --- a/crates/threshold-signature-server/src/user/tests.rs +++ b/crates/threshold-signature-server/src/user/tests.rs @@ -633,7 +633,7 @@ async fn test_store_share() { let mut new_verifying_key = vec![]; // wait for registered event check that key exists in kvdb - for _ in 0..45 { + for _ in 0..65 { std::thread::sleep(std::time::Duration::from_millis(1000)); let block_hash = rpc.chain_get_block_hash(None).await.unwrap(); let events = EventsClient::new(api.clone()).at(block_hash.unwrap()).await.unwrap(); diff --git a/crates/threshold-signature-server/src/validator/api.rs b/crates/threshold-signature-server/src/validator/api.rs index de912a277..810996711 100644 --- a/crates/threshold-signature-server/src/validator/api.rs +++ b/crates/threshold-signature-server/src/validator/api.rs @@ -23,33 +23,27 @@ use crate::{ launch::{FORBIDDEN_KEYS, LATEST_BLOCK_NUMBER_RESHARE}, substrate::{get_stash_address, get_validators_info, query_chain, submit_transaction}, }, - signing_client::{protocol_transport::open_protocol_connections, ProtocolErr}, + signing_client::{api::get_channels, ProtocolErr}, validator::errors::ValidatorErr, AppState, }; use axum::{body::Bytes, extract::State, http::StatusCode}; use entropy_kvdb::kv_manager::{helpers::serialize as key_serialize, KvManager}; -use 
entropy_protocol::Subsession; pub use entropy_protocol::{ decode_verifying_key, errors::ProtocolExecutionErr, - execute_protocol::{execute_protocol_generic, Channels, PairWrapper}, + execute_protocol::{execute_protocol_generic, execute_reshare, Channels, PairWrapper}, KeyParams, KeyShareWithAuxInfo, Listener, PartyId, SessionId, ValidatorInfo, }; -use entropy_shared::{OcwMessageReshare, NETWORK_PARENT_KEY, SETUP_TIMEOUT_SECONDS}; +use entropy_shared::{OcwMessageReshare, NETWORK_PARENT_KEY}; use parity_scale_codec::{Decode, Encode}; -use rand_core::OsRng; use sp_core::Pair; -use std::{collections::BTreeSet, str::FromStr, time::Duration}; +use std::{collections::BTreeSet, str::FromStr}; use subxt::{ backend::legacy::LegacyRpcMethods, ext::sp_core::sr25519, tx::PairSigner, utils::AccountId32, OnlineClient, }; -use synedrion::{ - make_aux_gen_session, make_key_resharing_session, sessions::SessionId as SynedrionSessionId, - AuxInfo, KeyResharingInputs, NewHolder, OldHolder, -}; -use tokio::time::timeout; +use synedrion::{KeyResharingInputs, NewHolder, OldHolder}; /// HTTP POST endpoint called by the off-chain worker (propagation pallet) during network reshare. /// @@ -62,7 +56,7 @@ pub async fn new_reshare( encoded_data: Bytes, ) -> Result { let data = OcwMessageReshare::decode(&mut encoded_data.as_ref())?; - // TODO: validate message came from chain (check reshare block # against current block number) see #941 + let api = get_api(&app_state.configuration.endpoint).await?; let rpc = get_rpc(&app_state.configuration.endpoint).await?; validate_new_reshare(&api, &rpc, &data, &app_state.kv_store).await?; @@ -154,8 +148,6 @@ pub async fn new_reshare( let session_id = SessionId::Reshare { verifying_key, block_number: data.block_number }; let account_id = AccountId32(signer.signer().public().0); - let session_id_hash = session_id.blake2(Some(Subsession::Reshare))?; - let pair = PairWrapper(signer.signer().clone()); let mut converted_validator_info = vec![]; let mut tss_accounts = vec![]; @@ -169,63 +161,18 @@ pub async fn new_reshare( tss_accounts.push(validator_info.tss_account.clone()); } - let (rx_ready, rx_from_others, listener) = - Listener::new(converted_validator_info.clone(), &account_id); - app_state - .listener_state - .listeners - .lock() - .map_err(|_| ValidatorErr::SessionError("Error getting lock".to_string()))? 
- .insert(session_id.clone(), listener); - - open_protocol_connections( - &converted_validator_info, - &session_id, - signer.signer(), + let channels = get_channels( &app_state.listener_state, + converted_validator_info, + account_id, + &session_id, + &signer, &x25519_secret_key, ) .await?; - let (channels, broadcaster) = { - let ready = timeout(Duration::from_secs(SETUP_TIMEOUT_SECONDS), rx_ready).await?; - let broadcast_out = ready??; - (Channels(broadcast_out.clone(), rx_from_others), broadcast_out) - }; - - let session = make_key_resharing_session( - &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), - pair.clone(), - &party_ids, - inputs, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; - - let (new_key_share_option, rx) = execute_protocol_generic(channels, session, session_id_hash) - .await - .map_err(|_| ValidatorErr::ProtocolError("Error executing protocol".to_string()))?; - - let new_key_share = new_key_share_option.ok_or(ValidatorErr::NoOutputFromReshareProtocol)?; - - // Setup channels for the next session - let channels = Channels(broadcaster, rx); - - // Now run an aux gen session - let session_id_hash = session_id.blake2(Some(Subsession::AuxGen))?; - let session = make_aux_gen_session( - &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), - pair, - &party_ids, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; - - let aux_info: AuxInfo = - execute_protocol_generic(channels, session, session_id_hash) - .await - .map_err(|_| ValidatorErr::ProtocolError("Error executing protocol".to_string()))? - .0; + let (new_key_share, aux_info) = + execute_reshare(session_id.clone(), channels, signer.signer(), inputs, None).await?; let serialized_key_share = key_serialize(&(new_key_share, aux_info)) .map_err(|_| ProtocolErr::KvSerialize("Kv Serialize Error".to_string()))?; From 314f6be467a2d3fa5b58f7723d27e1554b6d32bb Mon Sep 17 00:00:00 2001 From: Hernando Castano Date: Wed, 14 Aug 2024 16:43:19 -0400 Subject: [PATCH 4/6] Add `network-jumpstart` command to `entropy-test-cli` (#1004) * Add `network-jumpstart` subcommand to `entropy-test-cli` This PR adds a way to trigger a network jumpstart from the test CLI. This is useful for ensuring the network is in the correct state before registering using the new registration flow. 
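A minimal sketch of how a caller might drive the new `jumpstart_network` client helper (the endpoint URL and the `//Alice` dev account are assumptions for illustration; the flow mirrors the `JumpstartNetwork` handling added to the test CLI below):

```rust
use entropy_client::client::{get_api, get_rpc, jumpstart_network};
use sp_core::{sr25519, Pair};

async fn trigger_jumpstart() -> anyhow::Result<()> {
    // Assumed local development endpoint; substitute the chain you are targeting.
    let endpoint = "ws://localhost:9944";
    let api = get_api(endpoint).await?;
    let rpc = get_rpc(endpoint).await?;

    // Any funded account may trigger the jumpstart; the dev account //Alice is used here.
    let signer = <sr25519::Pair as Pair>::from_string("//Alice", None)?;

    // Submits `registry.jump_start_network` and then waits (up to 45 seconds) for the
    // `FinishedNetworkJumpStart` event to appear in a finalized block.
    jumpstart_network(&api, &rpc, signer).await?;

    Ok(())
}
```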
* Add `CHANGELOG` entry * Add 45 second timeout when checking for jumpstarts --- CHANGELOG.md | 1 + crates/client/Cargo.toml | 2 +- crates/client/src/client.rs | 40 +++++++++++++++++++++++++++++++++++++ crates/client/src/errors.rs | 2 ++ crates/test-cli/src/lib.rs | 29 +++++++++++++++++++++++++-- 5 files changed, 71 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76bbd0b2a..6b1d5542b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ At the moment this project **does not** adhere to - Set inital signers ([#971](https://github.com/entropyxyz/entropy-core/pull/971)) - Add parent key threshold dynamically ([#974](https://github.com/entropyxyz/entropy-core/pull/974)) - TSS attestation endpoint ([#1001](https://github.com/entropyxyz/entropy-core/pull/1001)) +- Add `network-jumpstart` command to `entropy-test-cli` ([#1004](https://github.com/entropyxyz/entropy-core/pull/1004)) ### Changed - Fix TSS `AccountId` keys in chainspec ([#993](https://github.com/entropyxyz/entropy-core/pull/993)) diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 8f2755ca4..62fea585e 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -33,7 +33,7 @@ anyhow ="1.0.86" # Only for the browser js-sys={ version="0.3.70", optional=true } -tokio ="1.39" +tokio ={ version="1.39", features=["time"] } [dev-dependencies] serial_test ="3.1.1" diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 4fc87744c..c0559d8b7 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -360,3 +360,43 @@ pub async fn change_threshold_accounts( .ok_or(anyhow!("Error with transaction"))?; Ok(result_event) } + +/// Trigger a network wide distributed key generation (DKG) event. +/// +/// Fails if the network has already been jumpstarted. +pub async fn jumpstart_network( + api: &OnlineClient, + rpc: &LegacyRpcMethods, + signer: sr25519::Pair, +) -> Result<(), ClientError> { + // We split the implementation out into an inner function so that we can more easily pass a + // single future to the `timeout` + tokio::time::timeout(std::time::Duration::from_secs(45), jumpstart_inner(api, rpc, signer)) + .await + .map_err(|_| ClientError::JumpstartTimeout)? +} + +async fn jumpstart_inner( + api: &OnlineClient, + rpc: &LegacyRpcMethods, + signer: sr25519::Pair, +) -> Result<(), ClientError> { + // In this case we don't care too much about the result because we're more interested in the + // `FinishedNetworkJumpStart` event, which happens later on. + let jump_start_request = entropy::tx().registry().jump_start_network(); + let _result = + submit_transaction_with_pair(api, rpc, &signer, &jump_start_request, None).await?; + + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + + while let Some(block) = blocks_sub.next().await { + let block = block?; + let events = block.events().await?; + + if events.has::()? 
{ + break; + } + } + + Ok(()) +} diff --git a/crates/client/src/errors.rs b/crates/client/src/errors.rs index 86e1000e9..012bf2d87 100644 --- a/crates/client/src/errors.rs +++ b/crates/client/src/errors.rs @@ -66,6 +66,8 @@ pub enum ClientError { Subxt(#[from] subxt::Error), #[error("Timed out waiting for register confirmation")] RegistrationTimeout, + #[error("Timed out waiting for jumpstart confirmation")] + JumpstartTimeout, #[error("Cannot get subgroup: {0}")] SubgroupGet(#[from] SubgroupGetError), #[error("JSON: {0}")] diff --git a/crates/test-cli/src/lib.rs b/crates/test-cli/src/lib.rs index 2f36ee1de..6af8292da 100644 --- a/crates/test-cli/src/lib.rs +++ b/crates/test-cli/src/lib.rs @@ -26,7 +26,7 @@ use entropy_client::{ }, client::{ change_endpoint, change_threshold_accounts, get_accounts, get_api, get_programs, get_rpc, - register, sign, store_program, update_programs, VERIFYING_KEY_LENGTH, + jumpstart_network, register, sign, store_program, update_programs, VERIFYING_KEY_LENGTH, }, }; use sp_core::{sr25519, Hasher, Pair}; @@ -76,7 +76,7 @@ enum CliCommand { /// A name or mnemonic from which to derive a program modification keypair. /// This is used to send the register extrinsic so it must be funded /// If giving a name it must be preceded with "//", eg: "--mnemonic-option //Alice" - /// If giving a mnemonic it must be enclosed in quotes, eg: "--mnemonic-option "alarm mutual concert..."" + /// If giving a mnemonic it must be enclosed in quotes, eg: "--mnemonic-option "alarm mutual concert..."" #[arg(short, long)] mnemonic_option: Option, }, @@ -143,6 +143,17 @@ enum CliCommand { }, /// Display a list of registered Entropy accounts Status, + /// Triggers the network wide distributed key generation process. + /// + /// A fully jumpstarted network is required for the on-chain registration flow to work + /// correctly. + /// + /// Note: Any account may trigger the jumpstart process. + JumpstartNetwork { + /// The mnemonic for the signer which will trigger the jumpstart process. 
+ #[arg(short, long)] + mnemonic_option: Option, + }, } pub async fn run_command( @@ -392,6 +403,20 @@ pub async fn run_command( Ok("Threshold accounts changed".to_string()) }, + CliCommand::JumpstartNetwork { mnemonic_option } => { + let mnemonic = if let Some(mnemonic_option) = mnemonic_option { + mnemonic_option + } else { + passed_mnemonic.unwrap_or("//Alice".to_string()) + }; + + let signer = ::from_string(&mnemonic, None)?; + println!("Account being used for jumpstart: {}", signer.public()); + + jumpstart_network(&api, &rpc, signer).await?; + + Ok("Succesfully jumpstarted network.".to_string()) + }, } } From 13a365f79afc41ca1262a05f9ebe7125dae80020 Mon Sep 17 00:00:00 2001 From: Hernando Castano Date: Wed, 14 Aug 2024 17:58:09 -0400 Subject: [PATCH 5/6] Signing flow with derived accounts (#990) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Store `derivation_path` in `RegisteredInfo` * Bump metadata * Split registration flow based on existence of derivation path * Tiny up a few things related to jumpstart mocking * Small grammatical tidyings * Remove extra function introduced in merge * Fix a few typos * Add a few logging events * Allow querying both new and old signers using helper * Add working test for signature request with derived account * Fix Registry test compilation * Don't ignore old signing test * Fix Registry benchmark compilation * Handle BIP-32 errors instead of panicking * Store full BIP-32 derivation path on-chain * Add `CHANGELOG` entry * Sort signers from Staking Extension pallet when signing * Revert "Sort signers from Staking Extension pallet when signing" Turns out this breaks the signing flow for some reason 🤷 --- CHANGELOG.md | 1 + Cargo.lock | 3 + crates/client/entropy_metadata.scale | Bin 207236 -> 207273 bytes crates/client/src/client.rs | 6 +- crates/client/src/user.rs | 30 +++- crates/threshold-signature-server/Cargo.toml | 3 +- .../src/helpers/signing.rs | 6 +- .../src/helpers/substrate.rs | 4 +- .../src/signing_client/errors.rs | 2 + .../signing_client/protocol_execution/mod.rs | 29 +++- .../src/signing_client/protocol_transport.rs | 2 + .../src/user/api.rs | 51 ++++-- .../src/user/errors.rs | 2 + .../src/user/tests.rs | 160 ++++++++++++++++-- .../src/validator/tests.rs | 5 +- pallets/registry/src/benchmarking.rs | 2 + pallets/registry/src/lib.rs | 11 +- pallets/registry/src/tests.rs | 3 + 18 files changed, 271 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b1d5542b..08e2a0299 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ At the moment this project **does not** adhere to - Reshare confirmation ([#965](https://github.com/entropyxyz/entropy-core/pull/965)) - Set inital signers ([#971](https://github.com/entropyxyz/entropy-core/pull/971)) - Add parent key threshold dynamically ([#974](https://github.com/entropyxyz/entropy-core/pull/974)) +- Signing flow with derived accounts ([#990](https://github.com/entropyxyz/entropy-core/pull/990)) - TSS attestation endpoint ([#1001](https://github.com/entropyxyz/entropy-core/pull/1001)) - Add `network-jumpstart` command to `entropy-test-cli` ([#1004](https://github.com/entropyxyz/entropy-core/pull/1004)) diff --git a/Cargo.lock b/Cargo.lock index 60ff956e1..72db042ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -831,6 +831,8 @@ dependencies = [ "bs58 0.5.1", "hmac 0.12.1", "k256", + "once_cell", + "pbkdf2 0.12.2", "rand_core 0.6.4", "ripemd", "sha2 0.10.8", @@ -2754,6 +2756,7 @@ dependencies = [ "backoff", "base64 0.22.1", 
"bincode 1.3.3", + "bip32", "bip39", "blake2 0.10.6", "blake3", diff --git a/crates/client/entropy_metadata.scale b/crates/client/entropy_metadata.scale index e4a096a429421bbd780f43e25175f76a859c1cc9..7efb92da82182b3dafca8a27f2262a392da09eb6 100644 GIT binary patch delta 1404 zcmZWpU2IfE6rOWtc6(>LyDXM1{Y|&KF~J87YNBEkFE9QjjgJ~b4B>$U9(bTygf>m&0d#hk2biz7h_w=_Q79l_f?!c=QTZE??yGE6jJZ{>_Y-VKKv=i42=;|9UOuL3u zyer?io4&sT)p*^Yfep9?Zy4172}J8JgBV~;uv-RH6@Y`gnI1ESmx0}OZZ1MM!X4_q z3)S?(68wZqB_2iYLIvD+^ujg3Rb%eo`6k+R4<_(|LB0b}Uxtvg4B@Rz71V z)2i-Z>Cf~NE(kvd5t?yfW3XL9XTgcy3EN6$a>=-MBvRa({%~R66S^e9c4%6=)HX#V z>!4@d7#`XwMODdhAqJsCf)UuC%_t2F3}n+vunvs}#&&7kL6ys=Y(wKti(cr~Sfg)D zu+tjMbg`CT-6e20Wh6GzH8*aBUV7xlM%YK8GOX6K;>vItUeTfAxYNKsh|yUCo1ve+ zG;mvczZ4BG4_Zpa%yC1!1~C*vutOK{y4Ej#@J-3|%Y+fb9c-AS#Df(v3DJF-gtD}2 zJ*KVAHw#-FimElqC*}XMPZlKCrk^Y&R^`Dq(wmrr z4}$cgKo?!|p_}&lu?5ak){ooU&aaW5UoSsjlAm8Mv*gE7=iLps>W15tuEZ$b*JYP) zqHij(Nvs<1k>0n8mMgIV9yk@7vF|S2pty^_1-G3aTs#W;xc%+sU%?VxDC0Xdoqn#w z3R)`Tv#{!%Ff`=U1EZ6PSscb2;-S&-(w>Bhj}45g_>i)4`pAO}bQWWS6OSV<8Hg4m zdsRl|k~t}(B}6Q1*=8zfPx@rEwTRG78&^R&)mEInp^`@ucD0;(dr!i&j;KM)%;adB zhx>TFoGKiz8WECH&2-4a_d}R2diVl#P^z5wz)t7ua{dMIh}=b{kN?JFvYnps^P`@y z42kquZmpj=v9Q9F2@}K3(G0qVrFP`#H=wWzDlk}uzP?zz9; zFTZoXb8as>rWPHOUxQc$2c2JmS17m&FVey)R8r3^@RRE?$nqr#m3>C!MCc@AOOi4C z2H3KsKhHv?+3^?}5dNUyHF$(8vcSr&!vR>^&3VjQ>u?WnL(cv?Uq{#ef-G#AumPWd zXIo2&+8qhXqLeC8Wy+&!6x)$BD$qmUJ%J+tG$Qa>D&B;{^!F2}qMl9g((z3leYgpS zY)%D^&QzE&r+IG^?zo|3m(WE?fmQT_z*_Vu$_+v_6AHHT%e3goDp$zk;WK7qUeJb?Y&1!=U@1`V)|i12C;=*q>#c32LcNGfkf0u8Hr3sIHP;oU-0mo z!sGt%9`lC!zk6d>cdvbePTTND9#e9*r?-rb*s%eIDP+fb-{_up#rC%=R_MSOeQw8J z>82gaXvTqeeG_|#x&6dkftaI=6HmZZy5qz~xK2rzZWp+47^ZjQS7Cnl+^5>;11EpT1FDJMbn(l!dgUI}!xK;!>4A%{mG&!{$@)~oyYXaX zXdoI+NBv>*`(pmS(8Dooc9rt;2&43{ns=E?<@_@NV>D64ufRC%RPj^#bF!L$1QVu4 z_&LuNI8|uYc)5$iRkN|4cc9Ow9C0JzzOGou=$MfjA9&9&1_q5xI2KP!$5=M=6fjNm Phxujw8%;|C{{sF46rER( diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index c0559d8b7..476672fe7 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -131,7 +131,7 @@ pub async fn sign( auxilary_data: Option>, ) -> Result { let message_hash = Hasher::keccak(&message); - let validators_info = get_signers_from_chain(api, rpc).await?; + let validators_info = get_signers_from_chain(api, rpc, false).await?; tracing::debug!("Validators info {:?}", validators_info); let block_number = rpc.chain_get_header(None).await?.ok_or(ClientError::BlockNumber)?.number; let signature_request = UserSignatureRequest { @@ -295,9 +295,9 @@ pub async fn put_register_request_on_chain( rpc: &LegacyRpcMethods, signature_request_keypair: sr25519::Pair, deployer: SubxtAccountId32, - program_instance: BoundedVec, + program_instances: BoundedVec, ) -> Result<(), ClientError> { - let registering_tx = entropy::tx().registry().register(deployer, program_instance); + let registering_tx = entropy::tx().registry().register(deployer, program_instances); submit_transaction_with_pair(api, rpc, &signature_request_keypair, ®istering_tx, None) .await?; diff --git a/crates/client/src/user.rs b/crates/client/src/user.rs index 1c8542d08..679bfc2b3 100644 --- a/crates/client/src/user.rs +++ b/crates/client/src/user.rs @@ -36,27 +36,39 @@ pub struct UserSignatureRequest { pub block_number: BlockNumber, /// Hashing algorithm to be used for signing pub hash: HashingAlgorithm, - /// The veryfying key for the signature requested + /// The verifying key for the signature requested pub signature_verifying_key: Vec, } pub async fn get_signers_from_chain( api: &OnlineClient, rpc: 
&LegacyRpcMethods, + with_parent_key: bool, ) -> Result, SubgroupGetError> { - let all_validators_query = entropy::storage().session().validators(); - let mut validators = query_chain(api, rpc, all_validators_query, None) - .await? - .ok_or_else(|| SubgroupGetError::ChainFetch("Get all validators error"))?; - let block_hash = rpc.chain_get_block_hash(None).await?; - let mut handles = Vec::new(); + let mut validators = if with_parent_key { + let signer_query = entropy::storage().staking_extension().signers(); + query_chain(api, rpc, signer_query, None) + .await? + .ok_or_else(|| SubgroupGetError::ChainFetch("Get all validators error"))? + } else { + let all_validators_query = entropy::storage().session().validators(); + let mut validators = query_chain(api, rpc, all_validators_query, None) + .await? + .ok_or_else(|| SubgroupGetError::ChainFetch("Get all validators error"))?; + + validators.sort(); + validators + }; // TODO #898 For now we use a fix proportion of the number of validators as the threshold let threshold = (validators.len() as f32 * 0.75) as usize; + // TODO #899 For now we just take the first t validators as the ones to perform signing - validators.sort(); validators.truncate(threshold); + let block_hash = rpc.chain_get_block_hash(None).await?; + let mut handles = Vec::new(); + for validator in validators { let handle: tokio::task::JoinHandle> = tokio::task::spawn({ @@ -77,8 +89,10 @@ pub async fn get_signers_from_chain( }) } }); + handles.push(handle); } + let mut all_signers: Vec = vec![]; for handle in handles { all_signers.push(handle.await??); diff --git a/crates/threshold-signature-server/Cargo.toml b/crates/threshold-signature-server/Cargo.toml index 104d844f5..acb9ae0c7 100644 --- a/crates/threshold-signature-server/Cargo.toml +++ b/crates/threshold-signature-server/Cargo.toml @@ -57,8 +57,9 @@ uuid ={ version="1.10.0", features=["v4"] } # Misc tokio-tungstenite="0.23.1" -bip39 ={ version="2.0.0", features=["zeroize"] } bincode ="1.3.3" +bip32 ={ version="0.5.2" } +bip39 ={ version="2.0.0", features=["zeroize"] } bytes ={ version="1.7", default-features=false, features=["serde"] } base64 ="0.22.1" clap ={ version="4.5.15", features=["derive"] } diff --git a/crates/threshold-signature-server/src/helpers/signing.rs b/crates/threshold-signature-server/src/helpers/signing.rs index 29daf9ac5..ba8856d5c 100644 --- a/crates/threshold-signature-server/src/helpers/signing.rs +++ b/crates/threshold-signature-server/src/helpers/signing.rs @@ -45,6 +45,7 @@ pub async fn do_signing( app_state: &AppState, signing_session_info: SigningSessionInfo, request_limit: u32, + derivation_path: Option, ) -> Result { tracing::debug!("Preparing to perform signing"); @@ -60,8 +61,8 @@ pub async fn do_signing( let account_id = AccountId32(signer.public().0); - // set up context for signing protocol execution - let sign_context = signing_service.get_sign_context(info.clone()).await?; + // Set up context for signing protocol execution + let sign_context = signing_service.get_sign_context(info.clone(), derivation_path).await?; let tss_accounts: Vec = user_signature_request .validators_info @@ -89,6 +90,7 @@ pub async fn do_signing( &x25519_secret_key, ) .await?; + let channels = { let ready = timeout(Duration::from_secs(SETUP_TIMEOUT_SECONDS), rx_ready).await?; let broadcast_out = ready??; diff --git a/crates/threshold-signature-server/src/helpers/substrate.rs b/crates/threshold-signature-server/src/helpers/substrate.rs index 6f5c05499..0b836844b 100644 --- 
a/crates/threshold-signature-server/src/helpers/substrate.rs +++ b/crates/threshold-signature-server/src/helpers/substrate.rs @@ -75,10 +75,12 @@ pub async fn get_registered_details( let registered_result = query_chain(api, rpc, registered_info_query, None).await?; let registration_info = if let Some(old_registration_info) = registered_result { + tracing::debug!("Found user in old `Registered` struct."); + old_registration_info } else { // We failed with the old registration path, let's try the new one - tracing::warn!("Didn't find user in old `Registered` struct, trying new one"); + tracing::warn!("Didn't find user in old `Registered` struct, trying new one."); let registered_info_query = entropy::storage().registry().registered_on_chain(BoundedVec(verifying_key)); diff --git a/crates/threshold-signature-server/src/signing_client/errors.rs b/crates/threshold-signature-server/src/signing_client/errors.rs index 66155004d..abe5c9065 100644 --- a/crates/threshold-signature-server/src/signing_client/errors.rs +++ b/crates/threshold-signature-server/src/signing_client/errors.rs @@ -108,6 +108,8 @@ pub enum ProtocolErr { SubstrateClient(#[from] entropy_client::substrate::SubstrateError), #[error("Listener: {0}")] Listener(#[from] entropy_protocol::errors::ListenerErr), + #[error("Failed to derive BIP-32 account: {0}")] + Bip32DerivationError(#[from] bip32::Error), } impl IntoResponse for ProtocolErr { diff --git a/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs b/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs index abc0bec69..780a21134 100644 --- a/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs +++ b/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs @@ -63,18 +63,34 @@ impl<'a> ThresholdSigningService<'a> { fields(sign_init), level = tracing::Level::DEBUG )] - pub async fn get_sign_context(&self, sign_init: SignInit) -> Result { + pub async fn get_sign_context( + &self, + sign_init: SignInit, + derivation_path: Option, + ) -> Result { tracing::debug!("Getting signing context"); - let key_share_and_aux_info_vec = self - .kv_manager - .kv() - .get(&hex::encode(sign_init.signing_session_info.signature_verifying_key.clone())) - .await?; + + let verifying_key = if derivation_path.is_some() { + entropy_shared::NETWORK_PARENT_KEY.as_bytes().to_vec() + } else { + sign_init.signing_session_info.signature_verifying_key.clone() + }; + + let key_share_and_aux_info_vec = + self.kv_manager.kv().get(&hex::encode(verifying_key)).await?; + let (key_share, aux_info): ( ThresholdKeyShare, AuxInfo, ) = entropy_kvdb::kv_manager::helpers::deserialize(&key_share_and_aux_info_vec) .ok_or_else(|| ProtocolErr::Deserialization("Failed to load KeyShare".into()))?; + + let key_share = if let Some(path) = derivation_path { + key_share.derive_bip32(&path)? 
+ } else { + key_share + }; + Ok(SignContext::new(sign_init, key_share, aux_info)) } @@ -92,6 +108,7 @@ impl<'a> ThresholdSigningService<'a> { threshold_signer: &sr25519::Pair, threshold_accounts: Vec, ) -> Result { + tracing::debug!("Executing signing session"); tracing::trace!("Signing info {session_id:?}"); let message_hash = if let SessionId::Sign(session_info) = &session_id { diff --git a/crates/threshold-signature-server/src/signing_client/protocol_transport.rs b/crates/threshold-signature-server/src/signing_client/protocol_transport.rs index a6a047db5..635d8814d 100644 --- a/crates/threshold-signature-server/src/signing_client/protocol_transport.rs +++ b/crates/threshold-signature-server/src/signing_client/protocol_transport.rs @@ -50,6 +50,8 @@ pub async fn open_protocol_connections( state: &ListenerState, x25519_secret_key: &x25519_dalek::StaticSecret, ) -> Result<(), ProtocolErr> { + tracing::debug!("Opening protocol connections"); + let connect_to_validators = validators_info .iter() .filter(|validators_info| { diff --git a/crates/threshold-signature-server/src/user/api.rs b/crates/threshold-signature-server/src/user/api.rs index 6e5b4bda7..9fb374ce8 100644 --- a/crates/threshold-signature-server/src/user/api.rs +++ b/crates/threshold-signature-server/src/user/api.rs @@ -145,6 +145,7 @@ pub async fn sign_tx( .number; check_stale(user_sig_req.block_number, block_number).await?; + // Probably impossible but block signing from parent key anyways if string_verifying_key == hex::encode(NETWORK_PARENT_KEY) { return Err(UserErr::NoSigningFromParentKey); @@ -159,7 +160,9 @@ pub async fn sign_tx( if user_details.programs_data.0.is_empty() { return Err(UserErr::NoProgramPointerDefined()); } - // handle aux data padding, if it is not explicit by client for ease send through None, error if incorrect length + + // Handle aux data padding, if it is not explicit by client for ease send through None, error + // if incorrect length let auxilary_data_vec; if let Some(auxilary_data) = user_sig_req.clone().auxilary_data { if auxilary_data.len() < user_details.programs_data.0.len() { @@ -170,6 +173,7 @@ pub async fn sign_tx( } else { auxilary_data_vec = vec![None; user_details.programs_data.0.len()]; } + // gets fuel from chain let max_instructions_per_programs_query = entropy::storage().parameters().max_instructions_per_programs(); @@ -186,7 +190,9 @@ pub async fn sign_tx( runtime.evaluate(&program, &signature_request, Some(&program_info.program_config), None)?; } - let signers = get_signers_from_chain(&api, &rpc).await?; + let with_parent_key = user_details.derivation_path.is_some(); + let signers = get_signers_from_chain(&api, &rpc, with_parent_key).await?; + // Use the validator info from chain as we can be sure it is in the correct order and the // details are correct user_sig_req.validators_info = signers; @@ -207,22 +213,41 @@ pub async fn sign_tx( request_author, }; - let _has_key = check_for_key(&string_verifying_key, &app_state.kv_store).await?; + // In the new registration flow we don't store the verifying key in the KVDB, so we only do this + // check if we're using the old registration flow + if user_details.derivation_path.is_none() { + let _has_key = check_for_key(&string_verifying_key, &app_state.kv_store).await?; + } + + let derivation_path = if let Some(path) = user_details.derivation_path { + let decoded_path = String::decode(&mut path.as_ref())?; + let path = bip32::DerivationPath::from_str(&decoded_path)?; + + Some(path) + } else { + None + }; let (mut response_tx, response_rx) 
= mpsc::channel(1); // Do the signing protocol in another task, so we can already respond tokio::spawn(async move { - let signing_protocol_output = - do_signing(&rpc, user_sig_req, &app_state, signing_session_id, request_limit) - .await - .map(|signature| { - ( - BASE64_STANDARD.encode(signature.to_rsv_bytes()), - signer.signer().sign(&signature.to_rsv_bytes()), - ) - }) - .map_err(|error| error.to_string()); + let signing_protocol_output = do_signing( + &rpc, + user_sig_req, + &app_state, + signing_session_id, + request_limit, + derivation_path, + ) + .await + .map(|signature| { + ( + BASE64_STANDARD.encode(signature.to_rsv_bytes()), + signer.signer().sign(&signature.to_rsv_bytes()), + ) + }) + .map_err(|error| error.to_string()); // This response chunk is sent later with the result of the signing protocol if response_tx.try_send(serde_json::to_string(&signing_protocol_output)).is_err() { diff --git a/crates/threshold-signature-server/src/user/errors.rs b/crates/threshold-signature-server/src/user/errors.rs index e6eeb8203..61c43fe24 100644 --- a/crates/threshold-signature-server/src/user/errors.rs +++ b/crates/threshold-signature-server/src/user/errors.rs @@ -167,6 +167,8 @@ pub enum UserErr { SubgroupGet(#[from] entropy_client::user::SubgroupGetError), #[error("Unknown hashing algorthim - user is using a newer version than us")] UnknownHashingAlgorithm, + #[error("Failed to derive BIP-32 account: {0}")] + Bip32DerivationError(#[from] bip32::Error), } impl From for UserErr { diff --git a/crates/threshold-signature-server/src/user/tests.rs b/crates/threshold-signature-server/src/user/tests.rs index 731a0558b..f564932b6 100644 --- a/crates/threshold-signature-server/src/user/tests.rs +++ b/crates/threshold-signature-server/src/user/tests.rs @@ -192,8 +192,10 @@ async fn test_sign_tx_no_chain() { request_author: signature_request_account.clone(), }); + let with_parent_key = false; let (validators_info, mut generic_msg, validator_ips_and_keys) = - get_sign_tx_data(&entropy_api, &rpc, hex::encode(PREIMAGE_SHOULD_SUCCEED)).await; + get_sign_tx_data(&entropy_api, &rpc, hex::encode(PREIMAGE_SHOULD_SUCCEED), with_parent_key) + .await; generic_msg.block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number; // test points to no program @@ -362,6 +364,134 @@ async fn test_sign_tx_no_chain() { clean_tests(); } +#[tokio::test] +#[serial] +async fn signature_request_with_derived_account_works() { + initialize_test_logger().await; + clean_tests(); + + let alice = AccountKeyring::Alice; + let bob = AccountKeyring::Bob; + let charlie = AccountKeyring::Charlie; + + let add_parent_key_to_kvdb = true; + let (_validator_ips, _validator_ids) = spawn_testing_validators(add_parent_key_to_kvdb).await; + + // Here we need to use `--chain=integration-tests` and force authoring otherwise we won't be + // able to get our chain in the right state to be jump started. 
+ let force_authoring = true;
+ let substrate_context = test_node_process_testing_state(force_authoring).await;
+ let entropy_api = get_api(&substrate_context.ws_url).await.unwrap();
+ let rpc = get_rpc(&substrate_context.ws_url).await.unwrap();
+
+ // We first need to jump start the network and grab the resulting network wide verifying key
+ // for later
+ jump_start_network(&entropy_api, &rpc).await;
+
+ let jump_start_progress_query = entropy::storage().staking_extension().jump_start_progress();
+ let jump_start_progress =
+ query_chain(&entropy_api, &rpc, jump_start_progress_query, None).await.unwrap().unwrap();
+
+ let network_verifying_key = jump_start_progress.verifying_key.unwrap().0;
+
+ // We need to store a program in order to be able to register successfully
+ let program_hash = store_program(
+ &entropy_api,
+ &rpc,
+ &bob.pair(), // This is our program deployer
+ TEST_PROGRAM_WASM_BYTECODE.to_owned(),
+ vec![],
+ vec![],
+ vec![],
+ )
+ .await
+ .unwrap();
+
+ let registration_request = put_new_register_request_on_chain(
+ &entropy_api,
+ &rpc,
+ &alice, // This is our signature request account
+ charlie.to_account_id().into(), // This is our program modification account
+ BoundedVec(vec![ProgramInstance { program_pointer: program_hash, program_config: vec![] }]),
+ )
+ .await;
+
+ assert!(
+ matches!(registration_request, Ok(_)),
+ "We expect our registration request to succeed."
+ );
+
+ let entropy::registry::events::AccountRegistered(
+ _actual_signature_request_account,
+ actual_verifying_key,
+ ) = registration_request.unwrap();
+
+ // This is slightly more convenient to work with later on
+ let actual_verifying_key = actual_verifying_key.0;
+
+ // Next we want to check that the info that's on-chain is what we actually expect
+ let registered_info = crate::helpers::substrate::get_registered_details(
+ &entropy_api,
+ &rpc,
+ actual_verifying_key.to_vec(),
+ )
+ .await;
+
+ assert!(
+ matches!(registered_info, Ok(_)),
+ "We expect that the verifying key we got back matches registration entry in storage."
+ );
+
+ assert_eq!(
+ registered_info.unwrap().program_modification_account,
+ charlie.to_account_id().into()
+ );
+
+ // Next, let's check that the child verifying key matches
+ let network_verifying_key =
+ SynedrionVerifyingKey::try_from(network_verifying_key.as_slice()).unwrap();
+
+ // We hardcode the derivation path here since we know that there's only been one registration
+ // request (ours).
+ let derivation_path = "m/0/0".parse().unwrap();
+ let expected_verifying_key =
+ network_verifying_key.derive_verifying_key_bip32(&derivation_path).unwrap();
+ let expected_verifying_key = expected_verifying_key.to_encoded_point(true).as_bytes().to_vec();
+
+ assert_eq!(
+ expected_verifying_key, actual_verifying_key,
+ "The derived child key doesn't match our registered verifying key."
+ ); + + // Now that we've set up and registered a user, we can proceed with testing the signing flow + + let with_parent_key = true; + let (validators_info, mut signature_request, validator_ips_and_keys) = + get_sign_tx_data(&entropy_api, &rpc, hex::encode(PREIMAGE_SHOULD_SUCCEED), with_parent_key) + .await; + + // We'll use the actual verifying key we registered for the signature request + signature_request.signature_verifying_key = actual_verifying_key.to_vec(); + signature_request.block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number; + + let signature_request_responses = submit_transaction_requests( + validator_ips_and_keys.clone(), + signature_request.clone(), + alice, + ) + .await; + + // We expect that the signature we get back is valid + let message_hash = Hasher::keccak(PREIMAGE_SHOULD_SUCCEED); + let verifying_key = + SynedrionVerifyingKey::try_from(signature_request.signature_verifying_key.as_slice()) + .unwrap(); + verify_signature(signature_request_responses, message_hash, &verifying_key, &validators_info) + .await; + + clean_tests(); +} + #[tokio::test] #[serial] async fn test_sign_tx_no_chain_fail() { @@ -376,8 +506,10 @@ async fn test_sign_tx_no_chain_fail() { let rpc = get_rpc(&substrate_context.node_proc.ws_url).await.unwrap(); let mock_client = reqwest::Client::new(); + let with_parent_key = false; let (validators_info, mut generic_msg, validator_ips_and_keys) = - get_sign_tx_data(&entropy_api, &rpc, hex::encode(PREIMAGE_SHOULD_SUCCEED)).await; + get_sign_tx_data(&entropy_api, &rpc, hex::encode(PREIMAGE_SHOULD_SUCCEED), with_parent_key) + .await; // fails verification tests // wrong key for wrong validator @@ -515,8 +647,10 @@ async fn test_program_with_config() { let message = "0xef01808094772b9a9e8aa1c9db861c6611a82d251db4fac990019243726561746564204f6e20456e74726f7079018080"; let message_hash = Hasher::keccak(message.as_bytes()); + + let with_parent_key = false; let (validators_info, mut generic_msg, validator_ips_and_keys) = - get_sign_tx_data(&entropy_api, &rpc, hex::encode(message)).await; + get_sign_tx_data(&entropy_api, &rpc, hex::encode(message), with_parent_key).await; let config = r#" { @@ -819,13 +953,13 @@ pub async fn put_register_request_on_chain( rpc: &LegacyRpcMethods, sig_req_keyring: &Sr25519Keyring, program_modification_account: subxtAccountId32, - program_instance: BoundedVec, + program_instances: BoundedVec, ) { let sig_req_account = PairSigner::::new(sig_req_keyring.pair()); let registering_tx = - entropy::tx().registry().register(program_modification_account, program_instance); + entropy::tx().registry().register(program_modification_account, program_instances); submit_transaction(api, rpc, &sig_req_account, ®istering_tx, None).await.unwrap(); } @@ -835,14 +969,14 @@ pub async fn put_new_register_request_on_chain( rpc: &LegacyRpcMethods, signature_request_account: &Sr25519Keyring, program_modification_account: subxtAccountId32, - program_instance: BoundedVec, + program_instances: BoundedVec, ) -> Result { let signature_request_account = PairSigner::::new(signature_request_account.pair()); let registering_tx = - entropy::tx().registry().register_on_chain(program_modification_account, program_instance); + entropy::tx().registry().register_on_chain(program_modification_account, program_instances); let events = submit_transaction(api, rpc, &signature_request_account, ®istering_tx, None).await?; @@ -1097,7 +1231,9 @@ async fn test_device_key_proxy() { .await .unwrap(); - let validators_info = get_signers_from_chain(&entropy_api, 
&rpc).await.unwrap(); + let with_parent_key = false; + let validators_info = + get_signers_from_chain(&entropy_api, &rpc, with_parent_key).await.unwrap(); let context = signing_context(b""); let sr25519_signature: Sr25519Signature = keypair.sign(context.bytes(PREIMAGE_SHOULD_SUCCEED)); @@ -1337,8 +1473,8 @@ async fn test_new_registration_flow() { let add_parent_key_to_kvdb = true; let (_validator_ips, _validator_ids) = spawn_testing_validators(add_parent_key_to_kvdb).await; - // Here we need to use `--chain=integration-tests` force authoring otherwise we won't be able - // to get our chain in the right state to be jump started. + // Here we need to use `--chain=integration-tests` and force authoring otherwise we won't be + // able to get our chain in the right state to be jump started. let force_authoring = true; let substrate_context = test_node_process_testing_state(force_authoring).await; let entropy_api = get_api(&substrate_context.ws_url).await.unwrap(); @@ -1580,8 +1716,10 @@ pub async fn get_sign_tx_data( api: &OnlineClient, rpc: &LegacyRpcMethods, message: String, + with_parent_key: bool, ) -> (Vec, UserSignatureRequest, Vec<(String, [u8; 32])>) { - let validators_info = get_signers_from_chain(api, rpc).await.unwrap(); + let validators_info = get_signers_from_chain(api, rpc, with_parent_key).await.unwrap(); + let generic_msg = UserSignatureRequest { message, auxilary_data: Some(vec![ diff --git a/crates/threshold-signature-server/src/validator/tests.rs b/crates/threshold-signature-server/src/validator/tests.rs index abfb2a030..187c46206 100644 --- a/crates/threshold-signature-server/src/validator/tests.rs +++ b/crates/threshold-signature-server/src/validator/tests.rs @@ -54,7 +54,10 @@ async fn test_reshare() { let alice = AccountKeyring::AliceStash; let cxt = test_node_process_testing_state(true).await; - let (_validator_ips, _validator_ids) = spawn_testing_validators(true).await; + + let add_parent_key_to_kvdb = true; + let (_validator_ips, _validator_ids) = spawn_testing_validators(add_parent_key_to_kvdb).await; + let validator_ports = vec![3001, 3002, 3003]; let api = get_api(&cxt.ws_url).await.unwrap(); let rpc = get_rpc(&cxt.ws_url).await.unwrap(); diff --git a/pallets/registry/src/benchmarking.rs b/pallets/registry/src/benchmarking.rs index f7312229f..9aa057a84 100644 --- a/pallets/registry/src/benchmarking.rs +++ b/pallets/registry/src/benchmarking.rs @@ -311,6 +311,7 @@ benchmarks! { RegisteredInfo { program_modification_account: sig_req_account.clone(), programs_data: programs_info, + derivation_path: None, version_number: T::KeyVersionNumber::get() }, ); @@ -345,6 +346,7 @@ benchmarks! { RegisteredInfo { program_modification_account: sig_req_account.clone(), programs_data: programs_info, + derivation_path: None, version_number: T::KeyVersionNumber::get() }, ); diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index 31640dd95..35c7b5075 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -119,6 +119,8 @@ pub mod pallet { pub struct RegisteredInfo { pub programs_data: BoundedVec, T::MaxProgramHashes>, pub program_modification_account: T::AccountId, + /// The SCALE encoded BIP-32 `DerivationPath` used to register this account. 
+ pub derivation_path: Option>, pub version_number: u8, } @@ -140,6 +142,7 @@ pub mod pallet { RegisteredInfo { programs_data: BoundedVec::default(), program_modification_account: account_id.clone(), + derivation_path: None, version_number: T::KeyVersionNumber::get(), }, ); @@ -656,6 +659,7 @@ pub mod pallet { RegisteredInfo { programs_data: registering_info.programs_data, program_modification_account: registering_info.program_modification_account, + derivation_path: None, version_number: registering_info.version_number, }, ); @@ -751,9 +755,9 @@ pub mod pallet { // For a V1 of this flow it's fine, but we'll need to think about a better solution // down the line. let count = RegisteredOnChain::::count(); - let path = - bip32::DerivationPath::from_str(&scale_info::prelude::format!("m/0/{}", count)) - .map_err(|_| Error::::InvalidBip32DerivationPath)?; + let inner_path = scale_info::prelude::format!("m/0/{}", count); + let path = bip32::DerivationPath::from_str(&inner_path) + .map_err(|_| Error::::InvalidBip32DerivationPath)?; let child_verifying_key = network_verifying_key .derive_verifying_key_bip32(&path) .map_err(|_| Error::::Bip32AccountDerivationFailed)?; @@ -768,6 +772,7 @@ pub mod pallet { RegisteredInfo { programs_data, program_modification_account: program_modification_account.clone(), + derivation_path: Some(inner_path.encode()), version_number: T::KeyVersionNumber::get(), }, ); diff --git a/pallets/registry/src/tests.rs b/pallets/registry/src/tests.rs index 6313cfc3d..1483ca685 100644 --- a/pallets/registry/src/tests.rs +++ b/pallets/registry/src/tests.rs @@ -513,6 +513,7 @@ fn it_confirms_registers_a_user() { RegisteredInfo { programs_data: programs_info.clone(), program_modification_account: 2, + derivation_path: None, version_number: 1, } ); @@ -572,6 +573,7 @@ fn it_changes_a_program_pointer() { let mut registered_info = RegisteredInfo { programs_data: programs_info, program_modification_account: 2, + derivation_path: None, version_number: 1, }; @@ -652,6 +654,7 @@ fn it_changes_a_program_mod_account() { let mut registered_info = RegisteredInfo { programs_data: programs_info, program_modification_account: 2, + derivation_path: None, version_number: 1, }; From aa4c3d90137af3583e3de3f3ac95a50c68d21ef6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 08:07:14 +0000 Subject: [PATCH 6/6] Bump serde_json from 1.0.124 to 1.0.125 in the patch-dependencies group (#1007) Bumps the patch-dependencies group with 1 update: [serde_json](https://github.com/serde-rs/json). Updates `serde_json` from 1.0.124 to 1.0.125 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.124...1.0.125) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: patch-dependencies ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- node/cli/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72db042ca..df486da62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11461,9 +11461,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ "itoa", "memchr", diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index e0a7a9045..9eddcab47 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -30,7 +30,7 @@ log ="0.4.22" pallet-im-online={ version="28.0.0" } rand ="0.8.5" serde ={ version="1.0.207", features=["derive"] } -serde_json ='1.0.124' +serde_json ='1.0.125' # Substrate Client