From a34cc8dff09dc3840a304befc2415343244b5fd0 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 12 Sep 2024 12:37:07 +0300 Subject: [PATCH 01/12] [4 / 5] Make approval-voting runnable on a worker thread (#4846) This is part of the work to further optimize the approval subsystems, if you want to understand the full context start with reading https://github.com/paritytech/polkadot-sdk/pull/4849#issue-2364261568, # Description This PR contain changes to make possible the run of single approval-voting instance on a worker thread, so that it can be instantiated by the approval-voting-parallel subsystem. This does not contain any functional changes it just decouples the subsystem from the subsystem Context and introduces more specific trait dependencies for each function instead of all of them requiring a context. This change can be merged independent of the followup PRs. --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- polkadot/cli/src/command.rs | 8 +- polkadot/node/core/approval-voting/Cargo.toml | 1 + .../node/core/approval-voting/src/import.rs | 114 ++--- polkadot/node/core/approval-voting/src/lib.rs | 392 ++++++++++++------ .../node/core/approval-voting/src/tests.rs | 25 +- polkadot/node/service/src/lib.rs | 13 +- polkadot/node/service/src/overseer.rs | 1 + .../subsystem-bench/src/lib/approval/mod.rs | 3 +- prdoc/pr_4846.prdoc | 13 + 9 files changed, 382 insertions(+), 188 deletions(-) create mode 100644 prdoc/pr_4846.prdoc diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 168a645430ed..2947867c516e 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -373,16 +373,16 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, backend, _, task_manager) = polkadot_service::new_chain_ops(&mut config, None)?; + let task_handle = task_manager.spawn_handle(); let aux_revert = Box::new(|client, backend, blocks| { - polkadot_service::revert_backend(client, backend, blocks, config).map_err( - |err| { + polkadot_service::revert_backend(client, backend, blocks, config, task_handle) + .map_err(|err| { match err { polkadot_service::Error::Blockchain(err) => err.into(), // Generic application-specific error. 
err => sc_cli::Error::Application(err.into()), } - }, - ) + }) }); Ok(( cmd.run(client, backend, Some(aux_revert)).map_err(Error::SubstrateCli), diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index bc0187bf4922..e678118440f5 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -22,6 +22,7 @@ kvdb = { workspace = true } derive_more = { workspace = true, default-features = true } thiserror = { workspace = true } itertools = { workspace = true } +async-trait = { workspace = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index b163d718eb25..bf6ea0c98149 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -44,6 +44,7 @@ use polkadot_node_subsystem::{ overseer, RuntimeApiError, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; +use polkadot_overseer::SubsystemSender; use polkadot_primitives::{ node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, @@ -111,8 +112,8 @@ enum ImportedBlockInfoError { /// Computes information about the imported block. Returns an error if the info couldn't be /// extracted. #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn imported_block_info( - ctx: &mut Context, +async fn imported_block_info>( + sender: &mut Sender, env: ImportedBlockInfoEnv<'_>, block_hash: Hash, block_header: &Header, @@ -124,11 +125,12 @@ async fn imported_block_info( // fetch candidates let included_candidates: Vec<_> = { let (c_tx, c_rx) = oneshot::channel(); - ctx.send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::CandidateEvents(c_tx), - )) - .await; + sender + .send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::CandidateEvents(c_tx), + )) + .await; let events: Vec = match c_rx.await { Ok(Ok(events)) => events, @@ -151,11 +153,12 @@ async fn imported_block_info( // short, that shouldn't happen. let session_index = { let (s_tx, s_rx) = oneshot::channel(); - ctx.send_message(RuntimeApiMessage::Request( - block_header.parent_hash, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) - .await; + sender + .send_message(RuntimeApiMessage::Request( + block_header.parent_hash, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) + .await; let session_index = match s_rx.await { Ok(Ok(s)) => s, @@ -201,11 +204,12 @@ async fn imported_block_info( // by one block. This gives us the opposite invariant for sessions - the parent block's // post-state gives us the canonical information about the session index for any of its // children, regardless of which slot number they might be produced at. 
- ctx.send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::CurrentBabeEpoch(s_tx), - )) - .await; + sender + .send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::CurrentBabeEpoch(s_tx), + )) + .await; match s_rx.await { Ok(Ok(s)) => s, @@ -216,7 +220,7 @@ async fn imported_block_info( }; let extended_session_info = - get_extended_session_info(env.runtime_info, ctx.sender(), block_hash, session_index).await; + get_extended_session_info(env.runtime_info, sender, block_hash, session_index).await; let enable_v2_assignments = extended_session_info.map_or(false, |extended_session_info| { *extended_session_info .node_features @@ -225,7 +229,7 @@ async fn imported_block_info( .unwrap_or(&false) }); - let session_info = get_session_info(env.runtime_info, ctx.sender(), block_hash, session_index) + let session_info = get_session_info(env.runtime_info, sender, block_hash, session_index) .await .ok_or(ImportedBlockInfoError::SessionInfoUnavailable)?; @@ -329,9 +333,15 @@ pub struct BlockImportedCandidates { /// * and return information about all candidates imported under each block. /// /// It is the responsibility of the caller to schedule wakeups for each block. -#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -pub(crate) async fn handle_new_head( - ctx: &mut Context, +pub(crate) async fn handle_new_head< + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, + AVSender: SubsystemSender, + B: Backend, +>( + sender: &mut Sender, + approval_voting_sender: &mut AVSender, state: &State, db: &mut OverlayedBackend<'_, B>, session_info_provider: &mut RuntimeInfo, @@ -349,7 +359,7 @@ pub(crate) async fn handle_new_head( let header = { let (h_tx, h_rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::BlockHeader(head, h_tx)).await; + sender.send_message(ChainApiMessage::BlockHeader(head, h_tx)).await; match h_rx.await? { Err(e) => { gum::debug!( @@ -375,7 +385,7 @@ pub(crate) async fn handle_new_head( let lower_bound_number = finalized_number.unwrap_or(lower_bound_number).max(lower_bound_number); let new_blocks = determine_new_blocks( - ctx.sender(), + sender, |h| db.load_block_entry(h).map(|e| e.is_some()), head, &header, @@ -401,12 +411,15 @@ pub(crate) async fn handle_new_head( keystore: &state.keystore, }; - match imported_block_info(ctx, env, block_hash, &block_header, finalized_number).await { + match imported_block_info(sender, env, block_hash, &block_header, finalized_number) + .await + { Ok(i) => imported_blocks_and_info.push((block_hash, block_header, i)), Err(error) => { // It's possible that we've lost a race with finality. 
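The refactoring pattern applied in the hunks above, reduced to a self-contained sketch: instead of requiring the whole subsystem `Context`, each function asks only for a sender able to deliver the message types it actually uses. The names below are illustrative stand-ins rather than the real `polkadot-node-subsystem` types, with `SendsMsg<M>` playing the role of `SubsystemSender<M>`:

use futures::executor::block_on;

#[derive(Debug)]
struct RuntimeApiMsg(u32);
#[derive(Debug)]
struct ChainApiMsg(u32);

// "Can send messages of type M" -- the narrow capability each function declares.
trait SendsMsg<M> {
    async fn send(&mut self, msg: M);
}

struct PrintSender;
impl<M: std::fmt::Debug> SendsMsg<M> for PrintSender {
    async fn send(&mut self, msg: M) {
        println!("sent {msg:?}");
    }
}

// Before: `async fn imported_block_info(ctx: &mut Context, ...)`.
// After: any sender satisfying the two bounds will do, so the same body can run
// inside the subsystem or on a worker thread fed by a plain channel.
async fn imported_block_info<S>(sender: &mut S, block_number: u32)
where
    S: SendsMsg<RuntimeApiMsg> + SendsMsg<ChainApiMsg>,
{
    <S as SendsMsg<RuntimeApiMsg>>::send(&mut *sender, RuntimeApiMsg(block_number)).await;
    <S as SendsMsg<ChainApiMsg>>::send(&mut *sender, ChainApiMsg(block_number)).await;
}

fn main() {
    block_on(imported_block_info(&mut PrintSender, 7));
}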
let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::FinalizedBlockHash(block_header.number, tx)) + sender + .send_message(ChainApiMessage::FinalizedBlockHash(block_header.number, tx)) .await; let lost_to_finality = match rx.await { @@ -450,17 +463,11 @@ pub(crate) async fn handle_new_head( force_approve, } = imported_block_info; - let session_info = match get_session_info( - session_info_provider, - ctx.sender(), - head, - session_index, - ) - .await - { - Some(session_info) => session_info, - None => return Ok(Vec::new()), - }; + let session_info = + match get_session_info(session_info_provider, sender, head, session_index).await { + Some(session_info) => session_info, + None => return Ok(Vec::new()), + }; let (block_tick, no_show_duration) = { let block_tick = slot_number_to_tick(state.slot_duration_millis, slot); @@ -510,7 +517,7 @@ pub(crate) async fn handle_new_head( }; // If all bits are already set, then send an approve message. if approved_bitfield.count_ones() == approved_bitfield.len() { - ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; + sender.send_message(ChainSelectionMessage::Approved(block_hash)).await; } let block_entry = v3::BlockEntry { @@ -567,7 +574,7 @@ pub(crate) async fn handle_new_head( // Notify chain-selection of all approved hashes. for hash in approved_hashes { - ctx.send_message(ChainSelectionMessage::Approved(hash)).await; + sender.send_message(ChainSelectionMessage::Approved(hash)).await; } } @@ -603,7 +610,8 @@ pub(crate) async fn handle_new_head( "Informing distribution of newly imported chain", ); - ctx.send_unbounded_message(ApprovalDistributionMessage::NewBlocks(approval_meta)); + approval_voting_sender + .send_unbounded_message(ApprovalDistributionMessage::NewBlocks(approval_meta)); Ok(imported_candidates) } @@ -620,7 +628,10 @@ pub(crate) mod tests { approval::v1::{VrfSignature, VrfTranscript}, DISPUTE_WINDOW, }; - use polkadot_node_subsystem::messages::{AllMessages, ApprovalVotingMessage}; + use polkadot_node_subsystem::{ + messages::{AllMessages, ApprovalVotingMessage}, + SubsystemContext, + }; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ @@ -662,7 +673,7 @@ pub(crate) mod tests { State { keystore: Arc::new(LocalKeystore::in_memory()), slot_duration_millis: 6_000, - clock: Box::new(MockClock::default()), + clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::default()), spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( @@ -806,8 +817,9 @@ pub(crate) mod tests { keystore: &LocalKeystore::in_memory(), }; - let info = - imported_block_info(&mut ctx, env, hash, &header, &Some(4)).await.unwrap(); + let info = imported_block_info(ctx.sender(), env, hash, &header, &Some(4)) + .await + .unwrap(); assert_eq!(info.included_candidates, included_candidates); assert_eq!(info.session_index, session); @@ -953,7 +965,7 @@ pub(crate) mod tests { keystore: &LocalKeystore::in_memory(), }; - let info = imported_block_info(&mut ctx, env, hash, &header, &Some(4)).await; + let info = imported_block_info(ctx.sender(), env, hash, &header, &Some(4)).await; assert_matches!(info, Err(ImportedBlockInfoError::VrfInfoUnavailable)); }) @@ -1092,7 +1104,7 @@ pub(crate) mod tests { keystore: &LocalKeystore::in_memory(), }; - let info = imported_block_info(&mut ctx, env, hash, &header, &Some(6)).await; + let info = imported_block_info(ctx.sender(), env, hash, 
&header, &Some(6)).await; assert_matches!(info, Err(ImportedBlockInfoError::BlockAlreadyFinalized)); }) @@ -1128,7 +1140,8 @@ pub(crate) mod tests { #[test] fn imported_block_info_extracts_force_approve() { let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context(pool.clone()); + let (mut ctx, mut handle) = + make_subsystem_context::(pool.clone()); let session = 5; let session_info = dummy_session_info(session); @@ -1191,7 +1204,7 @@ pub(crate) mod tests { }; let info = - imported_block_info(&mut ctx, env, hash, &header, &Some(4)).await.unwrap(); + imported_block_info(ctx.sender(), env, hash, &header, &Some(4)).await.unwrap(); assert_eq!(info.included_candidates, included_candidates); assert_eq!(info.session_index, session); @@ -1384,8 +1397,11 @@ pub(crate) mod tests { let test_fut = { Box::pin(async move { let mut overlay_db = OverlayedBackend::new(&db); + + let mut approval_voting_sender = ctx.sender().clone(); let result = handle_new_head( - &mut ctx, + ctx.sender(), + &mut approval_voting_sender, &state, &mut overlay_db, &mut session_info_provider, diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 942922cba6df..2149ce81fa80 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -165,7 +165,8 @@ pub struct ApprovalVotingSubsystem { db: Arc, mode: Mode, metrics: Metrics, - clock: Box, + clock: Arc, + spawner: Arc, } #[derive(Clone)] @@ -484,6 +485,7 @@ impl ApprovalVotingSubsystem { keystore: Arc, sync_oracle: Box, metrics: Metrics, + spawner: Arc, ) -> Self { ApprovalVotingSubsystem::with_config_and_clock( config, @@ -491,7 +493,8 @@ impl ApprovalVotingSubsystem { keystore, sync_oracle, metrics, - Box::new(SystemClock {}), + Arc::new(SystemClock {}), + spawner, ) } @@ -502,7 +505,8 @@ impl ApprovalVotingSubsystem { keystore: Arc, sync_oracle: Box, metrics: Metrics, - clock: Box, + clock: Arc, + spawner: Arc, ) -> Self { ApprovalVotingSubsystem { keystore, @@ -512,6 +516,7 @@ impl ApprovalVotingSubsystem { mode: Mode::Syncing(sync_oracle), metrics, clock, + spawner, } } @@ -551,12 +556,21 @@ fn db_sanity_check(db: Arc, config: DatabaseConfig) -> SubsystemRe #[overseer::subsystem(ApprovalVoting, error = SubsystemError, prefix = self::overseer)] impl ApprovalVotingSubsystem { - fn start(self, ctx: Context) -> SpawnedSubsystem { + fn start(self, mut ctx: Context) -> SpawnedSubsystem { let backend = DbBackend::new(self.db.clone(), self.db_config); - let future = - run::(ctx, self, Box::new(RealAssignmentCriteria), backend) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) - .boxed(); + let to_other_subsystems = ctx.sender().clone(); + let to_approval_distr = ctx.sender().clone(); + + let future = run::( + ctx, + to_other_subsystems, + to_approval_distr, + self, + Box::new(RealAssignmentCriteria), + backend, + ) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) + .boxed(); SpawnedSubsystem { name: "approval-voting-subsystem", future } } @@ -825,7 +839,7 @@ where struct State { keystore: Arc, slot_duration_millis: u64, - clock: Box, + clock: Arc, assignment_criteria: Box, spans: HashMap, // Per block, candidate records about how long we take until we gather enough @@ -961,20 +975,20 @@ impl State { } // Returns the approval voting params from the RuntimeApi. 
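One plausible reading of the `Box<dyn Clock>` to `Arc<dyn Clock>` change, as a minimal stand-alone sketch (the `Clock` trait here is a stand-in, not the subsystem's real one): an `Arc` of a trait object can be cloned into a worker thread while the original holder keeps using the same instance, which a `Box` cannot do without giving up ownership:

use std::{
    sync::Arc,
    thread,
    time::{SystemTime, UNIX_EPOCH},
};

// Stand-in for the subsystem's clock abstraction.
trait Clock: Send + Sync {
    fn now_secs(&self) -> u64;
}

struct SystemClock;
impl Clock for SystemClock {
    fn now_secs(&self) -> u64 {
        SystemTime::now().duration_since(UNIX_EPOCH).map(|d| d.as_secs()).unwrap_or(0)
    }
}

fn main() {
    let clock: Arc<dyn Clock> = Arc::new(SystemClock);

    // Cheap clone of the pointer, not the clock: both the "subsystem" side and the
    // worker observe the same shared instance.
    let for_worker = Arc::clone(&clock);
    let worker = thread::spawn(move || for_worker.now_secs());

    let here = clock.now_secs();
    let there = worker.join().expect("worker thread panicked");
    println!("local tick: {here}, worker tick: {there}");
}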
- #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] - async fn get_approval_voting_params_or_default( + async fn get_approval_voting_params_or_default>( &self, - ctx: &mut Context, + sender: &mut Sender, session_index: SessionIndex, block_hash: Hash, ) -> Option { let (s_tx, s_rx) = oneshot::channel(); - ctx.send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::ApprovalVotingParams(session_index, s_tx), - )) - .await; + sender + .send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::ApprovalVotingParams(session_index, s_tx), + )) + .await; match s_rx.await { Ok(Ok(params)) => { @@ -1143,9 +1157,36 @@ enum Action { Conclude, } +/// Trait for providing approval voting subsystem with work. +#[async_trait::async_trait] +pub trait ApprovalVotingWorkProvider { + async fn recv(&mut self) -> SubsystemResult>; +} + +#[async_trait::async_trait] +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +impl ApprovalVotingWorkProvider for Context { + async fn recv(&mut self) -> SubsystemResult> { + self.recv().await + } +} + #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn run( - mut ctx: Context, +async fn run< + B, + WorkProvider: ApprovalVotingWorkProvider, + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + Clone, + ADSender: SubsystemSender, +>( + mut work_provider: WorkProvider, + mut to_other_subsystems: Sender, + mut to_approval_distr: ADSender, mut subsystem: ApprovalVotingSubsystem, assignment_criteria: Box, mut backend: B, @@ -1169,19 +1210,11 @@ where no_show_stats: NoShowStats::default(), }; - // `None` on start-up. Gets initialized/updated on leaf update - let mut session_info_provider = RuntimeInfo::new_with_config(RuntimeInfoConfig { - keystore: None, - session_cache_lru_size: DISPUTE_WINDOW.get(), - }); - let mut wakeups = Wakeups::default(); - let mut currently_checking_set = CurrentlyCheckingSet::default(); - let mut delayed_approvals_timers = DelayedApprovalTimer::default(); - let mut approvals_cache = LruMap::new(ByLength::new(APPROVAL_CACHE_SIZE)); - let mut last_finalized_height: Option = { let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::FinalizedBlockNumber(tx)).await; + to_other_subsystems + .send_message(ChainApiMessage::FinalizedBlockNumber(tx)) + .await; match rx.await? { Ok(number) => Some(number), Err(err) => { @@ -1191,13 +1224,24 @@ where } }; + // `None` on start-up. Gets initialized/updated on leaf update + let mut session_info_provider = RuntimeInfo::new_with_config(RuntimeInfoConfig { + keystore: None, + session_cache_lru_size: DISPUTE_WINDOW.get(), + }); + + let mut wakeups = Wakeups::default(); + let mut currently_checking_set = CurrentlyCheckingSet::default(); + let mut delayed_approvals_timers = DelayedApprovalTimer::default(); + let mut approvals_cache = LruMap::new(ByLength::new(APPROVAL_CACHE_SIZE)); + loop { let mut overlayed_db = OverlayedBackend::new(&backend); let actions = futures::select! { (_tick, woken_block, woken_candidate) = wakeups.next(&*state.clock).fuse() => { subsystem.metrics.on_wakeup(); process_wakeup( - &mut ctx, + &mut to_other_subsystems, &mut state, &mut overlayed_db, &mut session_info_provider, @@ -1207,9 +1251,11 @@ where &wakeups, ).await? 
} - next_msg = ctx.recv().fuse() => { + next_msg = work_provider.recv().fuse() => { let mut actions = handle_from_overseer( - &mut ctx, + &mut to_other_subsystems, + &mut to_approval_distr, + &subsystem.spawner, &mut state, &mut overlayed_db, &mut session_info_provider, @@ -1269,7 +1315,8 @@ where &mut overlayed_db, &mut session_info_provider, &state, - &mut ctx, + &mut to_other_subsystems, + &mut to_approval_distr, block_hash, validator_index, &subsystem.metrics, @@ -1291,7 +1338,9 @@ where }; if handle_actions( - &mut ctx, + &mut to_other_subsystems, + &mut to_approval_distr, + &subsystem.spawner, &mut state, &mut overlayed_db, &mut session_info_provider, @@ -1318,6 +1367,63 @@ where Ok(()) } +// Starts a worker thread that runs the approval voting subsystem. +pub async fn start_approval_worker< + WorkProvider: ApprovalVotingWorkProvider + Send + 'static, + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + Clone, + ADSender: SubsystemSender, +>( + work_provider: WorkProvider, + to_other_subsystems: Sender, + to_approval_distr: ADSender, + config: Config, + db: Arc, + keystore: Arc, + sync_oracle: Box, + metrics: Metrics, + spawner: Arc, + task_name: &'static str, + group_name: &'static str, + clock: Arc, +) -> SubsystemResult<()> { + let approval_voting = ApprovalVotingSubsystem::with_config_and_clock( + config, + db.clone(), + keystore, + sync_oracle, + metrics, + clock, + spawner, + ); + let backend = DbBackend::new(db.clone(), approval_voting.db_config); + let spawner = approval_voting.spawner.clone(); + spawner.spawn_blocking( + task_name, + Some(group_name), + Box::pin(async move { + if let Err(err) = run( + work_provider, + to_other_subsystems, + to_approval_distr, + approval_voting, + Box::new(RealAssignmentCriteria), + backend, + ) + .await + { + gum::error!(target: LOG_TARGET, ?err, "Approval voting worker stopped processing messages"); + }; + }), + ); + Ok(()) +} + // Handle actions is a function that accepts a set of instructions // and subsequently updates the underlying approvals_db in accordance // with the linear set of instructions passed in. Therefore, actions @@ -1338,8 +1444,19 @@ where // // returns `true` if any of the actions was a `Conclude` command. #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn handle_actions( - ctx: &mut Context, +async fn handle_actions< + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender + + Clone, + ADSender: SubsystemSender, +>( + sender: &mut Sender, + approval_voting_sender: &mut ADSender, + spawn_handle: &Arc, state: &mut State, overlayed_db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, @@ -1371,7 +1488,8 @@ async fn handle_actions( // Note that chaining these iterators is O(n) as we must consume // the prior iterator. 
let next_actions: Vec = issue_approval( - ctx, + sender, + approval_voting_sender, state, overlayed_db, session_info_provider, @@ -1422,10 +1540,12 @@ async fn handle_actions( let validator_index = indirect_cert.validator; if distribute_assignment { - ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( - indirect_cert, - claimed_candidate_indices, - )); + approval_voting_sender.send_unbounded_message( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert, + claimed_candidate_indices, + ), + ); } match approvals_cache.get(&candidate_hash) { @@ -1440,7 +1560,8 @@ async fn handle_actions( actions_iter = new_actions.into_iter(); }, None => { - let ctx = &mut *ctx; + let sender = sender.clone(); + let spawn_handle = spawn_handle.clone(); currently_checking_set .insert_relay_block_hash( @@ -1449,7 +1570,8 @@ async fn handle_actions( relay_block_hash, async move { launch_approval( - ctx, + sender, + spawn_handle, metrics.clone(), session, candidate, @@ -1478,13 +1600,13 @@ async fn handle_actions( }) .with_string_tag("block-hash", format!("{:?}", block_hash)) .with_stage(jaeger::Stage::ApprovalChecking); - ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; + sender.send_message(ChainSelectionMessage::Approved(block_hash)).await; }, Action::BecomeActive => { *mode = Mode::Active; let (messages, next_actions) = distribution_messages_for_activation( - ctx, + sender, overlayed_db, state, delayed_approvals_timers, @@ -1492,7 +1614,7 @@ async fn handle_actions( ) .await?; - ctx.send_messages(messages.into_iter()).await; + approval_voting_sender.send_messages(messages.into_iter()).await; let next_actions: Vec = next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); @@ -1566,8 +1688,8 @@ fn get_assignment_core_indices( } #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn distribution_messages_for_activation( - ctx: &mut Context, +async fn distribution_messages_for_activation>( + sender: &mut Sender, db: &OverlayedBackend<'_, impl Backend>, state: &State, delayed_approvals_timers: &mut DelayedApprovalTimer, @@ -1693,7 +1815,7 @@ async fn distribution_messages_for_activation( let ExtendedSessionInfo { ref executor_params, .. } = match get_extended_session_info( session_info_provider, - ctx.sender(), + sender, block_entry.block_hash(), block_entry.session(), ) @@ -1791,9 +1913,16 @@ async fn distribution_messages_for_activation( } // Handle an incoming signal from the overseer. Returns true if execution should conclude. 
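A reduced analogue of the `ApprovalVotingWorkProvider` abstraction introduced above, with simplified types (a plain `Option` instead of `SubsystemResult<FromOrchestra<_>>`): the main loop only needs something that yields the next piece of work, so an overseer context and a plain channel receiver can both drive it:

use futures::{channel::mpsc, executor::block_on, StreamExt};

// Stand-in for the messages the real loop receives from the overseer.
#[derive(Debug)]
enum WorkItem {
    Message(String),
    Conclude,
}

// Anything that can hand the loop its next piece of work.
trait WorkProvider {
    async fn recv(&mut self) -> Option<WorkItem>;
}

// A plain channel receiver is one possible provider, which is what allows the same
// loop to run on a worker thread instead of directly under the overseer.
impl WorkProvider for mpsc::UnboundedReceiver<WorkItem> {
    async fn recv(&mut self) -> Option<WorkItem> {
        self.next().await
    }
}

async fn run<W: WorkProvider>(mut work: W) {
    // The loop no longer cares where its work comes from.
    while let Some(item) = work.recv().await {
        match item {
            WorkItem::Message(m) => println!("processing {m}"),
            WorkItem::Conclude => break,
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::unbounded();
    tx.unbounded_send(WorkItem::Message("assignment".into())).unwrap();
    tx.unbounded_send(WorkItem::Conclude).unwrap();
    block_on(run(rx));
}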
-#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn handle_from_overseer( - ctx: &mut Context, +async fn handle_from_overseer< + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender + + Clone, + ADSender: SubsystemSender, +>( + sender: &mut Sender, + approval_voting_sender: &mut ADSender, + spawn_handle: &Arc, state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, @@ -1811,7 +1940,8 @@ async fn handle_from_overseer( jaeger::PerLeafSpan::new(activated.span, "approval-voting"); state.spans.insert(head, approval_voting_span); match import::handle_new_head( - ctx, + sender, + approval_voting_sender, state, db, session_info_provider, @@ -1894,14 +2024,9 @@ async fn handle_from_overseer( }, FromOrchestra::Communication { msg } => match msg { ApprovalVotingMessage::ImportAssignment(checked_assignment, tx) => { - let (check_outcome, actions) = import_assignment( - ctx.sender(), - state, - db, - session_info_provider, - checked_assignment, - ) - .await?; + let (check_outcome, actions) = + import_assignment(sender, state, db, session_info_provider, checked_assignment) + .await?; // approval-distribution makes sure this assignment is valid and expected, // so this import should never fail, if it does it might mean one of two things, // there is a bug in the code or the two subsystems got out of sync. @@ -1912,16 +2037,9 @@ async fn handle_from_overseer( actions }, ApprovalVotingMessage::ImportApproval(a, tx) => { - let result = import_approval( - ctx.sender(), - state, - db, - session_info_provider, - metrics, - a, - &wakeups, - ) - .await?; + let result = + import_approval(sender, state, db, session_info_provider, metrics, a, &wakeups) + .await?; // approval-distribution makes sure this vote is valid and expected, // so this import should never fail, if it does it might mean one of two things, // there is a bug in the code or the two subsystems got out of sync. @@ -1941,7 +2059,7 @@ async fn handle_from_overseer( .with_stage(jaeger::Stage::ApprovalChecking) .with_string_tag("leaf", format!("{:?}", target)); match handle_approved_ancestor( - ctx, + sender, db, target, lower_bound, @@ -1964,7 +2082,14 @@ async fn handle_from_overseer( }, ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx) => { metrics.on_candidate_signatures_request(); - get_approval_signatures_for_candidate(ctx, db, candidate_hash, tx).await?; + get_approval_signatures_for_candidate( + approval_voting_sender.clone(), + spawn_handle, + db, + candidate_hash, + tx, + ) + .await?; Vec::new() }, }, @@ -1978,8 +2103,11 @@ async fn handle_from_overseer( /// This involves an unbounded message send to approval-distribution, the caller has to ensure that /// calls to this function are infrequent and bounded. 
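To make the bounded/unbounded distinction in the comment above concrete, a small sketch using `futures` channels with generic `u32` payloads rather than the real subsystem messages: a bounded `send` awaits when the channel is at capacity and so throttles the producer, while `unbounded_send` never waits, which is why unbounded paths must stay infrequent:

use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};

fn main() {
    block_on(async {
        // Bounded: `send` is async and parks the producer once the channel is at
        // capacity, giving natural back-pressure.
        let (mut bounded_tx, mut bounded_rx) = mpsc::channel::<u32>(4);
        bounded_tx.send(1).await.expect("receiver alive");
        assert_eq!(bounded_rx.next().await, Some(1));

        // Unbounded: `unbounded_send` always succeeds while the receiver exists, so
        // nothing slows a hot loop down; the caller has to guarantee the volume.
        let (unbounded_tx, mut unbounded_rx) = mpsc::unbounded::<u32>();
        for i in 0..1_000 {
            unbounded_tx.unbounded_send(i).expect("receiver alive");
        }
        assert_eq!(unbounded_rx.next().await, Some(0));
    });
}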
#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn get_approval_signatures_for_candidate( - ctx: &mut Context, +async fn get_approval_signatures_for_candidate< + Sender: SubsystemSender, +>( + mut sender: Sender, + spawn_handle: &Arc, db: &OverlayedBackend<'_, impl Backend>, candidate_hash: CandidateHash, tx: oneshot::Sender, ValidatorSignature)>>, @@ -2038,7 +2166,6 @@ async fn get_approval_signatures_for_candidate( } } - let mut sender = ctx.sender().clone(); let get_approvals = async move { let (tx_distribution, rx_distribution) = oneshot::channel(); sender.send_unbounded_message(ApprovalDistributionMessage::GetApprovalSignatures( @@ -2118,12 +2245,17 @@ async fn get_approval_signatures_for_candidate( ?candidate_hash, "Spawning task for fetching signatures from approval-distribution" ); - ctx.spawn("get-approval-signatures", Box::pin(get_approvals)) + spawn_handle.spawn( + "get-approval-signatures", + Some("approval-voting-subsystem"), + Box::pin(get_approvals), + ); + Ok(()) } #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn handle_approved_ancestor( - ctx: &mut Context, +async fn handle_approved_ancestor>( + sender: &mut Sender, db: &OverlayedBackend<'_, impl Backend>, target: Hash, lower_bound: BlockNumber, @@ -2143,7 +2275,7 @@ async fn handle_approved_ancestor( let target_number = { let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::BlockNumber(target, tx)).await; + sender.send_message(ChainApiMessage::BlockNumber(target, tx)).await; match rx.await { Ok(Ok(Some(n))) => n, @@ -2164,12 +2296,13 @@ async fn handle_approved_ancestor( let ancestry = if target_number > lower_bound + 1 { let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::Ancestors { - hash: target, - k: (target_number - (lower_bound + 1)) as usize, - response_channel: tx, - }) - .await; + sender + .send_message(ChainApiMessage::Ancestors { + hash: target, + k: (target_number - (lower_bound + 1)) as usize, + response_channel: tx, + }) + .await; match rx.await { Ok(Ok(a)) => a, @@ -3107,9 +3240,8 @@ fn should_trigger_assignment( } } -#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn process_wakeup( - ctx: &mut Context, +async fn process_wakeup>( + sender: &mut Sender, state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, @@ -3140,7 +3272,7 @@ async fn process_wakeup( let ExtendedSessionInfo { ref session_info, ref executor_params, .. } = match get_extended_session_info( session_info_provider, - ctx.sender(), + sender, block_entry.block_hash(), block_entry.session(), ) @@ -3282,7 +3414,7 @@ async fn process_wakeup( // Note that this function also schedules a wakeup as necessary. actions.extend( advance_approval_state( - ctx.sender(), + sender, state, db, session_info_provider, @@ -3303,8 +3435,14 @@ async fn process_wakeup( // spawned. When the background work is no longer needed, the `AbortHandle` should be dropped // to cancel the background work and any requests it has spawned. 
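The `remote_handle` mechanism behind these background checks, shown in isolation with faked work: splitting a future yields the future to spawn plus a handle that owns its output, and dropping the handle cancels the remote work:

use futures::{executor::block_on, future::FutureExt};
use std::thread;

fn main() {
    // `remote_handle` splits a future into the work itself and a handle owning its
    // output; the work can then be shipped to another executor or thread.
    let (work, handle) = async { 40 + 2 }.remote_handle();
    let worker = thread::spawn(move || block_on(work));

    // Awaiting (here: blocking on) the handle retrieves the result of the remote work.
    assert_eq!(block_on(handle), 42);
    worker.join().expect("worker thread panicked");

    // Dropping a handle without awaiting it cancels the corresponding remote future,
    // which is how a no-longer-needed background check is torn down.
    let (cancelled, unused_handle) = futures::future::pending::<()>().remote_handle();
    drop(unused_handle);
    block_on(cancelled); // completes immediately because its handle is gone
}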
#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn launch_approval( - ctx: &mut Context, +async fn launch_approval< + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender + + SubsystemSender, +>( + mut sender: Sender, + spawn_handle: Arc, metrics: Metrics, session_index: SessionIndex, candidate: CandidateReceipt, @@ -3355,14 +3493,15 @@ async fn launch_approval( .with_stage(jaeger::Stage::ApprovalChecking); let timer = metrics.time_recover_and_approve(); - ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData( - candidate.clone(), - session_index, - Some(backing_group), - core_index, - a_tx, - )) - .await; + sender + .send_message(AvailabilityRecoveryMessage::RecoverAvailableData( + candidate.clone(), + session_index, + Some(backing_group), + core_index, + a_tx, + )) + .await; let request_validation_result_span = span .child("request-validation-result") @@ -3371,15 +3510,18 @@ async fn launch_approval( .with_string_tag("block-hash", format!("{:?}", block_hash)) .with_stage(jaeger::Stage::ApprovalChecking); - ctx.send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::ValidationCodeByHash(candidate.descriptor.validation_code_hash, code_tx), - )) - .await; + sender + .send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::ValidationCodeByHash( + candidate.descriptor.validation_code_hash, + code_tx, + ), + )) + .await; let candidate = candidate.clone(); let metrics_guard = StaleGuard(Some(metrics)); - let mut sender = ctx.sender().clone(); let background = async move { // Force the move of the timer into the background task. let _timer = timer; @@ -3509,14 +3651,19 @@ async fn launch_approval( } }; let (background, remote_handle) = background.remote_handle(); - ctx.spawn("approval-checks", Box::pin(background)).map(move |()| remote_handle) + spawn_handle.spawn("approval-checks", Some("approval-voting-subsystem"), Box::pin(background)); + Ok(remote_handle) } // Issue and import a local approval vote. Should only be invoked after approval checks // have been done. 
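The oneshot request/response shape used throughout `launch_approval` (the `a_tx`/`a_rx` and `code_tx`/`code_rx` pairs), as a stand-alone sketch with made-up request and payload types:

use futures::{
    channel::{mpsc, oneshot},
    executor::block_on,
    StreamExt,
};

// Hypothetical request type: the reply channel travels inside the message.
enum Request {
    ValidationCodeLen { reply: oneshot::Sender<usize> },
}

async fn serve(mut rx: mpsc::UnboundedReceiver<Request>) {
    while let Some(Request::ValidationCodeLen { reply }) = rx.next().await {
        // Respond; ignore the error case of a requester that has already hung up.
        let _ = reply.send(1024);
    }
}

fn main() {
    block_on(async {
        let (tx, rx) = mpsc::unbounded();
        let client = async move {
            let (reply_tx, reply_rx) = oneshot::channel();
            tx.unbounded_send(Request::ValidationCodeLen { reply: reply_tx })
                .expect("server alive");
            drop(tx); // no more requests: lets the server loop finish
            reply_rx.await.expect("server replied")
        };
        let (_, code_len) = futures::join!(serve(rx), client);
        assert_eq!(code_len, 1024);
    });
}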
#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn issue_approval( - ctx: &mut Context, +async fn issue_approval< + Sender: SubsystemSender, + ADSender: SubsystemSender, +>( + sender: &mut Sender, + approval_voting_sender: &mut ADSender, state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, @@ -3595,7 +3742,7 @@ async fn issue_approval( let session_info = match get_session_info( session_info_provider, - ctx.sender(), + sender, block_entry.parent_hash(), block_entry.session(), ) @@ -3637,7 +3784,7 @@ async fn issue_approval( ); let actions = advance_approval_state( - ctx.sender(), + sender, state, db, session_info_provider, @@ -3654,7 +3801,8 @@ async fn issue_approval( db, session_info_provider, state, - ctx, + sender, + approval_voting_sender, block_hash, validator_index, metrics, @@ -3673,11 +3821,15 @@ async fn issue_approval( // Create signature for the approved candidates pending signatures #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn maybe_create_signature( +async fn maybe_create_signature< + Sender: SubsystemSender, + ADSender: SubsystemSender, +>( db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, state: &State, - ctx: &mut Context, + sender: &mut Sender, + approval_voting_sender: &mut ADSender, block_hash: Hash, validator_index: ValidatorIndex, metrics: &Metrics, @@ -3696,7 +3848,7 @@ async fn maybe_create_signature( }; let approval_params = state - .get_approval_voting_params_or_default(ctx, block_entry.session(), block_hash) + .get_approval_voting_params_or_default(sender, block_entry.session(), block_hash) .await .unwrap_or_default(); @@ -3716,7 +3868,7 @@ async fn maybe_create_signature( let session_info = match get_session_info( session_info_provider, - ctx.sender(), + sender, block_entry.parent_hash(), block_entry.session(), ) @@ -3797,7 +3949,7 @@ async fn maybe_create_signature( metrics.on_approval_produced(); - ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeApproval( + approval_voting_sender.send_unbounded_message(ApprovalDistributionMessage::DistributeApproval( IndirectSignedApprovalVoteV2 { block_hash: block_entry.block_hash(), candidate_indices: candidates_indices, @@ -3838,7 +3990,7 @@ fn issue_local_invalid_statement( candidate_hash: CandidateHash, candidate: CandidateReceipt, ) where - Sender: overseer::ApprovalVotingSenderTrait, + Sender: SubsystemSender, { // We need to send an unbounded message here to break a cycle: // DisputeCoordinatorMessage::IssueLocalStatement -> diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 7126f209a94f..65aa4f894c23 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -35,11 +35,11 @@ use polkadot_node_subsystem::{ messages::{ AllMessages, ApprovalVotingMessage, AssignmentCheckResult, AvailabilityRecoveryMessage, }, - ActiveLeavesUpdate, + ActiveLeavesUpdate, SubsystemContext, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_overseer::HeadSupportsParachains; +use polkadot_overseer::{HeadSupportsParachains, SpawnGlue}; use polkadot_primitives::{ ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, DisputeStatement, GroupIndex, Header, Id as ParaId, IndexedVec, NodeFeatures, ValidDisputeStatementKind, ValidationCode, @@ -536,7 +536,7 @@ impl Default for 
HarnessConfig { struct TestHarness { virtual_overseer: VirtualOverseer, - clock: Box, + clock: Arc, sync_oracle_handle: TestSyncOracleHandle, } @@ -550,8 +550,8 @@ fn test_harness>( config; let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = - polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + let (mut context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let keystore = LocalKeystore::in_memory(); let _ = keystore.sr25519_generate_new( @@ -559,12 +559,14 @@ fn test_harness>( Some(&Sr25519Keyring::Alice.to_seed()), ); - let clock = Box::new(clock); + let clock = Arc::new(clock); let db = kvdb_memorydb::create(test_constants::NUM_COLUMNS); let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); - + let sender = context.sender().clone(); let subsystem = run( context, + sender.clone(), + sender.clone(), ApprovalVotingSubsystem::with_config_and_clock( Config { col_approval_data: test_constants::TEST_CONFIG.col_approval_data, @@ -575,6 +577,7 @@ fn test_harness>( sync_oracle, Metrics::default(), clock.clone(), + Arc::new(SpawnGlue(pool)), ), assignment_criteria, backend, @@ -4114,7 +4117,7 @@ async fn handle_approval_on_max_coalesce_count( async fn handle_approval_on_max_wait_time( virtual_overseer: &mut VirtualOverseer, candidate_indices: Vec, - clock: Box, + clock: Arc, ) { const TICK_NOW_BEGIN: u64 = 1; const MAX_COALESCE_COUNT: u32 = 3; @@ -4412,7 +4415,7 @@ async fn build_chain_with_two_blocks_with_one_candidate_each( async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( virtual_overseer: &mut VirtualOverseer, store: TestStore, - clock: &Box, + clock: &Arc, sync_oracle_handle: TestSyncOracleHandle, ) { assert_matches!( @@ -4926,7 +4929,7 @@ fn test_gathering_assignments_statements() { let mut state = State { keystore: Arc::new(LocalKeystore::in_memory()), slot_duration_millis: 6_000, - clock: Box::new(MockClock::default()), + clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( @@ -5021,7 +5024,7 @@ fn test_observe_assignment_gathering_status() { let mut state = State { keystore: Arc::new(LocalKeystore::in_memory()), slot_duration_millis: 6_000, - clock: Box::new(MockClock::default()), + clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index a8e7fc16eb4d..fe96d29c1ceb 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -63,6 +63,7 @@ use { }; use polkadot_node_subsystem_util::database::Database; +use polkadot_overseer::SpawnGlue; #[cfg(feature = "full-node")] pub use { @@ -83,7 +84,7 @@ use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; use prometheus_endpoint::Registry; #[cfg(feature = "full-node")] use sc_service::KeystoreContainer; -use sc_service::RpcHandlers; +use sc_service::{RpcHandlers, SpawnTaskHandle}; use sc_telemetry::TelemetryWorker; #[cfg(feature = "full-node")] use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; @@ -1500,6 +1501,7 @@ pub fn revert_backend( backend: Arc, blocks: BlockNumber, config: Configuration, + task_handle: SpawnTaskHandle, ) -> Result<(), Error> { let 
best_number = client.info().best_number; let finalized = client.info().finalized_number; @@ -1520,7 +1522,7 @@ pub fn revert_backend( let parachains_db = open_database(&config.database) .map_err(|err| sp_blockchain::Error::Backend(err.to_string()))?; - revert_approval_voting(parachains_db.clone(), hash)?; + revert_approval_voting(parachains_db.clone(), hash, task_handle)?; revert_chain_selection(parachains_db, hash)?; // Revert Substrate consensus related components sc_consensus_babe::revert(client.clone(), backend, blocks)?; @@ -1543,7 +1545,11 @@ fn revert_chain_selection(db: Arc, hash: Hash) -> sp_blockchain::R .map_err(|err| sp_blockchain::Error::Backend(err.to_string())) } -fn revert_approval_voting(db: Arc, hash: Hash) -> sp_blockchain::Result<()> { +fn revert_approval_voting( + db: Arc, + hash: Hash, + task_handle: SpawnTaskHandle, +) -> sp_blockchain::Result<()> { let config = approval_voting_subsystem::Config { col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, slot_duration_millis: Default::default(), @@ -1555,6 +1561,7 @@ fn revert_approval_voting(db: Arc, hash: Hash) -> sp_blockchain::R Arc::new(sc_keystore::LocalKeystore::in_memory()), Box::new(sp_consensus::NoNetwork), approval_voting_subsystem::Metrics::default(), + Arc::new(SpawnGlue(task_handle)), ); approval_voting diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 0b57ff6e395f..3c071e34fe11 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -320,6 +320,7 @@ where keystore.clone(), Box::new(sync_service.clone()), Metrics::register(registry)?, + Arc::new(spawner.clone()), )) .gossip_support(GossipSupportSubsystem::new( keystore.clone(), diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index f05d061f3fde..9d85039b8880 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -814,7 +814,8 @@ fn build_overseer( Arc::new(keystore), Box::new(TestSyncOracle {}), state.approval_voting_metrics.clone(), - Box::new(system_clock.clone()), + Arc::new(system_clock.clone()), + Arc::new(SpawnGlue(spawn_task_handle.clone())), ); let approval_distribution = ApprovalDistribution::new_with_clock( diff --git a/prdoc/pr_4846.prdoc b/prdoc/pr_4846.prdoc new file mode 100644 index 000000000000..eb18301b1010 --- /dev/null +++ b/prdoc/pr_4846.prdoc @@ -0,0 +1,13 @@ +title: "Make approval-voting runnable on a worker thread" + +doc: + - audience: Node Dev + description: | + Make approval-voting subsystem runnable on a separate worker thread without having to + to always pass to it an orchestra context. 
It achieves that by refactoring existing functions + to require only the minimal set of traits needed in the function instead of the general + `Context` + +crates: + - name: polkadot-node-core-approval-voting + bump: major From d50996948903fc7e5469213dd7ef718cef747831 Mon Sep 17 00:00:00 2001 From: Vincent Geddes <117534+vgeddes@users.noreply.github.com> Date: Thu, 12 Sep 2024 15:24:19 +0200 Subject: [PATCH 02/12] Improve tests for snowbridge-pallet-ethereum-system --- .../snowbridge/pallets/system/src/tests.rs | 108 ++++++------------ bridges/snowbridge/primitives/core/src/lib.rs | 18 ++- 2 files changed, 53 insertions(+), 73 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index b3699ec2f24d..98c937d86328 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -638,112 +638,84 @@ fn register_token_with_signed_yields_bad_origin() { let origin = RuntimeOrigin::signed([14; 32].into()); let location = Location::new(1, [Parachain(2000)]); let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - assert_noop!( - EthereumSystem::register_token(origin, versioned_location, asset_metadata), + EthereumSystem::register_token(origin, versioned_location, Default::default()), BadOrigin ); }); } -pub struct TokenInfo { - pub location: Location, - pub metadata: AssetMetadata, - pub foreign_token_id: TokenId, +pub struct RegisterTokenTestCase { + /// Input: Location of Polkadot-native token relative to BH + pub native: Location, + /// Output: Reanchored, canonicalized location + pub reanchored: Location, + /// Output: Stable hash of reanchored location + pub foreign: TokenId, } #[test] fn register_all_tokens_succeeds() { - let assets = vec![ + let test_cases = vec![ // DOT - TokenInfo { - location: Location::parent(), - metadata: AssetMetadata { - decimals: 10, - name: b"DOT".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }, - foreign_token_id: hex!( + RegisterTokenTestCase { + native: Location::parent(), + reanchored: Location::new(1, GlobalConsensus(Polkadot)), + foreign: hex!( "4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2" ) .into(), }, // GLMR (Some Polkadot parachain currency) - TokenInfo { - location: Location::new(1, [Parachain(2004)]), - metadata: AssetMetadata { - decimals: 12, - name: b"GLMR".to_vec().try_into().unwrap(), - symbol: b"GLMR".to_vec().try_into().unwrap(), - }, - foreign_token_id: hex!( + RegisterTokenTestCase { + native: Location::new(1, [Parachain(2004)]), + reanchored: Location::new(1, [GlobalConsensus(Polkadot), Parachain(2004)]), + foreign: hex!( "34c08fc90409b6924f0e8eabb7c2aaa0c749e23e31adad9f6d217b577737fafb" ) .into(), }, // USDT - TokenInfo { - location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), - metadata: AssetMetadata { - decimals: 6, - name: b"USDT".to_vec().try_into().unwrap(), - symbol: b"USDT".to_vec().try_into().unwrap(), - }, - foreign_token_id: hex!( + RegisterTokenTestCase { + native: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), + reanchored: Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), + foreign: hex!( "14b0579be12d7d7f9971f1d4b41f0e88384b9b74799b0150d4aa6cd01afb4444" ) .into(), }, // KSM - 
TokenInfo { - location: Location::new(2, [GlobalConsensus(Kusama)]), - metadata: AssetMetadata { - decimals: 12, - name: b"KSM".to_vec().try_into().unwrap(), - symbol: b"KSM".to_vec().try_into().unwrap(), - }, - foreign_token_id: hex!( + RegisterTokenTestCase { + native: Location::new(2, [GlobalConsensus(Kusama)]), + reanchored: Location::new(1, [GlobalConsensus(Kusama)]), + foreign: hex!( "03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852" ) .into(), }, // KAR (Some Kusama parachain currency) - TokenInfo { - location: Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), - metadata: AssetMetadata { - decimals: 12, - name: b"KAR".to_vec().try_into().unwrap(), - symbol: b"KAR".to_vec().try_into().unwrap(), - }, - foreign_token_id: hex!( + RegisterTokenTestCase { + native: Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + reanchored: Location::new(1, [GlobalConsensus(Kusama), Parachain(2000)]), + foreign: hex!( "d3e39ad6ea4cee68c9741181e94098823b2ea34a467577d0875c036f0fce5be0" ) .into(), }, ]; - for asset in assets.iter() { + for tc in test_cases.iter() { new_test_ext(true).execute_with(|| { let origin = RuntimeOrigin::root(); - let versioned_location: Box = - Box::new(asset.location.clone().into()); - let asset_metadata = asset.metadata.clone(); + let versioned_location: VersionedLocation = tc.native.clone().into(); - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); + assert_ok!(EthereumSystem::register_token(origin, Box::new(versioned_location), Default::default())); - let location = asset - .location - .clone() - .reanchored(&EthereumDestination::get(), &UniversalLocation::get()) - .unwrap(); + assert_eq!(NativeToForeignId::::get(tc.reanchored.clone()), Some(tc.foreign.clone())); + assert_eq!(ForeignToNativeId::::get(tc.foreign.clone()), Some(tc.reanchored.clone())); System::assert_last_event(RuntimeEvent::EthereumSystem(Event::::RegisterToken { - location: location.into(), - foreign_token_id: asset.foreign_token_id, + location: tc.reanchored.clone().into(), + foreign_token_id: tc.foreign, })); }); } @@ -764,14 +736,8 @@ fn register_ethereum_native_token_fails() { ], ); let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 18, - name: b"WETH".to_vec().try_into().unwrap(), - symbol: b"WETH".to_vec().try_into().unwrap(), - }; - assert_noop!( - EthereumSystem::register_token(origin, versioned_location, asset_metadata), + EthereumSystem::register_token(origin, versioned_location, Default::default()), Error::::LocationConversionFailed ); }); diff --git a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index 01ad86a26018..7ad129a52542 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -154,7 +154,21 @@ pub const SECONDARY_GOVERNANCE_CHANNEL: ChannelId = /// Metadata to include in the instantiated ERC20 token contract #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub struct AssetMetadata { - pub name: BoundedVec>, - pub symbol: BoundedVec>, + pub name: BoundedVec>, + pub symbol: BoundedVec>, pub decimals: u8, } + +#[cfg(any(test, feature = "std", feature = "runtime-benchmarks"))] +impl Default for AssetMetadata { + fn default() -> Self { + AssetMetadata { + name: BoundedVec::truncate_from(vec![]), + symbol: BoundedVec::truncate_from(vec![]), + decimals: 0, + } + } +} + +/// Maximum length of a string field in ERC20 token metada 
+const METADATA_FIELD_MAX_LEN: u32 = 32; From 5f72a1fe9d2c1e72325eea2e88f84c2dccbe9c5f Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:31:25 +0100 Subject: [PATCH 03/12] Fix subsystem bench publishing (#5667) Fixes: https://github.com/paritytech/ci_cd/issues/1034 --- .github/workflows/subsystem-benchmarks.yml | 110 +++++++++++++-------- 1 file changed, 71 insertions(+), 39 deletions(-) diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml index c33c782a731c..6f9db9da9e69 100644 --- a/.github/workflows/subsystem-benchmarks.yml +++ b/.github/workflows/subsystem-benchmarks.yml @@ -1,9 +1,11 @@ +name: Subsystem Benchmarks + on: push: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review] merge_group: concurrency: @@ -21,27 +23,49 @@ jobs: runs-on: ubuntu-latest outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} steps: - name: Checkout uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi build: timeout-minutes: 80 - needs: [ set-image ] - runs-on: arc-runners-polkadot-sdk-benchmark + needs: [set-image] + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} strategy: fail-fast: false matrix: - features: [ - { name: "polkadot-availability-recovery", bench: "availability-recovery-regression-bench" }, - { name: "polkadot-availability-distribution", bench: "availability-distribution-regression-bench" }, - { name: "polkadot-node-core-approval-voting", bench: "approval-voting-regression-bench" }, - { name: "polkadot-statement-distribution", bench: "statement-distribution-regression-bench" } - ] + features: + [ + { + name: "polkadot-availability-recovery", + bench: "availability-recovery-regression-bench", + }, + { + name: "polkadot-availability-distribution", + bench: "availability-distribution-regression-bench", + }, + { + name: "polkadot-node-core-approval-voting", + bench: "approval-voting-regression-bench", + }, + { + name: "polkadot-statement-distribution", + bench: "statement-distribution-regression-bench", + }, + ] steps: - name: Checkout uses: actions/checkout@v4 @@ -56,34 +80,38 @@ jobs: run: | forklift cargo bench -p ${{ matrix.features.name }} --bench ${{ matrix.features.bench }} --features subsystem-benchmarks || echo "Benchmarks failed" ls -lsa ./charts - mkdir ./artifacts - cp ./charts/${{ matrix.features.bench }}.json ./artifacts/${{ matrix.features.bench }}.json + - name: Upload artifacts uses: actions/upload-artifact@v4.3.6 with: name: ${{matrix.features.bench}} - path: ./artifacts + path: ./charts publish-benchmarks: timeout-minutes: 60 - needs: [ build ] + needs: [build] if: github.ref == 'refs/heads/master' environment: subsystem-benchmarks runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 + with: + ref: gh-pages + fetch-depth: 0 + + - run: git checkout master -- - name: Download artifacts uses: actions/download-artifact@v4.1.8 with: - path: ./artifacts + path: ./charts - name: Setup git run: | # Fixes "detected dubious ownership" error in the ci git 
config --global --add safe.directory '*' - ls -lsR ./artifacts + ls -lsR ./charts - uses: actions/create-github-app-token@v1 id: app-token @@ -91,46 +119,50 @@ jobs: app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} - - name: Publish ${{ env.BENCH_NAME }} - uses: benchmark-action/github-action-benchmark@v1 + - name: Generate ${{ env.BENCH }} env: - BENCH_NAME: availability-recovery-regression-bench + BENCH: availability-recovery-regression-bench + uses: benchmark-action/github-action-benchmark@v1 with: tool: "customSmallerIsBetter" - name: ${{ env.BENCH_NAME }} - output-file-path: ./artifacts/${{ env.BENCH_NAME }}/${{ env.BENCH_NAME }}.json - benchmark-data-dir-path: ./artifacts/${{ env.BENCH_NAME }} + name: ${{ env.BENCH }} + output-file-path: ./charts/${{ env.BENCH }}/${{ env.BENCH }}.json + benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} + auto-push: true - - name: Publish ${{ env.BENCH_NAME }} - uses: benchmark-action/github-action-benchmark@v1 + - name: Generate ${{ env.BENCH }} env: - BENCH_NAME: availability-distribution-regression-bench + BENCH: availability-distribution-regression-bench + uses: benchmark-action/github-action-benchmark@v1 with: tool: "customSmallerIsBetter" - name: ${{ env.BENCH_NAME }} - output-file-path: ./artifacts/${{ env.BENCH_NAME }}/${{ env.BENCH_NAME }}.json - benchmark-data-dir-path: ./artifacts/${{ env.BENCH_NAME }} + name: ${{ env.BENCH }} + output-file-path: ./charts/${{ env.BENCH }}/${{ env.BENCH }}.json + benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} + auto-push: true - - name: Publish ${{ env.BENCH_NAME }} - uses: benchmark-action/github-action-benchmark@v1 + - name: Generate ${{ env.BENCH }} env: - BENCH_NAME: approval-voting-regression-bench + BENCH: approval-voting-regression-bench + uses: benchmark-action/github-action-benchmark@v1 with: tool: "customSmallerIsBetter" - name: ${{ env.BENCH_NAME }} - output-file-path: ./artifacts/${{ env.BENCH_NAME }}/${{ env.BENCH_NAME }}.json - benchmark-data-dir-path: ./artifacts/${{ env.BENCH_NAME }} + name: ${{ env.BENCH }} + output-file-path: ./charts/${{ env.BENCH }}/${{ env.BENCH }}.json + benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} + auto-push: true - - name: Publish ${{ env.BENCH_NAME }} - uses: benchmark-action/github-action-benchmark@v1 + - name: Generate ${{ env.BENCH }} env: - BENCH_NAME: statement-distribution-regression-bench + BENCH: statement-distribution-regression-bench + uses: benchmark-action/github-action-benchmark@v1 with: tool: "customSmallerIsBetter" - name: ${{ env.BENCH_NAME }} - output-file-path: ./artifacts/${{ env.BENCH_NAME }}/${{ env.BENCH_NAME }}.json - benchmark-data-dir-path: ./artifacts/${{ env.BENCH_NAME }} + name: ${{ env.BENCH }} + output-file-path: ./charts/${{ env.BENCH }}/${{ env.BENCH }}.json + benchmark-data-dir-path: ./bench/${{ env.BENCH }} github-token: ${{ steps.app-token.outputs.token }} + auto-push: true From 4653c3791479cec68ad78624a403eaee1c147835 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Thu, 12 Sep 2024 17:42:28 +0100 Subject: [PATCH 04/12] Add `pallet_proxy` to People Chain and Coretime Chain testnet runtimes. (#5509) Proxies are possible in the runtimes for Kusama and Polkadot but this functionality was not previously available on testnets. Closes #5453. 
Proxies can now be used on `coretime-rococo`, `coretime-westend`, `people-rococo` and `people-westend` in the same way as they can be on Kusama and Polkadot chains. The exact same proxies are configured as the production runtimes for the respective system parachains. --- Cargo.lock | 4 + .../coretime/coretime-rococo/Cargo.toml | 4 + .../coretime/coretime-rococo/src/lib.rs | 143 ++++++++++- .../coretime-rococo/src/weights/mod.rs | 1 + .../src/weights/pallet_proxy.rs | 226 ++++++++++++++++++ .../coretime/coretime-westend/Cargo.toml | 4 + .../coretime/coretime-westend/src/lib.rs | 143 ++++++++++- .../coretime-westend/src/weights/mod.rs | 1 + .../src/weights/pallet_proxy.rs | 226 ++++++++++++++++++ .../runtimes/people/people-rococo/Cargo.toml | 4 + .../runtimes/people/people-rococo/src/lib.rs | 123 +++++++++- .../people/people-rococo/src/weights/mod.rs | 1 + .../people-rococo/src/weights/pallet_proxy.rs | 226 ++++++++++++++++++ .../runtimes/people/people-westend/Cargo.toml | 4 + .../runtimes/people/people-westend/src/lib.rs | 123 +++++++++- .../people/people-westend/src/weights/mod.rs | 1 + .../src/weights/pallet_proxy.rs | 226 ++++++++++++++++++ prdoc/pr_5509.prdoc | 17 ++ 18 files changed, 1465 insertions(+), 12 deletions(-) create mode 100644 cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs create mode 100644 cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs create mode 100644 cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs create mode 100644 cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs create mode 100644 prdoc/pr_5509.prdoc diff --git a/Cargo.lock b/Cargo.lock index 35dc61928178..d30acc0f5b25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3357,6 +3357,7 @@ dependencies = [ "pallet-collator-selection", "pallet-message-queue", "pallet-multisig", + "pallet-proxy", "pallet-session", "pallet-sudo", "pallet-timestamp", @@ -3453,6 +3454,7 @@ dependencies = [ "pallet-collator-selection", "pallet-message-queue", "pallet-multisig", + "pallet-proxy", "pallet-session", "pallet-timestamp", "pallet-transaction-payment", @@ -12683,6 +12685,7 @@ dependencies = [ "pallet-identity", "pallet-message-queue", "pallet-multisig", + "pallet-proxy", "pallet-session", "pallet-timestamp", "pallet-transaction-payment", @@ -12782,6 +12785,7 @@ dependencies = [ "pallet-identity", "pallet-message-queue", "pallet-multisig", + "pallet-proxy", "pallet-session", "pallet-timestamp", "pallet-transaction-payment", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 07d133c80be7..80417ea00362 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -34,6 +34,7 @@ pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } pallet-broker = { workspace = true } pallet-multisig = { workspace = true } +pallet-proxy = { workspace = true } pallet-session = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } @@ -108,6 +109,7 @@ std = [ "pallet-collator-selection/std", "pallet-message-queue/std", "pallet-multisig/std", + "pallet-proxy/std", "pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", @@ -158,6 +160,7 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", 
"pallet-multisig/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -188,6 +191,7 @@ try-runtime = [ "pallet-collator-selection/try-runtime", "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", + "pallet-proxy/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index aea2bf232cbc..0c9f9461f7f4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -36,6 +36,7 @@ pub mod xcm_config; extern crate alloc; use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -43,7 +44,9 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, TransformOrigin, + }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; @@ -65,9 +68,9 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::Block as BlockT, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, + ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -438,6 +441,138 @@ impl pallet_multisig::Config for Runtime { type WeightInfo = weights::pallet_multisig::WeightInfo; } +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Proxy for all Broker pallet calls. + Broker, + /// Proxy for renewing coretime. + CoretimeRenewer, + /// Proxy able to purchase on-demand coretime credits. + OnDemandPurchaser, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + // `purchase`, `renew`, `transfer` and `purchase_credit` are pretty self explanatory. + RuntimeCall::Broker(pallet_broker::Call::purchase { .. }) | + RuntimeCall::Broker(pallet_broker::Call::renew { .. }) | + RuntimeCall::Broker(pallet_broker::Call::transfer { .. }) | + RuntimeCall::Broker(pallet_broker::Call::purchase_credit { .. 
}) | + // `pool` doesn't transfer, but it defines the account to be paid for contributions + RuntimeCall::Broker(pallet_broker::Call::pool { .. }) | + // `assign` is essentially a transfer of a region NFT + RuntimeCall::Broker(pallet_broker::Call::assign { .. }) + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Broker => { + matches!( + c, + RuntimeCall::Broker { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::CoretimeRenewer => { + matches!( + c, + RuntimeCall::Broker(pallet_broker::Call::renew { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::OnDemandPurchaser => { + matches!( + c, + RuntimeCall::Broker(pallet_broker::Call::purchase_credit { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Broker, ProxyType::CoretimeRenewer) => true, + (ProxyType::Broker, ProxyType::OnDemandPurchaser) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + impl pallet_utility::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -481,6 +616,7 @@ construct_runtime!( // Handy utilities. Utility: pallet_utility = 40, Multisig: pallet_multisig = 41, + Proxy: pallet_proxy = 42, // The main stage. Broker: pallet_broker = 50, @@ -504,6 +640,7 @@ mod benches { [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] + [pallet_proxy, Proxy] [pallet_utility, Utility] // NOTE: Make sure you point to the individual modules below. 
[pallet_xcm_benchmarks::fungible, XcmBalances] diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index f1050b3ae636..ab3d6704c937 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod pallet_broker; pub mod pallet_collator_selection; pub mod pallet_message_queue; pub mod pallet_multisig; +pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; pub mod pallet_utility; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs new file mode 100644 index 000000000000..5f95906f473e --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs @@ -0,0 +1,226 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_417_000 picoseconds. 
+ Weight::from_parts(17_283_443, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_409 + .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_572_000 picoseconds. + Weight::from_parts(37_045_756, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_896 + .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) + // Standard Error: 2_993 + .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_066_000 picoseconds. + Weight::from_parts(24_711_403, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_626 + .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) + // Standard Error: 1_680 + .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_162_000 picoseconds. 
+ Weight::from_parts(23_928_058, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_858_000 picoseconds. + Weight::from_parts(33_568_059, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_816 + .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) + // Standard Error: 1_876 + .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_235_199, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_363 + .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_186_000 picoseconds. + Weight::from_parts(26_823_133, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_259 + .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_156_000 picoseconds. 
+ Weight::from_parts(23_304_060, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_009_062, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_978 + .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_281_000 picoseconds. + Weight::from_parts(24_392_989, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_943 + .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 5029c82f971d..25bf777047d0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -34,6 +34,7 @@ pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } pallet-broker = { workspace = true } pallet-multisig = { workspace = true } +pallet-proxy = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } @@ -108,6 +109,7 @@ std = [ "pallet-collator-selection/std", "pallet-message-queue/std", "pallet-multisig/std", + "pallet-proxy/std", "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -157,6 +159,7 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -186,6 +189,7 @@ try-runtime = [ "pallet-collator-selection/try-runtime", "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", + "pallet-proxy/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 218afaab924d..614eae895a74 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -36,6 +36,7 @@ pub mod xcm_config; extern crate alloc; use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode, 
MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -43,7 +44,9 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, TransformOrigin, + }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; @@ -65,9 +68,9 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::Block as BlockT, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, + ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -438,6 +441,138 @@ impl pallet_multisig::Config for Runtime { type WeightInfo = weights::pallet_multisig::WeightInfo; } +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Proxy for all Broker pallet calls. + Broker, + /// Proxy for renewing coretime. + CoretimeRenewer, + /// Proxy able to purchase on-demand coretime credits. + OnDemandPurchaser, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + // `purchase`, `renew`, `transfer` and `purchase_credit` are pretty self explanatory. + RuntimeCall::Broker(pallet_broker::Call::purchase { .. }) | + RuntimeCall::Broker(pallet_broker::Call::renew { .. }) | + RuntimeCall::Broker(pallet_broker::Call::transfer { .. }) | + RuntimeCall::Broker(pallet_broker::Call::purchase_credit { .. }) | + // `pool` doesn't transfer, but it defines the account to be paid for contributions + RuntimeCall::Broker(pallet_broker::Call::pool { .. }) | + // `assign` is essentially a transfer of a region NFT + RuntimeCall::Broker(pallet_broker::Call::assign { .. }) + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Broker => { + matches!( + c, + RuntimeCall::Broker { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::CoretimeRenewer => { + matches!( + c, + RuntimeCall::Broker(pallet_broker::Call::renew { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::OnDemandPurchaser => { + matches!( + c, + RuntimeCall::Broker(pallet_broker::Call::purchase_credit { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ) + }, + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Broker, ProxyType::CoretimeRenewer) => true, + (ProxyType::Broker, ProxyType::OnDemandPurchaser) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + impl pallet_utility::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -475,6 +610,7 @@ construct_runtime!( // Handy utilities. Utility: pallet_utility = 40, Multisig: pallet_multisig = 41, + Proxy: pallet_proxy = 42, // The main stage. Broker: pallet_broker = 50, @@ -495,6 +631,7 @@ mod benches { [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] + [pallet_proxy, Proxy] [pallet_utility, Utility] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index f1050b3ae636..ab3d6704c937 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod pallet_broker; pub mod pallet_collator_selection; pub mod pallet_message_queue; pub mod pallet_multisig; +pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; pub mod pallet_utility; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs new file mode 100644 index 000000000000..d3edc1a8b200 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs @@ -0,0 +1,226 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/coretime/coretime-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_283_443, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_409 + .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_572_000 picoseconds. 
+ Weight::from_parts(37_045_756, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_896 + .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) + // Standard Error: 2_993 + .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_066_000 picoseconds. + Weight::from_parts(24_711_403, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_626 + .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) + // Standard Error: 1_680 + .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_162_000 picoseconds. + Weight::from_parts(23_928_058, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_858_000 picoseconds. 
+ Weight::from_parts(33_568_059, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_816 + .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) + // Standard Error: 1_876 + .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_235_199, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_363 + .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_186_000 picoseconds. + Weight::from_parts(26_823_133, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_259 + .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_156_000 picoseconds. + Weight::from_parts(23_304_060, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_009_062, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_978 + .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_281_000 picoseconds. 
+ Weight::from_parts(24_392_989, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_943 + .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index c676587b1de4..c969bb2985bd 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -31,6 +31,7 @@ pallet-balances = { workspace = true } pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } +pallet-proxy = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } @@ -104,6 +105,7 @@ std = [ "pallet-identity/std", "pallet-message-queue/std", "pallet-multisig/std", + "pallet-proxy/std", "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -153,6 +155,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -182,6 +185,7 @@ try-runtime = [ "pallet-identity/try-runtime", "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", + "pallet-proxy/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index cb9177d0c23b..9b251a90d678 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -25,6 +25,7 @@ pub mod xcm_config; extern crate alloc; use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -33,7 +34,8 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, InstanceFilter, + TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, @@ -57,11 +59,11 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::Block as BlockT, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; +pub use sp_runtime::{MultiAddress, Perbill, Permill, RuntimeDebug}; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -401,6 +403,119 @@ impl pallet_multisig::Config for Runtime { type WeightInfo = weights::pallet_multisig::WeightInfo; } +/// The type used to represent the kinds of proxying allowed. 
+#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Proxy for all Identity pallet calls. + Identity, + /// Proxy for identity registrars. + IdentityJudgement, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + // `request_judgement` puts up a deposit to transfer to a registrar + RuntimeCall::Identity(pallet_identity::Call::request_judgement { .. }) + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Identity => { + matches!( + c, + RuntimeCall::Identity { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::IdentityJudgement => matches!( + c, + RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | + RuntimeCall::Utility(..) | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Identity, ProxyType::IdentityJudgement) => true, + (ProxyType::NonTransfer, ProxyType::IdentityJudgement) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +parameter_types! { + // One storage item; key size 32, value size 8. + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16. + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + impl pallet_utility::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -446,6 +561,7 @@ construct_runtime!( // Handy utilities. Utility: pallet_utility = 40, Multisig: pallet_multisig = 41, + Proxy: pallet_proxy = 42, // The main stage. 
Identity: pallet_identity = 50, @@ -464,6 +580,7 @@ mod benches { [pallet_identity, Identity] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] + [pallet_proxy, Proxy] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs index 3396a8caea05..dce959e817be 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod pallet_collator_selection; pub mod pallet_identity; pub mod pallet_message_queue; pub mod pallet_multisig; +pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; pub mod pallet_utility; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs new file mode 100644 index 000000000000..264213c94d44 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs @@ -0,0 +1,226 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=people-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/people/people-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_417_000 picoseconds. 
+ Weight::from_parts(17_283_443, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_409 + .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_572_000 picoseconds. + Weight::from_parts(37_045_756, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_896 + .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) + // Standard Error: 2_993 + .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_066_000 picoseconds. + Weight::from_parts(24_711_403, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_626 + .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) + // Standard Error: 1_680 + .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_162_000 picoseconds. 
+ Weight::from_parts(23_928_058, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_858_000 picoseconds. + Weight::from_parts(33_568_059, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_816 + .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) + // Standard Error: 1_876 + .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_235_199, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_363 + .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_186_000 picoseconds. + Weight::from_parts(26_823_133, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_259 + .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_156_000 picoseconds. 
+ Weight::from_parts(23_304_060, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_009_062, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_978 + .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_281_000 picoseconds. + Weight::from_parts(24_392_989, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_943 + .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index ab7dd04bb78a..64e956d8b6b5 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -31,6 +31,7 @@ pallet-balances = { workspace = true } pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } +pallet-proxy = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } @@ -104,6 +105,7 @@ std = [ "pallet-identity/std", "pallet-message-queue/std", "pallet-multisig/std", + "pallet-proxy/std", "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -153,6 +155,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -182,6 +185,7 @@ try-runtime = [ "pallet-identity/try-runtime", "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", + "pallet-proxy/try-runtime", "pallet-session/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 9813c5cb6acc..07bfba92c933 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -25,6 +25,7 @@ pub mod xcm_config; extern crate alloc; use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode, MaxEncodedLen}; use 
cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -33,7 +34,8 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, InstanceFilter, + TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, @@ -57,11 +59,11 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::Block as BlockT, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; +pub use sp_runtime::{MultiAddress, Perbill, Permill, RuntimeDebug}; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -401,6 +403,119 @@ impl pallet_multisig::Config for Runtime { type WeightInfo = weights::pallet_multisig::WeightInfo; } +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Proxy for all Identity pallet calls. + Identity, + /// Proxy for identity registrars. + IdentityJudgement, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + // `request_judgement` puts up a deposit to transfer to a registrar + RuntimeCall::Identity(pallet_identity::Call::request_judgement { .. }) + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Identity => { + matches!( + c, + RuntimeCall::Identity { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ) + }, + ProxyType::IdentityJudgement => matches!( + c, + RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | + RuntimeCall::Utility(..) | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Identity, ProxyType::IdentityJudgement) => true, + (ProxyType::NonTransfer, ProxyType::IdentityJudgement) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +parameter_types! { + // One storage item; key size 32, value size 8. + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. 
+ pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16. + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + impl pallet_utility::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -446,6 +561,7 @@ construct_runtime!( // Handy utilities. Utility: pallet_utility = 40, Multisig: pallet_multisig = 41, + Proxy: pallet_proxy = 42, // The main stage. Identity: pallet_identity = 50, @@ -464,6 +580,7 @@ mod benches { [pallet_identity, Identity] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] + [pallet_proxy, Proxy] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs index 3396a8caea05..dce959e817be 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod pallet_collator_selection; pub mod pallet_identity; pub mod pallet_message_queue; pub mod pallet_multisig; +pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; pub mod pallet_utility; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs new file mode 100644 index 000000000000..e962123f216c --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs @@ -0,0 +1,226 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=people-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/people/people-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_283_443, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_409 + .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_572_000 picoseconds. + Weight::from_parts(37_045_756, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_896 + .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) + // Standard Error: 2_993 + .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_066_000 picoseconds. 
+ Weight::from_parts(24_711_403, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_626 + .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) + // Standard Error: 1_680 + .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 24_162_000 picoseconds. + Weight::from_parts(23_928_058, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_858_000 picoseconds. + Weight::from_parts(33_568_059, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_816 + .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) + // Standard Error: 1_876 + .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_235_199, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_363 + .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. 
+ fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_186_000 picoseconds. + Weight::from_parts(26_823_133, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_259 + .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_156_000 picoseconds. + Weight::from_parts(23_304_060, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_009_062, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_978 + .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 23_281_000 picoseconds. + Weight::from_parts(24_392_989, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_943 + .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/prdoc/pr_5509.prdoc b/prdoc/pr_5509.prdoc new file mode 100644 index 000000000000..154146034e6f --- /dev/null +++ b/prdoc/pr_5509.prdoc @@ -0,0 +1,17 @@ +title: Add `pallet_proxy` to People Chain and Coretime Chain testnet runtimes. + +doc: + - audience: Runtime User + description: | + Proxies can now be used on `coretime-rococo`, `coretime-westend`, `people-rococo` and + `people-westend` in the same way as they can be on Kusama and Polkadot chains. + +crates: + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major From cd69f209b71c554f06d3061169ad00c5f1c170b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Thu, 12 Sep 2024 17:48:04 +0100 Subject: [PATCH 05/12] Add emulated tests for the Coretime Interface calls to ensure sufficient weights (#2704) Add emulated test cases for the coretime chain. This tests the calls sent across the `CoretimeInterface` and ensures the weights are sufficient. 
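For context, the added tests all follow the same emulated-test pattern: drive `pallet_broker` manually via `on_initialize` (pallet hooks do not run in the emulated environment), trigger one of the three hardcoded-weight transacts, and then assert on the relay chain that the resulting UMP message was processed. The sketch below condenses that pattern for the `request_core_count` case. It is illustrative only: the test name is hypothetical, and the fully qualified paths assume the `Chain` and `CoretimeRococoPallet` aliases re-exported from the crate's `imports` prelude; the complete tests appear in the diff that follows.

```rust
// Hypothetical, pared-down illustration of the test pattern added in this patch.
// Assumes the coretime-rococo integration-test crate's prelude (`CoretimeRococo`,
// `Rococo`, `Chain`, `CoretimeRococoPallet`, `assert_ok!`, ...).
use crate::imports::*;
use frame_support::traits::OnInitialize;
use pallet_broker::ConfigRecord;
use sp_runtime::Perbill;

#[test]
fn core_count_request_weight_is_sufficient() {
	CoretimeRococo::execute_with(|| {
		// Hooks don't run in emulated tests, so tick the broker pallet manually.
		<CoretimeRococo as CoretimeRococoPallet>::Broker::on_initialize(
			<CoretimeRococo as Chain>::System::block_number(),
		);

		let root = <CoretimeRococo as Chain>::RuntimeOrigin::root();

		// Configure the broker, then start sales. `start_sales` sends the
		// `request_core_count` Transact (with a hardcoded weight) to the relay
		// chain over the CoretimeInterface.
		let config = ConfigRecord {
			advance_notice: 1,
			interlude_length: 1,
			leadin_length: 2,
			region_length: 1,
			ideal_bulk_proportion: Perbill::from_percent(40),
			limit_cores_offered: None,
			renewal_bump: Perbill::from_percent(2),
			contribution_timeout: 1,
		};
		assert_ok!(<CoretimeRococo as CoretimeRococoPallet>::Broker::configure(
			root.clone(),
			config
		));
		assert_ok!(<CoretimeRococo as CoretimeRococoPallet>::Broker::start_sales(root, 100, 0));
	});

	// If the hardcoded weight in the CoretimeInterface is too low, the UMP
	// message fails to execute on the relay chain and this assertion catches it.
	Rococo::execute_with(|| {
		Rococo::assert_ump_queue_processed(true, Some(CoretimeRococo::para_id()), None);
	});
}
```

The full tests below additionally reserve a worst-case workload and advance the relay block number so that the `assign_core` and `request_revenue_info_at` transacts are exercised and checked in the same way.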
--- Cargo.lock | 14 +- .../coretime/coretime-rococo/Cargo.toml | 2 +- .../coretime/coretime-westend/Cargo.toml | 2 +- .../tests/coretime/coretime-rococo/Cargo.toml | 5 +- .../tests/coretime/coretime-rococo/src/lib.rs | 4 +- .../src/tests/coretime_interface.rs | 235 ++++++++++++++++++ .../coretime/coretime-rococo/src/tests/mod.rs | 1 + .../coretime/coretime-westend/Cargo.toml | 5 +- .../coretime/coretime-westend/src/lib.rs | 4 +- .../src/tests/coretime_interface.rs | 223 +++++++++++++++++ .../coretime-westend/src/tests/mod.rs | 1 + 11 files changed, 484 insertions(+), 12 deletions(-) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs diff --git a/Cargo.lock b/Cargo.lock index d30acc0f5b25..e5b12a60da2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3299,7 +3299,7 @@ dependencies = [ [[package]] name = "coretime-rococo-emulated-chain" -version = "0.0.0" +version = "0.1.0" dependencies = [ "coretime-rococo-runtime", "cumulus-primitives-core", @@ -3312,14 +3312,17 @@ dependencies = [ [[package]] name = "coretime-rococo-integration-tests" -version = "0.1.0" +version = "0.0.0" dependencies = [ + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", "frame-support", "pallet-balances", + "pallet-broker", "pallet-identity", "pallet-message-queue", "polkadot-runtime-common", + "polkadot-runtime-parachains", "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", @@ -3396,7 +3399,7 @@ dependencies = [ [[package]] name = "coretime-westend-emulated-chain" -version = "0.0.0" +version = "0.1.0" dependencies = [ "coretime-westend-runtime", "cumulus-primitives-core", @@ -3409,14 +3412,17 @@ dependencies = [ [[package]] name = "coretime-westend-integration-tests" -version = "0.1.0" +version = "0.0.0" dependencies = [ + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", "frame-support", "pallet-balances", + "pallet-broker", "pallet-identity", "pallet-message-queue", "polkadot-runtime-common", + "polkadot-runtime-parachains", "sp-runtime", "staging-xcm", "staging-xcm-executor", diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml index 6af3f270a905..94d43c5eee2f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "coretime-rococo-emulated-chain" -version = "0.0.0" +version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml index 895a984eccb2..2640c27d016b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "coretime-westend-emulated-chain" -version = "0.0.0" +version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" diff 
--git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml index 259be790c3e5..28d9da0993ff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "coretime-rococo-integration-tests" -version = "0.1.0" +version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" @@ -12,16 +12,19 @@ publish = false # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } +pallet-broker = { workspace = true, default-features = true } pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } rococo-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } # Cumulus +cumulus-pallet-parachain-system = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs index ad3c4fd58da9..055bd50d8298 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs @@ -24,7 +24,7 @@ mod imports { // Cumulus pub use emulated_integration_tests_common::xcm_emulator::{ - assert_expected_events, bx, TestExt, + assert_expected_events, bx, Chain, Parachain, TestExt, }; pub use rococo_system_emulated_network::{ coretime_rococo_emulated_chain::{ @@ -32,7 +32,7 @@ mod imports { CoretimeRococoParaPallet as CoretimeRococoPallet, }, CoretimeRococoPara as CoretimeRococo, CoretimeRococoParaReceiver as CoretimeRococoReceiver, - CoretimeRococoParaSender as CoretimeRococoSender, + CoretimeRococoParaSender as CoretimeRococoSender, RococoRelay as Rococo, }; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs new file mode 100644 index 000000000000..584bce8f1df7 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs @@ -0,0 +1,235 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::imports::*; +use frame_support::traits::OnInitialize; +use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; +use rococo_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; +use sp_runtime::Perbill; + +#[test] +fn transact_hardcoded_weights_are_sane() { + // There are three transacts with hardcoded weights sent from the Coretime Chain to the Relay + // Chain across the CoretimeInterface which are triggered at various points in the sales cycle. + // - Request core count - triggered directly by `start_sales` or `request_core_count` + // extrinsics. + // - Request revenue info - triggered when each timeslice is committed. + // - Assign core - triggered when an entry is encountered in the workplan for the next + // timeslice. + + // RuntimeEvent aliases to avoid warning from usage of qualified paths in assertions due to + // + type CoretimeEvent = ::RuntimeEvent; + type RelayEvent = ::RuntimeEvent; + + // Reserve a workload, configure broker and start sales. + CoretimeRococo::execute_with(|| { + // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things + // along and have no concept of time passing otherwise. + ::Broker::on_initialize( + ::System::block_number(), + ); + + let coretime_root_origin = ::RuntimeOrigin::root(); + + // Create and populate schedule with the worst case assignment on this core. + let mut schedule = Vec::new(); + for i in 0..27 { + schedule.push(ScheduleItem { + mask: CoreMask::void().set(i), + assignment: CoreAssignment::Task(2000 + i), + }) + } + + assert_ok!(::Broker::reserve( + coretime_root_origin.clone(), + schedule.try_into().expect("Vector is within bounds."), + )); + + // Configure broker and start sales. + let config = ConfigRecord { + advance_notice: 1, + interlude_length: 1, + leadin_length: 2, + region_length: 1, + ideal_bulk_proportion: Perbill::from_percent(40), + limit_cores_offered: None, + renewal_bump: Perbill::from_percent(2), + contribution_timeout: 1, + }; + assert_ok!(::Broker::configure( + coretime_root_origin.clone(), + config + )); + assert_ok!(::Broker::start_sales( + coretime_root_origin, + 100, + 0 + )); + assert_eq!( + pallet_broker::Status::<::Runtime>::get() + .unwrap() + .core_count, + 1 + ); + + assert_expected_events!( + CoretimeRococo, + vec![ + CoretimeEvent::Broker( + pallet_broker::Event::ReservationMade { .. } + ) => {}, + CoretimeEvent::Broker( + pallet_broker::Event::CoreCountRequested { core_count: 1 } + ) => {}, + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // Check that the request_core_count message was processed successfully. This will fail if the + // weights are misconfigured. + Rococo::execute_with(|| { + Rococo::assert_ump_queue_processed(true, Some(CoretimeRococo::para_id()), None); + + assert_expected_events!( + Rococo, + vec![ + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + // Keep track of the relay chain block number so we can fast forward while still checking the + // right block. + let mut block_number_cursor = Rococo::ext_wrapper(::System::block_number); + + let config = CoretimeRococo::ext_wrapper(|| { + Configuration::<::Runtime>::get() + .expect("Pallet was configured earlier.") + }); + + // Now run up to the block before the sale is rotated. 
+ while block_number_cursor < TIMESLICE_PERIOD - config.advance_notice - 1 { + CoretimeRococo::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + }); + + Rococo::ext_wrapper(|| { + block_number_cursor = ::System::block_number(); + }); + } + + // In this block we trigger assign core. + CoretimeRococo::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeRococo, + vec![ + CoretimeEvent::Broker( + pallet_broker::Event::SaleInitialized { .. } + ) => {}, + CoretimeEvent::Broker( + pallet_broker::Event::CoreAssigned { .. } + ) => {}, + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // Check that the assign_core message was processed successfully. + // This will fail if the weights are misconfigured. + Rococo::execute_with(|| { + Rococo::assert_ump_queue_processed(true, Some(CoretimeRococo::para_id()), None); + + assert_expected_events!( + Rococo, + vec![ + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + RelayEvent::Coretime( + polkadot_runtime_parachains::coretime::Event::CoreAssigned { .. } + ) => {}, + ] + ); + }); + + // In this block we trigger request revenue. + CoretimeRococo::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeRococo, + vec![ + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // Check that the request_revenue_info_at message was processed successfully. + // This will fail if the weights are misconfigured. + Rococo::execute_with(|| { + Rococo::assert_ump_queue_processed(true, Some(CoretimeRococo::para_id()), None); + + assert_expected_events!( + Rococo, + vec![ + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + // Here we receive and process the notify_revenue XCM with zero revenue. + CoretimeRococo::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeRococo, + vec![ + CoretimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + // Zero revenue in first timeslice so history is immediately dropped. + CoretimeEvent::Broker( + pallet_broker::Event::HistoryDropped { when: 0, revenue: 0 } + ) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/mod.rs index 0e78351bce03..bb0387a4b350 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/mod.rs @@ -14,3 +14,4 @@ // limitations under the License. 
mod claim_assets; +mod coretime_interface; diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml index a8fa905d2e5e..d57e7926b0ec 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "coretime-westend-integration-tests" -version = "0.1.0" +version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" @@ -12,16 +12,19 @@ publish = false # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } +pallet-broker = { workspace = true, default-features = true } pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } westend-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } # Cumulus +cumulus-pallet-parachain-system = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs index 838ca6eeafb6..ac844e0f3284 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs @@ -24,7 +24,7 @@ mod imports { // Cumulus pub use emulated_integration_tests_common::xcm_emulator::{ - assert_expected_events, bx, TestExt, + assert_expected_events, bx, Chain, Parachain, TestExt, }; pub use westend_system_emulated_network::{ coretime_westend_emulated_chain::{ @@ -33,7 +33,7 @@ mod imports { }, CoretimeWestendPara as CoretimeWestend, CoretimeWestendParaReceiver as CoretimeWestendReceiver, - CoretimeWestendParaSender as CoretimeWestendSender, + CoretimeWestendParaSender as CoretimeWestendSender, WestendRelay as Westend, }; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs new file mode 100644 index 000000000000..f61bc4285a0c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs @@ -0,0 +1,223 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::imports::*; +use frame_support::traits::OnInitialize; +use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; +use sp_runtime::Perbill; +use westend_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; + +#[test] +fn transact_hardcoded_weights_are_sane() { + // There are three transacts with hardcoded weights sent from the Coretime Chain to the Relay + // Chain across the CoretimeInterface which are triggered at various points in the sales cycle. + // - Request core count - triggered directly by `start_sales` or `request_core_count` + // extrinsics. + // - Request revenue info - triggered when each timeslice is committed. + // - Assign core - triggered when an entry is encountered in the workplan for the next + // timeslice. + + // RuntimeEvent aliases to avoid warning from usage of qualified paths in assertions due to + // + type CoretimeEvent = ::RuntimeEvent; + type RelayEvent = ::RuntimeEvent; + + // Reserve a workload, configure broker and start sales. + CoretimeWestend::execute_with(|| { + // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things + // along and have no concept of time passing otherwise. + ::Broker::on_initialize( + ::System::block_number(), + ); + + let coretime_root_origin = ::RuntimeOrigin::root(); + + // Create and populate schedule with the worst case assignment on this core. + let mut schedule = Vec::new(); + for i in 0..27 { + schedule.push(ScheduleItem { + mask: CoreMask::void().set(i), + assignment: CoreAssignment::Task(2000 + i), + }) + } + + assert_ok!(::Broker::reserve( + coretime_root_origin.clone(), + schedule.try_into().expect("Vector is within bounds."), + )); + + // Configure broker and start sales. + let config = ConfigRecord { + advance_notice: 1, + interlude_length: 1, + leadin_length: 2, + region_length: 1, + ideal_bulk_proportion: Perbill::from_percent(40), + limit_cores_offered: None, + renewal_bump: Perbill::from_percent(2), + contribution_timeout: 1, + }; + assert_ok!(::Broker::configure( + coretime_root_origin.clone(), + config + )); + assert_ok!(::Broker::start_sales( + coretime_root_origin, + 100, + 0 + )); + assert_eq!( + pallet_broker::Status::<::Runtime>::get() + .unwrap() + .core_count, + 1 + ); + + assert_expected_events!( + CoretimeWestend, + vec![ + CoretimeEvent::Broker( + pallet_broker::Event::ReservationMade { .. } + ) => {}, + CoretimeEvent::Broker( + pallet_broker::Event::CoreCountRequested { core_count: 1 } + ) => {}, + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // Check that the request_core_count message was processed successfully. This will fail if the + // weights are misconfigured. + Westend::execute_with(|| { + Westend::assert_ump_queue_processed(true, Some(CoretimeWestend::para_id()), None); + + assert_expected_events!( + Westend, + vec![ + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + // Keep track of the relay chain block number so we can fast forward while still checking the + // right block. + let mut block_number_cursor = Westend::ext_wrapper(::System::block_number); + + let config = CoretimeWestend::ext_wrapper(|| { + Configuration::<::Runtime>::get() + .expect("Pallet was configured earlier.") + }); + + // Now run up to the block before the sale is rotated. 
+ while block_number_cursor < TIMESLICE_PERIOD - config.advance_notice - 1 { + CoretimeWestend::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + }); + + Westend::ext_wrapper(|| { + block_number_cursor = ::System::block_number(); + }); + } + + // In this block we trigger assign core. + CoretimeWestend::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeWestend, + vec![ + CoretimeEvent::Broker( + pallet_broker::Event::SaleInitialized { .. } + ) => {}, + CoretimeEvent::Broker( + pallet_broker::Event::CoreAssigned { .. } + ) => {}, + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // In this block we trigger request revenue. + CoretimeWestend::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeWestend, + vec![ + CoretimeEvent::ParachainSystem( + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ) => {}, + ] + ); + }); + + // Check that the assign_core and request_revenue_info_at messages were processed successfully. + // This will fail if the weights are misconfigured. + Westend::execute_with(|| { + Westend::assert_ump_queue_processed(true, Some(CoretimeWestend::para_id()), None); + + assert_expected_events!( + Westend, + vec![ + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + RelayEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + RelayEvent::Coretime( + polkadot_runtime_parachains::coretime::Event::CoreAssigned { .. } + ) => {}, + ] + ); + }); + + // Here we receive and process the notify_revenue XCM with zero revenue. + CoretimeWestend::execute_with(|| { + // Hooks don't run in emulated tests - workaround. + ::Broker::on_initialize( + ::System::block_number(), + ); + + assert_expected_events!( + CoretimeWestend, + vec![ + CoretimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + // Zero revenue in first timeslice so history is immediately dropped. + CoretimeEvent::Broker( + pallet_broker::Event::HistoryDropped { when: 0, revenue: 0 } + ) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/mod.rs index 0e78351bce03..bb0387a4b350 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/mod.rs @@ -14,3 +14,4 @@ // limitations under the License. 
mod claim_assets; +mod coretime_interface; From 8d0aab812ebb70e39b0af893862ba204dc098860 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 12 Sep 2024 19:26:59 +0200 Subject: [PATCH 06/12] [pallet-revive] fix xcm tests (#5684) fix https://github.com/paritytech/polkadot-sdk/issues/5683 --- Cargo.lock | 1 + prdoc/pr_5684.prdoc | 19 +++++++ substrate/frame/revive/fixtures/Cargo.toml | 2 + substrate/frame/revive/fixtures/src/lib.rs | 1 + .../frame/revive/mock-network/Cargo.toml | 1 + .../frame/revive/mock-network/src/tests.rs | 55 +++++++++---------- substrate/frame/revive/src/wasm/runtime.rs | 7 ++- .../frame/revive/uapi/src/host/riscv32.rs | 9 ++- umbrella/Cargo.toml | 1 + 9 files changed, 64 insertions(+), 32 deletions(-) create mode 100644 prdoc/pr_5684.prdoc diff --git a/Cargo.lock b/Cargo.lock index e5b12a60da2a..5f0fa0f613f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11471,6 +11471,7 @@ version = "0.1.0" dependencies = [ "anyhow", "frame-system", + "log", "parity-wasm", "polkavm-linker 0.10.0", "sp-core", diff --git a/prdoc/pr_5684.prdoc b/prdoc/pr_5684.prdoc new file mode 100644 index 000000000000..a17bacd2fb94 --- /dev/null +++ b/prdoc/pr_5684.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-revive]" + +doc: + - audience: Runtime Devs + description: | + Update xcm runtime api, and fix pallet-revive xcm tests + +crates: + - name: pallet-revive + bump: patch + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-mock-network + bump: patch + - name: polkadot-sdk + bump: patch diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index db284c7cc062..903298d2df21 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -16,6 +16,7 @@ sp-core = { workspace = true, default-features = true, optional = true } sp-io = { workspace = true, default-features = true, optional = true } sp-runtime = { workspace = true, default-features = true, optional = true } anyhow = { workspace = true, default-features = true, optional = true } +log = { workspace = true } [build-dependencies] parity-wasm = { workspace = true } @@ -34,6 +35,7 @@ riscv = [] std = [ "anyhow", "frame-system", + "log/std", "sp-core", "sp-io", "sp-runtime", diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index 54e32130635a..5548dca66d07 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -24,6 +24,7 @@ extern crate alloc; pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); + log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; let code_hash = sp_io::hashing::keccak_256(&binary); Ok((binary, sp_core::H256(code_hash))) diff --git a/substrate/frame/revive/mock-network/Cargo.toml b/substrate/frame/revive/mock-network/Cargo.toml index 0d597bbdc228..85656a57b49c 100644 --- a/substrate/frame/revive/mock-network/Cargo.toml +++ b/substrate/frame/revive/mock-network/Cargo.toml @@ -48,6 +48,7 @@ pallet-revive-fixtures = { workspace = true } [features] default = ["std"] +riscv = ["pallet-revive-fixtures/riscv"] std = [ "codec/std", "frame-support/std", diff --git 
a/substrate/frame/revive/mock-network/src/tests.rs b/substrate/frame/revive/mock-network/src/tests.rs index 9259dd6f169e..bd05726a1a45 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -16,10 +16,8 @@ // limitations under the License. use crate::{ - parachain::{self, Runtime}, - parachain_account_sovereign_account_id, - primitives::{AccountId, CENTS}, - relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, + parachain, parachain_account_sovereign_account_id, primitives::CENTS, relay_chain, MockNet, + ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, }; use codec::{Decode, Encode}; use frame_support::traits::{fungibles::Mutate, Currency}; @@ -30,6 +28,7 @@ use pallet_revive::{ }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; +use sp_core::H160; use xcm::{v4::prelude::*, VersionedLocation, VersionedXcm}; use xcm_simulator::TestExt; @@ -39,41 +38,43 @@ macro_rules! assert_return_code { }}; } -fn bare_call(dest: sp_runtime::AccountId32) -> BareCallBuilder { +fn bare_call(dest: H160) -> BareCallBuilder { BareCallBuilder::::bare_call(RawOrigin::Signed(ALICE).into(), dest) } /// Instantiate the tests contract, and fund it with some balance and assets. -fn instantiate_test_contract(name: &str) -> AccountId { - let (wasm, _) = compile_module::(name).unwrap(); +fn instantiate_test_contract(name: &str) -> Contract { + let (wasm, _) = compile_module(name).unwrap(); // Instantiate contract. - let contract_addr = ParaA::execute_with(|| { + let contract = ParaA::execute_with(|| { BareInstantiateBuilder::::bare_instantiate( RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .build_and_unwrap_account_id() + .storage_deposit_limit(1_000_000_000_000) + .build_and_unwrap_contract() }); // Funds contract account with some balance and assets. ParaA::execute_with(|| { - parachain::Balances::make_free_balance_be(&contract_addr, INITIAL_BALANCE); - parachain::Assets::mint_into(0u32.into(), &contract_addr, INITIAL_BALANCE).unwrap(); + parachain::Balances::make_free_balance_be(&contract.account_id, INITIAL_BALANCE); + parachain::Assets::mint_into(0u32.into(), &contract.account_id, INITIAL_BALANCE).unwrap(); }); Relay::execute_with(|| { - let sovereign_account = parachain_account_sovereign_account_id(1u32, contract_addr.clone()); + let sovereign_account = + parachain_account_sovereign_account_id(1u32, contract.account_id.clone()); relay_chain::Balances::make_free_balance_be(&sovereign_account, INITIAL_BALANCE); }); - contract_addr + contract } #[test] fn test_xcm_execute() { MockNet::reset(); - let contract_addr = instantiate_test_contract("xcm_execute"); + let Contract { addr, account_id } = instantiate_test_contract("xcm_execute"); // Execute XCM instructions through the contract. ParaA::execute_with(|| { @@ -87,9 +88,7 @@ fn test_xcm_execute() { .deposit_asset(assets, beneficiary) .build(); - let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode()) - .build(); + let result = bare_call(addr).data(VersionedXcm::V4(message).encode()).build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::Success); @@ -98,7 +97,7 @@ fn test_xcm_execute() { // Bob. 
let initial = INITIAL_BALANCE; assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); - assert_eq!(ParachainBalances::free_balance(&contract_addr), initial - amount); + assert_eq!(ParachainBalances::free_balance(&account_id), initial - amount); }); } @@ -106,7 +105,7 @@ fn test_xcm_execute() { fn test_xcm_execute_incomplete() { MockNet::reset(); - let contract_addr = instantiate_test_contract("xcm_execute"); + let Contract { addr, account_id } = instantiate_test_contract("xcm_execute"); let amount = 10 * CENTS; // Execute XCM instructions through the contract. @@ -124,15 +123,13 @@ fn test_xcm_execute_incomplete() { .deposit_asset(assets, beneficiary) .build(); - let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode()) - .build(); + let result = bare_call(addr).data(VersionedXcm::V4(message).encode()).build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); - assert_eq!(ParachainBalances::free_balance(&contract_addr), INITIAL_BALANCE - amount); + assert_eq!(ParachainBalances::free_balance(&account_id), INITIAL_BALANCE - amount); }); } @@ -140,11 +137,11 @@ fn test_xcm_execute_incomplete() { fn test_xcm_execute_reentrant_call() { MockNet::reset(); - let contract_addr = instantiate_test_contract("xcm_execute"); + let Contract { addr, .. } = instantiate_test_contract("xcm_execute"); ParaA::execute_with(|| { let transact_call = parachain::RuntimeCall::Contracts(pallet_revive::Call::call { - dest: contract_addr.clone(), + dest: addr, gas_limit: 1_000_000.into(), storage_deposit_limit: test_utils::deposit_limit::(), data: vec![], @@ -157,7 +154,7 @@ fn test_xcm_execute_reentrant_call() { .expect_transact_status(MaybeErrorCode::Success) .build(); - let result = bare_call(contract_addr.clone()) + let result = bare_call(addr) .data(VersionedXcm::V4(message).encode()) .build_and_unwrap_result(); @@ -171,7 +168,7 @@ fn test_xcm_execute_reentrant_call() { #[test] fn test_xcm_send() { MockNet::reset(); - let contract_addr = instantiate_test_contract("xcm_send"); + let Contract { addr, account_id } = instantiate_test_contract("xcm_send"); let amount = 1_000 * CENTS; let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` @@ -188,7 +185,7 @@ fn test_xcm_send() { .deposit_asset(assets, beneficiary) .build(); - let result = bare_call(contract_addr.clone()) + let result = bare_call(addr) .data((dest, VersionedXcm::V4(message)).encode()) .build_and_unwrap_result(); @@ -197,7 +194,7 @@ fn test_xcm_send() { }); Relay::execute_with(|| { - let derived_contract_addr = ¶chain_account_sovereign_account_id(1, contract_addr); + let derived_contract_addr = ¶chain_account_sovereign_account_id(1, account_id); assert_eq!( INITIAL_BALANCE - amount, relay_chain::Balances::free_balance(derived_contract_addr) diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index d9257d38b66e..02d60ee9e34d 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -1790,6 +1790,7 @@ pub mod env { &mut self, memory: &mut M, dest_ptr: u32, + dest_len: u32, msg_ptr: u32, msg_len: u32, output_ptr: u32, @@ -1797,10 +1798,12 @@ pub mod env { use xcm::{VersionedLocation, VersionedXcm}; use xcm_builder::{SendController, SendControllerWeightInfo}; - 
self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let dest: VersionedLocation = memory.read_as(dest_ptr)?; + self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; + let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; + self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); self.charge_gas(RuntimeCosts::CallRuntime(weight))?; let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv32.rs index b7b660c40837..89cf92786445 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv32.rs @@ -124,6 +124,7 @@ mod sys { pub fn xcm_execute(msg_ptr: *const u8, msg_len: u32) -> ReturnCode; pub fn xcm_send( dest_ptr: *const u8, + dest_len: *const u8, msg_ptr: *const u8, msg_len: u32, out_ptr: *mut u8, @@ -530,7 +531,13 @@ impl HostFn for HostFnImpl { fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result { let ret_code = unsafe { - sys::xcm_send(dest.as_ptr(), msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) + sys::xcm_send( + dest.as_ptr(), + dest.len() as _, + msg.as_ptr(), + msg.len() as _, + output.as_mut_ptr(), + ) }; ret_code.into() } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 6d380a4bcbb7..b7c1c375094f 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -607,6 +607,7 @@ tuples-96 = [ ] riscv = [ "pallet-revive-fixtures?/riscv", + "pallet-revive-mock-network?/riscv", "pallet-revive?/riscv", ] From 3c694436ab9cfb992ed666ecded40f904f1c8164 Mon Sep 17 00:00:00 2001 From: Vincent Geddes <117534+vgeddes@users.noreply.github.com> Date: Thu, 12 Sep 2024 20:22:47 +0200 Subject: [PATCH 07/12] Remove redundant code --- bridges/snowbridge/pallets/system/src/lib.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 8f3235279956..1e8a788b7a5a 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -628,9 +628,7 @@ pub mod pallet { let location: Location = (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; - let pays_fee = PaysFee::::No; - - Self::do_register_token(&location, metadata, pays_fee)?; + Self::do_register_token(&location, metadata, PaysFee::::No)?; Ok(PostDispatchInfo { actual_weight: Some(T::WeightInfo::register_token()), From 315070f0fe94975f15c8caed2b3192ab410fd7db Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 13 Sep 2024 09:23:05 +0800 Subject: [PATCH 08/12] Remove tests unused --- bridges/snowbridge/primitives/router/src/inbound/tests.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs index 1c018f3ab0c4..e0672f3928af 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -66,9 +66,5 @@ fn test_reanchor_all_assets() { let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone(); assert_ok!(reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context)); assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); - // reanchor back to original location in context of BH 
- let mut reanchored_asset_with_bh_context = reanchored_asset.clone(); - assert_ok!(reanchored_asset_with_bh_context.reanchor(&global_ah, &bh_context)); - assert_eq!(reanchored_asset_with_bh_context, asset.clone()); } } From 37a28c4a6d486c80aac31fad62d68d5e3723f646 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 13 Sep 2024 09:38:01 +0800 Subject: [PATCH 09/12] Rename multi_assets --- .../bridge-hub-westend/src/tests/snowbridge.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index f2cd15e71ad7..b668ac89d845 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -218,7 +218,7 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { )), fun: Fungible(TOKEN_AMOUNT), }]; - let multi_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::V4(Assets::from(assets)); let destination = VersionedLocation::V4(Location::new( 2, @@ -239,7 +239,7 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { RuntimeOrigin::signed(AssetHubWestendReceiver::get()), Box::new(destination), Box::new(beneficiary), - Box::new(multi_assets), + Box::new(versioned_assets), 0, Unlimited, ) @@ -337,7 +337,7 @@ fn transfer_relay_token() { type RuntimeEvent = ::RuntimeEvent; let assets = vec![Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }]; - let multi_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::V4(Assets::from(assets)); let destination = VersionedLocation::V4(Location::new( 2, @@ -353,7 +353,7 @@ fn transfer_relay_token() { RuntimeOrigin::signed(AssetHubWestendSender::get()), Box::new(destination), Box::new(beneficiary), - Box::new(multi_assets), + Box::new(versioned_assets), 0, Unlimited, )); @@ -500,7 +500,7 @@ fn transfer_ah_token() { // Send partial of the token, will fail if send all let assets = vec![Asset { id: AssetId(asset_id.clone()), fun: Fungible(TOKEN_AMOUNT / 10) }]; - let multi_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::V4(Assets::from(assets)); let beneficiary = VersionedLocation::V4(Location::new( 0, @@ -511,7 +511,7 @@ fn transfer_ah_token() { RuntimeOrigin::signed(AssetHubWestendSender::get()), Box::new(VersionedLocation::from(ethereum_destination)), Box::new(beneficiary), - Box::new(multi_assets), + Box::new(versioned_assets), 0, Unlimited, )); From 22f8dea20120f758eebb0285c6b37615be5456a6 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 13 Sep 2024 09:42:47 +0800 Subject: [PATCH 10/12] Cleanup --- .../primitives/core/src/location.rs | 23 ++----------------- .../primitives/router/src/inbound/tests.rs | 1 - 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs index fe8db0e35283..aad1c9ece05c 100644 --- a/bridges/snowbridge/primitives/core/src/location.rs +++ b/bridges/snowbridge/primitives/core/src/location.rs @@ -97,10 +97,9 @@ impl DescribeLocation for DescribeTokenTerminal { #[cfg(test)] mod tests { use crate::TokenIdOf; - use frame_support::assert_ok; use xcm::prelude::{ - GeneralIndex, GeneralKey, GlobalConsensus, InteriorLocation, 
Junction::*, Location, - NetworkId::*, PalletInstance, Parachain, Reanchorable, + GeneralIndex, GeneralKey, GlobalConsensus, Junction::*, Location, NetworkId::*, + PalletInstance, Parachain, }; use xcm_executor::traits::ConvertLocation; @@ -203,22 +202,4 @@ mod tests { ); } } - - #[test] - fn test_reanchor_relay_token_from_different_consensus() { - let asset_id: Location = Location::new(2, [GlobalConsensus(Rococo)]); - let ah_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1000)].into(); - let ethereum = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 })]); - let mut reanchored_asset = asset_id.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); - assert_eq!( - reanchored_asset, - Location { parents: 1, interior: [GlobalConsensus(Rococo)].into() } - ); - let bh_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1002)].into(); - let ah = Location::new(1, [GlobalConsensus(Westend), Parachain(1000)]); - let mut reanchored_asset = reanchored_asset.clone(); - assert_ok!(reanchored_asset.reanchor(&ah, &bh_context)); - assert_eq!(reanchored_asset, asset_id); - } } diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs index e0672f3928af..e0e90e516be1 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -45,7 +45,6 @@ fn test_reanchor_all_assets() { let ethereum = Location::new(2, ethereum_context.clone()); let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); let global_ah = Location::new(1, ah_context.clone()); - let bh_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1002)].into(); let assets = vec![ // DOT Location::new(1, []), From 1a67710130501cc245da1e6790f0f755e1d9a2a6 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 13 Sep 2024 09:55:37 +0800 Subject: [PATCH 11/12] More checks --- .../tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index b668ac89d845..4e9dd5a77dd7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -363,8 +363,8 @@ fn transfer_relay_token() { assert!( events.iter().any(|event| matches!( event, - RuntimeEvent::Balances(pallet_balances::Event::Transfer { amount, ..}) - if *amount == TOKEN_AMOUNT, + RuntimeEvent::Balances(pallet_balances::Event::Transfer { amount, to, ..}) + if *amount == TOKEN_AMOUNT && *to == ethereum_sovereign.clone(), )), "native token reserved to Ethereum sovereign account." 
); From 211b450f06fc8b29f44a50147f40e6544ccbeffa Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 13 Sep 2024 10:04:32 +0800 Subject: [PATCH 12/12] Rename GlobalAssetHub to AssetHubFromEthereum --- bridges/snowbridge/pallets/inbound-queue/src/mock.rs | 4 ++-- .../bridge-hub-rococo/src/bridge_to_ethereum_config.rs | 4 ++-- .../bridge-hub-westend/src/bridge_to_ethereum_config.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index f5e44c2e46a8..3e67d5ab738b 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -114,7 +114,7 @@ parameter_types! { pub const InboundQueuePalletInstance: u8 = 80; pub UniversalLocation: InteriorLocation = [GlobalConsensus(Westend), Parachain(1002)].into(); - pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(Westend),Parachain(1000)]); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(Westend),Parachain(1000)]); } #[cfg(feature = "runtime-benchmarks")] @@ -233,7 +233,7 @@ impl inbound_queue::Config for Test { Balance, MockTokenIdConvert, UniversalLocation, - GlobalAssetHub, + AssetHubFromEthereum, >; type PricingParameters = Parameters; type ChannelLookup = MockChannelLookup; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index d9a3869bd6e1..be7005b5379a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -66,7 +66,7 @@ parameter_types! { rewards: Rewards { local: 1 * UNITS, remote: meth(1) }, multiplier: FixedU128::from_rational(1, 1), }; - pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(rococo_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(rococo_runtime_constants::system_parachain::ASSET_HUB_ID)]); pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } @@ -90,7 +90,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { Balance, EthereumSystem, EthereumUniversalLocation, - GlobalAssetHub, + AssetHubFromEthereum, >; type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 1bd425ab4075..dbca4166a135 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -69,7 +69,7 @@ parameter_types! 
{ rewards: Rewards { local: 1 * UNITS, remote: meth(1) }, multiplier: FixedU128::from_rational(1, 1), }; - pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(westend_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(westend_runtime_constants::system_parachain::ASSET_HUB_ID)]); pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } impl snowbridge_pallet_inbound_queue::Config for Runtime { @@ -92,7 +92,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { Balance, EthereumSystem, EthereumUniversalLocation, - GlobalAssetHub, + AssetHubFromEthereum, >; type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier;
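
The assertion strengthened in the "More checks" commit above relies on Rust's matches! macro with a struct pattern, a ".." rest pattern, and an "if" guard that constrains several bound fields at once. The snippet below is a minimal, standalone sketch of that pattern only; the Event enum and the account and amount types are simplified stand-ins chosen for illustration, not the real pallet_balances runtime types used in the emulated test.

// Minimal sketch of the matches!-with-guard pattern from the "More checks"
// commit. Event, the account type (u64) and the amount type (u128) are
// simplified stand-ins, not the real pallet_balances runtime types.
#[allow(dead_code)] // `from` is constructed but never read in this sketch.
#[derive(Debug)]
enum Event {
    Transfer { from: u64, to: u64, amount: u128 },
    Other,
}

fn main() {
    const TOKEN_AMOUNT: u128 = 1_000_000_000_000;
    let ethereum_sovereign: u64 = 42;

    let events = vec![
        Event::Other,
        Event::Transfer { from: 1, to: ethereum_sovereign, amount: TOKEN_AMOUNT },
    ];

    // Bind only the fields the guard needs (amount, to), ignore the rest
    // with `..`, and constrain both bindings in a single `if` guard.
    assert!(
        events.iter().any(|event| matches!(
            event,
            Event::Transfer { amount, to, .. }
                if *amount == TOKEN_AMOUNT && *to == ethereum_sovereign
        )),
        "expected a transfer of TOKEN_AMOUNT to the Ethereum sovereign account"
    );
}

This is the same shape the commit adds to transfer_relay_token: bind amount and to, discard the remaining fields with .., and wrap the whole check in events.iter().any(..) so the assertion holds as long as at least one matching event was emitted.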
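
The AssetHubFromEthereum item renamed in the final commit is declared with parameter_types!, which generates a zero-sized type implementing Get<Location>; that is what lets it be supplied as a type parameter of the inbound-queue message converter. The sketch below illustrates only that parameter_types!/Get pattern, under stated assumptions: ForwardToAssetHub is a hypothetical consumer invented for the example, not the converter wired up in the runtime configs above, and the location value mirrors the mock runtime's declaration.

// Hedged sketch of the parameter_types! + Get<Location> pattern behind
// AssetHubFromEthereum. ForwardToAssetHub is an illustrative consumer, not
// the real converter from the inbound-queue pallet.
use core::marker::PhantomData;

use frame_support::{parameter_types, traits::Get};
use xcm::prelude::*;

parameter_types! {
    // Mirrors the mock runtime: Asset Hub as seen from a sibling bridge hub.
    pub AssetHubFromEthereum: Location =
        Location::new(1, [GlobalConsensus(NetworkId::Westend), Parachain(1000)]);
}

// Hypothetical consumer that is generic over a Get<Location> destination,
// the same shape of bound the runtime places on this parameter position.
#[allow(dead_code)]
struct ForwardToAssetHub<Destination>(PhantomData<Destination>);

impl<Destination: Get<Location>> ForwardToAssetHub<Destination> {
    fn destination() -> Location {
        Destination::get()
    }
}

fn main() {
    let dest = ForwardToAssetHub::<AssetHubFromEthereum>::destination();
    assert_eq!(
        dest,
        Location::new(1, [GlobalConsensus(NetworkId::Westend), Parachain(1000)])
    );
}

Because the rename only swaps the identifier bound to that Get<Location> type, the diff touches the declaration and every type-parameter use site but no logic, which is why the hunks are one-line substitutions throughout.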