Skip to content

Commit

Permalink
clean
Browse files Browse the repository at this point in the history
  • Loading branch information
JesseAbram committed Oct 31, 2024
1 parent ace167d commit dcc186c
Show file tree
Hide file tree
Showing 13 changed files with 155 additions and 151 deletions.
2 changes: 1 addition & 1 deletion crates/client/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ async fn test_remove_program_reference_counter() {
verifying_key,
&program_owner,
BoundedVec(vec![ProgramInstance {
program_pointer: H256([0; 32]),
program_pointer: subxt::utils::H256([0; 32]),
program_config: vec![],
}]),
)
Expand Down
1 change: 0 additions & 1 deletion crates/testing-utils/src/node_proc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,6 @@ impl TestNodeProcessBuilder {
let mut cmd = process::Command::new(&self.node_path);
cmd.env("RUST_LOG", "error").arg(&self.chain_type).arg("--tmp");
cmd.arg("--unsafe-force-node-key-generation");
cmd.arg("--public-addr");
if self.force_authoring {
cmd.arg("--force-authoring");
}
Expand Down
13 changes: 10 additions & 3 deletions crates/threshold-signature-server/src/user/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -260,8 +260,14 @@ async fn test_signature_requests_fail_on_different_conditions() {
verifying_key.as_slice().try_into().unwrap(),
&two.pair(),
OtherBoundedVec(vec![
OtherProgramInstance { program_pointer: subxt::utils::H256(program_hash.into()), program_config: vec![] },
OtherProgramInstance { program_pointer: subxt::utils::H256(program_hash.into()), program_config: vec![] },
OtherProgramInstance {
program_pointer: subxt::utils::H256(program_hash.into()),
program_config: vec![],
},
OtherProgramInstance {
program_pointer: subxt::utils::H256(program_hash.into()),
program_config: vec![],
},
]),
)
.await
Expand Down Expand Up @@ -1200,7 +1206,8 @@ async fn test_device_key_proxy() {
};

// check to make sure config data stored properly
let program_query = entropy::storage().programs().programs(subxt::utils::H256(DEVICE_KEY_HASH.0));
let program_query =
entropy::storage().programs().programs(subxt::utils::H256(DEVICE_KEY_HASH.0));
let program_data = query_chain(&entropy_api, &rpc, program_query, None).await.unwrap().unwrap();
let schema_config_device_key_proxy = schema_for!(UserConfig);
let schema_aux_data_device_key_proxy = schema_for!(AuxData);
Expand Down
11 changes: 7 additions & 4 deletions node/cli/src/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,12 @@ pub struct FullDeps<C, P, SC, B> {

/// Instantiate all Full RPC extensions.
pub fn create_full<C, P, SC, B>(
FullDeps { client, pool, select_chain, chain_spec, babe, grandpa, backend, .. }: FullDeps<C, P, SC, B>,
FullDeps { client, pool, select_chain, chain_spec, babe, grandpa, backend, .. }: FullDeps<
C,
P,
SC,
B,
>,
) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>
Expand All @@ -132,9 +137,7 @@ where
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
use sc_consensus_babe_rpc::{Babe, BabeApiServer};
use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer};
use sc_rpc::{
dev::{Dev, DevApiServer},
};
use sc_rpc::dev::{Dev, DevApiServer};
use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer};
use sc_sync_state_rpc::{SyncState, SyncStateApiServer};
use substrate_frame_rpc_system::{System, SystemApiServer};
Expand Down
234 changes: 112 additions & 122 deletions node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,14 +61,14 @@ use crate::cli::Cli;
/// Host functions required for kitchensink runtime and Substrate node.
#[cfg(not(feature = "runtime-benchmarks"))]
pub type HostFunctions =
(sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions);
(sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions);

/// Host functions required for kitchensink runtime and Substrate node.
#[cfg(feature = "runtime-benchmarks")]
pub type HostFunctions = (
sp_io::SubstrateHostFunctions,
sp_statement_store::runtime_api::HostFunctions,
frame_benchmarking::benchmarking::HostFunctions,
sp_io::SubstrateHostFunctions,
sp_statement_store::runtime_api::HostFunctions,
frame_benchmarking::benchmarking::HostFunctions,
);

/// A specialized `WasmExecutor` intended to use across substrate node. It provides all required
Expand Down Expand Up @@ -110,110 +110,106 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {

/// Creates a new partial node.
pub fn new_partial(
config: &Configuration,
config: &Configuration,
) -> Result<
sc_service::PartialComponents<
FullClient,
FullBackend,
FullSelectChain,
sc_consensus::DefaultImportQueue<Block>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
impl Fn(
sc_rpc::SubscriptionTaskExecutor,
) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error>,
(
sc_consensus_babe::BabeBlockImport<
Block,
FullClient,
FullGrandpaBlockImport,
>,
grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
sc_consensus_babe::BabeLink<Block>,
),
grandpa::SharedVoterState,
Option<Telemetry>,
),
>,
ServiceError,
sc_service::PartialComponents<
FullClient,
FullBackend,
FullSelectChain,
sc_consensus::DefaultImportQueue<Block>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
impl Fn(
sc_rpc::SubscriptionTaskExecutor,
) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error>,
(
sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>,
grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
sc_consensus_babe::BabeLink<Block>,
),
grandpa::SharedVoterState,
Option<Telemetry>,
),
>,
ServiceError,
> {
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;

let executor = sc_service::new_wasm_executor(&config.executor);

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
let client = Arc::new(client);

let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", None, worker.run());
telemetry
});

let select_chain = sc_consensus::LongestChain::new(backend.clone());

let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_essential_handle(),
client.clone(),
);

let (grandpa_block_import, grandpa_link) = grandpa::block_import(
client.clone(),
GRANDPA_JUSTIFICATION_PERIOD,
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let justification_import = grandpa_block_import.clone();

let (block_import, babe_link) = sc_consensus_babe::block_import(
sc_consensus_babe::configuration(&*client)?,
grandpa_block_import,
client.clone(),
)?;

let slot_duration = babe_link.config().slot_duration();
let (import_queue, babe_worker_handle) =
sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {
link: babe_link.clone(),
block_import: block_import.clone(),
justification_import: Some(Box::new(justification_import)),
client: client.clone(),
select_chain: select_chain.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;

let executor = sc_service::new_wasm_executor(&config.executor);

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
let client = Arc::new(client);

let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", None, worker.run());
telemetry
});

let select_chain = sc_consensus::LongestChain::new(backend.clone());

let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_essential_handle(),
client.clone(),
);

let (grandpa_block_import, grandpa_link) = grandpa::block_import(
client.clone(),
GRANDPA_JUSTIFICATION_PERIOD,
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let justification_import = grandpa_block_import.clone();

let (block_import, babe_link) = sc_consensus_babe::block_import(
sc_consensus_babe::configuration(&*client)?,
grandpa_block_import,
client.clone(),
)?;

let slot_duration = babe_link.config().slot_duration();
let (import_queue, babe_worker_handle) =
sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {
link: babe_link.clone(),
block_import: block_import.clone(),
justification_import: Some(Box::new(justification_import)),
client: client.clone(),
select_chain: select_chain.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);

Ok((slot, timestamp))
},
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
})?;
Ok((slot, timestamp))
},
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
})?;

let import_setup = (block_import, grandpa_link, babe_link);
let import_setup = (block_import, grandpa_link, babe_link);

let (rpc_extensions_builder, rpc_setup) = {
let (_, grandpa_link, _) = &import_setup;
Expand Down Expand Up @@ -295,24 +291,20 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
config: Configuration,
disable_hardware_benchmarks: bool,
with_startup_data: impl FnOnce(
&sc_consensus_babe::BabeBlockImport<
Block,
FullClient,
FullGrandpaBlockImport,
>,
&sc_consensus_babe::BabeLink<Block>,
),
&sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>,
&sc_consensus_babe::BabeLink<Block>,
),
tss_server_endpoint: Option<String>,
) -> Result<NewFullBase, ServiceError> {
let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled;
let role = config.role;
let force_authoring = config.force_authoring;
// let backoff_authoring_blocks: =
// Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default());
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let enable_offchain_worker = config.offchain_worker.enabled;
let role = config.role;
let force_authoring = config.force_authoring;
// let backoff_authoring_blocks: =
// Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default());
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let enable_offchain_worker = config.offchain_worker.enabled;

let hwbench = (!disable_hardware_benchmarks)
.then_some(config.database.path().map(|database_path| {
Expand Down Expand Up @@ -343,7 +335,7 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
);

let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
let auth_disc_public_addresses = config.network.public_addresses.clone();
let auth_disc_public_addresses = config.network.public_addresses.clone();

let peer_store_handle = net_config.peer_store_handle();
let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
Expand Down Expand Up @@ -375,7 +367,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
metrics,
})?;


if config.offchain_worker.enabled {
use futures::FutureExt;

Expand Down Expand Up @@ -428,7 +419,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
);
log::info!("Threshold Signing Server (TSS) location changed to {}", endpoint);
}

}

let role = config.role.clone();
Expand Down Expand Up @@ -530,10 +520,10 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(

let babe = sc_consensus_babe::start_babe(babe_config)?;
task_manager.spawn_essential_handle().spawn_blocking(
"babe-proposer",
Some("block-authoring"),
babe,
);
"babe-proposer",
Some("block-authoring"),
babe,
);
}

// Spawn authority discovery module.
Expand Down
1 change: 1 addition & 0 deletions pallets/attestation/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,7 @@ impl pallet_staking::Config for Test {
type TargetList = pallet_staking::UseValidatorsMap<Self>;
type UnixTime = pallet_timestamp::Pallet<Test>;
type VoterList = BagsList;
type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
type WeightInfo = ();
}

Expand Down
5 changes: 4 additions & 1 deletion pallets/propagation/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,7 @@ impl pallet_staking::Config for Test {
type TargetList = pallet_staking::UseValidatorsMap<Self>;
type UnixTime = pallet_timestamp::Pallet<Test>;
type VoterList = BagsList;
type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
type WeightInfo = ();
}

Expand Down Expand Up @@ -397,7 +398,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
let stakers = vec![1, 2];
let keys: Vec<_> = stakers.iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect();

pallet_session::GenesisConfig::<Test> { keys }.assimilate_storage(&mut t).unwrap();
pallet_session::GenesisConfig::<Test> { keys, non_authority_keys: vec![] }
.assimilate_storage(&mut t)
.unwrap();

t.into()
}
Loading

0 comments on commit dcc186c

Please sign in to comment.