Mirror of https://git.ghostchain.io/proxmio/ghost-node.git, synced 2025-12-27 19:29:56 +00:00
rustfmt service and fix typos
Signed-off-by: Uncle Stretch <uncle.stretch@ghostchain.io>
@@ -7,15 +7,15 @@ pub mod chain_spec;
use {
    grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
    sc_client_api::BlockBackend,
    tx_pool_api::OffchainTransactionPoolFactory,
    sp_blockchain::HeaderBackend,
    sc_service::{KeystoreContainer, RpcHandlers},
    sp_blockchain::HeaderBackend,
    telemetry::{Telemetry, TelemetryWorkerHandle},
    tx_pool_api::OffchainTransactionPoolFactory,
};

use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use std::{sync::Arc, time::Duration};
use telemetry::TelemetryWorker;
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};

pub use chain_spec::GenericChainSpec;
pub use consensus_common::{Proposal, SelectChain};
@@ -23,27 +23,23 @@ pub use primitives::{Block, BlockId, BlockNumber, Hash};
pub use sc_client_api::{Backend, CallExecutor};
pub use sc_consensus::BlockImport;
pub use sc_executor::NativeExecutionDispatch;
pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi};
pub use sc_service::{
    config::{DatabaseSource, PrometheusConfig},
    ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role,
    RuntimeGenesis, TFullBackend, TFullCallExecutor, TFullClient, TaskManager,
    TransactionPoolOptions,
    ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis,
    TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions,
};
pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi};
pub use sp_runtime::{
    generic,
    traits::{
        self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT,
        NumberFor,
    },
    traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT, NumberFor},
};

#[cfg(feature = "casper-native")]
use casper_runtime::RuntimeApi;
#[cfg(feature = "casper-native")]
pub use chain_spec::CasperChainSpec;
#[cfg(feature = "casper-native")]
pub use {casper_runtime, casper_runtime_constants};
#[cfg(feature = "casper-native")]
use casper_runtime::RuntimeApi;

#[cfg(feature = "full-node")]
pub type FullBackend = sc_service::TFullBackend<Block>;
@@ -53,7 +49,7 @@ pub type FullClient = sc_service::TFullClient<
    Block,
    RuntimeApi,
    WasmExecutor<(
        sp_io::SubstrateHostFunctions,
        sp_io::SubstrateHostFunctions,
        frame_benchmarking::benchmarking::HostFunctions,
    )>,
>;
@@ -97,7 +93,7 @@ where
}

pub trait HeaderProviderProvider<Block>: Send + Sync + 'static
where
where
    Block: BlockT,
{
    type Provider: HeaderProvider<Block> + 'static;
@@ -177,9 +173,13 @@ impl IdentifyVariant for Box<dyn ChainSpec> {
    }

    fn identify_chain(&self) -> Chain {
        if self.is_ghost() { Chain::Ghost }
        else if self.is_casper() { Chain::Casper }
        else { Chain::Unknown }
        if self.is_ghost() {
            Chain::Ghost
        } else if self.is_casper() {
            Chain::Casper
        } else {
            Chain::Unknown
        }
    }
}
@@ -222,7 +222,9 @@ fn new_partial_basics(

    let heap_pages = config
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
            extra_pages: h as _,
        });

    let executor = WasmExecutor::builder()
        .with_execution_method(config.wasm_method)
@@ -251,13 +253,25 @@ fn new_partial_basics(
        telemetry
    });

    Ok(Basics { task_manager, client, backend, keystore_container, telemetry })
    Ok(Basics {
        task_manager,
        client,
        backend,
        keystore_container,
        telemetry,
    })
}

#[cfg(feature = "full-node")]
fn new_partial<ChainSelection>(
    config: &mut Configuration,
    Basics { task_manager, backend, client, keystore_container, telemetry }: Basics,
    Basics {
        task_manager,
        backend,
        client,
        keystore_container,
        telemetry,
    }: Basics,
    select_chain: ChainSelection,
) -> Result<
    sc_service::PartialComponents<
@@ -272,11 +286,7 @@ fn new_partial<ChainSelection>(
            ghost_rpc::SubscriptionTaskExecutor,
        ) -> Result<ghost_rpc::RpcExtension, SubstrateServiceError>,
        (
            babe::BabeBlockImport<
                Block,
                FullClient,
                FullGrandpaBlockImport<ChainSelection>,
            >,
            babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport<ChainSelection>>,
            grandpa::LinkHalf<Block, FullClient, ChainSelection>,
            babe::BabeLink<Block>,
        ),
@@ -286,7 +296,7 @@ fn new_partial<ChainSelection>(
    >,
    Error,
>
where
where
    ChainSelection: 'static + SelectChain<Block>,
{
    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
@@ -297,42 +307,40 @@ where
        client.clone(),
    );

    let (grandpa_block_import, grandpa_link) =
        grandpa::block_import(
            client.clone(),
            GRANDPA_JUSTIFICATION_PERIOD,
            &(client.clone() as Arc<_>),
            select_chain.clone(),
            telemetry.as_ref().map(|x| x.handle()),
        )?;
    let (grandpa_block_import, grandpa_link) = grandpa::block_import(
        client.clone(),
        GRANDPA_JUSTIFICATION_PERIOD,
        &(client.clone() as Arc<_>),
        select_chain.clone(),
        telemetry.as_ref().map(|x| x.handle()),
    )?;
    let justification_import = grandpa_block_import.clone();

    let babe_config = babe::configuration(&*client)?;
    let (block_import, babe_link) =
    let (block_import, babe_link) =
        babe::block_import(babe_config.clone(), grandpa_block_import, client.clone())?;

    let slot_duration = babe_link.config().slot_duration();
    let (import_queue, babe_worker_handle) =
        babe::import_queue(babe::ImportQueueParams {
            link: babe_link.clone(),
            block_import: block_import.clone(),
            justification_import: Some(Box::new(justification_import)),
            client: client.clone(),
            select_chain: select_chain.clone(),
            create_inherent_data_providers: move |_, ()| async move {
                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                let slot =
                    babe_primitives::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        *timestamp,
                        slot_duration,
                    );
                Ok((slot, timestamp))
            },
            spawner: &task_manager.spawn_essential_handle(),
            registry: config.prometheus_registry(),
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
        })?;
    let (import_queue, babe_worker_handle) = babe::import_queue(babe::ImportQueueParams {
        link: babe_link.clone(),
        block_import: block_import.clone(),
        justification_import: Some(Box::new(justification_import)),
        client: client.clone(),
        select_chain: select_chain.clone(),
        create_inherent_data_providers: move |_, ()| async move {
            let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
            let slot =
                babe_primitives::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                    *timestamp,
                    slot_duration,
                );
            Ok((slot, timestamp))
        },
        spawner: &task_manager.spawn_essential_handle(),
        registry: config.prometheus_registry(),
        telemetry: telemetry.as_ref().map(|x| x.handle()),
        offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
    })?;

    let justification_stream = grandpa_link.justification_stream();
    let shared_authority_set = grandpa_link.shared_authority_set().clone();
@@ -353,10 +361,9 @@ where
        let chain_spec = config.chain_spec.cloned_box();
        let backend = backend.clone();

        move |
            deny_unsafe,
            subscription_executor: ghost_rpc::SubscriptionTaskExecutor,
        | -> Result<ghost_rpc::RpcExtension, sc_service::Error> {
        move |deny_unsafe,
              subscription_executor: ghost_rpc::SubscriptionTaskExecutor|
              -> Result<ghost_rpc::RpcExtension, sc_service::Error> {
            let deps = ghost_rpc::FullDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
@@ -425,11 +432,10 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha

    let role = config.role.clone();
    let force_authoring = config.force_authoring;
    let backoff_authoring_blocks = if !force_authoring_backoff {
    let backoff_authoring_blocks = if !force_authoring_backoff {
        None
    } else {
        let mut backoff =
            sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();
        let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();

        if config.chain_spec.is_dev() {
            backoff.max_interval = 10;
@@ -446,7 +452,14 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
    let prometheus_registry = config.prometheus_registry().cloned();
    let select_chain = sc_consensus::LongestChain::new(basics.backend.clone());

    let sc_service::PartialComponents::<_, _, sc_consensus::LongestChain<FullBackend, Block>, _, _, _,> {
    let sc_service::PartialComponents::<
        _,
        _,
        sc_consensus::LongestChain<FullBackend, Block>,
        _,
        _,
        _,
    > {
        client,
        backend,
        mut task_manager,
@@ -454,15 +467,10 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
        select_chain,
        import_queue,
        transaction_pool,
        other: (
            rpc_extensions_builder,
            import_setup,
            rpc_setup,
            mut telemetry,
        ),
        other: (rpc_extensions_builder, import_setup, rpc_setup, mut telemetry),
    } = new_partial::<sc_consensus::LongestChain<FullBackend, Block>>(
        &mut config,
        basics,
        &mut config,
        basics,
        select_chain,
    )?;
@@ -473,10 +481,14 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
    let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
    let auth_disc_public_addresses = config.network.public_addresses.clone();

    let mut net_config =
    let mut net_config =
        sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network);

    let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
    let genesis_hash = client
        .block_hash(0)
        .ok()
        .flatten()
        .expect("Genesis block exists; qed");
    let peer_store_handle = net_config.peer_store_handle();

    let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
@@ -554,8 +566,8 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
                "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority'",
                err
            );
        },
        _ => {},
    }
        _ => {}
    }

    if let Some(ref mut telemetry) = telemetry {
@@ -563,10 +575,7 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
        task_manager.spawn_handle().spawn(
            "telemetry_hwbench",
            None,
            sc_sysinfo::initialize_hwbench_telemetry(
                telemetry_handle,
                hwbench,
            ),
            sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
        );
    }
}
@@ -577,15 +586,17 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
        use futures::StreamExt;
        use sc_network::{Event, NetworkEventStream};

        let authority_discovery_role =
        let authority_discovery_role =
            sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore());
        let dht_event_stream =
            network.event_stream("authority-discovery").filter_map(|e| async move {
                match e {
                    Event::Dht(e) => Some(e),
                    _ => None,
                }
            });
        let dht_event_stream =
            network
                .event_stream("authority-discovery")
                .filter_map(|e| async move {
                    match e {
                        Event::Dht(e) => Some(e),
                        _ => None,
                    }
                });
        let (worker, _service) = sc_authority_discovery::new_worker_and_service_with_config(
            sc_authority_discovery::WorkerConfig {
                publish_non_global_ips: auth_disc_publish_non_global_ips,
@@ -597,7 +608,7 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
            Arc::new(network.clone()),
            Box::pin(dht_event_stream),
            authority_discovery_role,
            prometheus_registry.clone()
            prometheus_registry.clone(),
        );

        task_manager.spawn_handle().spawn(
@@ -625,16 +636,14 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
            env: proposer,
            sync_oracle: sync_service.clone(),
            justification_sync_link: sync_service.clone(),
            create_inherent_data_providers: move |_, ()| {
                async move {
                    let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                    let slot =
            create_inherent_data_providers: move |_, ()| async move {
                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                let slot =
                    babe_primitives::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        *timestamp,
                        slot_duration,
                    );
                Ok((slot, timestamp))
                }
                Ok((slot, timestamp))
            },
            force_authoring,
            backoff_authoring_blocks,
@@ -642,14 +651,16 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
            block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32),
            max_block_proposal_slot_portion: None,
            telemetry: telemetry.as_ref().map(|x| x.handle()),
        };
        };

        let babe = babe::start_babe(babe_config)?;

        task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe);
        task_manager
            .spawn_essential_handle()
            .spawn_blocking("babe", None, babe);
    }

    let keystore_opt = if role.is_authority() {
    let keystore_opt = if role.is_authority() {
        Some(keystore_container.keystore())
    } else {
        None
@@ -711,8 +722,17 @@ macro_rules! chain_ops {

        let chain_selection = sc_consensus::LongestChain::new(basics.backend.clone());

        let sc_service::PartialComponents { client, backend, import_queue, task_manager, .. } =
            new_partial::<sc_consensus::LongestChain<FullBackend, Block>>(&mut config, basics, chain_selection)?;
        let sc_service::PartialComponents {
            client,
            backend,
            import_queue,
            task_manager,
            ..
        } = new_partial::<sc_consensus::LongestChain<FullBackend, Block>>(
            &mut config,
            basics,
            chain_selection,
        )?;
        Ok((client, backend, import_queue, task_manager))
    }};
}
@@ -720,22 +740,28 @@ macro_rules! chain_ops {
#[cfg(feature = "full-node")]
pub fn new_chain_ops(
    config: &mut Configuration,
) -> Result<(Arc<FullClient>, Arc<FullBackend>, sc_consensus::BasicQueue<Block>, TaskManager), Error>
{
) -> Result<
    (
        Arc<FullClient>,
        Arc<FullBackend>,
        sc_consensus::BasicQueue<Block>,
        TaskManager,
    ),
    Error,
> {
    config.keystore = sc_service::config::KeystoreConfig::InMemory;
    chain_ops!(config, None)
}

#[cfg(feature = "full-node")]
pub fn build_full(
    config: Configuration,
    params: NewFullParams,
) -> Result<NewFull, Error> {
pub fn build_full(config: Configuration, params: NewFullParams) -> Result<NewFull, Error> {
    match config.network.network_backend {
        sc_network::config::NetworkBackendType::Libp2p =>
            new_full::<sc_network::NetworkWorker<Block, Hash>>(config, params),
        sc_network::config::NetworkBackendType::Litep2p =>
            new_full::<sc_network::Litep2pNetworkBackend>(config, params),
        sc_network::config::NetworkBackendType::Libp2p => {
            new_full::<sc_network::NetworkWorker<Block, Hash>>(config, params)
        }
        sc_network::config::NetworkBackendType::Litep2p => {
            new_full::<sc_network::Litep2pNetworkBackend>(config, params)
        }
    }
}
@@ -750,7 +776,7 @@ pub fn revert_backend(
    let revertible = blocks.min(best_number - finalized);

    if revertible == 0 {
        return Ok(())
        return Ok(());
    }

    babe::revert(client.clone(), backend, blocks)?;