// Copyright 2021 Axiom-Team
//
// This file is part of Substrate-Libre-Currency.
//
// Substrate-Libre-Currency is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, version 3 of the License.
//
// Substrate-Libre-Currency is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Substrate-Libre-Currency. If not, see <https://www.gnu.org/licenses/>.
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use std::sync::Arc;
use std::time::Duration;
use sc_client_api::{ExecutorProvider, RemoteBackend};
use node_template_runtime::{self, opaque::Block, RuntimeApi};
use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
use sp_inherents::InherentDataProviders;
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use sc_consensus_aura::{ImportQueueParams, StartAuraParams, SlotProportion};
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
use sc_telemetry::{Telemetry, TelemetryWorker};
// Our native executor instance.
native_executor_instance!(
	pub Executor,
	node_template_runtime::api::dispatch,
	node_template_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);
type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
type FullBackend = sc_service::TFullBackend<Block>;
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
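
/// Builds the service components shared by the full node and the CLI
/// subcommands: client, backend, keystore, transaction pool and the
/// AURA/GRANDPA block-import pipeline.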
pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponents<
	FullClient, FullBackend, FullSelectChain,
	sp_consensus::DefaultImportQueue<Block, FullClient>,
	sc_transaction_pool::FullPool<Block, FullClient>,
	(
		sc_consensus_aura::AuraBlockImport<
			Block,
			FullClient,
			sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
			AuraPair
		>,
		sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
		Option<Telemetry>,
	)
>, ServiceError> {
	if config.keystore_remote.is_some() {
		return Err(ServiceError::Other(
			format!("Remote Keystores are not supported.")))
	}
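
	// Inherent data providers supply data that every block must contain
	// (e.g. the timestamp) but that does not come from the transaction pool.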
	let inherent_data_providers = InherentDataProviders::new();

	let telemetry = config.telemetry_endpoints.clone()
		.filter(|x| !x.is_empty())
		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
			let worker = TelemetryWorker::new(16)?;
			let telemetry = worker.handle().new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;
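
	// `new_full_parts` builds the client, backend, keystore container and
	// task manager from the node configuration.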
	let (client, backend, keystore_container, task_manager) =
		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		)?;
	let client = Arc::new(client);

	let telemetry = telemetry
		.map(|(worker, telemetry)| {
			task_manager.spawn_handle().spawn("telemetry", worker.run());
			telemetry
		});

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
	);
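
	// Chain the block imports: the GRANDPA import wraps the client and the
	// AURA import wraps GRANDPA, so each imported block is checked by both
	// consensus engines.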
	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
		client.clone(),
		&(client.clone() as Arc<_>),
		select_chain.clone(),
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
		grandpa_block_import.clone(), client.clone(),
	);
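
	// The import queue verifies the AURA slot claim on every incoming block;
	// GRANDPA justifications are handled by the justification import.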
	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
		ImportQueueParams {
			block_import: aura_block_import.clone(),
			justification_import: Some(Box::new(grandpa_block_import.clone())),
			client: client.clone(),
			inherent_data_providers: inherent_data_providers.clone(),
			spawner: &task_manager.spawn_essential_handle(),
			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
			slot_duration: sc_consensus_aura::slot_duration(&*client)?,
			registry: config.prometheus_registry(),
			check_for_equivocation: Default::default(),
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		},
	)?;

	Ok(sc_service::PartialComponents {
		client,
		backend,
		task_manager,
		import_queue,
		keystore_container,
		select_chain,
		transaction_pool,
		inherent_data_providers,
		other: (aura_block_import, grandpa_link, telemetry),
	})
}

fn remote_keystore(_url: &String) -> Result<Arc<LocalKeystore>, &'static str> {
	// FIXME: here would the concrete keystore be built,
	//        must return a concrete type (NOT `LocalKeystore`) that
	//        implements `CryptoStore` and `SyncCryptoStore`
	Err("Remote Keystore not supported.")
}

/// Builds a new service for a full client.
pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
	let sc_service::PartialComponents {
		client,
		backend,
		mut task_manager,
		import_queue,
		mut keystore_container,
		select_chain,
		transaction_pool,
		inherent_data_providers,
		other: (block_import, grandpa_link, mut telemetry),
	} = new_partial(&config)?;

	if let Some(url) = &config.keystore_remote {
		match remote_keystore(url) {
			Ok(k) => keystore_container.set_remote_keystore(k),
			Err(e) => {
				return Err(ServiceError::Other(
					format!("Error hooking up remote keystore for {}: {}", url, e)))
			}
		};
	}
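
	// Register the GRANDPA notifications protocol with the network layer so
	// that GRANDPA gossip can be exchanged with peers.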
	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		sc_service::build_network(sc_service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: None,
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		sc_service::build_offchain_workers(
			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
		);
	}

	let role = config.role.clone();
	let force_authoring = config.force_authoring;
	let backoff_authoring_blocks: Option<()> = None;
	let name = config.network.node_name.clone();
	let enable_grandpa = !config.disable_grandpa;
	let prometheus_registry = config.prometheus_registry().cloned();
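
	// The RPC extensions builder wires up our custom RPC APIs; it receives a
	// `DenyUnsafe` flag that depends on how the RPC endpoint is exposed.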
	let rpc_extensions_builder = {
		let client = client.clone();
		let pool = transaction_pool.clone();

		Box::new(move |deny_unsafe, _| {
			let deps = crate::rpc::FullDeps {
				client: client.clone(),
				pool: pool.clone(),
				deny_unsafe,
			};

			crate::rpc::create_full(deps)
		})
	};

	let _rpc_handlers = sc_service::spawn_tasks(
		sc_service::SpawnTasksParams {
			network: network.clone(),
			client: client.clone(),
			keystore: keystore_container.sync_keystore(),
			task_manager: &mut task_manager,
			transaction_pool: transaction_pool.clone(),
			rpc_extensions_builder,
			on_demand: None,
			remote_blockchain: None,
			backend,
			network_status_sinks,
			system_rpc_tx,
			config,
			telemetry: telemetry.as_mut(),
		},
	)?;
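
	// Block authoring (AURA) only runs when the node is an authority.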
	if role.is_authority() {
		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool,
			prometheus_registry.as_ref(),
			telemetry.as_ref().map(|x| x.handle()),
		);

		let can_author_with =
			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());

		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _>(
			StartAuraParams {
				slot_duration: sc_consensus_aura::slot_duration(&*client)?,
				client: client.clone(),
				select_chain,
				block_import,
				proposer_factory,
				inherent_data_providers: inherent_data_providers.clone(),
				force_authoring,
				backoff_authoring_blocks,
				keystore: keystore_container.sync_keystore(),
				can_author_with,
				sync_oracle: network.clone(),
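				// Spend at most 2/3 of the slot building the block, leaving
				// the remainder for it to propagate across the network.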
				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
				telemetry: telemetry.as_ref().map(|x| x.handle()),
			},
		)?;

		// the AURA authoring task is considered essential, i.e. if it
		// fails we take down the service with it.
		task_manager.spawn_essential_handle().spawn_blocking("aura", aura);
	}

	// if the node isn't actively participating in consensus then it doesn't
	// need a keystore, regardless of which protocol we use below.
	let keystore = if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };

	let grandpa_config = sc_finality_grandpa::Config {
		// FIXME #1578 make this available through chainspec
		gossip_duration: Duration::from_millis(333),
		justification_period: 512,
		name: Some(name),
		observer_enabled: false,
		keystore,
		is_authority: role.is_authority(),
		telemetry: telemetry.as_ref().map(|x| x.handle()),
	};

	if enable_grandpa {
		// start the full GRANDPA voter
		// NOTE: non-authorities could run the GRANDPA observer protocol, but at
		// this point the full voter should provide better guarantees of block
		// and vote data availability than the observer. The observer has not
		// been tested extensively yet and having most nodes in a network run it
		// could lead to finality stalls.
		let grandpa_config = sc_finality_grandpa::GrandpaParams {
			config: grandpa_config,
			link: grandpa_link,
			network,
			voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
			prometheus_registry,
			shared_voter_state: SharedVoterState::empty(),
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		};

		// the GRANDPA voter task is considered infallible, i.e.
		// if it fails we take down the service with it.
		task_manager.spawn_essential_handle().spawn_blocking(
			"grandpa-voter",
			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
		);
	}

	network_starter.start_network();

	Ok(task_manager)
}

/// Builds a new service for a light client.
pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
	let telemetry = config.telemetry_endpoints.clone()
		.filter(|x| !x.is_empty())
		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
			let worker = TelemetryWorker::new(16)?;
			let telemetry = worker.handle().new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let (client, backend, keystore_container, mut task_manager, on_demand) =
		sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		)?;

	let mut telemetry = telemetry
		.map(|(worker, telemetry)| {
			task_manager.spawn_handle().spawn("telemetry", worker.run());
			telemetry
		});

	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
		on_demand.clone(),
	));

	let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
		client.clone(),
		&(client.clone() as Arc<_>),
		select_chain.clone(),
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
		grandpa_block_import.clone(),
		client.clone(),
	);
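
	// A light client never authors blocks, hence `NeverCanAuthor` below.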
	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
		ImportQueueParams {
			block_import: aura_block_import.clone(),
			justification_import: Some(Box::new(grandpa_block_import.clone())),
			client: client.clone(),
			inherent_data_providers: InherentDataProviders::new(),
			spawner: &task_manager.spawn_essential_handle(),
			can_author_with: sp_consensus::NeverCanAuthor,
			slot_duration: sc_consensus_aura::slot_duration(&*client)?,
			registry: config.prometheus_registry(),
			check_for_equivocation: Default::default(),
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		},
	)?;

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		sc_service::build_network(sc_service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: Some(on_demand.clone()),
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		sc_service::build_offchain_workers(
			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
		);
	}
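
	// The light client serves data fetched on demand from full nodes through
	// the remote blockchain backend; no custom RPC extensions are installed.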
	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
		remote_blockchain: Some(backend.remote_blockchain()),
		transaction_pool,
		task_manager: &mut task_manager,
		on_demand: Some(on_demand),
		rpc_extensions_builder: Box::new(|_, _| ()),
		config,
		client,
		keystore: keystore_container.sync_keystore(),
		backend,
		network,
		network_status_sinks,
		system_rpc_tx,
		telemetry: telemetry.as_mut(),
	})?;

	network_starter.start_network();

	Ok(task_manager)
}