initial commit, which is clearly not initial

Signed-off-by: Uncle Stretch <uncle.stretch@ghostchain.io>
This commit is contained in:
Uncle Stretch
2024-10-03 15:38:52 +03:00
commit 66719626bb
178 changed files with 41709 additions and 0 deletions

View File

@@ -0,0 +1,22 @@
[package]
name = "ghost-remote-ext-tests-bags-list"
version = "1.0.0"
description = "Integration test that use state from live chains via remote externalities."
license.workspace = true
authors.workspace = true
edition.workspace = true
repository.workspace = true
homepage.workspace = true
[dependencies]
casper-runtime = { path = "../../runtime/casper" }
casper-runtime-constants = { path = "../../runtime/casper/constants" }
pallet-bags-list-remote-tests = { workspace = true }
sp-tracing = { workspace = true, features = ["std"] }
sp-core = { workspace = true, features = ["std"] }
frame-system = { workspace = true, features = ["std"] }
clap = { workspace = true }
log = { workspace = true }
tokio = { workspace = true, features = ["macros"] }

View File

@@ -0,0 +1,84 @@
use clap::{Parser, ValueEnum};
#[derive(Clone, Debug, ValueEnum)]
#[value(rename_all = "PascalCase")]
enum Command {
CheckMigration,
SanityCheck,
Snapshot,
}
#[derive(Clone, Debug, ValueEnum)]
#[value(rename_all = "PascalCase")]
enum Runtime {
Casper,
}
#[derive(Parser, Debug)]
struct Cli {
#[arg(long, short, default_value = "wss://127.0.0.1:443")]
uri: String,
#[arg(long, short, ignore_case = true, value_enum, default_value_t = Runtime::Casper)]
runtime: Runtime,
#[arg(long, short, ignore_case = true, value_enum, default_value_t = Command::SanityCheck)]
command: Command,
#[arg(long, short)]
snapshot_limit: Option<usize>,
}
#[tokio::main]
async fn main() {
let options = Cli::parse();
sp_tracing::try_init_simple();
log::info!(
target: "remote-ext-tests",
"using runtime {:?} / command: {:?}",
options.runtime,
options.command,
);
use pallet_bags_list_remote_tests::*;
match options.runtime {
Runtime::Casper => sp_core::crypto::set_default_ss58_version(
<casper_runtime::Runtime as frame_system::Config>::SS58Prefix::get()
.try_into()
.unwrap(),
),
};
match options.runtime {
Runtime::Casper => {
use casper_runtime::{Block, Runtime};
use casper_runtime_constants::currency::CSPR;
match options.command {
(Command::CheckMigration) => {
migration::execute::<Runtime, Block>(
CSPR as u64,
"CSPR",
options.uri.clone(),
).await;
},
(Command::SanityCheck) => {
try_state::execute::<Runtime, Block>(
CSPR as u64,
"CSPR",
options.uri.clone(),
).await;
},
(Command::Snapshot) => {
snapshot::execute::<Runtime, Block>(
options.snapshot_limit,
CSPR.try_into().unwrap(),
options.uri.clone(),
).await;
},
}
},
}
}
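As a quick illustration of how the clap definitions above resolve, here is a minimal test sketch; the flag values are examples only and rely solely on the derives shown in this file:
#[cfg(test)]
mod cli_tests {
	use super::*;

	#[test]
	fn parses_snapshot_command() {
		// Illustrative invocation of the parser defined above.
		let cli = Cli::parse_from([
			"ghost-remote-ext-tests-bags-list",
			"--uri",
			"wss://127.0.0.1:443",
			"--command",
			"Snapshot",
		]);
		assert!(matches!(cli.command, Command::Snapshot));
	}
}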

View File

@@ -0,0 +1,21 @@
[package]
name = "ghost-staging-chain-spec-builder"
description = "Utility for building chain-specification files for Ghost and Casper runtimes on `sp-genesis-builder`"
version = "1.6.1"
build = "build.rs"
authors.workspace = true
edition.workspace = true
repository.workspace = true
license.workspace = true
homepage.workspace = true
[[bin]]
path = "bin/main.rs"
name = "chain-spec-builder"
[dependencies]
clap = { workspace = true, features = ["derive"] }
log = { workspace = true }
serde_json = { workspace = true }
sc-chain-spec = { workspace = true }
sp-tracing = { workspace = true, features = ["std"] }

View File

@@ -0,0 +1,103 @@
use chain_spec_builder::{
generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd,
ConvertToRawCmd, DisplayPresetCmd, ListPresetsCmd, UpdateCodeCmd,
VerifyCmd,
};
use clap::Parser;
use sc_chain_spec::{
update_code_in_json_chain_spec, GenericChainSpec,
GenesisConfigBuilderRuntimeCaller,
};
use ghost_staging_chain_spec_builder as chain_spec_builder;
use std::fs;
fn main() {
match inner_main() {
Err(e) => eprintln!("{e}"),
_ => {},
}
}
fn inner_main() -> Result<(), String> {
sp_tracing::try_init_simple();
let builder = ChainSpecBuilder::parse();
let chain_spec_path = builder.chain_spec_path.to_path_buf();
match builder.command {
ChainSpecBuilderCmd::Create(cmd) => {
let chain_spec_json = generate_chain_spec_for_runtime(&cmd)?;
fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?;
},
ChainSpecBuilderCmd::UpdateCode(UpdateCodeCmd {
ref input_chain_spec,
ref runtime_wasm_path,
}) => {
let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?;
let mut chain_spec_json = serde_json::from_str::<serde_json::Value>(
&chain_spec.as_json(false)?
).map_err(|e| format!("Conversion to json failed: {e}"))?;
update_code_in_json_chain_spec(
&mut chain_spec_json,
&fs::read(runtime_wasm_path.as_path())
.map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..],
);
let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json)
.map_err(|e| format!("to pretty failed: {e}"))?;
fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?;
},
ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => {
let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?;
let chain_spec_json = serde_json::from_str::<serde_json::Value>(
&chain_spec.as_json(false)?
).map_err(|e| format!("Conversion to json failed: {e}"))?;
let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json)
.map_err(|e| format!("to pretty failed: {e}"))?;
fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?;
},
ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => {
let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?;
let _ = serde_json::from_str::<serde_json::Value>(&chain_spec.as_json(true)?)
.map_err(|e| format!("Conversion to json failed: {e}"))?;
},
ChainSpecBuilderCmd::ListPresets(ListPresetsCmd { runtime_wasm_path }) => {
let code = fs::read(runtime_wasm_path.as_path())
.map_err(|e| format!("wasm blob shall be readable {e}"))?;
let caller: GenesisConfigBuilderRuntimeCaller =
GenesisConfigBuilderRuntimeCaller::new(&code[..]);
let presets = caller
.preset_names()
.map_err(|e| format!("getting default config from runtime should work: {e}"))?;
let presets: Vec<String> = presets
.into_iter()
.map(|preset| {
String::from(
TryInto::<&str>::try_into(&preset)
.unwrap_or_else(|_| "cannot display preset id")
.to_string(),
)
})
.collect();
println!("{}", serde_json::json!({"presets": presets}).to_string());
},
ChainSpecBuilderCmd::DisplayPreset(DisplayPresetCmd {
runtime_wasm_path,
preset_name,
}) => {
let code = fs::read(runtime_wasm_path.as_path())
.map_err(|e| format!("wasm blob shall be readable {e}"))?;
let caller: GenesisConfigBuilderRuntimeCaller =
GenesisConfigBuilderRuntimeCaller::new(&code[..]);
let presets = caller
.get_named_preset(preset_name.as_ref())
.map_err(|e| format!("getting default config from runtime should work: {e}"))?;
println!("{presets}");
},
};
Ok(())
}

View File

@@ -0,0 +1,7 @@
use std::env;
fn main() {
if let Ok(profile) = env::var("PROFILE") {
println!("cargo:rustc-cfg=build_type=\"{}\"", profile);
}
}
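The script forwards Cargo's `PROFILE` as a custom `build_type` cfg; a hypothetical helper elsewhere in the crate could branch on it like this (illustrative only, not part of the tool):
// Hypothetical: consume the `build_type` cfg emitted by the build script above.
fn default_log_filter() -> &'static str {
	if cfg!(build_type = "debug") {
		"debug"
	} else {
		"info"
	}
}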

View File

@@ -0,0 +1,196 @@
use std::{fs, path::PathBuf};
use clap::{Parser, Subcommand};
use serde_json::Value;
use sc_chain_spec::{
ChainType, GenericChainSpec, GenesisConfigBuilderRuntimeCaller,
};
#[derive(Debug, Parser)]
#[command(rename_all = "kebab-case", version, about)]
pub struct ChainSpecBuilder {
#[command(subcommand)]
pub command: ChainSpecBuilderCmd,
/// The path where the chain should be saved.
#[arg(long, short, default_value = "./chain_spec.json")]
pub chain_spec_path: PathBuf,
}
#[derive(Debug, Subcommand)]
#[command(rename_all = "kebab-case")]
pub enum ChainSpecBuilderCmd {
Create(CreateCmd),
Verify(VerifyCmd),
UpdateCode(UpdateCodeCmd),
ConvertToRaw(ConvertToRawCmd),
ListPresets(ListPresetsCmd),
DisplayPreset(DisplayPresetCmd),
}
#[derive(Parser, Debug)]
pub struct CreateCmd {
/// The name of chain.
#[arg(long, short = 'n', default_value = "Casper")]
chain_name: String,
/// The chain id.
#[arg(long, short = 'i', default_value = "casper")]
chain_id: String,
/// The path to runtime wasm blob.
#[arg(long, short)]
runtime_wasm_path: PathBuf,
/// Export chainspec as raw storage.
#[arg(long, short = 's')]
raw_storage: bool,
/// Verify the genesis config. This silently generates the raw storage from
/// genesis config. Any errors will be reported.
#[arg(long, short = 'v')]
verify: bool,
#[command(subcommand)]
action: GenesisBuildAction,
}
#[derive(Subcommand, Debug, Clone)]
enum GenesisBuildAction {
Patch(PatchCmd),
Full(FullCmd),
Default(DefaultCmd),
NamedPreset(NamedPresetCmd),
}
/// Patches the runtime's default genesis config with the provided patch.
#[derive(Parser, Debug, Clone)]
struct PatchCmd {
/// The path to the full runtime genesis config json file.
patch_path: PathBuf,
}
/// Build the genesis config for runtime using provided json file.
/// No defaults will be used.
#[derive(Parser, Debug, Clone)]
struct FullCmd {
/// The path to the full runtime genesis config json file.
config_path: PathBuf,
}
/// Gets the default genesis config for the runtime and uses it in ChainSpec.
/// Please note that default genesis config may not be valid. For some runtimes
/// initial values should be added there (e.g. session keys, babe epoch).
#[derive(Parser, Debug, Clone)]
struct DefaultCmd {}
/// Uses named preset provided by runtime to build the chain spec.
#[derive(Parser, Debug, Clone)]
struct NamedPresetCmd {
preset_name: String,
}
/// Updates the code in the provided input chain spec.
///
/// The code field of the chain spec will be updated with the runtime provided
/// in the command line. The operation supports both plain and raw formats.
///
/// This command does not update chain-spec file in-place. The result of this
/// command will be stored in a file given as `-c/--chain-spec-path` command
/// line argument.
#[derive(Parser, Debug, Clone)]
pub struct UpdateCodeCmd {
/// Chain spec to be updated.
///
/// Please note that the file will not be updated in-place.
pub input_chain_spec: PathBuf,
/// The path to new runtime wasm blob to be stored into chain-spec.
pub runtime_wasm_path: PathBuf,
}
/// Converts the given chain spec into raw format.
#[derive(Parser, Debug, Clone)]
pub struct ConvertToRawCmd {
/// Chain spec to be converted.
pub input_chain_spec: PathBuf,
}
/// Lists available presets.
#[derive(Parser, Debug, Clone)]
pub struct ListPresetsCmd {
/// The path to runtime wasm blob.
#[arg(long, short)]
pub runtime_wasm_path: PathBuf,
}
/// Displays given preset.
#[derive(Parser, Debug, Clone)]
pub struct DisplayPresetCmd {
/// The path to runtime wasm blob.
#[arg(long, short)]
pub runtime_wasm_path: PathBuf,
/// Preset to be displayed. If none is given, the default will be displayed.
#[arg(long, short)]
pub preset_name: Option<String>,
}
/// Verifies the provided input chain spec.
///
/// Silently checks if given input chain spec can be converted to raw. It allows
/// to check if all `RuntimeGenesisConfig` fields are properly initialized and
/// if the json does not contain invalid fields.
#[derive(Parser, Debug, Clone)]
pub struct VerifyCmd {
/// Chain spec to be verified.
pub input_chain_spec: PathBuf,
}
/// Processes `CreateCmd` and returns JSON version of `ChainSpec`.
pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result<String, String> {
let code = fs::read(cmd.runtime_wasm_path.as_path())
.map_err(|e| format!("wasm blob shall be readable {e}"))?;
let builder = GenericChainSpec::<()>::builder(&code[..], Default::default())
.with_name(&cmd.chain_name[..])
.with_id(&cmd.chain_id[..])
.with_chain_type(ChainType::Live);
let builder = match cmd.action {
GenesisBuildAction::NamedPreset(NamedPresetCmd { ref preset_name }) =>
builder.with_genesis_config_preset_name(&preset_name),
GenesisBuildAction::Patch(PatchCmd { ref patch_path }) => {
let patch = fs::read(patch_path.as_path())
.map_err(|e| format!("patch file {patch_path:?} shall be readable: {e}"))?;
builder.with_genesis_config_patch(serde_json::from_slice::<Value>(&patch[..]).map_err(
|e| format!("patch file {patch_path:?} shall contain a valid json: {e}"),
)?)
},
GenesisBuildAction::Full(FullCmd { ref config_path }) => {
let config = fs::read(config_path.as_path())
.map_err(|e| format!("config file {config_path:?} shall be readable: {e}"))?;
builder.with_genesis_config(serde_json::from_slice::<Value>(&config[..]).map_err(
|e| format!("config file {config_path:?} shall contain a valid json: {e}"),
)?)
},
GenesisBuildAction::Default(DefaultCmd {}) => {
let caller: GenesisConfigBuilderRuntimeCaller =
GenesisConfigBuilderRuntimeCaller::new(&code[..]);
let default_config = caller
.get_default_config()
.map_err(|e| format!("getting default config from runtime should work: {e}"))?;
builder.with_genesis_config(default_config)
},
};
let chain_spec = builder.build();
match (cmd.verify, cmd.raw_storage) {
(_, true) => chain_spec.as_json(true),
(true, false) => {
chain_spec.as_json(true)?;
println!("Genesis config verification: OK");
chain_spec.as_json(false)
},
(false, false) => chain_spec.as_json(false),
}
}

16
utils/generate-bags/Cargo.toml Executable file
View File

@@ -0,0 +1,16 @@
[package]
name = "ghost-voter-bags"
version = "0.3.6"
description = "CLI to generate voter bags for Ghost runtimes"
license.workspace = true
authors.workspace = true
edition.workspace = true
repository.workspace = true
homepage.workspace = true
[dependencies]
clap = { workspace = true, features = ["derive"] }
sp-io = { workspace = true, default-features = true }
generate-bags = { workspace = true }
casper-runtime = { path = "../../runtime/casper", default-features = true }

55
utils/generate-bags/src/main.rs Executable file
View File

@@ -0,0 +1,55 @@
//! Make the set of voting bag thresholds to be used in `voter_bags.rs`.
//!
//! Generally speaking this script can be run once per runtime and never
//! touched again. It can be reused to generate a wholly different quantity of bags,
//! or if the existential deposit changes, etc.
use clap::{Parser, ValueEnum};
use generate_bags::generate_thresholds;
use std::path::{Path, PathBuf};
use casper_runtime::Runtime as CasperRuntime;
#[derive(Clone, Debug, ValueEnum)]
#[value(rename_all = "kebab-case")]
enum Runtime {
Casper,
}
impl Runtime {
fn generate_thresholds_fn(
&self,
) -> Box<dyn FnOnce(usize, &Path, u128, u128) -> Result<(), std::io::Error>> {
match self {
Runtime::Casper => Box::new(generate_thresholds::<CasperRuntime>),
}
}
}
#[derive(Debug, Parser)]
struct Opt {
/// How many bags to generate.
#[arg(long, default_value_t = 200)]
n_bags: usize,
/// Which runtime to generate.
#[arg(long, ignore_case = true, value_enum, default_value_t = Runtime::Casper)]
runtime: Runtime,
/// Where to write the output.
#[arg(short, long, value_name="FILE")]
output: PathBuf,
/// The total issuance of the native currency (`value` * 10^18).
#[arg(short, long)]
total_issuance: u128,
/// The minimum account balance (i.e. existential deposit) for the native
/// currency. (`value` * 10^18)
#[arg(short, long)]
minimum_balance: u128,
}
fn main() -> Result<(), std::io::Error> {
let Opt { n_bags, output, runtime, total_issuance, minimum_balance } = Opt::parse();
runtime.generate_thresholds_fn()(n_bags, &output, total_issuance, minimum_balance)
}

21
utils/ghostkey/Cargo.toml Executable file
View File

@@ -0,0 +1,21 @@
[package]
name = "ghostkey"
version = "0.3.15"
description = "Generate and restore keys for chains such as Ghost and Casper"
license.workspace = true
authors.workspace = true
edition.workspace = true
homepage.workspace = true
repository.workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[[bin]]
name = "ghostkey"
path = "src/main.rs"
[dependencies]
clap = { workspace = true, features = ["derive"] }
sc-cli = { workspace = true, default-features = true }
ghost-client-cli = { workspace = true, default-features = true }

35
utils/ghostkey/src/lib.rs Executable file
View File

@@ -0,0 +1,35 @@
use clap::Parser;
use ghost_client_cli::{VanityCmd, KeySubcommand};
#[derive(Debug, Parser)]
#[command(
name = "ghostkey",
author = "f4t50",
about = "Ghost Key Tool",
version
)]
pub enum Ghostkey {
/// Key utility for the CLI
#[clap(flatten)]
KeyCli(KeySubcommand),
/// Sign a message, with a given (secret) key.
Sign(sc_cli::SignCmd),
/// Generate a seed that provides a vanity address.
Vanity(VanityCmd),
/// Verify a signature for a message, provided on STDIN, with a given
/// (public or secret) key.
Verify(sc_cli::VerifyCmd),
}
/// Run the ghostkey command, given the appropriate runtime.
pub fn run() -> Result<(), sc_cli::Error> {
match Ghostkey::parse() {
Ghostkey::KeyCli(cmd) => cmd.run(),
Ghostkey::Sign(cmd) => cmd.run(),
Ghostkey::Vanity(cmd) => cmd.run(),
Ghostkey::Verify(cmd) => cmd.run(),
}
}

5
utils/ghostkey/src/main.rs Executable file
View File

@@ -0,0 +1,5 @@
//! Ghostkey utility, based on kitchensink_runtime.
fn main() -> Result<(), sc_cli::Error> {
ghostkey::run()
}

44
utils/staking-miner/Cargo.toml Executable file
View File

@@ -0,0 +1,44 @@
[package]
name = "ghost-miner"
version = "1.5.0"
description = "A tool to submit NPoS election solutions for Ghost and Casper Network"
license.workspace = true
authors.workspace = true
edition.workspace = true
homepage.workspace = true
repository.workspace = true
[dependencies]
codec = { workspace = true }
scale-info = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
jsonrpsee = { workspace = true, features = ["ws-client"] }
log = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
futures = { workspace = true }
thiserror = { workspace = true }
pin-project-lite = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread", "sync", "signal"] }
scale-value = { workspace = true }
subxt = { workspace = true, features = ["jsonrpsee", "native", "substrate-compat"] }
frame-election-provider-support = { workspace = true }
pallet-election-provider-multi-phase = { workspace = true }
sp-npos-elections = { workspace = true }
frame-support = { workspace = true }
sp-runtime = { workspace = true }
prometheus = { workspace = true }
hyper = { workspace = true, features = ["server", "http1", "http2", "tcp"] }
once_cell = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
sp-storage = { workspace = true }
regex = { workspace = true }

View File

@@ -0,0 +1,11 @@
[workspace]
members = [
'node',
'runtime',
]
resolver = "2"
[profile.release]
panic = 'unwind'
debug = true

View File

@@ -0,0 +1,61 @@
[package]
name = "ghost-staking-miner-playground"
version = "0.1.3"
description = "FRAME-based Substrate node, ready for hacking."
build = "build.rs"
license.workspace = true
authors.workspace = true
edition.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "staking-miner-playground"
[dependencies]
clap = { workspace = true, features = ["derive"] }
futures = { workspace = true }
serde_json = { workspace = true }
frame-system = { workspace = true }
pallet-staking = { workspace = true }
pallet-transaction-payment = { workspace = true }
sc-cli = { workspace = true }
sc-client-api = { workspace = true }
sc-consensus = { workspace = true }
sc-consensus-aura = { workspace = true }
sc-consensus-grandpa = { workspace = true }
sc-executor = { workspace = true }
sc-network = { workspace = true }
sc-service = { workspace = true }
sc-telemetry = { workspace = true }
sc-transaction-pool = { workspace = true }
sc-offchain = { workspace = true }
sp-consensus-aura = { workspace = true }
sp-consensus-grandpa = { workspace = true }
sp-core = { workspace = true }
inherents = { workspace = true }
keyring = { workspace = true }
sp-runtime = { workspace = true }
sp-timestamp = { workspace = true }
# These dependencies are used for the node template's RPCs
jsonrpsee = { workspace = true, features = ["server"] }
sc-basic-authorship = { workspace = true }
sc-rpc-api = { workspace = true }
sp-api = { workspace = true }
sp-blockchain = { workspace = true }
sp-block-builder = { workspace = true }
substrate-frame-rpc-system = { workspace = true }
pallet-transaction-payment-rpc = { workspace = true }
# Local Dependencies
runtime = { path = "../runtime" }
rand = { workspace = true }
lazy_static = { workspace = true }
[build-dependencies]
substrate-build-script-utils = { workspace = true }
[features]
default = []

View File

@@ -0,0 +1,6 @@
use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};
fn main() {
generate_cargo_keys();
rerun_if_git_head_changed();
}

View File

@@ -0,0 +1,187 @@
use pallet_staking::StakerStatus;
use rand::{distributions::Alphanumeric, rngs::OsRng, seq::SliceRandom, Rng};
use runtime::{
opaque::SessionKeys, AccountId, AuraConfig, Balance, BalancesConfig,
GrandpaConfig, MaxNominations, RuntimeGenesisConfig, SessionConfig,
Signature, StakingConfig, SudoConfig, SystemConfig, WASM_BINARY,
};
use sc_service::ChainType;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_consensus_grandpa::AuthorityId as GrandpaId;
use sp_core::{sr25519, Pair, Public};
use sp_runtime::traits::{IdentifyAccount, Verify};
lazy_static::lazy_static! {
static ref NOMINATORS: u32 = std::env::var("N")
.unwrap_or("700".to_string())
.parse()
.unwrap();
static ref CANDIDATES: u32 = std::env::var("C")
.unwrap_or("200".to_string())
.parse()
.unwrap();
static ref VALIDATORS: u32 = std::env::var("V")
.unwrap_or("20".to_string())
.parse()
.unwrap();
}
/// Specialized `ChainSpec`. This is a specialization of the general Substrate
/// ChainSpec type.
pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig>;
/// Generate a crypto pair from seed.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
type AccountPublic = <Signature as Verify>::Signer;
/// Generate an account ID from seed.
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Generate an Aura authority key.
pub fn authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) {
(
// used as both stash and controller
get_account_id_from_seed::<sr25519::Public>(s),
get_from_seed::<AuraId>(s),
get_from_seed::<GrandpaId>(s),
)
}
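For example, the well-known dev seed resolves through these helpers as follows (illustrative sketch only):
#[cfg(test)]
mod seed_tests {
	use super::*;

	#[test]
	fn alice_keys_derive() {
		// `//Alice` yields a stash/controller account plus Aura and GRANDPA session keys.
		let _alice: AccountId = get_account_id_from_seed::<sr25519::Public>("Alice");
		let (_stash, _aura, _grandpa) = authority_keys_from_seed("Alice");
	}
}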
pub fn development_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
let chain_spec = ChainSpec::builder(wasm_binary, Default::default())
.with_genesis_config_patch(testnet_genesis())
.with_chain_type(ChainType::Development)
.build();
Ok(chain_spec)
}
fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys {
SessionKeys { grandpa, aura }
}
/// Configure initial storage state for FRAME modules.
fn testnet_genesis() -> serde_json::Value {
let rand_str = || -> String {
OsRng
.sample_iter(&Alphanumeric)
.take(32)
.map(char::from)
.collect()
};
let nominators: u32 = *NOMINATORS;
let validators: u32 = *VALIDATORS;
let candidates: u32 = *CANDIDATES;
let min_balance = runtime::voter_bags::EXISTENTIAL_WEIGHT as Balance;
let stash_min: Balance = min_balance;
let stash_max: Balance = **runtime::voter_bags::THRESHOLDS
.iter()
.skip(100)
.take(1)
.collect::<Vec<_>>()
.first()
.unwrap() as u128;
let endowment: Balance = stash_max * 2;
println!(
"nominators {:?} / validators {:?} / candidates {:?} / maxNomination {}.",
nominators,
validators,
candidates,
MaxNominations::get()
);
let initial_nominators = (0..nominators)
.map(|_| rand_str())
.map(|seed| get_account_id_from_seed::<sr25519::Public>(seed.as_str()))
.collect::<Vec<_>>();
let initial_authorities = [authority_keys_from_seed("Alice")]
.into_iter()
.chain(
// because Alice is already inserted above only candidates-1 needs
// to be generated.
(0..candidates - 1)
.map(|_| rand_str())
.map(|seed| authority_keys_from_seed(seed.as_str())),
)
.collect::<Vec<_>>();
let root_key = authority_keys_from_seed("Alice").0;
let endowed_accounts = initial_authorities
.iter()
.map(|x| x.0.clone())
.chain(initial_nominators.iter().cloned())
.collect::<Vec<_>>();
let rng1 = rand::thread_rng();
let mut rng2 = rand::thread_rng();
let stakers = initial_authorities
.iter()
.map(|x| {
(
x.0.clone(),
x.0.clone(),
rng1.clone().gen_range(stash_min..=stash_max),
StakerStatus::Validator,
)
})
.chain(initial_nominators.iter().map(|x| {
let limit = (MaxNominations::get() as usize).min(initial_authorities.len());
let nominations = initial_authorities
.as_slice()
.choose_multiple(&mut rng2, limit)
.into_iter()
.map(|choice| choice.0.clone())
.collect::<Vec<_>>();
(
x.clone(),
x.clone(),
rng2.gen_range(stash_min..=stash_max),
StakerStatus::Nominator(nominations),
)
}))
.collect::<Vec<_>>();
let genesis = RuntimeGenesisConfig {
system: SystemConfig::default(),
balances: BalancesConfig {
balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(),
},
session: SessionConfig {
keys: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone())))
.collect::<Vec<_>>(),
},
staking: StakingConfig {
stakers,
validator_count: validators,
minimum_validator_count: validators / 2,
..Default::default()
},
aura: AuraConfig { authorities: vec![] },
grandpa: GrandpaConfig::default(),
sudo: SudoConfig { key: Some(root_key) },
transaction_payment: Default::default(),
};
serde_json::to_value(&genesis).expect("Valid ChainSpec; qed")
}

View File

@@ -0,0 +1,42 @@
use sc_cli::RunCmd;
#[derive(Debug, clap::Parser)]
pub struct Cli {
#[clap(subcommand)]
pub subcommand: Option<Subcommand>,
#[clap(flatten)]
pub run: RunCmd,
}
#[derive(Debug, clap::Subcommand)]
pub enum Subcommand {
/// Key management CLI utilities
#[clap(subcommand)]
Key(sc_cli::KeySubcommand),
/// Build a chain specification
#[clap(subcommand)]
BuildSpec(sc_cli::BuildSpecCmd),
/// Validate blocks
CheckBlock(sc_cli::CheckBlockCmd),
/// Export blocks
ExportBlocks(sc_cli::ExportBlocksCmd),
/// Export state
ExportState(sc_cli::ExportStateCmd),
/// Import blocks
ImportBlocks(sc_cli::ImportBlocksCmd),
/// Remove the whole chain
PurgeChain(sc_cli::PurgeChainCmd),
/// Revert the chain to a previous state
Revert(sc_cli::RevertCmd),
/// Db meta columns information
ChainInfo(sc_cli::ChainInfoCmd),
}

View File

@@ -0,0 +1,108 @@
use crate::{chain_spec, cli::{Cli, Subcommand}, service};
use runtime::Block;
use sc_cli::SubstrateCli;
use sc_service::PartialComponents;
impl SubstrateCli for Cli {
fn impl_name() -> String {
"Substrate Node".into()
}
fn impl_version() -> String {
env!("SUBSTRATE_CLI_IMPL_VERSION").into()
}
fn description() -> String {
env!("CARGO_PKG_DESCRIPTION").into()
}
fn author() -> String {
env!("CARGO_PKG_AUTHORS").into()
}
fn support_url() -> String {
"support.anonymous.an".into()
}
fn copyright_start_year() -> i32 {
0
}
fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
Ok(match id {
"dev" => Box::new(chain_spec::development_config()?),
path => Box::new(
chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?,
),
})
}
}
/// Parse and run command line arguments
pub fn run() -> sc_cli::Result<()> {
let cli = Cli::from_args();
match &cli.subcommand {
Some(Subcommand::Key(cmd)) => cmd.run(&cli),
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
},
Some(Subcommand::CheckBlock(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, import_queue, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
Some(Subcommand::ExportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, config.database), task_manager))
})
},
Some(Subcommand::ExportState(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, config.chain_spec), task_manager))
})
},
Some(Subcommand::ImportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, import_queue, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.database))
},
Some(Subcommand::Revert(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, backend, .. } =
service::new_partial(&config)?;
let aux_revert = Box::new(|client, _, blocks| {
sc_consensus_grandpa::revert(client, blocks)?;
Ok(())
});
Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
})
},
Some(Subcommand::ChainInfo(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run::<Block>(&config))
},
None => {
let runner = cli.create_runner(&cli.run)?;
runner.run_node_until_exit(|config| async move {
service::new_full(config).map_err(sc_cli::Error::Service)
})
},
}
}

View File

@@ -0,0 +1,111 @@
use crate::service::FullClient;
use runtime::SystemCall;
use inherents::{InherentData, InherentDataProvider};
use keyring::Sr25519Keyring;
use sc_cli::Result;
use sc_client_api::BlockBackend;
use sp_core::{Encode, Pair};
use sp_runtime::{OpaqueExtrinsic, SaturatedConversion};
use std::{sync::Arc, time::Duration};
/// Generates extrinsic for the `benchmark overhead` command.
/// Note: Should only be used for benchmarking.
pub struct BenchmarkExtrinsicBuilder {
client: Arc<FullClient>,
}
impl BenchmarkExtrinsicBuilder {
/// Creates a new [`Self`] from the given client.
pub fn new(client: Arc<FullClient>) -> Self {
Self { client }
}
}
impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder {
fn remark(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
let acc = Sr25519Keyring::Bob.pair();
let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
self.client.as_ref(),
acc,
SystemCall::remark { remark: vec![] }.into(),
nonce,
).into();
Ok(extrinsic)
}
}
/// Create a transaction using the given `call`.
/// Note: Should only be used for benchmarking.
pub fn create_benchmark_extrinsic(
client: &FullClient,
sender: sp_core::sr25519::Pair,
call: runtime::RuntimeCall,
nonce: u32,
) -> runtime::UncheckedExtrinsic {
let genesis_hash = client
.block_hash(0)
.ok()
.flatten()
.expect("Genesis block exists; qed");
let best_hash = client.chain_info().best_hash;
let best_block = client.chain_info().best_number;
let period = runtime::BlockHashCount::get()
.checked_next_power_of_two()
.map(|c| c / 2)
.unwrap_or(2) as u64;
let extra: runtime::SignedExtra = (
frame_system::CheckNonZeroSender::<runtime::Runtime>::new(),
frame_system::CheckSpecVersion::<runtime::Runtime>::new(),
frame_system::CheckTxVersion::<runtime::Runtime>::new(),
frame_system::CheckGenesis::<runtime::Runtime>::new(),
frame_system::CheckEra::<runtime::Runtime>::from(sp_runtime::generic::Era::mortal(
period,
best_block.saturated_into()
)),
frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
frame_system::CheckWeight::<runtime::Runtime>::new(),
pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
);
let raw_payload = runtime::SignedPayload::from_raw(
call.clone(),
extra.clone(),
(
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis_hash,
best_hash,
(),
(),
(),
),
);
let signature = raw_payload.using_encoded(|e| sender.sign(e));
runtime::UncheckedExtrinsic::new_signed(
call.clone(),
sp_runtime::AccountId32::from(sender.public()).into(),
runtime::Signature::Sr25519(signature.clone()),
extra.clone(),
)
}
/// Generates inherent data for the `benchmark overhead` command.
/// Note: Should only be used for benchmarking.
pub fn inherent_benchmark_data() -> Result<InherentData> {
let mut inherent_data = InherentData::new();
let d = Duration::from_millis(0);
let timestamp = sp_timestamp::InherentDataProvider::new(d.into());
futures::executor::block_on(timestamp.provide_inherent_data(&mut inherent_data))
.map_err(|e| format!("creating inherent data: {:?}", e))?;
Ok(inherent_data)
}

View File

@@ -0,0 +1,3 @@
pub mod chain_spec;
pub mod rpc;
pub mod service;

View File

@@ -0,0 +1,12 @@
#![warn(missing_docs)]
mod chain_spec;
#[macro_use]
mod service;
mod cli;
mod command;
mod rpc;
fn main() -> sc_cli::Result<()> {
command::run()
}

View File

@@ -0,0 +1,49 @@
#![warn(missing_docs)]
use std::sync::Arc;
use jsonrpsee::RpcModule;
use runtime::{opaque::Block, AccountId, Balance, Nonce};
use sc_transaction_pool_api::TransactionPool;
use sp_api::ProvideRuntimeApi;
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
pub use sc_rpc_api::DenyUnsafe;
/// Full client dependencies
pub struct FullDeps<C, P> {
/// The client instance to use
pub client: Arc<C>,
/// Transaction pool instance
pub pool: Arc<P>,
/// Whether to deny unsafe calls
pub deny_unsafe: DenyUnsafe,
}
/// Instantiate all full RPC extensions
pub fn create_full<C, P>(
deps: FullDeps<C, P>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>,
C: HeaderBackend<Block> + HeaderMetadata<Block, Error = BlockChainError> + 'static,
C: Send + Sync + 'static,
C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
C::Api: BlockBuilder<Block>,
P: TransactionPool + 'static,
{
use pallet_transaction_payment_rpc::{
TransactionPayment, TransactionPaymentApiServer,
};
use substrate_frame_rpc_system::{System, SystemApiServer};
let mut module = RpcModule::new(());
let FullDeps { client, pool, deny_unsafe } = deps;
module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
module.merge(TransactionPayment::new(client).into_rpc())?;
Ok(module)
}

View File

@@ -0,0 +1,350 @@
pub use sc_executor::NativeElseWasmExecutor;
use sc_executor::{HeapAllocStrategy, DEFAULT_HEAP_ALLOC_STRATEGY};
use futures::FutureExt;
use runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{Backend, BlockBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_grandpa::SharedVoterState;
use sc_executor::WasmExecutor;
use sc_service::{
error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams,
};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};
/// The minimum period of blocks over which justifications are imported
/// and generated.
const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;
// Native executor instance.
pub struct ExecutorDispatch;
impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
type ExtendHostFunctions = ();
fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
runtime::api::dispatch(method, data)
}
fn native_version() -> sc_executor::NativeVersion {
runtime::native_version()
}
}
pub(crate) type FullClient =
sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
type FullBackend = sc_service::TFullBackend<Block>;
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
pub type Service = sc_service::PartialComponents<
FullClient,
FullBackend,
FullSelectChain,
sc_consensus::DefaultImportQueue<Block>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
),
>;
pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let strategy = config
.default_heap_pages
.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { extra_pages: p as _ });
let wasm_exec = WasmExecutor::builder()
.with_runtime_cache_size(config.runtime_cache_size)
.with_max_runtime_instances(config.max_runtime_instances)
.with_execution_method(config.wasm_method)
.with_onchain_heap_alloc_strategy(strategy)
.with_offchain_heap_alloc_strategy(strategy)
.build();
let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new_with_wasm_executor(wasm_exec);
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
let client = Arc::new(client);
let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", None, worker.run());
telemetry
});
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_essential_handle(),
client.clone(),
);
let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
client.clone(),
GRANDPA_JUSTIFICATION_PERIOD,
&client,
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let import_queue =
sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);
Ok((slot, timestamp))
},
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
compatibility_mode: Default::default(),
})?;
Ok(sc_service::PartialComponents {
client,
backend,
task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
other: (
grandpa_block_import,
grandpa_link,
telemetry,
),
})
}
/// Build a new service for a full client
pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
let sc_service::PartialComponents {
client,
backend,
mut task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
other: (
grandpa_block_import,
grandpa_link,
mut telemetry,
),
} = new_partial(&config)?;
let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
&config.chain_spec,
);
let (grandpa_protocol_config, grandpa_notification_service) =
sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
net_config.add_notification_protocol(grandpa_protocol_config);
let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
grandpa_link.shared_authority_set.clone(),
Vec::default(),
));
let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
block_announce_validator_builder: None,
warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
block_relay: None,
})?;
if config.offchain_worker.enabled {
task_manager.spawn_handle().spawn(
"offchain-workers-runnner",
"offchain-worker",
sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
runtime_api_provider: client.clone(),
is_validator: config.role.is_authority(),
keystore: Some(keystore_container.keystore()),
offchain_db: backend.offchain_storage(),
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
network_provider: network.clone(),
enable_http_requests: true,
custom_extensions: |_| vec![],
})
.run(client.clone(), task_manager.spawn_handle())
.boxed(),
);
}
let role = config.role.clone();
let force_authoring = config.force_authoring;
let backoff_authoring_blocks: Option<()> = None;
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let rpc_extensions_builder = {
let client = client.clone();
let pool = transaction_pool.clone();
Box::new(move |deny_unsafe, _| {
let deps = crate::rpc::FullDeps {
client: client.clone(),
pool: pool.clone(),
deny_unsafe,
};
crate::rpc::create_full(deps).map_err(Into::into)
})
};
let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
keystore: keystore_container.keystore(),
task_manager: &mut task_manager,
transaction_pool: transaction_pool.clone(),
rpc_builder: rpc_extensions_builder,
backend,
system_rpc_tx,
tx_handler_controller,
sync_service: sync_service.clone(),
config,
telemetry: telemetry.as_mut(),
})?;
if role.is_authority() {
let proposer_factory = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
StartAuraParams {
slot_duration,
client,
select_chain,
block_import: grandpa_block_import,
proposer_factory,
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);
Ok((slot, timestamp))
},
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.keystore(),
sync_oracle: sync_service.clone(),
justification_sync_link: sync_service.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
max_block_proposal_slot_portion: None,
telemetry: telemetry.as_ref().map(|x| x.handle()),
compatibility_mode: Default::default(),
},
)?;
// The AURA authoring task is considered essential, i.e. if it fails
// we take down the service with it.
task_manager
.spawn_essential_handle()
.spawn_blocking("aura", Some("block-authoring"), aura);
}
if enable_grandpa {
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if role.is_authority() {
Some(keystore_container.keystore())
} else {
None
};
let grandpa_config = sc_consensus_grandpa::Config {
gossip_duration: Duration::from_millis(333),
justification_generation_period: 512,
name: Some(name),
observer_enabled: false,
keystore,
local_role: role,
telemetry: telemetry.as_ref().map(|x| x.handle()),
protocol_name: grandpa_protocol_name,
};
// start the full GRANDPA voter
// NOTE: non-authorities could run the GRANDPA observer protocol, but at
// this point the full voter should provide better guarantees of block
// and vote data availability than the observer. The observer has not
// been tested extensively yet and having most nodes in a network run it
// could lead to finality stalls.
let grandpa_config = sc_consensus_grandpa::GrandpaParams {
config: grandpa_config,
link: grandpa_link,
network,
sync: Arc::new(sync_service),
notification_service: grandpa_notification_service,
voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
};
// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
None,
sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
);
}
network_starter.start_network();
Ok(task_manager)
}

View File

@@ -0,0 +1,90 @@
[package]
name = "runtime"
version = "0.1.3"
description = "Runtime for ghost-staking-miner"
build = "build.rs"
license.workspace = true
authors.workspace = true
edition.workspace = true
homepage.workspace = true
repository.workspace = true
[dependencies]
codec = { workspace = true, features = ["derive"] }
scale-info = { workspace = true, features = ["derive"] }
log = { workspace = true }
frame-support = { workspace = true }
frame-system = { workspace = true }
frame-executive = { workspace = true }
frame-election-provider-support = { workspace = true }
pallet-balances = { workspace = true }
pallet-grandpa = { workspace = true }
pallet-insecure-randomness-collective-flip = { workspace = true }
pallet-sudo = { workspace = true }
pallet-staking = { workspace = true }
pallet-session = { workspace = true }
pallet-staking-reward-curve = { workspace = true }
pallet-bags-list = { workspace = true }
pallet-election-provider-multi-phase = { workspace = true }
pallet-aura = { workspace = true }
pallet-timestamp = { workspace = true }
pallet-transaction-payment = { workspace = true }
sp-api = { workspace = true }
sp-staking = { workspace = true }
sp-consensus-aura = { workspace = true }
sp-block-builder = { workspace = true }
sp-core = { workspace = true }
inherents = { workspace = true }
sp-offchain = { workspace = true }
sp-runtime = { workspace = true }
sp-session = { workspace = true }
sp-std = { workspace = true }
sp-transaction-pool = { workspace = true }
sp-version = { workspace = true }
sp-genesis-builder = { workspace = true }
# Used for the node template's RPCs
frame-system-rpc-runtime-api = { workspace = true }
pallet-transaction-payment-rpc-runtime-api = { workspace = true }
[build-dependencies]
substrate-wasm-builder = { workspace = true }
[features]
default = ["std"]
std = [
"codec/std",
"scale-info/std",
"frame-executive/std",
"frame-election-provider-support/std",
"frame-support/std",
"frame-support/std",
"frame-system-rpc-runtime-api/std",
"frame-system/std",
"pallet-aura/std",
"pallet-balances/std",
"pallet-election-provider-multi-phase/std",
"pallet-grandpa/std",
"pallet-insecure-randmoness-collective-flip/std",
"pallet-staking/std",
"pallet-sudo/std",
"pallet-timestamp/std",
"pallet-transaction-payment/std",
"pallet-transaction-payment-rpc-runtime-api/std",
"sp-api/std",
"sp-block-builder/std",
"sp-consensus-aura/std",
"sp-core/std",
"sp-staking/std",
"inherents/std",
"sp-offchain/std",
"sp-runtime/std",
"sp-session/std",
"sp-std/std",
"sp-transaction-pool/std",
"sp-version/std",
]
test-trimming = []

View File

@@ -0,0 +1,7 @@
fn main() {
substrate_wasm_builder::WasmBuilder::new()
.with_current_project()
.export_heap_base()
.import_memory()
.build()
}

View File

@@ -0,0 +1,794 @@
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase
// the limit to 256.
#![recursion_limit = "256"]
// Make the WASM binary available
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
#[macro_export]
macro_rules! prod_or_enforce_trimming {
($prod:expr, $test:expr) => {
if cfg!(feature = "test-trimming") {
$test
} else {
$prod
}
};
}
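Illustratively, a bound defined through this macro stays loose in production builds and is tightened only when the `test-trimming` feature is enabled (hypothetical constant and values, not taken from the runtime below):
// Hypothetical example: effectively unbounded in production, small under
// `test-trimming` so that solution trimming paths are exercised.
pub const EXAMPLE_MINER_MAX_VOTES: u32 = prod_or_enforce_trimming!(u32::MAX, 16);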
pub use frame_support::{
construct_runtime, derive_impl, parameter_types,
genesis_builder_helper::{build_config, create_default_config},
traits::{KeyOwnerProofSystem, Randomness, StorageInfo},
weights::{
constants::{
BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight,
WEIGHT_REF_TIME_PER_SECOND,
},
IdentityFee, Weight,
},
StorageValue,
};
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Percent, Permill};
use election_multi_phase::SolutionAccuracyOf;
use frame_election_provider_support::{
bounds::ElectionBoundsBuilder, onchain, ElectionDataProvider,
SequentialPhragmen,
};
use frame_support::{
dispatch::PerDispatchClass,
pallet_prelude::*,
traits::ConstU32,
};
use frame_system::{limits, EnsureRoot};
use opaque::SessionKeys;
use pallet_election_provider_multi_phase as election_multi_phase;
use pallet_election_provider_multi_phase::GeometricDepositBase;
use pallet_grandpa::{
fg_primitives, AuthorityId as GrandpaId,
AuthorityList as GrandpaAuthorityList,
};
use pallet_session::ShouldEndSession;
use pallet_staking::SessionInterface;
use pallet_transaction_payment::CurrencyAdapter;
use sp_api::impl_runtime_apis;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::{
BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, OpaqueKeys,
Verify,
},
transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiSignature,
};
use sp_staking::SessionIndex;
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
pub type BlockNumber = u32;
pub type Signature = MultiSignature;
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
pub type Balance = u128;
pub type Nonce = u32;
pub type Hash = sp_core::H256;
pub type Moment = u64;
pub const DOLLARS: Balance = 100_000_000_000_000;
pub const CENTS: Balance = DOLLARS / 100;
pub mod opaque {
use super::*;
pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
pub type BlockId = generic::BlockId<Block>;
impl_opaque_keys! {
pub struct SessionKeys {
pub aura: AuraId,
pub grandpa: GrandpaId,
}
}
}
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("playground"),
impl_name: create_runtime_str!("playground"),
authoring_version: 1,
spec_version: 100,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
state_version: 1,
};
pub const MILLISECS_PER_BLOCK: u64 = 6_000;
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
}
parameter_types! {
pub const Version: RuntimeVersion = VERSION;
pub const BlockHashCount: BlockNumber = 2_400;
pub const SS58Prefix: u8 = 42;
pub BlockLength: limits::BlockLength =
limits::BlockLength { max: PerDispatchClass::new(|_| 4 * 1024) };
pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(
Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 100, u64::MAX)
);
}
impl frame_system::Config for Runtime {
}
impl pallet_insecure_randomness_collective_flip::Config for Runtime {}
impl pallet_aura::Config for Runtime {
type AuthorityId = AuraId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
type AllowMultipleBlocksPerSlot = ();
}
parameter_types! {
pub const MaxSetIdSessionEntries: u32 =
BondingDuration::get() * SessionsPerEra::get();
pub const MaxAuthorities: u32 = 100_000;
}
impl pallet_grandpa::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = ();
type MaxAuthorities = MaxAuthorities;
type MaxNominators = MaxNominators;
type MaxSetIdSessionEntries = MaxSetIdSessionEntries;
type KeyOwnerProof = sp_core::Void;
type EquivocationReportSystem = ();
}
parameter_types! {
pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
impl pallet_timestamp::Config for Runtime {
type Moment = u64;
type OnTimestampSet = Aura;
type MinimumPeriod = MinimumPeriod;
type WeightInfo = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 500;
pub const MaxLocks: u32 = 50;
}
impl pallet_balances::Config for Runtime {
type MaxLocks = MaxLocks;
type MaxReserves = ();
type ReserveIdentifier = [u8; 8];
type Balance = Balance;
type RuntimeEvent = RuntimeEvent;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
type RuntimeHoldReason = RuntimeHoldReason;
type RuntimeFreezeReason = RuntimeFreezeReason;
type FreezeIdentifier = RuntimeFreezeReason;
type MaxFreezes = ();
type MaxHolds = ConstU32<1>;
}
parameter_types! {
pub const TransactionByteFee: Balance = 1;
pub OperationalFeeMultiplier: u8 = 5;
}
impl pallet_transaction_payment::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
type OperationalFeeMultiplier = OperationalFeeMultiplier;
type WeightToFee = IdentityFee<Balance>;
type LengthToFee =
frame_support::weights::ConstantMultiplier<Balance, TransactionByteFee>;
type FeeMultiplierUpdate = ();
}
impl pallet_sudo::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type RuntimeCall = RuntimeCall;
type WeightInfo = ();
}
pub(crate) mod sudo_key {
use super::*;
#[frame_support::storage_alias]
pub(crate) type Key = StorageValue<Sudo, AccountId>;
}
pub struct SudoAsStakingSessionManager;
impl pallet_session::SessionManager<AccountId> for SudoAsStakingSessionManager {
fn end_session(end_index: sp_staking::SessionIndex) {
<Staking as pallet_session::SessionManager<AccountId>>::end_session(end_index)
}
fn new_session(new_index: sp_staking::SessionIndex) -> Option<Vec<AccountId>> {
<Staking as pallet_session::SessionManager<AccountId>>::new_session(new_index).map(
|validators| {
if let Some(sudo) = validators.iter().find(|v| v == &&sudo_key::Key::get().unwrap()) {
log::info!(target: "runtime", "overwriting all validators to sudo: {:?}", sudo);
} else {
log::warn!(
target: "runtime",
"sudo is not event in the validator set {:?}",
sudo_key::Key::get().unwrap()
);
}
vec![sudo_key::Key::get().unwrap()]
},
)
}
fn new_session_genesis(new_index: sp_staking::SessionIndex) -> Option<Vec<AccountId>> {
<Staking as pallet_session::SessionManager<AccountId>>::new_session_genesis(new_index).map(
|validators| {
if let Some(sudo) = validators.iter().find(|v| v == &&sudo_key::Key::get().unwrap()) {
log::info!(target: "runtime", "overwriting all validators to sudo: {:?}", sudo);
} else {
log::warn!(
target: "runtime",
"sudo is not event in the validator set {:?}",
sudo_key::Key::get().unwrap()
);
}
vec![sudo_key::Key::get().unwrap()]
},
)
}
fn start_session(start_index: sp_staking::SessionIndex) {
<Staking as pallet_session::SessionManager<AccountId>>::start_session(start_index)
}
}
fn get_last_election() -> BlockNumber {
frame_support::storage::unhashed::get("last_election".as_bytes()).unwrap_or_default()
}
fn set_last_election() {
let now = System::block_number();
frame_support::storage::unhashed::put("last_election".as_bytes(), &now);
}
pub struct PeriodicSessionUntilSolutionQueued<const PERIOD: BlockNumber>;
impl<const PERIOD: BlockNumber> ShouldEndSession<BlockNumber>
for PeriodicSessionUntilSolutionQueued<PERIOD>
{
fn should_end_session(_: BlockNumber) -> bool {
let now = System::block_number();
let last_election = get_last_election();
let will_change = ElectionProviderMultiPhase::queued_solution().is_some() ||
(now - last_election) > PERIOD;
if will_change {
set_last_election();
}
will_change
}
}
const SESSION: BlockNumber = 6 * MINUTES;
impl<const PERIOD: BlockNumber> frame_support::traits::EstimateNextSessionRotation<BlockNumber>
for PeriodicSessionUntilSolutionQueued<PERIOD>
{
fn average_session_length() -> BlockNumber {
PERIOD
}
fn estimate_current_session_progress(_: BlockNumber) -> (Option<Permill>, Weight) {
let now = System::block_number();
let since = now - get_last_election();
(Some(Permill::from_rational(since, PERIOD)), Weight::zero())
}
fn estimate_next_session_rotation(_: BlockNumber) -> (Option<BlockNumber>, Weight) {
(Some(get_last_election() + PERIOD), Weight::zero())
}
}
impl pallet_session::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type ValidatorId = <Self as frame_system::Config>::AccountId;
type ValidatorIdOf = pallet_staking::StashOf<Self>;
type ShouldEndSession = PeriodicSessionUntilSolutionQueued<SESSION>;
type NextSessionRotation = PeriodicSessionUntilSolutionQueued<SESSION>;
type SessionManager = SudoAsStakingSessionManager;
type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
type Keys = SessionKeys;
type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
}
use sp_runtime::curve::PiecewiseLinear;
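// The curve parameters below are given in parts-per-million, as expected by
// `pallet_staking_reward_curve`: `min_inflation: 0_025_000` is 2.5% annual
// inflation, rising to `max_inflation` of 10% at the ideal staking rate of 50%.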
pallet_staking_reward_curve::build! {
const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
min_inflation: 0_025_000,
max_inflation: 0_100_000,
ideal_stake: 0_500_000,
falloff: 0_050_000,
max_piece_count: 40,
test_precision: 0_005_000,
);
}
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 1;
pub const BondingDuration: sp_staking::EraIndex = 24 * 28;
pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7;
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 256;
pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17);
pub Lookahead: BlockNumber = 5u32.into();
pub HistoryDepth: u32 = 84;
pub const MaxExposurePageSize: u32 = 64;
pub const MaxNominators: u32 = 64;
pub const MaxNominations: u32 =
<NposSolution16 as frame_election_provider_support::NposSolution>::LIMIT as u32;
}
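// Note: `MaxNominations` resolves to `NposSolution16::LIMIT`, i.e. 16 targets
// per nominator (see the `solution_16` module below).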
pub struct StakingBenchmarkingConfig;
impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig {
type MaxNominators = Nominators;
type MaxValidators = Validators;
}
impl SessionInterface<AccountId> for Runtime {
fn disable_validator(validator_index: u32) -> bool {
<pallet_session::Pallet<Runtime>>::disable_index(validator_index)
}
fn validators() -> Vec<AccountId> {
<pallet_session::Pallet<Runtime>>::validators()
}
fn prune_historical_up_to(_: sp_staking::SessionIndex) {
unimplemented!("we don't give a damn about historical session data here.");
}
}
impl pallet_staking::Config for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote;
type CurrencyBalance = Balance;
type MaxUnlockingChunks = ConstU32<16>;
type RewardRemainder = ();
type RuntimeEvent = RuntimeEvent;
type Slash = ();
type Reward = ();
type SessionsPerEra = SessionsPerEra;
type SessionInterface = Self;
type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
type MaxExposurePageSize = MaxExposurePageSize;
type NextNewSession = Session;
type OffendingValidatorsThreshold = OffendingValidatorsThreshold;
type ElectionProvider = ElectionProviderMultiPhase;
type GenesisElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
type VoterList = BagsList;
type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
type BenchmarkingConfig = StakingBenchmarkingConfig;
type HistoryDepth = HistoryDepth;
type TargetList = pallet_staking::UseValidatorsMap<Self>;
type NominationQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>;
type AdminOrigin = EnsureRoot<AccountId>;
type EventListeners = ();
}
parameter_types! {
pub const StakingUnsignedPriority: TransactionPriority =
TransactionPriority::max_value() / 2;
pub const SignedRewardBase: Balance = 1 * DOLLARS;
pub const SignedFixedDeposit: Balance = 1 * DOLLARS;
pub const SignedDepositByte: Balance = 1 * CENTS;
pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10);
pub MaxElectingVoters: u32 = Nominators::get();
pub const ElectionUnsignedPriority: TransactionPriority =
StakingUnsignedPriority::get() - 1u64;
pub Validators: u32 = option_env!("V").unwrap_or("20").parse().expect("env variable `V` must be number");
pub Nominators: u32 = option_env!("N").unwrap_or("700").parse().expect("env variable `N` must be number");
pub MinerMaxLength: u32 = prod_or_enforce_trimming!(
*(<<Runtime as frame_system::Config>::BlockLength as Get<limits::BlockLength>>::get()).max.get(DispatchClass::Normal),
Perbill::from_percent(45) * *(<<Runtime as frame_system::Config>::BlockLength as Get<limits::BlockLength>>::get()).max.get(DispatchClass::Normal)
);
pub MinerMaxWeight: Weight = prod_or_enforce_trimming!(
<Runtime as frame_system::Config>::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap(),
Perbill::from_percent(85) * <Runtime as frame_system::Config>::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap()
);
pub MaxActiveValidators: u32 = Validators::get();
pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds =
ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build();
}
mod solution_16 {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution16::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = MaxElectingVoters,
>(16)
);
}
mod solution_24 {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution24::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = MaxElectingVoters,
>(24)
);
}
use solution_16::NposSolution16;
#[allow(unused)]
use solution_24::NposSolution24;
pub struct ElectionProviderBenchmarkConfig;
impl election_multi_phase::BenchmarkingConfig for ElectionProviderBenchmarkConfig {
const VOTERS: [u32; 2] = [1000, 2000];
const TARGETS: [u32; 2] = [500, 1000];
const ACTIVE_VOTERS: [u32; 2] = [500, 800];
const DESIRED_TARGETS: [u32; 2] = [200, 400];
const SNAPSHOT_MAXIMUM_VOTERS: u32 = 1000;
const MINER_MAXIMUM_VOTERS: u32 = 1000;
const MAXIMUM_TARGETS: u32 = 300;
}
pub struct OnChainSeqPhragmen;
impl onchain::Config for OnChainSeqPhragmen {
type System = Runtime;
type Solver = SequentialPhragmen<
AccountId,
pallet_election_provider_multi_phase::SolutionAccuracyOf<Runtime>,
>;
type DataProvider =
<Runtime as pallet_election_provider_multi_phase::Config>::DataProvider;
type WeightInfo =
frame_election_provider_support::weights::SubstrateWeight<Runtime>;
type MaxWinners =
<Runtime as pallet_election_provider_multi_phase::Config>::MaxWinners;
type Bounds = ElectionBounds;
}
impl pallet_election_provider_multi_phase::MinerConfig for Runtime {
type AccountId = AccountId;
type MaxLength = MinerMaxLength;
type MaxWeight = MinerMaxWeight;
type Solution = NposSolution16;
type MaxWinners = MaxActiveValidators;
type MaxVotesPerVoter =
<<Self as pallet_election_provider_multi_phase::Config>::DataProvider as ElectionDataProvider>::MaxVotesPerVoter;
fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight {
<
<Self as pallet_election_provider_multi_phase::Config>::WeightInfo
as
pallet_election_provider_multi_phase::WeightInfo
>::submit_unsigned(v, t, a, d)
}
}
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where
RuntimeCall: From<C>,
{
type Extrinsic = UncheckedExtrinsic;
type OverarchingCall = RuntimeCall;
}
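/// `Get<u32>` helper that starts at `S` and grows by `I` for every election
/// round completed by the multi-phase election pallet.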
pub struct IncPerRound<const S: u32, const I: u32>;
impl<const S: u32, const I: u32> frame_support::traits::Get<u32> for IncPerRound<S, I> {
fn get() -> u32 {
S + (ElectionProviderMultiPhase::round() * I)
}
}
impl pallet_election_provider_multi_phase::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type Currency = Balances;
type EstimateCallFee = TransactionPayment;
type MinerConfig = Self;
type SignedMaxRefunds = ();
type UnsignedPhase = ConstU32<{ SESSION / 2 }>;
type SignedPhase = ConstU32<{ SESSION / 2 }>;
type BetterSignedThreshold = ();
type BetterUnsignedThreshold = ();
type OffchainRepeat = ();
type MinerTxPriority = ElectionUnsignedPriority;
type SignedMaxSubmissions = ConstU32<10>;
type SignedRewardBase = SignedRewardBase;
type SignedDepositBase =
GeometricDepositBase<Balance, SignedFixedDeposit, SignedDepositIncreaseFactor>;
type SignedDepositByte = SignedDepositByte;
type SignedDepositWeight = ();
type SignedMaxWeight = MinerMaxWeight;
type SlashHandler = ();
type RewardHandler = ();
type DataProvider = Staking;
type Fallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
type GovernanceFallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
type Solver = SequentialPhragmen<AccountId, SolutionAccuracyOf<Self>, ()>;
type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight<Self>;
type ForceOrigin = EnsureRoot<Self::AccountId>;
type MaxWinners = MaxActiveValidators;
type BenchmarkingConfig = ElectionProviderBenchmarkConfig;
type ElectionBounds = ElectionBounds;
}
pub mod voter_bags;
parameter_types! {
pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS;
}
impl pallet_bags_list::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type ScoreProvider = Staking;
type Score = u64;
type WeightInfo = pallet_bags_list::weights::SubstrateWeight<Runtime>;
type BagThresholds = BagThresholds;
}
construct_runtime!(
pub enum Runtime
{
System: frame_system,
RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip,
Timestamp: pallet_timestamp,
Sudo: pallet_sudo,
Aura: pallet_aura,
Grandpa: pallet_grandpa,
Balances: pallet_balances,
Staking: pallet_staking,
BagsList: pallet_bags_list,
TransactionPayment: pallet_transaction_payment,
ElectionProviderMultiPhase: election_multi_phase,
}
);
pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
frame_system::CheckGenesis<Runtime>,
frame_system::CheckEra<Runtime>,
frame_system::CheckNonce<Runtime>,
frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>
);
pub type UncheckedExtrinsic =
generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
pub type Executive = frame_executive::Executive<
Runtime,
Block,
frame_system::ChainContext<Runtime>,
Runtime,
AllPalletsWithSystem
>;
impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
VERSION
}
fn execute_block(block: Block) {
Executive::execute_block(block);
}
fn initialize_block(header: &<Block as BlockT>::Header) {
Executive::initialize_block(header)
}
}
impl sp_api::Metadata<Block> for Runtime {
fn metadata() -> OpaqueMetadata {
OpaqueMetadata::new(Runtime::metadata().into())
}
fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
Runtime::metadata_at_version(version)
}
fn metadata_versions() -> sp_std::vec::Vec<u32> {
Runtime::metadata_versions()
}
}
impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
}
fn finalize_block() -> <Block as BlockT>::Header {
Executive::finalize_block()
}
fn inherent_extrinsics(
data: inherents::InherentData,
) -> Vec<<Block as BlockT>::Extrinsic> {
data.create_extrinsics()
}
fn check_inherents(
block: Block,
data: inherents::InherentData,
) -> inherents::CheckInherentsResult {
data.check_extrinsics(&block)
}
}
impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
fn validate_transaction(
source: TransactionSource,
tx: <Block as BlockT>::Extrinsic,
block_hash: <Block as BlockT>::Hash,
) -> TransactionValidity {
Executive::validate_transaction(source, tx, block_hash)
}
}
impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
fn offchain_worker(header: &<Block as BlockT>::Header) {
Executive::offchain_worker(header)
}
}
impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
fn slot_duration() -> sp_consensus_aura::SlotDuration {
sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
}
fn authorities() -> Vec<AuraId> {
Aura::authorities().into_inner()
}
}
impl sp_session::SessionKeys<Block> for Runtime {
fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
opaque::SessionKeys::generate(seed)
}
fn decode_session_keys(
encoded: Vec<u8>,
) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
opaque::SessionKeys::decode_into_raw_public_keys(&encoded)
}
}
impl fg_primitives::GrandpaApi<Block> for Runtime {
fn grandpa_authorities() -> GrandpaAuthorityList {
Grandpa::grandpa_authorities()
}
fn current_set_id() -> fg_primitives::SetId {
Grandpa::current_set_id()
}
fn submit_report_equivocation_unsigned_extrinsic(
_equivocation_proof: fg_primitives::EquivocationProof<
<Block as BlockT>::Hash,
NumberFor<Block>,
>,
_key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
) -> Option<()> {
None
}
fn generate_key_ownership_proof(
_set_id: fg_primitives::SetId,
_authority_id: GrandpaId,
) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
None
}
}
impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
fn account_nonce(account: AccountId) -> Nonce {
System::account_nonce(account)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_info(uxt, len)
}
fn query_fee_details(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment::FeeDetails<Balance> {
TransactionPayment::query_fee_details(uxt, len)
}
fn query_weight_to_fee(weight: Weight) -> Balance {
TransactionPayment::weight_to_fee(weight)
}
fn query_length_to_fee(length: u32) -> Balance {
TransactionPayment::length_to_fee(length)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi<Block, Balance, RuntimeCall> for Runtime {
fn query_call_info(call: RuntimeCall, len: u32) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_call_info(call, len)
}
fn query_call_fee_details(call: RuntimeCall, len: u32) -> pallet_transaction_payment::FeeDetails<Balance> {
TransactionPayment::query_call_fee_details(call, len)
}
fn query_weight_to_fee(weight: Weight) -> Balance {
TransactionPayment::weight_to_fee(weight)
}
fn query_length_to_fee(length: u32) -> Balance {
TransactionPayment::length_to_fee(length)
}
}
impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
fn create_default_config() -> Vec<u8> {
create_default_config::<RuntimeGenesisConfig>()
}
fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result {
build_config::<RuntimeGenesisConfig>(config)
}
}
}

View File

@@ -0,0 +1,441 @@
// This file is part of Ghost Network.
// Ghost Network is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Ghost Network is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Ghost Network. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated bag thresholds.
//!
//! Generated on 2023-06-14T16:02:49.211048528+00:00
//! Arguments
//! Total issuance: 30000000000000000000000000
//! Minimum balance: 100000000000000
//! for the casper runtime.
/// Existential weight for this runtime.
#[cfg(any(test, feature = "std"))]
#[allow(unused)]
pub const EXISTENTIAL_WEIGHT: u64 = 61_489_156;
/// Constant ratio between bags for this runtime.
#[cfg(any(test, feature = "std"))]
#[allow(unused)]
pub const CONSTANT_RATIO: f64 = 1.1420206998172278;
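// The thresholds below form an (approximately) geometric progression: each bag
// upper bound is the previous one multiplied by `CONSTANT_RATIO`, e.g.
// 61_489_156 * 1.142... ~= 70_221_889; the list starts at `EXISTENTIAL_WEIGHT`
// and the last bag is capped at `u64::MAX`.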
/// Upper thresholds delimiting the bag list.
pub const THRESHOLDS: [u64; 200] = [
61_489_156,
70_221_889,
80_194_851,
91_584_180,
104_591_029,
119_445_120,
136_408_800,
155_781_673,
177_905_895,
203_172_215,
232_026_875,
264_979_494,
302_612_067,
345_589_245,
394_670_071,
450_721_391,
514_733_158,
587_835_921,
671_320_790,
766_662_238,
875_544_146,
999_889_538,
1_141_894_550,
1_304_067_213,
1_489_271_751,
1_700_779_167,
1_942_325_015,
2_218_175_373,
2_533_202_192,
2_892_969_340,
3_303_830_870,
3_773_043_242,
4_308_893_484,
4_920_845_552,
5_619_707_481,
6_417_822_270,
7_329_285_880,
8_370_196_190,
9_558_937_311,
10_916_504_277,
12_466_873_854,
14_237_428_003,
16_259_437_492,
18_568_614_183,
21_205_741_764,
24_217_396_049,
27_656_767_584,
31_584_601_071,
36_070_268_219,
41_192_992_954,
47_043_250_641,
53_724_366_019,
61_354_338_078,
70_067_924_109,
80_019_019_726,
91_383_376_906,
104_361_708_046,
119_183_230_857,
136_109_716_710,
155_440_113_929,
177_515_827_689,
202_726_749_766,
231_518_144_639,
264_398_513_561,
301_948_575_488,
344_831_523_488,
393_804_737_773,
449_733_162_223,
513_604_580_653,
586_547_062_627,
669_848_886_937,
764_981_294_632,
873_624_473_443,
997_697_232_539,
1_139_390_891_710,
1_301_207_983_516,
1_486_006_451_943,
1_697_050_128_181,
1_938_066_375_010,
2_213_311_917_881,
2_527_648_025_372,
2_886_626_366_827,
3_296_587_063_555,
3_764_770_665_330,
4_299_446_029_872,
4_910_056_363_861,
5_607_386_004_799,
6_403_750_889_346,
7_313_216_072_106,
8_351_844_136_581,
9_537_978_885_623,
10_892_569_321_801,
12_439_539_639_691,
14_206_211_764_724,
16_223_787_901_302,
18_527_901_612_731,
21_159_247_165_916,
24_164_298_256_025,
27_596_128_804_938,
31_515_350_330_062,
35_991_182_438_923,
41_102_675_356_148,
46_940_106_074_588,
53_606_572_788_796,
61_219_815_771_064,
69_914_296_849_552,
79_843_574_215_355,
91_183_014_501_328,
104_132_890_032_251,
118_921_915_948_622,
135_811_289_675_251,
155_099_304_078_010,
177_126_615_784_334,
202_282_261_714_282,
231_010_530_083_556,
263_818_807_231_171,
301_286_538_859_088,
344_075_463_953_366,
392_941_302_133_960,
448_747_100_850_118,
512_478_478_153_804,
585_261_030_262_475,
668_380_211_356_103,
763_304_036_716_883,
871_709_010_184_730,
995_509_733_848_148,
1_136_892_722_924_124,
1_298_355_023_050_922,
1_482_748_312_035_827,
1_693_329_264_963_968,
1_933_817_072_195_143,
2_208_459_126_106_800,
2_522_106_036_714_231,
2_880_297_301_061_641,
3_289_359_139_440_088,
3_756_516_226_373_564,
4_290_019_289_717_909,
4_899_290_831_473_053,
5_595_091_543_966_984,
6_389_710_360_582_628,
7_297_181_497_621_964,
8_333_532_320_607_561,
9_517_066_412_729_732,
10_868_686_844_872_642,
12_412_265_356_675_752,
14_175_063_968_947_974,
16_188_216_473_771_936,
18_487_278_306_169_800,
21_112_854_508_927_888,
24_111_316_881_425_140,
27_535_622_978_440_076,
31_446_251_423_741_472,
35_912_270_057_569_732,
41_012_555_783_171_056,
46_837_187_656_790_104,
53_489_037_825_278_256,
61_085_588_409_774_440,
69_761_006_424_477_744,
79_668_513_376_836_192,
90_983_091_400_012_640,
103_904_573_712_177_232,
118_661_173_984_991_376,
135_513_516_955_473_664,
154_759_241_468_183_808,
176_738_257_244_678_592,
201_838_748_223_045_056,
230_504_028_495_915_136,
263_240_371_933_595_200,
300_625_953_775_751_680,
343_321_062_114_205_504,
392_079_759_617_658_880,
447_763_201_462_729_216,
511_354_844_686_868_352,
583_977_817_584_227_200,
666_914_755_915_276_544,
761_630_456_268_799_744,
869_797_746_670_209_152,
993_327_031_351_760_000,
1_134_400_031_491_706_240,
1_295_508_317_836_843_520,
1_479_497_315_755_071_488,
1_689_616_559_916_316_672,
1_929_577_086_178_408_960,
2_203_616_974_308_753_920,
2_516_576_199_129_205_248,
2_873_982_112_072_913_920,
3_282_147_062_891_703_296,
3_748_279_885_666_641_408,
4_280_613_218_139_856_384,
4_888_548_903_026_954_240,
5_582_824_039_325_583_360,
6_375_700_616_347_044_864,
7_281_182_079_705_782_272,
8_315_260_654_162_254_848,
9_496_199_791_429_038_080,
10_844_856_731_412_002_816,
12_385_050_873_824_708_608,
14_143_984_466_197_262_336,
16_152_723_038_290_595_840,
18_446_744_073_709_551_615,
];
/// Upper thresholds delimiting the bag list.
#[allow(dead_code)]
pub const THRESHOLDS_BALANCES: [u128; 200] = [
61_489_156,
70_221_889,
80_194_851,
91_584_180,
104_591_029,
119_445_120,
136_408_800,
155_781_673,
177_905_895,
203_172_215,
232_026_875,
264_979_494,
302_612_067,
345_589_245,
394_670_071,
450_721_391,
514_733_158,
587_835_921,
671_320_790,
766_662_238,
875_544_146,
999_889_538,
1_141_894_550,
1_304_067_213,
1_489_271_751,
1_700_779_167,
1_942_325_015,
2_218_175_373,
2_533_202_192,
2_892_969_340,
3_303_830_870,
3_773_043_242,
4_308_893_484,
4_920_845_552,
5_619_707_481,
6_417_822_270,
7_329_285_880,
8_370_196_190,
9_558_937_311,
10_916_504_277,
12_466_873_854,
14_237_428_003,
16_259_437_492,
18_568_614_183,
21_205_741_764,
24_217_396_049,
27_656_767_584,
31_584_601_071,
36_070_268_219,
41_192_992_954,
47_043_250_641,
53_724_366_019,
61_354_338_078,
70_067_924_109,
80_019_019_726,
91_383_376_906,
104_361_708_046,
119_183_230_857,
136_109_716_710,
155_440_113_929,
177_515_827_689,
202_726_749_766,
231_518_144_639,
264_398_513_561,
301_948_575_488,
344_831_523_488,
393_804_737_773,
449_733_162_223,
513_604_580_653,
586_547_062_627,
669_848_886_937,
764_981_294_632,
873_624_473_443,
997_697_232_539,
1_139_390_891_710,
1_301_207_983_516,
1_486_006_451_943,
1_697_050_128_181,
1_938_066_375_010,
2_213_311_917_881,
2_527_648_025_372,
2_886_626_366_827,
3_296_587_063_555,
3_764_770_665_330,
4_299_446_029_872,
4_910_056_363_861,
5_607_386_004_799,
6_403_750_889_346,
7_313_216_072_106,
8_351_844_136_581,
9_537_978_885_623,
10_892_569_321_801,
12_439_539_639_691,
14_206_211_764_724,
16_223_787_901_302,
18_527_901_612_731,
21_159_247_165_916,
24_164_298_256_025,
27_596_128_804_938,
31_515_350_330_062,
35_991_182_438_923,
41_102_675_356_148,
46_940_106_074_588,
53_606_572_788_796,
61_219_815_771_064,
69_914_296_849_552,
79_843_574_215_355,
91_183_014_501_328,
104_132_890_032_251,
118_921_915_948_622,
135_811_289_675_251,
155_099_304_078_010,
177_126_615_784_334,
202_282_261_714_282,
231_010_530_083_556,
263_818_807_231_171,
301_286_538_859_088,
344_075_463_953_366,
392_941_302_133_960,
448_747_100_850_118,
512_478_478_153_804,
585_261_030_262_475,
668_380_211_356_103,
763_304_036_716_883,
871_709_010_184_730,
995_509_733_848_148,
1_136_892_722_924_124,
1_298_355_023_050_922,
1_482_748_312_035_827,
1_693_329_264_963_968,
1_933_817_072_195_143,
2_208_459_126_106_800,
2_522_106_036_714_231,
2_880_297_301_061_641,
3_289_359_139_440_088,
3_756_516_226_373_564,
4_290_019_289_717_909,
4_899_290_831_473_053,
5_595_091_543_966_984,
6_389_710_360_582_628,
7_297_181_497_621_964,
8_333_532_320_607_561,
9_517_066_412_729_732,
10_868_686_844_872_642,
12_412_265_356_675_752,
14_175_063_968_947_974,
16_188_216_473_771_936,
18_487_278_306_169_800,
21_112_854_508_927_888,
24_111_316_881_425_140,
27_535_622_978_440_076,
31_446_251_423_741_472,
35_912_270_057_569_732,
41_012_555_783_171_056,
46_837_187_656_790_104,
53_489_037_825_278_256,
61_085_588_409_774_440,
69_761_006_424_477_744,
79_668_513_376_836_192,
90_983_091_400_012_640,
103_904_573_712_177_232,
118_661_173_984_991_376,
135_513_516_955_473_664,
154_759_241_468_183_808,
176_738_257_244_678_592,
201_838_748_223_045_056,
230_504_028_495_915_136,
263_240_371_933_595_200,
300_625_953_775_751_680,
343_321_062_114_205_504,
392_079_759_617_658_880,
447_763_201_462_729_216,
511_354_844_686_868_352,
583_977_817_584_227_200,
666_914_755_915_276_544,
761_630_456_268_799_744,
869_797_746_670_209_152,
993_327_031_351_760_000,
1_134_400_031_491_706_240,
1_295_508_317_836_843_520,
1_479_497_315_755_071_488,
1_689_616_559_916_316_672,
1_929_577_086_178_408_960,
2_203_616_974_308_753_920,
2_516_576_199_129_205_248,
2_873_982_112_072_913_920,
3_282_147_062_891_703_296,
3_748_279_885_666_641_408,
4_280_613_218_139_856_384,
4_888_548_903_026_954_240,
5_582_824_039_325_583_360,
6_375_700_616_347_044_864,
7_281_182_079_705_782_272,
8_315_260_654_162_254_848,
9_496_199_791_429_038_080,
10_844_856_731_412_002_816,
12_385_050_873_824_708_608,
14_143_984_466_197_262_336,
16_152_723_038_290_595_840,
18_446_744_073_709_551_615,
];

View File

@@ -0,0 +1,51 @@
use crate::prelude::*;
use jsonrpsee::ws_client::WsClientBuilder;
use subxt::backend::rpc::RpcClient as RawRpcClient;
/// Wraps the subxt interface to make it easy to use for this software.
#[derive(Clone, Debug)]
pub struct Client {
/// Access to typed rpc calls from subxt.
rpc: RpcClient,
/// Access to chain APIs such as storage, events etc.
chain_api: ChainClient,
}
impl Client {
pub async fn new(uri: &str) -> Result<Self, subxt::Error> {
log::debug!(target: LOG_TARGET, "attempting to connect to {:?}", uri);
let rpc = loop {
match WsClientBuilder::default()
.max_request_size(u32::MAX)
.max_response_size(u32::MAX)
.request_timeout(std::time::Duration::from_secs(600))
.build(&uri)
.await
{
Ok(rpc) => break RawRpcClient::new(rpc),
Err(e) => {
log::warn!(
target: LOG_TARGET,
"failed to connect to client due to {:?}, retrying soon...",
e
);
},
};
tokio::time::sleep(std::time::Duration::from_millis(2_500)).await;
};
let chain_api = ChainClient::from_rpc_client(rpc.clone()).await?;
Ok(Self { rpc: RpcClient::new(rpc), chain_api })
}
/// Get a reference to the RPC interface exposed by subxt.
pub fn rpc(&self) -> &RpcClient {
&self.rpc
}
/// Get a reference to the chain API.
pub fn chain_api(&self) -> &ChainClient {
&self.chain_api
}
}
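// Minimal usage sketch (the URI is only an example; any reachable node works):
//
//     let client = Client::new("ws://127.0.0.1:9944").await?;
//     let version = client.rpc().state_get_runtime_version(None).await?;
//     let metadata = client.chain_api().metadata();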

View File

@@ -0,0 +1,530 @@
use crate::{
error::Error,
helpers::{storage_at, RuntimeDispatchInfo},
opt::{BalanceIterations, Balancing, Solver},
prelude::*,
prometheus,
static_types::{self},
};
use std::{
collections::{BTreeMap, BTreeSet},
marker::PhantomData,
};
use codec::{Decode, Encode};
use frame_election_provider_support::{
Get, NposSolution, PhragMMS, SequentialPhragmen,
};
use frame_support::{weights::Weight, BoundedVec};
use pallet_election_provider_multi_phase::{
unsigned::TrimmingStatus, RawSolution, ReadySolution, SolutionOf,
SolutionOrSnapshotSize,
};
use scale_info::{PortableRegistry, TypeInfo};
use scale_value::scale::decode_as_type;
use sp_npos_elections::{ElectionScore, VoteWeight};
use subxt::{dynamic::Value, tx::DynamicPayload};
const EPM_PALLET_NAME: &str = "ElectionProviderMultiPhase";
type TypeId = u32;
type MinerVoterOf = frame_election_provider_support::Voter<AccountId, crate::static_types::MaxVotesPerVoter>;
type RoundSnapshot = pallet_election_provider_multi_phase::RoundSnapshot<AccountId, MinerVoterOf>;
type Voters = Vec<(AccountId, VoteWeight, BoundedVec<AccountId, crate::static_types::MaxVotesPerVoter>)>;
#[derive(Copy, Clone, Debug)]
struct EpmConstant {
epm: &'static str,
constant: &'static str,
}
impl EpmConstant {
const fn new(constant: &'static str) -> Self {
Self { epm: EPM_PALLET_NAME, constant }
}
const fn to_parts(self) -> (&'static str, &'static str) {
(self.epm, self.constant)
}
}
impl std::fmt::Display for EpmConstant {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("{}::{}", self.epm, self.constant))
}
}
#[derive(Debug)]
pub struct State {
voters: Voters,
voters_by_stake: BTreeMap<VoteWeight, usize>,
}
impl State {
fn len(&self) -> usize {
self.voters_by_stake.len()
}
fn to_voters(&self) -> Voters {
self.voters.clone()
}
}
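/// Snapshot voters pre-trimmed by weight: `new` drops the lowest-stake voters
/// until the estimated solution weight fits under `T::MaxWeight`, while
/// `trim(n)` removes `n` more voters for the length-based binary search in
/// `fetch_snapshot_and_mine_solution`.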
#[derive(Debug)]
pub struct TrimmedVoters<T> {
state: State,
_marker: PhantomData<T>,
}
impl<T> TrimmedVoters<T>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
pub async fn new(mut voters: Voters, desired_targets: u32) -> Result<Self, Error> {
let mut voters_by_stake = BTreeMap::new();
let mut targets = BTreeSet::new();
for (idx, (_voter, stake, supports)) in voters.iter().enumerate() {
voters_by_stake.insert(*stake, idx);
targets.extend(supports.iter().cloned());
}
loop {
let targets_len = targets.len() as u32;
let active_voters = voters_by_stake.len() as u32;
let est_weight: Weight = tokio::task::spawn_blocking(move || {
T::solution_weight(active_voters, targets_len, active_voters, desired_targets)
}).await?;
let max_weight: Weight = T::MaxWeight::get();
if est_weight.all_lt(max_weight) {
return Ok(Self {
state: State { voters, voters_by_stake },
_marker: PhantomData
});
}
let Some((_, idx)) = voters_by_stake.pop_first() else { break };
let rm = voters[idx].0.clone();
for (_voter, _stake, supports) in &mut voters {
supports.retain(|a| a != &rm);
}
targets.remove(&rm);
}
return Err(Error::Feasibility("Failed to pre-trim weight < T::MaxWeight".to_string()));
}
pub fn trim(&mut self, n: usize) -> Result<State, Error> {
let mut voters = self.state.voters.clone();
let mut voters_by_stake = self.state.voters_by_stake.clone();
for _ in 0..n {
let Some((_, idx)) = voters_by_stake.pop_first() else {
return Err(Error::Feasibility("Failed to pre-trim len".to_string()));
};
let rm = voters[idx].0.clone();
for (_voter, _stake, supports) in &mut voters {
supports.retain(|a| a != &rm);
}
}
Ok(State { voters, voters_by_stake })
}
pub fn to_voters(&self) -> Voters {
self.state.voters.clone()
}
pub fn len(&self) -> usize {
self.state.len()
}
}
pub(crate) fn update_metadata_constants(api: &ChainClient) -> Result<(), Error> {
const SIGNED_MAX_WEIGHT: EpmConstant = EpmConstant::new("SignedMaxWeight");
const MAX_LENGTH: EpmConstant = EpmConstant::new("MinerMaxLength");
const MAX_VOTES_PER_VOTER: EpmConstant = EpmConstant::new("MinerMaxVotesPerVoter");
const MAX_WINNERS: EpmConstant = EpmConstant::new("MaxWinners");
fn log_metadata(metadata: EpmConstant, val: impl std::fmt::Display) {
log::trace!(target: LOG_TARGET, "updating metadata constant `{metadata}`: `{val}`",);
}
let max_weight = read_constant::<Weight>(api, SIGNED_MAX_WEIGHT)?;
let max_length: u32 = read_constant(api, MAX_LENGTH)?;
let max_votes_per_voter: u32 = read_constant(api, MAX_VOTES_PER_VOTER)?;
let max_winners: u32 = read_constant(api, MAX_WINNERS)?;
log_metadata(SIGNED_MAX_WEIGHT, max_weight);
log_metadata(MAX_LENGTH, max_length);
log_metadata(MAX_VOTES_PER_VOTER, max_votes_per_voter);
log_metadata(MAX_WINNERS, max_winners);
static_types::MaxWeight::set(max_weight);
static_types::MaxLength::set(max_length);
static_types::MaxVotesPerVoter::set(max_votes_per_voter);
static_types::MaxWinners::set(max_winners);
Ok(())
}
fn invalid_metadata_error<E: std::error::Error>(item: String, err: E) -> Error {
Error::InvalidMetadata(format!("{} failed: {}", item, err))
}
fn read_constant<'a, T: serde::Deserialize<'a>>(
api: &ChainClient,
constant: EpmConstant,
) -> Result<T, Error> {
let (epm_name, constant) = constant.to_parts();
let val = api
.constants()
.at(&subxt::dynamic::constant(epm_name, constant))
.map_err(|e| invalid_metadata_error(constant.to_string(), e))?
.to_value()
.map_err(|e| Error::Subxt(e.into()))?;
scale_value::serde::from_value::<_, T>(val).map_err(|e| {
Error::InvalidMetadata(
format!("Decoding `{}` failed {}", std::any::type_name::<T>(), e)
)
})
}
pub(crate) fn set_emergency_result<A: Encode + TypeInfo + 'static>(
supports: frame_election_provider_support::Supports<A>,
) -> Result<DynamicPayload, Error> {
let scale_result = to_scale_value(supports).map_err(|e| {
Error::DynamicTransaction(format!("Failed to encode `Supports`: {:?}", e))
})?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "set_emergency_election_result", vec![scale_result]))
}
pub fn signed_solution<S: NposSolution + Encode + TypeInfo + 'static>(
solution: RawSolution<S>,
) -> Result<DynamicPayload, Error> {
let scale_solution = to_scale_value(solution).map_err(|e| {
Error::DynamicTransaction(format!("Failed to encode `RawSolution`: {:?}", e))
})?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "submit", vec![scale_solution]))
}
pub fn unsigned_solution<S: NposSolution + Encode + TypeInfo + 'static>(
solution: RawSolution<S>,
witness: SolutionOrSnapshotSize,
) -> Result<DynamicPayload, Error> {
let scale_solution = to_scale_value(solution)?;
let scale_witness = to_scale_value(witness)?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "submit_unsigned", vec![scale_solution, scale_witness]))
}
pub async fn signed_submission<S: NposSolution + Decode + TypeInfo + 'static>(
idx: u32,
block_hash: Option<Hash>,
api: &ChainClient,
) -> Result<Option<SignedSubmission<S>>, Error> {
let scale_idx = Value::u128(idx as u128);
let addr = subxt::dynamic::storage(EPM_PALLET_NAME, "SignedSubmissionsMap", vec![scale_idx]);
let storage = storage_at(block_hash, api).await?;
match storage.fetch(&addr).await {
Ok(Some(val)) => {
let submissions = Decode::decode(&mut val.encoded())?;
Ok(Some(submissions))
},
Ok(None) => Ok(None),
Err(err) => Err(err.into()),
}
}
pub async fn snapshot_at(
block_hash: Option<Hash>,
api: &ChainClient,
) -> Result<RoundSnapshot, Error> {
let empty = Vec::<Value>::new();
let addr = subxt::dynamic::storage(EPM_PALLET_NAME, "Snapshot", empty);
let storage = storage_at(block_hash, api).await?;
match storage.fetch(&addr).await {
Ok(Some(val)) => {
let snapshot = Decode::decode(&mut val.encoded())?;
Ok(snapshot)
},
Ok(None) => Err(Error::EmptySnapshot),
Err(err) => Err(err.into()),
}
}
pub async fn mine_solution<T>(
solver: Solver,
targets: Vec<AccountId>,
voters: Voters,
desired_targets: u32,
) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), Error>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
match tokio::task::spawn_blocking(move || match solver {
Solver::SeqPhragmen { iterations } => {
BalanceIterations::set(iterations);
Miner::<T>::mine_solution_with_snapshot::<
SequentialPhragmen<AccountId, Accuracy, Balancing>,
>(voters, targets, desired_targets)
},
Solver::PhragMMS { iterations } => {
BalanceIterations::set(iterations);
Miner::<T>::mine_solution_with_snapshot::<
PhragMMS<AccountId, Accuracy, Balancing>,
>(voters, targets, desired_targets)
},
}).await {
Ok(Ok(s)) => Ok(s),
Err(e) => Err(e.into()),
Ok(Err(e)) => Err(Error::Other(format!("{:?}", e))),
}
}
pub async fn fetch_snapshot_and_mine_solution<T>(
api: &ChainClient,
block_hash: Option<Hash>,
solver: Solver,
round: u32,
forced_desired_targets: Option<u32>,
) -> Result<MinedSolution<T>, Error>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
let snapshot = snapshot_at(block_hash, api).await?;
let storage = storage_at(block_hash, api).await?;
let desired_targets = match forced_desired_targets {
Some(x) => x,
None => storage
.fetch(&runtime::storage().election_provider_multi_phase().desired_targets())
.await?
.expect("Snapshot is non-empty; `desired_target` should exist; qed"),
};
let minimum_untrusted_score = storage
.fetch(&runtime::storage().election_provider_multi_phase().minimum_untrusted_score())
.await?
.map(|score| score.0);
let mut voters = TrimmedVoters::<T>::new(snapshot.voters.clone(), desired_targets).await?;
let (solution, score, solution_or_snapshot_size, trim_status) = mine_solution::<T>(
solver.clone(),
snapshot.targets.clone(),
voters.to_voters(),
desired_targets,
).await?;
if !trim_status.is_trimmed() {
return Ok(MinedSolution {
round,
desired_targets,
snapshot,
minimum_untrusted_score,
solution,
score,
solution_or_snapshot_size,
});
}
prometheus::on_trim_attempt();
let mut l = 1;
let mut h = voters.len();
let mut best_solution = None;
while l <= h {
let mid = ((h - l) / 2) + l;
let next_state = voters.trim(mid)?;
let (solution, score, solution_or_snapshot_size, trim_status) = mine_solution::<T>(
solver.clone(),
snapshot.targets.clone(),
next_state.to_voters(),
desired_targets,
).await?;
if !trim_status.is_trimmed() {
best_solution = Some((solution, score, solution_or_snapshot_size));
h = mid - 1;
} else {
l = mid + 1;
}
}
if let Some((solution, score, solution_or_snapshot_size)) = best_solution {
prometheus::on_trim_success();
Ok(MinedSolution {
round,
desired_targets,
snapshot,
minimum_untrusted_score,
solution,
score,
solution_or_snapshot_size,
})
} else {
Err(Error::Feasibility("Failed pre-trim length".to_string()))
}
}
pub struct MinedSolution<T: MinerConfig> {
round: u32,
desired_targets: u32,
snapshot: RoundSnapshot,
minimum_untrusted_score: Option<ElectionScore>,
solution: T::Solution,
score: ElectionScore,
solution_or_snapshot_size: SolutionOrSnapshotSize,
}
impl<T> MinedSolution<T>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
pub fn solution(&self) -> T::Solution {
self.solution.clone()
}
pub fn score(&self) -> ElectionScore {
self.score
}
pub fn size(&self) -> SolutionOrSnapshotSize {
self.solution_or_snapshot_size
}
pub fn feasibility_check(&self) -> Result<ReadySolution<AccountId, T::MaxWinners>, Error> {
match Miner::<T>::feasibility_check(
RawSolution { solution: self.solution.clone(), score: self.score, round: self.round },
pallet_election_provider_multi_phase::ElectionCompute::Signed,
self.desired_targets,
self.snapshot.clone(),
self.round,
self.minimum_untrusted_score,
) {
Ok(ready_solution) => Ok(ready_solution),
Err(e) => {
log::error!(target: LOG_TARGET, "Solution feasibility error {:?}", e);
Err(Error::Feasibility(format!("{:?}", e)))
},
}
}
}
impl<T: MinerConfig> std::fmt::Debug for MinedSolution<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MinedSolution")
.field("round", &self.round)
.field("desired_targets", &self.desired_targets)
.field("score", &self.score)
.finish()
}
}
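// `make_type`/`to_scale_value` below SCALE-encode a static type and decode the
// bytes back into a dynamic `scale_value::Value`, which is the form subxt's
// dynamic transaction API accepts as call arguments.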
fn make_type<T: scale_info::TypeInfo + 'static>() -> (TypeId, PortableRegistry) {
let m = scale_info::MetaType::new::<T>();
let mut types = scale_info::Registry::new();
let id = types.register_type(&m);
let portable_registry: PortableRegistry = types.into();
(id.id, portable_registry)
}
fn to_scale_value<T: scale_info::TypeInfo + 'static + Encode>(val: T) -> Result<Value, Error> {
let (ty_id, types) = make_type::<T>();
let bytes = val.encode();
decode_as_type(&mut bytes.as_ref(), ty_id, &types)
.map(|v| v.remove_context())
.map_err(|e| {
Error::DynamicTransaction(format!(
"Failed to decode {}: {:?}",
std::any::type_name::<T>(),
e
))
})
}
pub async fn runtime_api_solution_weight<S: Encode + NposSolution + TypeInfo + 'static>(
raw_solution: RawSolution<S>,
witness: SolutionOrSnapshotSize,
) -> Result<Weight, Error> {
let tx = unsigned_solution(raw_solution, witness)?;
let client = SHARED_CLIENT.get().expect("shared client is configured at start; qed");
let call_data = {
let mut buffer = Vec::new();
let encoded_call = client.chain_api().tx().call_data(&tx).unwrap();
let encoded_len = encoded_call.len() as u32;
buffer.extend(encoded_call);
encoded_len.encode_to(&mut buffer);
buffer
};
let bytes = client
.rpc()
.state_call("TransactionPaymentCallApi_query_call_info", Some(&call_data), None)
.await?;
let info: RuntimeDispatchInfo = Decode::decode(&mut bytes.as_ref())?;
log::trace!(
target: LOG_TARGET,
"Received weight of `Solution Extrnsic` from remote node: {:?}",
info.weight
);
Ok(info.weight)
}
pub fn mock_voters(voters: u32, desired_targets: u16) -> Option<Vec<(u32, u16)>> {
if voters >= desired_targets as u32 {
Some((0..voters).zip((0..desired_targets).cycle()).collect())
} else {
None
}
}
#[cfg(test)]
#[test]
fn mock_votes_works() {
assert_eq!(mock_voters(3, 2), Some(vec![(0, 0), (1, 1), (2, 0)]));
assert_eq!(mock_voters(3, 3), Some(vec![(0, 0), (1, 1), (2, 2)]));
assert_eq!(mock_voters(2, 3), None);
}

View File

@@ -0,0 +1,41 @@
use crate::prelude::*;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("Failed to parse log directive: `{0}`")]
LogParse(#[from] tracing_subscriber::filter::ParseError),
#[error("I/O error: `{0}`")]
Io(#[from] std::io::Error),
#[error("RPC error: `{0}`")]
RpcError(#[from] jsonrpsee::core::ClientError),
#[error("subxt error: `{0}`")]
Subxt(#[from] subxt::Error),
#[error("Crypto error: `{0}`")]
Crypto(sp_core::crypto::SecretStringError),
#[error("Codec error: `{0}`")]
Codec(#[from] codec::Error),
#[error("Incorrect phase")]
IncorrectPhase,
#[error("Submission is already submitted")]
AlreadySubmitted,
#[error("The account does not exist")]
AccountDoesNotExist,
#[error("Submission with better score already exist")]
BetterScoreExist,
#[error("Invalid chain: `{0}`, staking-miner supports only ghost and casper")]
InvalidChain(String),
#[error("Other error: `{0}`")]
Other(String),
#[error("Invalid metadata: `{0}`")]
InvalidMetadata(String),
#[error("Transaction rejected: `{0}`")]
TransactionRejected(String),
#[error("Dynamic transaction error: `{0}`")]
DynamicTransaction(String),
#[error("Feasibility error: `{0}`")]
Feasibility(String),
#[error("{0}")]
JoinError(#[from] tokio::task::JoinError),
#[error("Empty snapshot")]
EmptySnapshot,
}

View File

@@ -0,0 +1,128 @@
use crate::{error::Error, prelude::*};
use codec::Decode;
use frame_support::weights::Weight;
use jsonrpsee::core::ClientError as JsonRpseeError;
use pin_project_lite::pin_project;
use serde::Deserialize;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::{Duration, Instant},
};
use subxt::{
error::{Error as SubxtError, RpcError},
storage::Storage,
};
pin_project! {
pub struct Timed<Fut>
where
Fut: Future,
{
#[pin]
inner: Fut,
start: Option<Instant>,
}
}
impl<Fut> Future for Timed<Fut>
where
Fut: Future,
{
type Output = (Fut::Output, Duration);
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.project();
let start = this.start.get_or_insert_with(Instant::now);
match this.inner.poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(v) => {
let elapsed = start.elapsed();
Poll::Ready((v, elapsed))
},
}
}
}
pub trait TimedFuture: Sized + Future {
fn timed(self) -> Timed<Self> {
Timed { inner: self, start: None }
}
}
impl<F: Future> TimedFuture for F {}
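// Usage sketch: any future can be wrapped to also report its wall-clock time,
// e.g. `let (res, elapsed) = some_future.timed().await;` (where `some_future`
// stands for any future in scope).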
#[derive(Decode, Default, Debug, Deserialize)]
pub struct RuntimeDispatchInfo {
pub weight: Weight,
}
pub fn kill_main_task_if_critical_err(tx: &tokio::sync::mpsc::UnboundedSender<Error>, err: Error) {
match err {
Error::AlreadySubmitted |
Error::BetterScoreExist |
Error::IncorrectPhase |
Error::TransactionRejected(_) |
Error::JoinError(_) |
Error::Feasibility(_) |
Error::EmptySnapshot => {},
Error::Subxt(SubxtError::Rpc(rpc_err)) => {
log::debug!(target: LOG_TARGET, "rpc error: {:?}", rpc_err);
match rpc_err {
RpcError::ClientError(e) => {
let jsonrpsee_err = match e.downcast::<JsonRpseeError>() {
Ok(e) => *e,
Err(_) => {
let _ = tx.send(Error::Other(
"Failed to downcast RPC error; this is a bug please file an issue".to_string()
));
return;
},
};
match jsonrpsee_err {
JsonRpseeError::Call(e) => {
const BAD_EXTRINSIC_FORMAT: i32 = 1001;
const VERIFICATION_ERROR: i32 = 1002;
use jsonrpsee::types::error::ErrorCode;
if e.code() == BAD_EXTRINSIC_FORMAT ||
e.code() == VERIFICATION_ERROR ||
e.code() == ErrorCode::MethodNotFound.code()
{
let _ = tx.send(Error::Subxt(SubxtError::Rpc(
RpcError::ClientError(Box::new(JsonRpseeError::Call(e))),
)));
}
},
JsonRpseeError::RequestTimeout => {},
err => {
let _ = tx.send(Error::Subxt(SubxtError::Rpc(RpcError::ClientError(
Box::new(err),
))));
},
}
},
RpcError::SubscriptionDropped => (),
_ => (),
}
},
err => {
let _ = tx.send(err);
},
}
}
pub async fn storage_at(
block: Option<Hash>,
api: &ChainClient,
) -> Result<Storage<Config, ChainClient>, Error> {
if let Some(block_hash) = block {
Ok(api.storage().at(block_hash))
} else {
api.storage().at_latest().await.map_err(Into::into)
}
}

View File

@@ -0,0 +1,12 @@
#![allow(dead_code)]
pub mod client;
pub mod commands;
pub mod epm;
pub mod error;
pub mod helpers;
pub mod opt;
pub mod prelude;
pub mod prometheus;
pub mod signer;
pub mod static_types;

utils/staking-miner/src/main.rs Executable file
View File

@@ -0,0 +1,342 @@
//! # Ghost Staking Miner.
//!
//! Simple bot capable of monitoring a ghost (and other) chain and submitting
//! solutions to the `pallet-election-provider-multi-phase`.
//! See `--help` for more details.
//!
//! # Implementation Notes:
//!
//! - First draft: Be aware that this is the first draft and there might be
//! bugs, or undefined behaviors. Don't attach this bot to an account with
//! lots of funds.
//! - Quick to crash: The bot is written so that it only continues to work if
//! everything goes well. In case of any failure (RPC, logic, IO), it will
//! crash. This was a decision to simplify the development. It is intended
//! that this bot be run with `restart = true`, so that it reports its crash
//! but resumes work thereafter.
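//!
//! A hypothetical invocation (the binary name is assumed; the flags match the
//! CLI defined below):
//! `staking-miner --uri ws://127.0.0.1:9944 monitor --seed-or-path //Alice seq-phragmen`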
mod client;
mod commands;
mod epm;
mod error;
mod helpers;
mod opt;
mod prelude;
mod prometheus;
mod signer;
mod static_types;
use clap::Parser;
use error::Error;
use futures::future::{BoxFuture, FutureExt};
use prelude::*;
use std::str::FromStr;
use tokio::sync::oneshot;
use tracing_subscriber::EnvFilter;
use crate::{
client::Client,
opt::RuntimeVersion,
};
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
#[clap(author, version, about)]
pub struct Opt {
/// The `ws` node to connect to.
#[clap(long, short, default_value = DEFAULT_URI, env = "URI")]
pub uri: String,
#[clap(subcommand)]
pub command: Command,
/// The prometheus endpoint TCP port.
#[clap(long, short, env = "PROMETHEUS_PORT", default_value_t = DEFAULT_PROMETHEUS_PORT)]
pub prometheus_port: u16,
/// Sets a custom logging filter. Syntax is `<target>=<level>`, e.g.
/// -lghost-staking-miner=debug.
///
/// Log levels (least to most verbose) are error, warn, info, debug, and trace.
/// By default, all targets log `info`. The global log level can be set with `-l<level>`.
#[clap(long, short, default_value = "info")]
pub log: String,
}
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
pub enum Command {
/// Monitor for the phase being signed, then compute.
Monitor(commands::MonitorConfig),
/// Just compute a solution now, and don't submit it.
DryRun(commands::DryRunConfig),
/// Provide a solution that can be submitted to the chain as an emergency response.
EmergencySolution(commands::EmergencySolutionConfig),
/// Check if the staking-miner metadata is compatible to a remote node.
Info,
}
/// A helper to use different MinerConfig depending on chain.
macro_rules! any_runtime {
($chain:tt, $($code:tt)*) => {
match $chain {
$crate::opt::Chain::Ghost => {
#[allow(unused)]
use $crate::static_types::ghost::MinerConfig;
$($code)*
},
$crate::opt::Chain::Casper => {
#[allow(unused)]
use $crate::static_types::casper::MinerConfig;
$($code)*
},
}
};
}
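// Invoked below as `any_runtime!(chain, { ... })`; it only brings the matching
// `MinerConfig` into scope for the wrapped block.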
#[tokio::main]
async fn main() -> Result<(), Error> {
let Opt { uri, command, prometheus_port, log } = Opt::parse();
let filter = EnvFilter::from_default_env().add_directive(log.parse()?);
tracing_subscriber::fmt().with_env_filter(filter).init();
let client = Client::new(&uri).await?;
let runtime_version: RuntimeVersion =
client.rpc().state_get_runtime_version(None).await?.into();
let chain = opt::Chain::from_str(&runtime_version.spec_name)?;
let _prometheus_handle = prometheus::run(prometheus_port)
.map_err(|e| log::warn!("Failed to start prometheus endpoint: {}", e));
log::info!(target: LOG_TARGET, "Connected to chain: {}", chain);
epm::update_metadata_constants(client.chain_api())?;
SHARED_CLIENT.set(client.clone()).expect("shared client only set once; qed");
// Start a new tokio task to perform the runtime updates in the background.
// If this fails then the miner will be stopped and has to be re-started.
let (tx_upgrade, rx_upgrade) = oneshot::channel::<Error>();
tokio::spawn(runtime_upgrade_task(client.chain_api().clone(), tx_upgrade));
let res = any_runtime!(chain, {
let fut = match command {
Command::Monitor(cfg) => commands::monitor_cmd::<MinerConfig>(client, cfg).boxed(),
Command::DryRun(cfg) => commands::dry_run_cmd::<MinerConfig>(client, cfg).boxed(),
Command::EmergencySolution(cfg) => commands::emergency_solution_cmd::<MinerConfig>(client, cfg).boxed(),
Command::Info => async {
let is_compat = if runtime::is_codegen_valid_for(&client.chain_api().metadata()) {
"YES"
} else {
"NO"
};
let remote_node = serde_json::to_string_pretty(&runtime_version)
.expect("Serialize is infallible; qed");
eprintln!("Remote node:\n{remote_node}")
eprintln!("Compatible: {is_compat}")
Ok(())
}.boxed(),
};
run_command(fut, rx_upgrade).await
});
log::debug!(target: LOG_TARGET, "round of execution finished. outcome = {:?}", res);
res
}
#[cfg(target_family = "unix")]
async fn run_command(
fut: BoxFuture<'_, Result<(), Error>>,
rx_upgrade: oneshot::Receiver<Error>,
) -> Result<(), Error> {
use tokio::signal::unix::{signal, SignalKind};
let mut stream_int = signal(SignalKind::interrupt()).map_err(Error::Io)?;
let mut stream_term = signal(SignalKind::terminate()).map_err(Error::Io)?;
tokio::select! {
_ = stream_int.recv() => {
Ok(())
}
_ = stream_term.recv() => {
Ok(())
}
res = rx_upgrade => {
match res {
Ok(err) => Err(err),
Err(_) => unreachable!("A message is sent before the upgrade task is closed; qed"),
}
},
res = fut => res,
}
}
#[cfg(not(unix))]
async fn run_command(
fut: BoxFuture<'_, Result<(), Error>>,
rx_upgrade: oneshot::Receiver<Error>,
) -> Result<(), Error> {
use tokio::signal::ctrl_c;
tokio::select! {
_ = ctrl_c() => Ok(()),
res = rx_upgrade => {
match res {
Ok(err) => Err(err),
Err(_) => unreachable!("A message is sent before the upgrade task is closed; qed"),
}
},
res = fut => res,
}
}
/// Runs until the RPC connection fails or upgrading the metadata failed.
async fn runtime_upgrade_task(client: ChainClient, tx: oneshot::Sender<Error>) {
let updater = client.updater();
let mut update_stream = match updater.runtime_updates().await {
Ok(u) => u,
Err(e) => {
let _ = tx.send(e.into());
return;
},
};
loop {
// if the runtime upgrade subscription fails then try establish a new one
// and if it fails, quit.
let update = match update_stream.next().await {
Some(Ok(update)) => update,
_ => {
log::warn!(target: LOG_TARGET, "Runtime upgrade subscription failed");
update_stream = match updater.runtime_updates().await {
Ok(u) => u,
Err(e) => {
let _ = tx.send(e.into());
return;
},
};
continue;
},
};
let version = update.runtime_version().spec_version;
match updater.apply_update(update) {
Ok(()) => {
if let Err(e) = epm::update_metadata_constants(&client) {
let _ = tx.send(e);
return;
}
prometheus::on_runtime_upgrade();
log::info!(target: LOG_TARGET, "upgrade to version: {} successful", version);
},
Err(e) => {
log::debug!(target: LOG_TARGET, "upgrade to version: {} failed: {:?}", version, e);
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use commands::monitor;
#[test]
fn cli_monitor_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"--prometheus-port",
"9999",
"monitor",
"--seed-or-path",
"//Alice",
"--listen",
"head",
"--delay",
"12",
"seq-phragmen",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::Monitor(commands::MonitorConfig {
listen: monitor::Listen::Head,
solver: opt::Solver::SeqPhragmen { iterations: 10 },
submission_strategy: monitor::SubmissionStrategy::IfLeading,
seed_or_path: "//Alice".to_string(),
delay: 12,
dry_run: false,
}),
}
);
}
#[test]
fn cli_dry_run_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"dry-run",
"9999",
"--seed-or-path",
"//Alice",
"prag-mms",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::DryRun(commands::DryRunConfig {
at: None,
solver: opt::Solver::PhragMMS { iterations: 10 },
force_snapshot: false,
force_winner_count: None,
seed_or_path: "//Alice".to_string(),
}),
}
);
}
#[test]
fn cli_emergency_solution_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"emergency-solution",
"99",
"prag-mms",
"--iterations",
"1337",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::EmergencySolution(commands::EmergencySolutionConfig {
at: None,
force_winner_count: Some(99),
solver: opt::Solver::PhragMMS { iterations: 1337 },
}),
}
);
}
}

View File

@@ -0,0 +1,114 @@
use crate::error::Error;
use clap::*;
use serde::{Deserialize, Serialize};
use sp_npos_elections::BalancingConfig;
use sp_runtime::DeserializeOwned;
use std::{collections::HashMap, fmt, str::FromStr};
use subxt::backend::legacy::rpc_methods as subxt_rpc;
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
pub enum Solver {
SeqPhragmen {
#[clap(long, default_value = "10")]
iterations: usize,
},
PhragMMS {
#[clap(long, default_value = "10")]
iterations: usize,
}
}
frame_support::parameter_types! {
pub static BalanceIterations: usize = 10;
pub static Balancing: Option<BalancingConfig> =
Some(BalancingConfig { iterations: BalanceIterations::get(), tolerance: 0 });
}
#[derive(Debug, Copy, Clone)]
pub enum Chain {
Ghost,
Casper
}
impl fmt::Display for Chain {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let chain = match self {
Self::Ghost => "ghost",
Self::Casper => "casper",
};
write!(f, "{}", chain)
}
}
impl std::str::FromStr for Chain {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
match s {
"ghost" => Ok(Self::Ghost),
"casper" => Ok(Self::Casper),
chain => Err(Error::InvalidChain(chain.to_string())),
}
}
}
impl TryFrom<subxt_rpc::RuntimeVersion> for Chain {
type Error = Error;
fn try_from(rv: subxt_rpc::RuntimeVersion) -> Result<Self, Error> {
let json = rv
.other
.get("specName")
.expect("RuntimeVersion must have specName; qed")
.clone();
let mut chain = serde_json::from_value::<String>(json)
.expect("specName must be String; qed");
chain.make_ascii_lowercase();
Chain::from_str(&chain)
}
}
impl From<subxt_rpc::RuntimeVersion> for RuntimeVersion {
fn from(rv: subxt_rpc::RuntimeVersion) -> Self {
let mut spec_name: String = get_val_unchecked("specName", &rv.other);
let impl_name: String = get_val_unchecked("implName", &rv.other);
let impl_version: u32 = get_val_unchecked("implVersion", &rv.other);
let authoring_version: u32 = get_val_unchecked("authoringVersion", &rv.other);
let state_version: u8 = get_val_unchecked("stateVersion", &rv.other);
let spec_version = rv.spec_version;
let transaction_version = rv.transaction_version;
spec_name.make_ascii_lowercase();
Self {
spec_name,
impl_name,
impl_version,
spec_version,
transaction_version,
authoring_version,
state_version,
}
}
}
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct RuntimeVersion {
pub spec_name: String,
pub impl_name: String,
pub spec_version: u32,
pub impl_version: u32,
pub authoring_version: u32,
pub transaction_version: u32,
pub state_version: u8,
}
fn get_val_unchecked<T: DeserializeOwned>(val: &str, rv: &HashMap<String, serde_json::Value>) -> T {
let json = rv.get(val).expect("`{val}` must exist; qed").clone();
serde_json::from_value::<T>(json).expect("T must be Deserialize; qed")
}

View File

@@ -0,0 +1,45 @@
pub use pallet_election_provider_multi_phase::{Miner, MinerConfig};
pub use subxt::ext::sp_core;
pub use primitives::{AccountId, Header, Hash, Balance};
// pub type AccountId = sp_runtime::AccountId32;
// pub type Header = subxt::config::substrate::SubstrateHeader<u32, subxt::config::substrate::BlakeTwo256>;
// pub type Hash = sp_core::H256;
// pub type Balance = u128;
pub use subxt::ext::sp_runtime::traits::{Block as BlockT, Header as HeaderT};
pub const DEFAULT_URI: &str = "ws://127.0.0.1:9944";
pub const LOG_TARGET: &str = "ghost-staking-miner";
pub const DEFAULT_PROMETHEUS_PORT: u16 = 9999;
pub type Pair = sp_core::sr25519::Pair;
pub type Accuracy = sp_runtime::Perbill;
// TODO: revisit
pub type RpcClient = subxt::backend::legacy::LegacyRpcMethods<subxt::SubstrateConfig>;
pub type ChainClient = subxt::OnlineClient<subxt::SubstrateConfig>;
pub type Config = subxt::SubstrateConfig;
pub type SignedSubmission<S> = pallet_election_provider_multi_phase::SignedSubmission<AccountId, Balance, S>;
#[subxt::subxt(
runtime_metadata_path = "artifacts/metadata.scale",
derive_for_all_types = "Clone, Debug, Eq, PartialEq",
derive_for_type(
path = "pallet_election_provider_multi_phase::RoundSnapshot",
derive = "Default"
),
substitute_type(
path = "sp_npos_elections::ElectionScore",
with = "::subxt::utils::Static<::sp_npos_elections::ElectionScore>"
),
substitute_type(
path = "pallet_election_provider_multi_phase::Phase<Bn>",
with = "::subxt::utils::Static<::pallet_election_provider_multi_phase::Phase<Bn>>"
)
)]
pub mod runtime {}
pub static SHARED_CLIENT: once_cell::sync::OnceCell<crate::client::Client> =
once_cell::sync::OnceCell::new();

View File

@@ -0,0 +1,210 @@
use super::prelude::LOG_TARGET;
use futures::channel::oneshot;
pub use hidden::*;
use hyper::{
header::CONTENT_TYPE,
service::{make_service_fn, service_fn},
Body, Method, Request, Response,
};
use prometheus::{Encoder, TextEncoder};
async fn serve_req(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
let response = match (req.method(), req.uri().path()) {
(&Method::GET, "/metrics") => {
let mut buffer = vec![];
let encoder = TextEncoder::new();
let metric_families = prometheus::gather();
encoder.encode(&metric_families, &mut buffer).unwrap();
Response::builder()
.status(200)
.header(CONTENT_TYPE, encoder.format_type())
.body(Body::from(buffer))
.unwrap()
},
(&Method::GET, "/") => Response::builder().status(200).body(Body::from("")).unwrap(),
_ => Response::builder().status(404).body(Body::from("")).unwrap(),
};
Ok(response)
}
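// With the default port from the prelude, the endpoint can be scraped with,
// e.g., `curl http://127.0.0.1:9999/metrics`.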
pub struct GracefulShutdown(Option<oneshot::Sender<()>>);
impl Drop for GracefulShutdown {
fn drop(&mut self) {
if let Some(handle) = self.0.take() {
let _ = handle.send(());
}
}
}
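/// Start the Prometheus endpoint on `0.0.0.0:port`, serving `/metrics`, and return a
/// shutdown guard. The server itself runs on a background tokio task.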
pub fn run(port: u16) -> Result<GracefulShutdown, String> {
let (tx, rx) = oneshot::channel();
let make_svc = make_service_fn(move |_conn| async move {
Ok::<_, std::convert::Infallible>(service_fn(serve_req))
});
let addr = ([0, 0, 0, 0], port).into();
let server = hyper::Server::try_bind(&addr)
.map_err(|e| format!("Failed to bind socket on port {}: {:?}", port, e))?
.serve(make_svc);
log::info!(target: LOG_TARGET, "Started prometheus endpoint on http://{}", addr);
let graceful = server.with_graceful_shutdown(async {
rx.await.ok();
});
tokio::spawn(async move {
if let Err(e) = graceful.await {
log::warn!("Server error: {}", e);
}
});
Ok(GracefulShutdown(Some(tx)))
}
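// Metric registration lives in a private module so the rest of the crate only
// interacts with the typed helper functions re-exported via `pub use hidden::*`.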
mod hidden {
use frame_election_provider_support::Weight;
use once_cell::sync::Lazy;
use prometheus::{opts, register_counter, register_gauge, Counter, Gauge};
static TRIMMED_SOLUTION_STARTED: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_trim_started",
"Number of started trimmed solutions",
)).unwrap()
});
static TRIMMED_SOLUTION_SUCCESS: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_trim_success",
"Number of successful trimmed solutions",
)).unwrap()
});
static SUBMISSIONS_STARTED: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_submissions_started",
"Number of submissions started",
)).unwrap()
});
static SUBMISSIONS_SUCCESS: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_submissions_success",
"Number of submissions finished successfully",
)).unwrap()
});
static MINED_SOLUTION_DURATION: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_mining_duration_ms",
"The mined solution time in milliseconds.",
)).unwrap()
});
static SUBMIT_SOLUTION_AND_WATCH_DURATION: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_submit_and_watch_duration_ms",
"The time in milliseconds it took to submit the solution to chain and to be included in block.",
)).unwrap()
});
static BALANCE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_balance",
"The balance of the staking miner account",
)).unwrap()
});
static SCORE_MINIMAL_STAKE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_minimal_stake",
"The minimal winner, in terms of total backing stake",
)).unwrap()
});
static SCORE_SUM_STAKE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_sum_stake",
"The sum of the total backing of all winners",
)).unwrap()
});
static SCORE_SUM_STAKE_SQUARED: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_sum_stake_squared",
"The sum of the squared total backing of all winners, aka. the variance.",
)).unwrap()
});
static RUNTIME_UPGRADES: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_runtime",
"Number of runtime upgrades performed",
)).unwrap()
});
static SUBMISSION_LENGTH: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_solution_length_bytes",
"Number of bytes in the solution submitted",
)).unwrap()
});
static SUBMISSION_WEIGHT: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_solution_weight",
"Weight of the solution submitted",
)).unwrap()
});
pub fn on_runtime_upgrade() {
RUNTIME_UPGRADES.inc();
}
pub fn on_submission_attempts() {
SUBMISSIONS_STARTED.inc();
}
pub fn on_submission_success() {
SUBMISSIONS_SUCCESS.inc();
}
pub fn on_trim_attempt() {
TRIMMED_SOLUTION_STARTED.inc();
}
pub fn on_trim_success() {
TRIMMED_SOLUTION_SUCCESS.inc();
}
pub fn set_balance(balance: u64) {
BALANCE.set(balance as f64);
}
pub fn set_length(len: usize) {
SUBMISSION_LENGTH.set(len as f64);
}
pub fn set_weight(weight: Weight) {
SUBMISSION_WEIGHT.set(weight.ref_time() as f64);
}
pub fn set_score(score: sp_npos_elections::ElectionScore) {
SCORE_MINIMAL_STAKE.set(score.minimal_stake as f64);
SCORE_SUM_STAKE.set(score.sum_stake as f64);
SCORE_SUM_STAKE_SQUARED.set(score.sum_stake_squared as f64);
}
pub fn observe_submit_and_watch_duration(time: f64) {
SUBMIT_SOLUTION_AND_WATCH_DURATION.set(time);
}
pub fn observe_mined_solution_duration(time: f64) {
MINED_SOLUTION_DURATION.set(time);
}
}

View File

@@ -0,0 +1,52 @@
use crate::{error::Error, prelude::*};
use sp_core::Pair as _;
pub type PairSigner = subxt::tx::PairSigner<subxt::PolkadotConfig, sp_core::sr25519::Pair>;
pub struct Signer {
pair: Pair,
signer: PairSigner,
}
impl std::fmt::Display for Signer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.signer.account_id())
}
}
impl Clone for Signer {
fn clone(&self) -> Self {
Self { pair: self.pair.clone(), signer: PairSigner::new(self.pair.clone()) }
}
}
impl Signer {
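/// Build a signer either from a file containing the seed/URI or from the
/// seed/URI string itself.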
pub fn new(mut seed_or_path: &str) -> Result<Self, Error> {
seed_or_path = seed_or_path.trim();
let seed = match std::fs::read(seed_or_path) {
Ok(s) => String::from_utf8(s).map_err(|e| Error::Other(e.to_string()))?,
Err(_) => seed_or_path.to_string(),
};
let seed = seed.trim();
let pair = Pair::from_string(seed, None).map_err(Error::Crypto)?;
let signer = PairSigner::new(pair.clone());
Ok(Self { pair, signer })
}
}
impl std::ops::Deref for Signer {
type Target = PairSigner;
fn deref(&self) -> &Self::Target {
&self.signer
}
}
impl std::ops::DerefMut for Signer {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.signer
}
}

View File

@@ -0,0 +1,180 @@
use crate::{epm, prelude::*};
use frame_election_provider_support::traits::NposSolution;
use frame_support::{traits::ConstU32, weights::Weight};
use pallet_election_provider_multi_phase::{RawSolution, SolutionOrSnapshotSize};
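// Generates a zero-sized parameter type backed by an `AtomicU32`, so the miner can
// adjust runtime bounds (max length, votes per voter, winners) at runtime while
// still satisfying the `Get<I>` bound expected by the pallet.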
macro_rules! impl_atomic_u32_parameter_types {
($mod:ident, $name:ident) => {
mod $mod {
use std::sync::atomic::{AtomicU32, Ordering};
static VAL: AtomicU32 = AtomicU32::new(0);
pub struct $name;
impl $name {
pub fn get() -> u32 {
VAL.load(Ordering::SeqCst)
}
}
impl<I: From<u32>> frame_support::traits::Get<I> for $name {
fn get() -> I {
I::from(Self::get())
}
}
impl $name {
pub fn set(val: u32) {
VAL.store(val, std::sync::atomic::Ordering::SeqCst);
}
}
}
pub use $mod::$name;
};
}
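// `Weight` is a two-field type (`ref_time`, `proof_size`), so `MaxWeight` gets a
// hand-written variant with two atomics instead of going through the `u32` macro.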
mod max_weight {
use frame_support::weights::Weight;
use std::sync::atomic::{AtomicU64, Ordering};
static REF_TIME: AtomicU64 = AtomicU64::new(0);
static PROOF_SIZE: AtomicU64 = AtomicU64::new(0);
pub struct MaxWeight;
impl MaxWeight {
pub fn get() -> Weight {
Weight::from_parts(REF_TIME.load(Ordering::SeqCst), PROOF_SIZE.load(Ordering::SeqCst))
}
}
impl frame_support::traits::Get<Weight> for MaxWeight {
fn get() -> Weight {
Self::get()
}
}
impl MaxWeight {
pub fn set(weight: Weight) {
REF_TIME.store(weight.ref_time(), Ordering::SeqCst);
PROOF_SIZE.store(weight.proof_size(), Ordering::SeqCst);
}
}
}
impl_atomic_u32_parameter_types!(max_length, MaxLength);
impl_atomic_u32_parameter_types!(max_votes, MaxVotesPerVoter);
impl_atomic_u32_parameter_types!(max_winners, MaxWinners);
pub use max_weight::MaxWeight;
pub mod ghost {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution16::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = ConstU32::<22500>
>(16)
);
#[derive(Debug)]
pub struct MinerConfig;
impl pallet_election_provider_multi_phase::unsigned::MinerConfig for MinerConfig {
type AccountId = AccountId;
type MaxLength = MaxLength;
type MaxWeight = MaxWeight;
type MaxVotesPerVoter = MaxVotesPerVoter;
type Solution = NposSolution16;
type MaxWinners = MaxWinners;
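// Estimate the weight of a solution of the requested size by building a mocked
// `RawSolution` via `epm::mock_votes` and asking the runtime API to weigh it.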
fn solution_weight(
voters: u32,
targets: u32,
active_voters: u32,
desired_targets: u32,
) -> Weight {
let Some(votes) = epm::mock_votes(
active_voters,
desired_targets.try_into().expect("Desired targets < u16::MAX"),
) else {
return Weight::MAX;
};
let raw = RawSolution {
solution: NposSolution16 { votes1: votes, ..Default::default() },
..Default::default()
};
if raw.solution.voter_count() != active_voters as usize ||
raw.solution.unique_targets().len() != desired_targets as usize
{
return Weight::MAX;
}
futures::executor::block_on(epm::runtime_api_solution_weight(
raw,
SolutionOrSnapshotSize { voters, targets },
)).expect("solution_weight should work")
}
}
}
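// The Casper configuration currently mirrors Ghost; the modules are kept separate,
// presumably so the solution type and bounds can diverge per runtime later.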
pub mod casper {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution16::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = ConstU32::<22500>
>(16)
);
#[derive(Debug)]
pub struct MinerConfig;
impl pallet_election_provider_multi_phase::unsigned::MinerConfig for MinerConfig {
type AccountId = AccountId;
type MaxLength = MaxLength;
type MaxWeight = MaxWeight;
type MaxVotesPerVoter = MaxVotesPerVoter;
type Solution = NposSolution16;
type MaxWinners = MaxWinners;
fn solution_weight(
voters: u32,
targets: u32,
active_voters: u32,
desired_targets: u32,
) -> Weight {
let Some(votes) = epm::mock_votes(
active_voters,
desired_targets.try_into().expect("Desired targets < u16::MAX"),
) else {
return Weight::MAX;
};
let raw = RawSolution {
solution: NposSolution16 { votes1: votes, ..Default::default() },
..Default::default()
};
if raw.solution.voter_count() != active_voters as usize ||
raw.solution.unique_targets().len() != desired_targets as usize
{
return Weight::MAX;
}
futures::executor::block_on(epm::runtime_api_solution_weight(
raw,
SolutionOrSnapshotSize { voters, targets },
)).expect("solution_weight should work")
}
}
}

View File

@@ -0,0 +1,36 @@
use assert_cmd::{cargo::cargo_bin, Command};
use serde_json::{Result, Value};
#[test]
fn cli_version_works() {
let crate_name = env!("CARGO_PKG_NAME");
let output = Command::new(cargo_bin(crate_name))
.arg("--version")
.output()
.unwrap();
assert!(output.status.success(), "command returned with non-success exit code");
let version = String::from_utf8_lossy(&output.stdout).trim().to_owned();
assert_eq!(version, format!("{} {}", crate_name, env!("CARGO_PKG_VERSION")));
}
#[test]
fn cli_info_works() {
let crate_name = env!("CARGO_PKG_NAME");
let output = Command::new(cargo_bin(crate_name))
.arg("info")
.arg("--json")
.env("RUST_LOG", "none")
.output()
.unwrap();
assert!(output.status.success(), "command returned with non-success exit code");
let info = String::from_utf8_lossy(&output.stdout).trim().to_owned();
let v: Result<Value> = serde_json::from_str(&info);
let v = v.unwrap();
assert!(!v["builtin"].to_string().is_empty());
assert!(!v["builtin"]["spec_name"].to_string().is_empty());
assert!(!v["builtin"]["spec_version"].to_string().is_empty());
assert!(!v["remote"].to_string().is_empty());
}

View File

@@ -0,0 +1,215 @@
use assert_cmd::cargo::cargo_bin;
use ghost_staking_miner::{
opt::Chain,
prelude::{runtime, ChainClient},
};
use std::{
io::{BufRead, BufReader, Read},
net::SocketAddr,
ops::{Deref, DerefMut},
process::{self, Child, ChildStderr, ChildStdout},
time::{Duration, Instant},
};
use tracing_subscriber::EnvFilter;
pub use runtime::{
election_provider_multi_phase::events::SolutionStored,
runtime_types::pallet_election_provider_multi_phase::{
ElectionCompute, ReadySolution,
},
};
pub const MAX_DURATION_FOR_SUBMIT_SOLUTION: Duration = Duration::from_secs(6 * 60);
pub fn init_logger() {
let _ = tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.try_init();
}
/// Read the WS address from the output.
///
/// This is a hack to get the actual socket address, because Substrate assigns a random
/// port if the specified one is already bound.
pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
let mut data = String::new();
let ws_url = BufReader::new(read)
.lines()
.take(1024 * 1024)
.find_map(|line| {
let line = line.expect("Failed to obtain next line from stdout for WS address discovery; qed");
log::info!("{}", line);
data.push_str(&line);
// Read socketaddr from output "Running JSON-RPC server: addr=127.0.0.1:9944, allowed origins["*"]"
let line_end = line
.rsplit_once("Running JSON-RPC WS server: addr=")
.or_else(|| line.rsplit_once("Running JSON-RPC server: addr="))
.map(|(_, line)| line)?;
// get the sockaddr only.
let addr_str = line_end.split_once(",").unwrap().0;
// expect a valid sockaddr.
let addr: SocketAddr = addr_str
.parse()
.unwrap_or_else(|_| panic!("valid SocketAddr expected but got `{addr_str}`"));
Some(format!("ws://{addr}"))
})
.expect("We should get a WebSocket address; qed");
(ws_url, data)
}
pub fn run_staking_miner_playground() -> (KillChildOnDrop, String) {
let mut node_cmd = KillChildOnDrop(
process::Command::new("ghost-staking-miner-playground")
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(["--dev", "--offchain-worker=Never"])
.spawn()
.unwrap(),
);
let stderr = node_cmd.stderr.take().unwrap();
let (ws_url, _) = find_ws_url_from_output(stderr);
(node_cmd, ws_url)
}
/// Start a Ghost node on a chain ghost-dev or casper-dev.
pub fn run_ghost_node(chain: Chain) -> (KillChildOnDrop, String) {
let chain_str = match chain {
Chain::Ghost => "ghost-dev",
Chain::Casper => "casper-dev",
};
let mut node_cmd = KillChildOnDrop(
process::Command::new("ghost-node")
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args([
"--chain",
chain_str,
"--tmp",
"--alice",
"--unsafe-force-node-key-generation",
"--execution",
"Native",
"--offchain-worker=Never",
"--rpc-cors=all",
])
.spawn()
.unwrap(),
);
let stderr = node_cmd.stderr.take().unwrap();
let (ws_url, _) = find_ws_url_from_output(stderr);
(node_cmd, ws_url)
}
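/// Wrapper that kills the spawned child process when dropped, so test nodes and
/// miners never outlive the test that started them.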
pub struct KillChildOnDrop(pub Child);
impl Drop for KillChildOnDrop {
fn drop(&mut self) {
let _ = self.0.kill();
}
}
impl Deref for KillChildOnDrop {
type Target = Child;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for KillChildOnDrop {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
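/// Forward each line of the child's stdout and stderr to `tx` (echoing it as well),
/// so tests can scan the combined output.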
pub fn spawn_cli_output_threads(
stdout: ChildStdout,
stderr: ChildStderr,
tx: tokio::sync::mpsc::UnboundedSender<String>,
) {
let tx2 = tx.clone();
std::thread::spawn(move || {
for line in BufReader::new(stdout).lines().flatten() {
println!("OK: {line}");
let _ = tx2.send(line);
}
});
std::thread::spawn(move || {
for line in BufReader::new(stderr).lines().flatten() {
eprintln!("ERR: {line}");
let _ = tx.send(line);
}
});
}
pub enum Target {
Node(Chain),
StakingMinerPlayground,
}
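/// Spawn the requested node, run the miner in `monitor` mode signed with `//Alice`,
/// and wait until a signed solution is stored on chain.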
pub async fn test_submit_solution(target: Target) {
let (_drop, ws_url) = match target {
Target::Node(chain) => run_ghost_node(chain),
Target::StakingMinerPlayground => run_staking_miner_playground(),
};
let mut miner = KillChildOnDrop(
process::Command::new(cargo_bin(env!("CARGO_PKG_NAME")))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(["--uri", &ws_url, "monitor", "--seed-or-path", "//Alice", "seq-phragmen"])
.spawn()
.unwrap(),
);
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
spawn_cli_output_threads(
miner.stdout.take().unwrap(),
miner.stderr.take().unwrap(),
tx,
);
tokio::spawn(async move {
let r = rx.recv().await.unwrap();
log::info!("{}", r);
});
let ready_solution = wait_for_mined_solution(&ws_url).await.unwrap();
assert!(ready_solution.compute == ElectionCompute::Signed);
}
/// Wait until a solution is ready on chain
///
/// Times out after 6 minutes, which is treated as an error.
pub async fn wait_for_mined_solution(ws_url: &str) -> anyhow::Result<SolutionStored> {
let api = ChainClient::from_url(&ws_url).await?;
let now = Instant::now();
let mut blocks_sub = api.blocks().subscribe_finalized().await?;
while let Some(block) = blocks_sub.next().await {
if now.elapsed() > MAX_DURATION_FOR_SUBMIT_SOLUTION {
break;
}
let block = block?;
let events = block.events().await?;
for ev in events.iter() {
let ev = ev?;
if let Some(solution_ev) = ev.as_event::<SolutionStored>()? {
return Ok(solution_ev);
}
}
}
Err(anyhow::anyhow!(
"ReadySolution not found in {}s regarded as error",
MAX_DURATION_FOR_SUBMIT_SOLUTION.as_secs(),
))
}

View File

@@ -0,0 +1,102 @@
pub mod common;
use assert_cmd::cargo::cargo_bin;
use common::{
init_logger, run_staking_miner_playground, spawn_cli_output_threads,
test_submit_solution, wait_for_mined_solution, ElectionCompute, Target,
KillChildOnDrop, MAX_DURATION_FOR_SUBMIT_SOLUTION,
};
use ghost_staking_miner::opt::Chain;
use regex::Regex;
use std::{process, time::Instant};
#[tokio::test]
async fn submit_monitor_basic() {
init_logger();
test_submit_solution(Target::Node(Chain::Casper)).await;
// test_submit_solution(Target::Node(Chain::Ghost)).await;
}
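// Runs the miner against the playground chain and asserts that both the weight- and
// length-trimming debug lines show up before a signed solution lands on chain.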
#[tokio::test]
async fn default_trimming_works() {
init_logger();
let (_drop, ws_url) = run_staking_miner_playground();
let mut miner = KillChildOnDrop(
process::Command::new(cargo_bin(env!("CARGO_PKG_NAME")))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.env("RUST_LOGS", "runtime=debug,ghost-staking-miner=debug")
.args(["--uri", &ws_url, "monitor", "--seed-or-path", "//Alice", "seq-phragmen"])
.spawn()
.unwrap(),
);
let ready_solution_task =
tokio::spawn(async move { wait_for_mined_solution(&ws_url).await });
assert!(has_trimming_output(&mut miner).await);
let ready_solution = ready_solution_task
.await
.unwrap()
.expect("A solution should be mined now; qed");
assert!(ready_solution.compute == ElectionCompute::Signed);
}
// Helper that parses the CLI output to find logging outputs based on the following:
//
// i) DEBUG runtime::election-provider: from 934 assignments, truncating to 1501 for weight, removing 0
// ii) DEBUG runtime::election-provider: from 931 assignments, truncating to 755 for weight, removing 176
//
// Matching these lines is the only way to ensure that trimming actually works.
async fn has_trimming_output(miner: &mut KillChildOnDrop) -> bool {
let trimming_re = Regex::new(
r#"from (\d+) assignments, truncating to (\d+) for (?P<target>weight|length), removing (?P<removed>\d+)#,
).unwrap();
let mut got_truncate_len = false;
let mut got_truncate_weight = false;
let now = Instant::now();
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<String>();
spawn_cli_output_threads(
miner.stdout.take().unwrap(),
miner.stderr.take().unwrap(),
tx,
);
while !got_truncate_weight || !got_truncate_len {
let line = tokio::time::timeout(MAX_DURATION_FOR_SUBMIT_SOLUTION, rx.recv())
.await
.expect("Logger timeout; no items produced")
.expect("Logger channel dropped");
println!("{line}");
log::info!("{line}");
if let Some(caps) = trimming_re.captures(&line) {
let trimmed_items: usize = caps.name("removed")
.unwrap()
.as_str()
.parse()
.unwrap();
if caps.name("target").unwrap().as_str() == "weight" && trimmed_items > 0 {
got_truncate_weight = true;
}
if caps.name("target").unwrap().as_str() == "length" && trimmed_items > 0 {
got_truncate_len = true;
}
}
if now.elapsed() > MAX_DURATION_FOR_SUBMIT_SOLUTION {
break;
}
}
assert!(got_truncate_weight, "Trimming weight logs were not found");
assert!(got_truncate_len, "Trimming length logs were not found");
got_truncate_len && got_truncate_weight
}