initial commit, which is clearly not initial

Signed-off-by: Uncle Stretch <uncle.stretch@ghostchain.io>
Uncle Stretch
2024-10-03 15:38:52 +03:00
commit 66719626bb
178 changed files with 41709 additions and 0 deletions

View File

@@ -0,0 +1,51 @@
use crate::prelude::*;
use jsonrpsee::ws_client::WsClientBuilder;
use subxt::backend::rpc::RpcClient as RawRpcClient;
/// Wraps the subxt interface to make it easy to use for this software.
#[derive(Clone, Debug)]
pub struct Client {
/// Access to typed rpc calls from subxt.
rpc: RpcClient,
/// Access to chain APIs such as storage, events etc.
chain_api: ChainClient,
}
impl Client {
pub async fn new(uri: &str) -> Result<Self, subxt::Error> {
log::debug!(target: LOG_TARGET, "attempting to connect to {:?}", uri);
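// Keep retrying the websocket connection until the node is reachable; each failed attempt is followed by a 2.5 second back-off.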
let rpc = loop {
match WsClientBuilder::default()
.max_request_size(u32::MAX)
.max_response_size(u32::MAX)
.request_timeout(std::time::Duration::from_secs(600))
.build(&uri)
.await
{
Ok(rpc) => break RawRpcClient::new(rpc),
Err(e) => {
log::warn!(
target: LOG_TARGET,
"failed to connect to client due to {:?}, retrying soon...",
e
);
},
};
tokio::time::sleep(std::time::Duration::from_millis(2_500)).await;
};
let chain_api = ChainClient::from_rpc_client(rpc.clone()).await?;
Ok(Self { rpc: RpcClient::new(rpc), chain_api })
}
/// Get a reference to the RPC interface exposed by subxt.
pub fn rpc(&self) -> &RpcClient {
&self.rpc
}
/// Get a reference to the chain API.
pub fn chain_api(&self) -> &ChainClient {
&self.chain_api
}
}

View File

@@ -0,0 +1,530 @@
use crate::{
error::Error,
helpers::{storage_at, RuntimeDispatchInfo},
opt::{BalanceIterations, Balancing, Solver},
prelude::*,
prometheus,
static_types::{self},
};
use std::{
collections::{BTreeMap, BTreeSet},
marker::PhantomData,
};
use codec::{Decode, Encode};
use frame_election_provider_support::{
Get, NposSolution, PhragMMS, SequentialPhragmen,
};
use frame_support::{weights::Weight, BoundedVec};
use pallet_election_provider_multi_phase::{
unsigned::TrimmingStatus, RawSolution, ReadySolution, SolutionOf,
SolutionOrSnapshotSize,
};
use scale_info::{PortableRegistry, TypeInfo};
use scale_value::scale::decode_as_type;
use sp_npos_elections::{ElectionScore, VoteWeight};
use subxt::{dynamic::Value, tx::DynamicPayload};
const EPM_PALLET_NAME: &str = "ElectionProviderMultiPhase";
type TypeId = u32;
type MinerVoterOf = frame_election_provider_support::Voter<AccountId, crate::static_types::MaxVotesPerVoter>;
type RoundSnapshot = pallet_election_provider_multi_phase::RoundSnapshot<AccountId, MinerVoterOf>;
type Voters = Vec<(AccountId, VoteWeight, BoundedVec<AccountId, crate::static_types::MaxVotesPerVoter>)>;
#[derive(Copy, Clone, Debug)]
struct EpmConstant {
epm: &'static str,
constant: &'static str,
}
impl EpmConstant {
const fn new(constant: &'static str) -> Self {
Self { epm: EPM_PALLET_NAME, constant }
}
const fn to_parts(self) -> (&'static str, &'static str) {
(self.epm, self.constant)
}
}
impl std::fmt::Display for EpmConstant {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("{}::{}", self.epm, self.constant))
}
}
#[derive(Debug)]
pub struct State {
voters: Voters,
voters_by_stake: BTreeMap<VoteWeight, usize>,
}
impl State {
fn len(&self) -> usize {
self.voters_by_stake.len()
}
fn to_voters(&self) -> Voters {
self.voters.clone()
}
}
#[derive(Debug)]
pub struct TrimmedVoters<T> {
state: State,
_marker: PhantomData<T>,
}
impl<T> TrimmedVoters<T>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
pub async fn new(mut voters: Voters, desired_targets: u32) -> Result<Self, Error> {
let mut voters_by_stake = BTreeMap::new();
let mut targets = BTreeSet::new();
for (idx, (_voter, stake, supports)) in voters.iter().enumerate() {
voters_by_stake.insert(*stake, idx);
targets.extend(supports.iter().cloned());
}
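// Iteratively estimate the solution weight; while it exceeds `T::MaxWeight`, drop the voter with the smallest stake and re-estimate.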
loop {
let targets_len = targets.len() as u32;
let active_voters = voters_by_stake.len() as u32;
let est_weight: Weight = tokio::task::spawn_blocking(move || {
T::solution_weight(active_voters, targets_len, active_voters, desired_targets)
}).await?;
let max_weight: Weight = T::MaxWeight::get();
if est_weight.all_lt(max_weight) {
return Ok(Self {
state: State { voters, voters_by_stake },
_marker: PhantomData
});
}
let Some((_, idx)) = voters_by_stake.pop_first() else { break };
let rm = voters[idx].0.clone();
for (_voter, _stake, supports) in &mut voters {
supports.retain(|a| a != &rm);
}
targets.remove(&rm);
}
return Err(Error::Feasibility("Failed to pre-trim weight < T::MaxLength".to_string()));
}
pub fn trim(&mut self, n: usize) -> Result<State, Error> {
let mut voters = self.state.voters.clone();
let mut voters_by_stake = self.state.voters_by_stake.clone();
for _ in 0..n {
let Some((_, idx)) = voters_by_stake.pop_first() else {
return Err(Error::Feasibility("Failed to pre-trim len".to_string()));
};
let rm = voters[idx].0.clone();
for (_voter, _stake, supports) in &mut voters {
supports.retain(|a| a != &rm);
}
}
Ok(State { voters, voters_by_stake })
}
pub fn to_voters(&self) -> Voters {
self.state.voters.clone()
}
pub fn len(&self) -> usize {
self.state.len()
}
}
pub(crate) fn update_metadata_constants(api: &ChainClient) -> Result<(), Error> {
const SIGNED_MAX_WEIGHT: EpmConstant = EpmConstant::new("SignedMaxWeight");
const MAX_LENGTH: EpmConstant = EpmConstant::new("MinerMaxLength");
const MAX_VOTES_PER_VOTER: EpmConstant = EpmConstant::new("MinerMaxVotesPerVoter");
const MAX_WINNERS: EpmConstant = EpmConstant::new("MaxWinners");
fn log_metadata(metadata: EpmConstant, val: impl std::fmt::Display) {
log::trace!(target: LOG_TARGET, "updating metadata constant `{metadata}`: `{val}`",);
}
let max_weight = read_constant::<Weight>(api, SIGNED_MAX_WEIGHT)?;
let max_length: u32 = read_constant(api, MAX_LENGTH)?;
let max_votes_per_voter: u32 = read_constant(api, MAX_VOTES_PER_VOTER)?;
let max_winners: u32 = read_constant(api, MAX_WINNERS)?;
log_metadata(SIGNED_MAX_WEIGHT, max_weight);
log_metadata(MAX_LENGTH, max_length);
log_metadata(MAX_VOTES_PER_VOTER, max_votes_per_voter);
log_metadata(MAX_WINNERS, max_winners);
static_types::MaxWeight::set(max_weight);
static_types::MaxLength::set(max_length);
static_types::MaxVotesPerVoter::set(max_votes_per_voter);
static_types::MaxWinners::set(max_winners);
Ok(())
}
fn invalid_metadata_error<E: std::error::Error>(item: String, err: E) -> Error {
Error::InvalidMetadata(format!("{} failed: {}", item, err))
}
fn read_constant<'a, T: serde::Deserialize<'a>>(
api: &ChainClient,
constant: EpmConstant,
) -> Result<T, Error> {
let (epm_name, constant) = constant.to_parts();
let val = api
.constants()
.at(&subxt::dynamic::constant(epm_name, constant))
.map_err(|e| invalid_metadata_error(constant.to_string(), e))?
.to_value()
.map_err(|e| Error::Subxt(e.into()))?;
scale_value::serde::from_value::<_, T>(val).map_err(|e| {
Error::InvalidMetadata(
format!("Decoding `{}` failed {}", std::any::type_name::<T>(), e)
)
})
}
pub(crate) fn set_emergency_result<A: Encode + TypeInfo + 'static>(
supports: frame_election_provider_support::Supports<A>,
) -> Result<DynamicPayload, Error> {
let scale_result = to_scale_value(supports).map_err(|e| {
Error::DynamicTransaction(format!("Failed to encode `Supports`: {:?}", e))
})?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "set_emergency_election_result", vec![scale_result]))
}
pub fn signed_solution<S: NposSolution + Encode + TypeInfo + 'static>(
solution: RawSolution<S>,
) -> Result<DynamicPayload, Error> {
let scale_solution = to_scale_value(solution).map_err(|e| {
Error::DynamicTransaction(format!("Failed to encode `RawSolution`: {:?}", e))
})?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "submit", vec![scale_solution]))
}
pub fn unsigned_solution<S: NposSolution + Encode + TypeInfo + 'static>(
solution: RawSolution<S>,
witness: SolutionOrSnapshotSize,
) -> Result<DynamicPayload, Error> {
let scale_solution = to_scale_value(solution)?;
let scale_witness = to_scale_value(witness)?;
Ok(subxt::dynamic::tx(EPM_PALLET_NAME, "submit_unsigned", vec![scale_solution, scale_witness]))
}
pub async fn signed_submission<S: NposSolution + Decode + TypeInfo + 'static>(
idx: u32,
block_hash: Option<Hash>,
api: &ChainClient,
) -> Result<Option<SignedSubmission<S>>, Error> {
let scale_idx = Value::u128(idx as u128);
let addr = subxt::dynamic::storage(EPM_PALLET_NAME, "SignedSubmissionsMap", vec![scale_idx]);
let storage = storage_at(block_hash, api).await?;
match storage.fetch(&addr).await {
Ok(Some(val)) => {
let submissions = Decode::decode(&mut val.encoded())?;
Ok(Some(submissions))
},
Ok(None) => Ok(None),
Err(err) => Err(err.into()),
}
}
pub async fn snapshot_at(
block_hash: Option<Hash>,
api: &ChainClient,
) -> Result<RoundSnapshot, Error> {
let empty = Vec::<Value>::new();
let addr = subxt::dynamic::storage(EPM_PALLET_NAME, "Snapshot", empty);
let storage = storage_at(block_hash, api).await?;
match storage.fetch(&addr).await {
Ok(Some(val)) => {
let snapshot = Decode::decode(&mut val.encoded())?;
Ok(snapshot)
},
Ok(None) => Err(Error::EmptySnapshot),
Err(err) => Err(err.into()),
}
}
pub async fn mine_solution<T>(
solver: Solver,
targets: Vec<AccountId>,
voters: Voters,
desired_targets: u32,
) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), Error>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
match tokio::task::spawn_blocking(move || match solver {
Solver::SeqPhragmen { iterations } => {
BalanceIterations::set(iterations);
Miner::<T>::mine_solution_with_snapshot::<
SequentialPhragmen<AccountId, Accuracy, Balancing>,
>(voters, targets, desired_targets)
},
Solver::PhragMMS { iterations } => {
BalanceIterations::set(iterations);
Miner::<T>::mine_solution_with_snapshot::<
PhragMMS<AccountId, Accuracy, Balancing>,
>(voters, targets, desired_targets)
},
}).await {
Ok(Ok(s)) => Ok(s),
Err(e) => Err(e.into()),
Ok(Err(e)) => Err(Error::Other(format!("{:?}", e))),
}
}
pub async fn fetch_snapshot_and_mine_solution<T>(
api: &ChainClient,
block_hash: Option<Hash>,
solver: Solver,
round: u32,
forced_desired_targets: Option<u32>,
) -> Result<MinedSolution<T>, Error>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
let snapshot = snapshot_at(block_hash, api).await?;
let storage = storage_at(block_hash, api).await?;
let desired_targets = match forced_desired_targets {
Some(x) => x,
None => storage
.fetch(&runtime::storage().election_provider_multi_phase().desired_targets())
.await?
.expect("Snapshot is non-empty; `desired_target` should exist; qed"),
};
let minimum_untrusted_score = storage
.fetch(&runtime::storage().election_provider_multi_phase().minimum_untrusted_score())
.await?
.map(|score| score.0);
let mut voters = TrimmedVoters::<T>::new(snapshot.voters.clone(), desired_targets).await?;
let (solution, score, solution_or_snapshot_size, trim_status) = mine_solution::<T>(
solver.clone(),
snapshot.targets.clone(),
voters.to_voters(),
desired_targets,
).await?;
if !trim_status.is_trimmed() {
return Ok(MinedSolution {
round,
desired_targets,
snapshot,
minimum_untrusted_score,
solution,
score,
solution_or_snapshot_size,
});
}
prometheus::on_trim_attempt();
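// Binary search for the smallest number of low-stake voters to pre-trim such that the mined solution no longer gets trimmed by the miner.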
let mut l = 1;
let mut h = voters.len();
let mut best_solution = None;
while l <= h {
let mid = ((h - l) / 2) + l;
let next_state = voters.trim(mid)?;
let (solution, score, solution_or_snapshot_size, trim_status) = mine_solution::<T>(
solver.clone(),
snapshot.targets.clone(),
next_state.to_voters(),
desired_targets,
).await?;
if !trim_status.is_trimmed() {
best_solution = Some((solution, score, solution_or_snapshot_size));
h = mid - 1;
} else {
l = mid + 1;
}
}
if let Some((solution, score, solution_or_snapshot_size)) = best_solution {
prometheus::on_trim_success();
Ok(MinedSolution {
round,
desired_targets,
snapshot,
minimum_untrusted_score,
solution,
score,
solution_or_snapshot_size,
})
} else {
Err(Error::Feasibility("Failed pre-trim length".to_string()))
}
}
pub struct MinedSolution<T: MinerConfig> {
round: u32,
desired_targets: u32,
snapshot: RoundSnapshot,
minimum_untrusted_score: Option<ElectionScore>,
solution: T::Solution,
score: ElectionScore,
solution_or_snapshot_size: SolutionOrSnapshotSize,
}
impl<T> MinedSolution<T>
where
T: MinerConfig<AccountId = AccountId, MaxVotesPerVoter = static_types::MaxVotesPerVoter>
+ Send
+ Sync
+ 'static,
T::Solution: Send,
{
pub fn solution(&self) -> T::Solution {
self.solution.clone()
}
pub fn score(&self) -> ElectionScore {
self.score
}
pub fn size(&self) -> SolutionOrSnapshotSize {
self.solution_or_snapshot_size
}
pub fn feasibility_check(&self) -> Result<ReadySolution<AccountId, T::MaxWinners>, Error> {
match Miner::<T>::feasibility_check(
RawSolution { solution: self.solution.clone(), score: self.score, round: self.round },
pallet_election_provider_multi_phase::ElectionCompute::Signed,
self.desired_targets,
self.snapshot.clone(),
self.round,
self.minimum_untrusted_score,
) {
Ok(ready_solution) => Ok(ready_solution),
Err(e) => {
log::error!(target: LOG_TARGET, "Solution feasibility error {:?}", e);
Err(Error::Feasibility(format!("{:?}", e)))
},
}
}
}
impl<T: MinerConfig> std::fmt::Debug for MinedSolution<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MindedSolution")
.field("round", &self.round)
.field("desired_targets", &self.desired_targets)
.field("score", &self.score)
.finish()
}
}
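// Helpers to turn a statically typed, SCALE-encodable value into a dynamic `scale_value::Value`: register the type in a portable registry, encode the value, then decode it back as that type.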
fn make_type<T: scale_info::TypeInfo + 'static>() -> (TypeId, PortableRegistry) {
let m = scale_info::MetaType::new::<T>();
let mut types = scale_info::Registry::new();
let id = types.register_type(&m);
let portable_registry: PortableRegistry = types.into();
(id.id, portable_registry)
}
fn to_scale_value<T: scale_info::TypeInfo + 'static + Encode>(val: T) -> Result<Value, Error> {
let (ty_id, types) = make_type::<T>();
let bytes = val.encode();
decode_as_type(&mut bytes.as_ref(), ty_id, &types)
.map(|v| v.remove_context())
.map_err(|e| {
Error::DynamicTransaction(format!(
"Failed to decode {}: {:?}",
std::any::type_name::<T>(),
e
))
})
}
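// Query the remote node for the dispatch weight of a `submit_unsigned` call via the `TransactionPaymentCallApi_query_call_info` runtime API.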
pub async fn runtime_api_solution_weight<S: Encode + NposSolution + TypeInfo + 'static>(
raw_solution: RawSolution<S>,
witness: SolutionOrSnapshotSize,
) -> Result<Weight, Error> {
let tx = unsigned_solution(raw_solution, witness)?;
let client = SHARED_CLIENT.get().expect("shared client is configured at start; qed");
let call_data = {
let mut buffer = Vec::new();
let encoded_call = client.chain_api().tx().call_data(&tx).unwrap();
let encoded_len = encoded_call.len() as u32;
buffer.extend(encoded_call);
encoded_len.encode_to(&mut buffer);
buffer
};
let bytes = client
.rpc()
.state_call("TransactionPaymentCallApi_query_call_info", Some(&call_data), None)
.await?;
let info: RuntimeDispatchInfo = Decode::decode(&mut bytes.as_ref())?;
log::trace!(
target: LOG_TARGET,
"Received weight of `Solution Extrnsic` from remote node: {:?}",
info.weight
);
Ok(info.weight)
}
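// Build dummy `(voter_index, target_index)` pairs by cycling over the targets; only used to size a mock solution for weight estimation.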
pub fn mock_votes(voters: u32, desired_targets: u16) -> Option<Vec<(u32, u16)>> {
if voters >= desired_targets as u32 {
Some((0..voters).zip((0..desired_targets).cycle()).collect())
} else {
None
}
}
#[cfg(test)]
#[test]
fn mock_votes_works() {
assert_eq!(mock_votes(3, 2), Some(vec![(0, 0), (1, 1), (2, 0)]));
assert_eq!(mock_votes(3, 3), Some(vec![(0, 0), (1, 1), (2, 2)]));
assert_eq!(mock_votes(2, 3), None);
}

View File

@@ -0,0 +1,41 @@
use crate::prelude::*;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("Failed to parse log directive: `{0}`")]
LogParse(#[from] tracing_subscriber::filter::ParseError),
#[error("I/O error: `{0}`")]
Io(#[from] std::io::Error),
#[error("RPC error: `{0}`")]
RpcError(#[from] jsonrpsee::core::ClientError),
#[error("subxt error: `{0}`")]
Subxt(#[from] subxt::Error),
#[error("Crypto error: `{0}`")]
Crypto(sp_core::crypto::SecretStringError),
#[error("Codec error: `{0}`")]
Codec(#[from] codec::Error),
#[error("Incorrect phase")]
IncorrectPhase,
#[error("Submission is already submitted")]
AlreadySubmitted,
#[error("The account does not exist")]
AccountDoesNotExist,
#[error("Submission with better score already exist")]
BetterScoreExist,
#[error("Invalid chain: `{0}`, staking-miner supports only ghost and casper")]
InvalidChain(String),
#[error("Other error: `{0}`")]
Other(String),
#[error("Invalid metadata: `{0}`")]
InvalidMetadata(String),
#[error("Transaction rejected: `{0}`")]
TransactionRejected(String),
#[error("Dynamic transaction error: `{0}`")]
DynamicTransaction(String),
#[error("Feasibility error: `{0}`")]
Feasibility(String),
#[error("{0}")]
JoinError(#[from] tokio::task::JoinError),
#[error("Empty snapshot")]
EmptySnapshot,
}

View File

@@ -0,0 +1,128 @@
use crate::{error::Error, prelude::*};
use codec::Decode;
use frame_support::weights::Weight;
use jsonrpsee::core::ClientError as JsonRpseeError;
use pin_project_lite::pin_project;
use serde::Deserialize;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::{Duration, Instant},
};
use subxt::{
error::{Error as SubxtError, RpcError},
storage::Storage,
};
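// Future combinator that resolves to `(output, elapsed)`, measuring the time from the first poll to completion.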
pin_project! {
pub struct Timed<Fut>
where
Fut: Future,
{
#[pin]
inner: Fut,
start: Option<Instant>,
}
}
impl<Fut> Future for Timed<Fut>
where
Fut: Future,
{
type Output = (Fut::Output, Duration);
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.project();
let start = this.start.get_or_insert_with(Instant::now);
match this.inner.poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(v) => {
let elapsed = start.elapsed();
Poll::Ready((v, elapsed))
},
}
}
}
pub trait TimedFuture: Sized + Future {
fn timed(self) -> Timed<Self> {
Timed { inner: self, start: None }
}
}
impl<F: Future> TimedFuture for F {}
#[derive(Decode, Default, Debug, Deserialize)]
pub struct RuntimeDispatchInfo {
pub weight: Weight,
}
pub fn kill_main_task_if_critical_err(tx: &tokio::sync::mpsc::UnboundedSender<Error>, err: Error) {
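// Errors that are expected during normal operation are ignored; anything else is forwarded to the main task, which then shuts down.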
match err {
Error::AlreadySubmitted |
Error::BetterScoreExist |
Error::IncorrectPhase |
Error::TransactionRejected(_) |
Error::JoinError(_) |
Error::Feasibility(_) |
Error::EmptySnapshot => {},
Error::Subxt(SubxtError::Rpc(rpc_err)) => {
log::debug!(target: LOG_TARGET, "rpc error: {:?}", rpc_err);
match rpc_err {
RpcError::ClientError(e) => {
let jsonrpsee_err = match e.downcast::<JsonRpseeError>() {
Ok(e) => *e,
Err(_) => {
let _ = tx.send(Error::Other(
"Failed to downcast RPC error; this is a bug please file an issue".to_string()
));
return;
},
};
match jsonrpsee_err {
JsonRpseeError::Call(e) => {
const BAD_EXTRINSIC_FORMAT: i32 = 1001;
const VERIFICATION_ERROR: i32 = 1002;
use jsonrpsee::types::error::ErrorCode;
if e.code() == BAD_EXTRINSIC_FORMAT ||
e.code() == VERIFICATION_ERROR ||
e.code() == ErrorCode::MethodNotFound.code()
{
let _ = tx.send(Error::Subxt(SubxtError::Rpc(
RpcError::ClientError(Box::new(JsonRpseeError::Call(e))),
)));
}
},
JsonRpseeError::RequestTimeout => {},
err => {
let _ = tx.send(Error::Subxt(SubxtError::Rpc(RpcError::ClientError(
Box::new(err),
))));
},
}
},
RpcError::SubscriptionDropped => (),
_ => (),
}
},
err => {
let _ = tx.send(err);
},
}
}
pub async fn storage_at(
block: Option<Hash>,
api: &ChainClient,
) -> Result<Storage<Config, ChainClient>, Error> {
if let Some(block_hash) = block {
Ok(api.storage().at(block_hash))
} else {
api.storage().at_latest().await.map_err(Into::into)
}
}

View File

@@ -0,0 +1,12 @@
#![allow(dead_code)]
pub mod client;
pub mod commands;
pub mod epm;
pub mod error;
pub mod helpers;
pub mod opt;
pub mod prelude;
pub mod prometheus;
pub mod signer;
pub mod static_types;

utils/staking-miner/src/main.rs Executable file
View File

@@ -0,0 +1,342 @@
//! # Ghost Staking Miner.
//!
//! Simple bot capable of monitoring a ghost (and other) chain and submitting
//! solutions to the `pallet-election-provider-multi-phase`.
//! See `--help` for more details.
//!
//! # Implementation Notes:
//!
//! - First draft: Be aware that this is the first draft and there might be
//! bugs, or undefined behaviors. Don't attach this bot to an account with
//! lots of funds.
//! - Quick to crash: The bot is written so that it only continues to work if
//! everything goes well. In case of any failure (RPC, logic, IO), it will
//! crash. This was a decision to simplify the development. It is intended
//! to run this bot with `restart = true`, so that it reports its crash,
//! but resumes work thereafter.
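//!
//! A hypothetical invocation (the binary name and the `//Alice` dev account are
//! placeholders; the flags mirror the CLI definition and the tests below):
//!
//! ```bash
//! ghost-staking-miner --uri ws://127.0.0.1:9944 monitor --seed-or-path //Alice seq-phragmen
//! ```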
mod client;
mod commands;
mod epm;
mod error;
mod helpers;
mod opt;
mod prelude;
mod prometheus;
mod signer;
mod static_types;
use clap::Parser;
use error::Error;
use futures::future::{BoxFuture, FutureExt};
use prelude::*;
use std::str::FromStr;
use tokio::sync::oneshot;
use tracing_subscriber::EnvFilter;
use crate::{
client::Client,
opt::RuntimeVersion,
};
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
#[clap(author, version, about)]
pub struct Opt {
/// The `ws` node to connect to.
#[clap(long, short, default_value = DEFAULT_URI, env = "URI")]
pub uri: String,
#[clap(subcommand)]
pub command: Command,
/// The prometheus endpoint TCP port.
#[clap(long, short, env = "PROMETHEUS_PORT", default_value_t = DEFAULT_PROMETHEUS_PORT)]
pub prometheus_port: u16,
/// Sets a custom logging filter. Syntax is `<target>=<level>`, e.g.
/// -lghost-staking-miner=debug.
///
/// Log levels (least to most verbose) are error, warn, info, debug, and trace.
/// By default, all targets log `info`. The global log level can be set with `-l<level>`.
#[clap(long, short, default_value = "info")]
pub log: String,
}
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
pub enum Command {
/// Monitor for the phase being signed, then compute.
Monitor(commands::MonitorConfig),
/// Just compute a solution now, and don't submit it.
DryRun(commands::DryRunConfig),
/// Provide a solution that can be submitted to the chain as an emergency response.
EmergencySolution(commands::EmergencySolutionConfig),
/// Check if the staking-miner metadata is compatible to a remote node.
Info,
}
/// A helper to use different MinerConfig depending on chain.
macro_rules! any_runtime {
($chain:tt, $($code:tt)*) => {
match $chain {
$crate::opt::Chain::Ghost => {
#[allow(unused)]
use $crate::static_types::ghost::MinerConfig;
$($code)*
},
$crate::opt::Chain::Casper => {
#[allow(unused)]
use $crate::static_types::casper::MinerConfig;
$($code)*
},
}
};
}
#[tokio::main]
async fn main() -> Result<(), Error> {
let Opt { uri, command, prometheus_port, log } = Opt::parse();
let filter = EnvFilter::from_default_env().add_directive(log.parse()?);
tracing_subscriber::fmt().with_env_filter(filter).init();
let client = Client::new(&uri).await?;
let runtime_version: RuntimeVersion =
client.rpc().state_get_runtime_version(None).await?.into();
let chain = opt::Chain::from_str(&runtime_version.spec_name)?;
let _prometheus_handle = prometheus::run(prometheus_port)
.map_err(|e| log::warn!("Failed to start prometheus endpoint: {}", e));
log::info!(target: LOG_TARGET, "Connected to chain: {}", chain);
epm::update_metadata_constants(client.chain_api())?;
SHARED_CLIENT.set(client.clone()).expect("shared client only set once; qed");
// Start a new tokio task to perform the runtime updates in the background.
// If this fails then the miner will be stopped and has to be re-started.
let (tx_upgrade, rx_upgrade) = oneshot::channel::<Error>();
tokio::spawn(runtime_upgrade_task(client.chain_api().clone(), tx_upgrade));
let res = any_runtime!(chain, {
let fut = match command {
Command::Monitor(cfg) => commands::monitor_cmd::<MinerConfig>(client, cfg).boxed(),
Command::DryRun(cfg) => commands::dry_run_cmd::<MinerConfig>(client, cfg).boxed(),
Command::EmergencySolution(cfg) => commands::emergency_solution_cmd::<MinerConfig>(client, cfg).boxed(),
Command::Info => async {
let is_compat = if runtime::is_codegen_valid_for(&client.chain_api().metadata()) {
"YES"
} else {
"NO"
};
let remote_node = serde_json::to_string_pretty(&runtime_version)
.expect("Serialize is infallible; qed");
eprintln!("Remote node:\n{remote_node}")
eprintln!("Compatible: {is_compat}")
Ok(())
}.boxed(),
};
run_command(fut, rx_upgrade).await
});
log::debug!(target: LOG_TARGET, "round of execution finished. outcome = {:?}", res);
res
}
#[cfg(target_family = "unix")]
async fn run_command(
fut: BoxFuture<'_, Result<(), Error>>,
rx_upgrade: oneshot::Receiver<Error>,
) -> Result<(), Error> {
use tokio::signal::unix::{signal, SignalKind};
let mut stream_int = signal(SignalKind::interrupt()).map_err(Error::Io)?;
let mut stream_term = signal(SignalKind::terminate()).map_err(Error::Io)?;
tokio::select! {
_ = stream_int.recv() => {
Ok(())
}
_ = stream_term.recv() => {
Ok(())
}
res = rx_upgrade => {
match res {
Ok(err) => Err(err),
Err(_) => unreachable!("A message is sent before the upgrade task is closed; qed"),
}
},
res = fut => res,
}
}
#[cfg(not(unix))]
async fn run_command(
fut: BoxFuture<'_, Result<(), Error>>,
rx_upgrade: oneshot::Receiver<Error>,
) -> Result<(), Error> {
use tokio::signal::ctrl_c;
tokio::select! {
_ = ctrl_c() => {
Ok(())
}
res = rx_upgrade => {
match res {
Ok(err) => Err(err),
Err(_) => unreachable!("A message is sent before the upgrade task is closed; qed"),
}
},
res = fut => res,
}
}
/// Runs until the RPC connection fails or upgrading the metadata failed.
async fn runtime_upgrade_task(client: ChainClient, tx: oneshot::Sender<Error>) {
let updater = client.updater();
let mut update_stream = match updater.runtime_updates().await {
Ok(u) => u,
Err(e) => {
let _ = tx.send(e.into());
return;
},
};
loop {
// If the runtime upgrade subscription fails, try to establish a new one;
// if that also fails, quit.
let update = match update_stream.next().await {
Some(Ok(update)) => update,
_ => {
log::warn!(target: LOG_TARGET, "Runtime upgrade subscription failed");
update_stream = match updater.runtime_updates().await {
Ok(u) => u,
Err(e) => {
let _ = tx.send(e.into());
return;
},
};
continue;
},
};
let version = update.runtime_version().spec_version;
match updater.apply_update(update) {
Ok(()) => {
if let Err(e) = epm::update_metadata_constants(&client) {
let _ = tx.send(e);
return;
}
prometheus::on_runtime_upgrade();
log::info!(target: LOG_TARGET, "upgrade to version: {} successful", version);
},
Err(e) => {
log::debug!(target: LOG_TARGET, "upgrade to version: {} failed: {:?}", version, e);
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use commands::monitor;
#[test]
fn cli_monitor_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"--prometheus-port",
"9999",
"monitor",
"--seed-or-path",
"//Alice",
"--listen",
"head",
"--delay",
"12",
"seq-phragmen",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::Monitor(commands::MonitorConfig {
listen: monitor::Listen::Head,
solver: opt::Solver::SeqPhragmen { iterations: 10 },
submission_strategy: monitor::SubmissionStrategy::IfLeading,
seed_or_path: "//Alice".to_string(),
delay: 12,
dry_run: false,
}),
}
);
}
#[test]
fn cli_dry_run_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"dry-run",
"9999",
"--seed-or-path",
"//Alice",
"prag-mms",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::DryRun(commands::DryRunConfig {
at: None,
solver: opt::Solver::PhragMMS { iterations: 10 },
force_snapshot: false,
force_winner_count: None,
seed_or_path: "//Alice".to_string(),
}),
}
);
}
#[test]
fn cli_emergency_solution_works() {
let opt = Opt::try_parse_from([
env!("CARGO_PKG_NAME"),
"--uri",
"hi",
"emergency-solution",
"99",
"prag-mms",
"--iterations",
"1337",
]).unwrap();
assert_eq!(
opt,
Opt {
uri: "hi".to_string(),
prometheus_port: 9999,
log: "info".to_string(),
command: Command::EmergencySolution(commands::EmergencySolutionConfig {
at: None,
force_winner_count: Some(99),
solver: opt::Solver::PhragMMS { iterations: 1337 },
}),
}
);
}
}

View File

@@ -0,0 +1,114 @@
use crate::error::Error;
use clap::*;
use serde::{Deserialize, Serialize};
use sp_npos_elections::BalancingConfig;
use sp_runtime::DeserializeOwned;
use std::{collections::HashMap, fmt, str::FromStr};
use subxt::backend::legacy::rpc_methods as subxt_rpc;
#[derive(Debug, Clone, Parser)]
#[cfg_attr(test, derive(PartialEq))]
pub enum Solver {
SeqPhragmen {
#[clap(long, default_value = "10")]
iterations: usize,
},
PhragMMS {
#[clap(long, default_value = "10")]
iterations: usize,
}
}
frame_support::parameter_types! {
pub static BalanceIterations: usize = 10;
pub static Balancing: Option<BalancingConfig> =
Some(BalancingConfig { iterations: BalanceIterations::get(), tolerance: 0 });
}
#[derive(Debug, Copy, Clone)]
pub enum Chain {
Ghost,
Casper
}
impl fmt::Display for Chain {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let chain = match self {
Self::Ghost => "ghost",
Self::Casper => "casper",
};
write!(f, "{}", chain)
}
}
impl std::str::FromStr for Chain {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
match s {
"ghost" => Ok(Self::Ghost),
"casper" => Ok(Self::Casper),
chain => Err(Error::InvalidChain(chain.to_string())),
}
}
}
impl TryFrom<subxt_rpc::RuntimeVersion> for Chain {
type Error = Error;
fn try_from(rv: subxt_rpc::RuntimeVersion) -> Result<Self, Error> {
let json = rv
.other
.get("specName")
.expect("RuntimeVersion must have specName; qed")
.clone();
let mut chain = serde_json::from_value::<String>(json)
.expect("specName must be String; qed");
chain.make_ascii_lowercase();
Chain::from_str(&chain)
}
}
impl From<subxt_rpc::RuntimeVersion> for RuntimeVersion {
fn from(rv: subxt_rpc::RuntimeVersion) -> Self {
let mut spec_name: String = get_val_unchecked("specName", &rv.other);
let impl_name: String = get_val_unchecked("implName", &rv.other);
let impl_version: u32 = get_val_unchecked("implVersion", &rv.other);
let authoring_version: u32 = get_val_unchecked("authoringVersion", &rv.other);
let state_version: u8 = get_val_unchecked("stateVersion", &rv.other);
let spec_version = rv.spec_version;
let transaction_version = rv.transaction_version;
spec_name.make_ascii_lowercase();
Self {
spec_name,
impl_name,
impl_version,
spec_version,
transaction_version,
authoring_version,
state_version,
}
}
}
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct RuntimeVersion {
pub spec_name: String,
pub impl_name: String,
pub spec_version: u32,
pub impl_version: u32,
pub authoring_version: u32,
pub transaction_version: u32,
pub state_version: u8,
}
fn get_val_unchecked<T: DeserializeOwned>(val: &str, rv: &HashMap<String, serde_json::Value>) -> T {
let json = rv.get(val).expect("`{val}` must exist; qed").clone();
serde_json::from_value::<T>(json).expect("T must be Deserialize; qed")
}

View File

@@ -0,0 +1,45 @@
pub use pallet_election_provider_multi_phase::{Miner, MinerConfig};
pub use subxt::ext::sp_core;
pub use primitives::{AccountId, Header, Hash, Balance};
// pub type AccountId = sp_runtime::AccountId32;
// pub type Header = subxt::config::substrate::SubstrateHeader<u32, subxt::config::substrate::BlakeTwo256>;
// pub type Hash = sp_core::H256;
// pub type Balance = u128;
pub use subxt::ext::sp_runtime::traits::{Block as BlockT, Header as HeaderT};
pub const DEFAULT_URI: &str = "ws://127.0.0.1:9944";
pub const LOG_TARGET: &str = "ghost-staking-miner";
pub const DEFAULT_PROMETHEUS_PORT: u16 = 9999;
pub type Pair = sp_core::sr25519::Pair;
pub type Accuracy = sp_runtime::Perbill;
// TODO: revisit
pub type RpcClient = subxt::backend::legacy::LegacyRpcMethods<subxt::SubstrateConfig>;
pub type ChainClient = subxt::OnlineClient<subxt::SubstrateConfig>;
pub type Config = subxt::SubstrateConfig;
pub type SignedSubmission<S> = pallet_election_provider_multi_phase::SignedSubmission<AccountId, Balance, S>;
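// Statically typed chain interface generated from the metadata bundled at `artifacts/metadata.scale`.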
#[subxt::subxt(
runtime_metadata_path = "artifacts/metadata.scale",
derive_for_all_types = "Clone, Debug, Eq, PartialEq",
derive_for_type(
path = "pallet_election_provider_multi_phase::RoundSnapshot",
derive = "Default"
),
substitute_type(
path = "sp_npos_elections::ElectionScore",
with = "::subxt::utils::Static<::sp_npos_elections::ElectionScore>"
),
substitute_type(
path = "pallet_election_provider_multi_phase::Phase<Bn>",
with = "::subxt::utils::Static<::pallet_election_provider_multi_phase::Phase<Bn>>"
)
)]
pub mod runtime {}
pub static SHARED_CLIENT: once_cell::sync::OnceCell<crate::client::Client> =
once_cell::sync::OnceCell::new();

View File

@@ -0,0 +1,210 @@
use super::prelude::LOG_TARGET;
use futures::channel::oneshot;
pub use hidden::*;
use hyper::{
header::CONTENT_TYPE,
service::{make_service_fn, service_fn},
Body, Method, Request, Response,
};
use prometheus::{Encoder, TextEncoder};
async fn serve_req(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
let response = match (req.method(), req.uri().path()) {
(&Method::GET, "/metrics") => {
let mut buffer = vec![];
let encoder = TextEncoder::new();
let metric_families = prometheus::gather();
encoder.encode(&metric_families, &mut buffer).unwrap();
Response::builder()
.status(200)
.header(CONTENT_TYPE, encoder.format_type())
.body(Body::from(buffer))
.unwrap()
},
(&Method::GET, "/") => Response::builder().status(200).body(Body::from("")).unwrap(),
_ => Response::builder().status(404).body(Body::from("")).unwrap(),
};
Ok(response)
}
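// Shutdown handle for the prometheus endpoint: dropping it fires the oneshot channel, which gracefully shuts down the hyper server.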
pub struct GracefulShutdown(Option<oneshot::Sender<()>>);
impl Drop for GracefulShutdown {
fn drop(&mut self) {
if let Some(handle) = self.0.take() {
let _ = handle.send(());
}
}
}
pub fn run(port: u16) -> Result<GracefulShutdown, String> {
let (tx, rx) = oneshot::channel();
let make_svc = make_service_fn(move |_conn| async move {
Ok::<_, std::convert::Infallible>(service_fn(serve_req))
});
let addr = ([0, 0, 0, 0], port).into();
let server = hyper::Server::try_bind(&addr)
.map_err(|e| format!("Failed bind socket on port {} {:?}", port, e))?
.serve(make_svc);
log::info!(target: LOG_TARGET, "Started prometheus endpoint on http://{}", addr);
let graceful = server.with_graceful_shutdown(async {
rx.await.ok();
});
tokio::spawn(async move {
if let Err(e) = graceful.await {
log::warn!("Server error: {}", e);
}
});
Ok(GracefulShutdown(Some(tx)))
}
mod hidden {
use frame_election_provider_support::Weight;
use once_cell::sync::Lazy;
use prometheus::{opts, register_counter, register_gauge, Counter, Gauge};
static TRIMMED_SOLUTION_STARTED: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_trim_started",
"Number of started trimmed solutions",
)).unwrap()
});
static TRIMMED_SOLUTION_SUCCESS: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_trim_success",
"Number of successful trimmed solutions",
)).unwrap()
});
static SUBMISSIONS_STARTED: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_submissions_started",
"Number of submissions started",
)).unwrap()
});
static SUBMISSIONS_SUCCESS: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_submissions_success",
"Number of submissions finished successfully",
)).unwrap()
});
static MINED_SOLUTION_DURATION: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_mining_duration_ms",
"The mined solution time in milliseconds.",
)).unwrap()
});
static SUBMIT_SOLUTION_AND_WATCH_DURATION: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_submit_and_watch_duration_ms",
"The time in milliseconds it took to submit the solution to chain and to be included in block.",
)).unwrap()
});
static BALANCE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_balance",
"The balance of the staking miner account",
)).unwrap()
});
static SCORE_MINIMAL_STAKE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_minimal_stake",
"The minimal winner, in terms of total backing stake",
)).unwrap()
});
static SCORE_SUM_STAKE: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_sum_stake",
"The sum of the total backing of all winners",
)).unwrap()
});
static SCORE_SUM_STAKE_SQUARED: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_score_sum_stake_squared",
"The sum of the total backing of all winners, aka. the variance.",
)).unwrap()
});
static RUNTIME_UPGRADES: Lazy<Counter> = Lazy::new(|| {
register_counter!(opts!(
"staking_miner_runtime",
"Number of runtime upgrades performed",
)).unwrap()
});
static SUBMISSION_LENGTH: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_solution_length_bytes",
"Number of bytes in the solution submitted",
)).unwrap()
});
static SUBMISSION_WEIGHT: Lazy<Gauge> = Lazy::new(|| {
register_gauge!(opts!(
"staking_miner_solution_weight",
"Weight of the solution submitted",
)).unwrap()
});
pub fn on_runtime_upgrade() {
RUNTIME_UPGRADES.inc();
}
pub fn on_submission_attempts() {
SUBMISSIONS_STARTED.inc();
}
pub fn on_submission_success() {
SUBMISSIONS_SUCCESS.inc();
}
pub fn on_trim_attempt() {
TRIMMED_SOLUTION_STARTED.inc();
}
pub fn on_trim_success() {
TRIMMED_SOLUTION_SUCCESS.inc();
}
pub fn set_balance(balance: u64) {
BALANCE.set(balance as f64);
}
pub fn set_length(len: usize) {
SUBMISSION_LENGTH.set(len as f64);
}
pub fn set_weight(weight: Weight) {
SUBMISSION_WEIGHT.set(weight.ref_time() as f64);
}
pub fn set_score(score: sp_npos_elections::ElectionScore) {
SCORE_MINIMAL_STAKE.set(score.minimal_stake as f64);
SCORE_SUM_STAKE.set(score.sum_stake as f64);
SCORE_SUM_STAKE_SQUARED.set(score.sum_stake_squared as f64);
}
pub fn observe_submit_and_watch_duration(time: f64) {
SUBMIT_SOLUTION_AND_WATCH_DURATION.set(time);
}
pub fn observe_mined_solution_duration(time: f64) {
MINED_SOLUTION_DURATION.set(time);
}
}

View File

@@ -0,0 +1,52 @@
use crate::{error::Error, prelude::*};
use sp_core::Pair as _;
pub type PairSigner = subxt::tx::PairSigner<subxt::SubstrateConfig, sp_core::sr25519::Pair>;
pub struct Signer {
pair: Pair,
signer: PairSigner,
}
impl std::fmt::Display for Signer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.signer.account_id())
}
}
impl Clone for Signer {
fn clone(&self) -> Self {
Self { pair: self.pair.clone(), signer: PairSigner::new(self.pair.clone()) }
}
}
impl Signer {
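// `seed_or_path` is first treated as a path to a file containing the seed; if the file cannot be read, the string itself is used as the seed/SURI.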
pub fn new(mut seed_or_path: &str) -> Result<Self, Error> {
seed_or_path = seed_or_path.trim();
let seed = match std::fs::read(seed_or_path) {
Ok(s) => String::from_utf8(s).map_err(|e| Error::Other(e.to_string()))?,
Err(_) => seed_or_path.to_string(),
};
let seed = seed.trim();
let pair = Pair::from_string(seed, None).map_err(Error::Crypto)?;
let signer = PairSigner::new(pair.clone());
Ok(Self { pair, signer })
}
}
impl std::ops::Deref for Signer {
type Target = PairSigner;
fn deref(&self) -> &Self::Target {
&self.signer
}
}
impl std::ops::DerefMut for Signer {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.signer
}
}

View File

@@ -0,0 +1,180 @@
use crate::{epm, prelude::*};
use frame_election_provider_support::traits::NposSolution;
use frame_support::{traits::ConstU32, weights::Weight};
use pallet_election_provider_multi_phase::{RawSolution, SolutionOrSnapshotSize};
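// Generates a zero-sized parameter type backed by a process-wide `AtomicU32`, so constants read from chain metadata at runtime can satisfy `Get<I>` bounds.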
macro_rules! impl_atomic_u32_parameter_types {
($mod:ident, $name:ident) => {
mod $mod {
use std::sync::atomic::{AtomicU32, Ordering};
static VAL: AtomicU32 = AtomicU32::new(0);
pub struct $name;
impl $name {
pub fn get() -> u32 {
VAL.load(Ordering::SeqCst)
}
}
impl<I: From<u32>> frame_support::traits::Get<I> for $name {
fn get() -> I {
I::from(Self::get())
}
}
impl $name {
pub fn set(val: u32) {
VAL.store(val, std::sync::atomic::Ordering::SeqCst);
}
}
}
pub use $mod::$name;
};
}
mod max_weight {
use frame_support::weights::Weight;
use std::sync::atomic::{AtomicU64, Ordering};
static REF_TIME: AtomicU64 = AtomicU64::new(0);
static PROOF_SIZE: AtomicU64 = AtomicU64::new(0);
pub struct MaxWeight;
impl MaxWeight {
pub fn get() -> Weight {
Weight::from_parts(REF_TIME.load(Ordering::SeqCst), PROOF_SIZE.load(Ordering::SeqCst))
}
}
impl frame_support::traits::Get<Weight> for MaxWeight {
fn get() -> Weight {
Self::get()
}
}
impl MaxWeight {
pub fn set(weight: Weight) {
REF_TIME.store(weight.ref_time(), Ordering::SeqCst);
PROOF_SIZE.store(weight.proof_size(), Ordering::SeqCst);
}
}
}
impl_atomic_u32_parameter_types!(max_length, MaxLength);
impl_atomic_u32_parameter_types!(max_votes, MaxVotesPerVoter);
impl_atomic_u32_parameter_types!(max_winners, MaxWinners);
pub use max_weight::MaxWeight;
pub mod ghost {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution16::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = ConstU32::<22500>
>(16)
);
#[derive(Debug)]
pub struct MinerConfig;
impl pallet_election_provider_multi_phase::unsigned::MinerConfig for MinerConfig {
type AccountId = AccountId;
type MaxLength = MaxLength;
type MaxWeight = MaxWeight;
type MaxVotesPerVoter = MaxVotesPerVoter;
type Solution = NposSolution16;
type MaxWinners = MaxWinners;
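// Estimate the weight of a solution with the given dimensions by building a mock solution of that size and asking the remote node for its dispatch info.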
fn solution_weight(
voters: u32,
targets: u32,
active_voters: u32,
desired_targets: u32,
) -> Weight {
let Some(votes) = epm::mock_votes(
active_voters,
desired_targets.try_into().expect("Desired targets < u16::MAX"),
) else {
return Weight::MAX;
};
let raw = RawSolution {
solution: NposSolution16 { votes1: votes, ..Default::default() },
..Default::default()
};
if raw.solution.voter_count() != active_voters as usize ||
raw.solution.unique_targets().len() != desired_targets as usize
{
return Weight::MAX;
}
futures::executor::block_on(epm::runtime_api_solution_weight(
raw,
SolutionOrSnapshotSize { voters, targets },
)).expect("solution_weight should work")
}
}
}
pub mod casper {
use super::*;
frame_election_provider_support::generate_solution_type!(
#[compact]
pub struct NposSolution16::<
VoterIndex = u32,
TargetIndex = u16,
Accuracy = sp_runtime::PerU16,
MaxVoters = ConstU32::<22500>
>(16)
);
#[derive(Debug)]
pub struct MinerConfig;
impl pallet_election_provider_multi_phase::unsigned::MinerConfig for MinerConfig {
type AccountId = AccountId;
type MaxLength = MaxLength;
type MaxWeight = MaxWeight;
type MaxVotesPerVoter = MaxVotesPerVoter;
type Solution = NposSolution16;
type MaxWinners = MaxWinners;
fn solution_weight(
voters: u32,
targets: u32,
active_voters: u32,
desired_targets: u32,
) -> Weight {
let Some(votes) = epm::mock_votes(
active_voters,
desired_targets.try_into().expect("Desired targets < u16::MAX"),
) else {
return Weight::MAX;
};
let raw = RawSolution {
solution: NposSolution16 { votes1: votes, ..Default::default() },
..Default::default()
};
if raw.solution.voter_count() != active_voters as usize ||
raw.solution.unique_targets().len() != desired_targets as usize
{
return Weight::MAX;
}
futures::executor::block_on(epm::runtime_api_solution_weight(
raw,
SolutionOrSnapshotSize { voters, targets },
)).expect("solution_weight should work")
}
}
}