feat: vote keyed leader schedule
jstarry committed Feb 5, 2025
1 parent 6681859 commit 77923aa
Showing 3 changed files with 238 additions and 38 deletions.
189 changes: 172 additions & 17 deletions ledger/src/leader_schedule.rs
@@ -5,7 +5,8 @@ use {
rand::distributions::{Distribution, WeightedIndex},
rand_chacha::{rand_core::SeedableRng, ChaChaRng},
solana_pubkey::Pubkey,
solana_sdk::clock::Epoch,
solana_sdk::clock::{Epoch, Slot},
solana_vote::vote_account::VoteAccountsHashMap,
std::{collections::HashMap, convert::identity, ops::Index, sync::Arc},
};

@@ -16,14 +17,120 @@ pub struct FixedSchedule {
}

/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq, Eq, Clone)]
pub struct LeaderSchedule {
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct LeaderSchedule(LeaderScheduleVariants);

#[cfg(feature = "dev-context-only-utils")]
impl Default for LeaderSchedule {
fn default() -> Self {
Self(
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(
ValidatorIdentityKeyedLeaderSchedule {
slot_leaders: vec![],
index: HashMap::new(),
},
),
)
}
}

#[derive(Debug, PartialEq, Eq, Clone)]
struct VoteAccountKeyedLeaderSchedule {
slot_leader_vote_account_addresses: Vec<Pubkey>,
// Cached leader schedule keyed by validator identity, created by mapping
// each vote account key to the validator identity designated at the time
// the leader schedule was generated. This avoids having to look up the
// validator identity key for every slot.
validator_identity_keyed_leader_schedule: ValidatorIdentityKeyedLeaderSchedule,
}

#[derive(Debug, PartialEq, Eq, Clone)]
struct ValidatorIdentityKeyedLeaderSchedule {
slot_leaders: Vec<Pubkey>,
// Inverted index from pubkeys to indices where they are the leader.
index: HashMap<Pubkey, Arc<Vec<usize>>>,
}

#[derive(Debug, PartialEq, Eq, Clone)]
enum LeaderScheduleVariants {
// Latest leader schedule algorithm which designates a specific vote account
// to each slot so that the runtime can load vote state (e.g. commission and
// fee collector accounts) for a given slot
VoteAccountKeyedLeaderSchedule(VoteAccountKeyedLeaderSchedule),
// Old leader schedule algorithm which designates a specific validator
// identity to each slot. Since multiple vote accounts can be associated
// with a single validator identity, it's not possible to use this to load
// vote state for a given slot.
ValidatorIdentityKeyedLeaderSchedule(ValidatorIdentityKeyedLeaderSchedule),
}

impl LeaderSchedule {
// Note: passing in zero vote accounts will cause a panic.
pub fn new_keyed_by_vote_account(
vote_accounts_map: &VoteAccountsHashMap,
epoch: Epoch,
len: u64,
repeat: u64,
) -> Self {
let stakes: Vec<_> = vote_accounts_map
.iter()
.map(|(vote_pubkey, (stake, _account))| (vote_pubkey, *stake))
.collect();
let slot_leader_vote_account_addresses =
Self::stake_weighted_slot_leaders(stakes, epoch, len, repeat);

let validator_identity_keyed_leader_schedule = {
struct SlotLeaderAddresses<'a> {
vote_account_address: &'a Pubkey,
validator_identity_address: &'a Pubkey,
}

let mut slot_leader_addresses = {
let vote_account_address = &slot_leader_vote_account_addresses[0];
let validator_identity_address = vote_accounts_map
.get(vote_account_address)
.unwrap()
.1
.node_pubkey();
SlotLeaderAddresses {
vote_account_address,
validator_identity_address,
}
};

let slot_leaders: Vec<Pubkey> = slot_leader_vote_account_addresses
.iter()
.map(|vote_account_address| {
if vote_account_address != slot_leader_addresses.vote_account_address {
let validator_identity_address = vote_accounts_map
.get(vote_account_address)
.unwrap()
.1
.node_pubkey();
slot_leader_addresses = SlotLeaderAddresses {
vote_account_address,
validator_identity_address,
};
}
*slot_leader_addresses.validator_identity_address
})
.collect();

let index = Self::index_from_slot_leaders(&slot_leaders);
ValidatorIdentityKeyedLeaderSchedule {
slot_leaders,
index,
}
};

Self(LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(
VoteAccountKeyedLeaderSchedule {
slot_leader_vote_account_addresses,
validator_identity_keyed_leader_schedule,
},
))
}

// Note: passing in zero stakers will cause a panic.
pub fn new_keyed_by_validator_identity(
stakes: &HashMap<Pubkey, u64>,
@@ -41,10 +148,14 @@ impl LeaderSchedule {

#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
Self {
index: Self::index_from_slot_leaders(&slot_leaders),
slot_leaders,
}
Self(
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(
ValidatorIdentityKeyedLeaderSchedule {
index: Self::index_from_slot_leaders(&slot_leaders),
slot_leaders,
},
),
)
}

// Note: passing in zero stakers will cause a panic.
@@ -82,12 +193,54 @@ impl LeaderSchedule {
.collect()
}

pub fn is_keyed_by_vote_account(&self) -> bool {
matches!(
self.0,
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(_)
)
}

/// Get the vote account address for the given epoch slot index. This is
/// guaranteed to be Some if the leader schedule is keyed by vote account
/// and the slot index is within the range of the leader schedule.
pub fn get_vote_account_address_for_slot_index(
&self,
epoch_slot_index: usize,
) -> Option<&Pubkey> {
match &self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => schedule
.slot_leader_vote_account_addresses
.get(epoch_slot_index),
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(_) => None,
}
}

pub fn get_slot_leaders(&self) -> &[Pubkey] {
&self.slot_leaders
match self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(ref schedule) => {
&schedule
.validator_identity_keyed_leader_schedule
.slot_leaders
}
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(ref schedule) => {
&schedule.slot_leaders
}
}
}

pub fn num_slots(&self) -> usize {
self.slot_leaders.len()
self.get_slot_leaders().len()
}

fn index(&self) -> &HashMap<Pubkey, Arc<Vec<usize>>> {
match &self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => {
&schedule.validator_identity_keyed_leader_schedule.index
}
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(schedule) => {
&schedule.index
}
}
}

/// 'offset' is an index into the leader schedule. The function returns an
@@ -97,8 +250,8 @@ impl LeaderSchedule {
pubkey: &Pubkey,
offset: usize, // Starting index.
) -> impl Iterator<Item = usize> {
let index = self.index.get(pubkey).cloned().unwrap_or_default();
let num_slots = self.slot_leaders.len();
let index = self.index().get(pubkey).cloned().unwrap_or_default();
let num_slots = self.num_slots();
let size = index.len();
#[allow(clippy::reversed_empty_ranges)]
let range = if index.is_empty() {
@@ -122,7 +275,7 @@ impl Index<u64> for LeaderSchedule {
type Output = Pubkey;
fn index(&self, index: u64) -> &Pubkey {
let index = index as usize;
&self.slot_leaders[index % self.slot_leaders.len()]
&self.get_slot_leaders()[index % self.num_slots()]
}
}

@@ -188,7 +341,7 @@ mod tests {
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, repeat);
assert_eq!(leader_schedule.num_slots() as u64, len);
let mut leader_node = Pubkey::default();
for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
for (i, node) in leader_schedule.get_slot_leaders().iter().enumerate() {
if i % repeat as usize == 0 {
leader_node = *node;
} else {
@@ -206,12 +359,14 @@ impl LeaderSchedule {
let epoch = 0;
let len = 8;
// What the schedule looks like without any repeats
let leaders1 =
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1).slot_leaders;
let leaders1 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1)
.get_slot_leaders()
.to_vec();

// What the schedule looks like with repeats
let leaders2 =
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2).slot_leaders;
let leaders2 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2)
.get_slot_leaders()
.to_vec();
assert_eq!(leaders1.len(), leaders2.len());

let leaders1_expected = vec![
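The diff above replaces the single slot_leaders vector with a two-variant enum. To make that shape concrete, here is a minimal standalone sketch of the idea, using a stand-in Pubkey type and simplified struct and method names rather than the crate's actual types (the real schedule also caches an inverted pubkey-to-slot-indices index and is built from stake-weighted sampling):

```rust
// Minimal standalone model of the two schedule variants; `Pubkey` here is a
// stand-in type, not solana_pubkey::Pubkey.
type Pubkey = [u8; 32];

struct IdentityKeyedSchedule {
    slot_leaders: Vec<Pubkey>,
}

struct VoteKeyedSchedule {
    slot_leader_vote_account_addresses: Vec<Pubkey>,
    // Identity-keyed schedule cached at generation time so identity lookups
    // never need the vote-accounts map.
    identity_keyed: IdentityKeyedSchedule,
}

enum Schedule {
    VoteKeyed(VoteKeyedSchedule),
    IdentityKeyed(IdentityKeyedSchedule),
}

impl Schedule {
    // Both variants can answer "which validator identity leads this slot?".
    fn slot_leader(&self, epoch_slot_index: usize) -> Option<&Pubkey> {
        match self {
            Schedule::VoteKeyed(s) => s.identity_keyed.slot_leaders.get(epoch_slot_index),
            Schedule::IdentityKeyed(s) => s.slot_leaders.get(epoch_slot_index),
        }
    }

    // Only the vote-keyed variant can answer "which vote account leads it?".
    fn vote_account(&self, epoch_slot_index: usize) -> Option<&Pubkey> {
        match self {
            Schedule::VoteKeyed(s) => {
                s.slot_leader_vote_account_addresses.get(epoch_slot_index)
            }
            Schedule::IdentityKeyed(_) => None,
        }
    }
}

fn main() {
    let identity = [1u8; 32];
    let vote_account = [2u8; 32];
    let schedule = Schedule::VoteKeyed(VoteKeyedSchedule {
        slot_leader_vote_account_addresses: vec![vote_account; 4],
        identity_keyed: IdentityKeyedSchedule {
            slot_leaders: vec![identity; 4],
        },
    });
    assert_eq!(schedule.slot_leader(0), Some(&identity));
    assert_eq!(schedule.vote_account(2), Some(&vote_account));
}
```

The reason for caching the identity-keyed schedule inside the vote-keyed variant is visible here: identity lookups at query time never touch the vote-accounts map.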
66 changes: 45 additions & 21 deletions ledger/src/leader_schedule_utils.rs
@@ -10,14 +10,33 @@ use {

/// Return the leader schedule for the given epoch.
pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
bank.epoch_staked_nodes(epoch).map(|stakes| {
LeaderSchedule::new_keyed_by_validator_identity(
&stakes,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
let use_new_leader_schedule = match bank
.feature_set
.new_leader_schedule_epoch(bank.epoch_schedule())
{
Some(new_leader_schedule_epoch) => epoch >= new_leader_schedule_epoch,
None => false,
};

if use_new_leader_schedule {
bank.epoch_vote_accounts(epoch).map(|vote_accounts_map| {
LeaderSchedule::new_keyed_by_vote_account(
vote_accounts_map,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
} else {
bank.epoch_staked_nodes(epoch).map(|stakes| {
LeaderSchedule::new_keyed_by_validator_identity(
&stakes,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
}
}

/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@@ -64,27 +83,32 @@ mod tests {
super::*,
solana_runtime::genesis_utils::{
bootstrap_validator_stake_lamports, create_genesis_config_with_leader,
deactivate_features,
},
test_case::test_case,
};

#[test]
fn test_leader_schedule_via_bank() {
#[test_case(true; "vote keyed leader schedule")]
#[test_case(false; "identity keyed leader schedule")]
fn test_leader_schedule_via_bank(use_vote_keyed_leader_schedule: bool) {
let pubkey = solana_pubkey::new_rand();
let genesis_config =
let mut genesis_config =
create_genesis_config_with_leader(0, &pubkey, bootstrap_validator_stake_lamports())
.genesis_config;

if !use_vote_keyed_leader_schedule {
deactivate_features(
&mut genesis_config,
&vec![solana_feature_set::enable_vote_address_leader_schedule::id()],
);
}

let bank = Bank::new_for_tests(&genesis_config);
let leader_schedule = leader_schedule(0, &bank).unwrap();

let pubkeys_and_stakes: HashMap<_, _> = bank
.current_epoch_staked_nodes()
.iter()
.map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
let leader_schedule = LeaderSchedule::new_keyed_by_validator_identity(
&pubkeys_and_stakes,
0,
genesis_config.epoch_schedule.slots_per_epoch,
NUM_CONSECUTIVE_LEADER_SLOTS,
assert_eq!(
leader_schedule.is_keyed_by_vote_account(),
use_vote_keyed_leader_schedule
);

assert_eq!(leader_schedule[0], pubkey);
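The feature gating in leader_schedule() above boils down to an epoch comparison against the value returned by new_leader_schedule_epoch(). The sketch below restates that decision as a standalone function over plain integer epochs, leaving out the Bank and FeatureSet plumbing; the names here are illustrative only:

```rust
/// Standalone restatement of the selection above: use the vote-keyed leader
/// schedule once the target epoch reaches the epoch at which the new schedule
/// becomes effective; otherwise keep the identity-keyed schedule.
fn use_vote_keyed_schedule(new_schedule_epoch: Option<u64>, target_epoch: u64) -> bool {
    match new_schedule_epoch {
        Some(effective_epoch) => target_epoch >= effective_epoch,
        // Feature not activated: keep the identity-keyed schedule.
        None => false,
    }
}

fn main() {
    // Suppose the new schedule becomes effective in epoch 10.
    assert!(!use_vote_keyed_schedule(Some(10), 9));
    assert!(use_vote_keyed_schedule(Some(10), 10));
    // Feature never activated: always identity-keyed.
    assert!(!use_vote_keyed_schedule(None, 123));
}
```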
21 changes: 21 additions & 0 deletions sdk/feature-set/src/lib.rs
@@ -924,6 +924,10 @@ pub mod reserve_minimal_cus_for_builtin_instructions {
solana_pubkey::declare_id!("C9oAhLxDBm3ssWtJx1yBGzPY55r2rArHmN1pbQn6HogH");
}

pub mod enable_vote_address_leader_schedule {
solana_pubkey::declare_id!("5JsG4NWH8Jbrqdd8uL6BNwnyZK3dQSoieRXG5vmofj9y");
}

pub mod raise_block_limits_to_50m {
solana_pubkey::declare_id!("5oMCU3JPaFLr8Zr4ct7yFA7jdk6Mw1RmB8K4u9ZbS42z");
}
@@ -1158,6 +1162,7 @@ lazy_static! {
(deplete_cu_meter_on_vm_failure::id(), "Deplete compute meter for vm errors SIMD-0182 #3993"),
(reserve_minimal_cus_for_builtin_instructions::id(), "Reserve minimal CUs for builtin instructions SIMD-170 #2562"),
(raise_block_limits_to_50m::id(), "Raise block limit to 50M SIMD-0207"),
(enable_vote_address_leader_schedule::id(), "Enable vote address leader schedule SIMD-0180 #4573"),
(fix_alt_bn128_multiplication_input_length::id(), "fix alt_bn128 multiplication input length SIMD-0222 #3686"),
(drop_unchained_merkle_shreds::id(), "drops unchained Merkle shreds #2149"),
/*************** ADD NEW FEATURES HERE ***************/
Expand Down Expand Up @@ -1222,6 +1227,22 @@ impl FeatureSet {
self.active.get(feature_id).copied()
}

pub fn new_leader_schedule_epoch(&self, epoch_schedule: &EpochSchedule) -> Option<u64> {
// Check the epoch of the slot at which the new leader schedule feature was
// activated; the new leader schedule takes effect in the following epoch,
// except when the feature was activated at genesis (epoch 0), in which
// case it takes effect immediately.
const NEW_LEADER_SCHEDULE_EPOCH_DELAY: u64 = 1;
self.activated_slot(&enable_vote_address_leader_schedule::id())
.map(|slot| {
let activation_epoch = epoch_schedule.get_epoch(slot);
if activation_epoch == 0 {
activation_epoch
} else {
activation_epoch.wrapping_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY)
}
})
}

/// List of enabled features that trigger full inflation
pub fn full_inflation_features_enabled(&self) -> AHashSet<Pubkey> {
let mut hash_set = FULL_INFLATION_FEATURE_PAIRS
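As a sanity check on the activation-delay rule in new_leader_schedule_epoch(), the sketch below restates the same computation with a plain closure standing in for EpochSchedule::get_epoch and an assumed 32 slots per epoch; it illustrates the rule in the diff rather than reproducing the FeatureSet implementation:

```rust
/// Standalone restatement of the rule above: the new leader schedule takes
/// effect one epoch after the activation epoch, except when the feature is
/// active from genesis (activation epoch 0), where it applies immediately.
fn new_leader_schedule_epoch(
    activation_slot: Option<u64>,
    get_epoch: impl Fn(u64) -> u64, // stand-in for EpochSchedule::get_epoch
) -> Option<u64> {
    const NEW_LEADER_SCHEDULE_EPOCH_DELAY: u64 = 1;
    activation_slot.map(|slot| {
        let activation_epoch = get_epoch(slot);
        if activation_epoch == 0 {
            activation_epoch
        } else {
            activation_epoch.wrapping_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY)
        }
    })
}

fn main() {
    // Assume 32 slots per epoch for the illustration.
    let get_epoch = |slot: u64| slot / 32;
    // Activated at genesis: effective immediately (epoch 0).
    assert_eq!(new_leader_schedule_epoch(Some(0), get_epoch), Some(0));
    // Activated in epoch 5 (slot 160): effective starting epoch 6.
    assert_eq!(new_leader_schedule_epoch(Some(160), get_epoch), Some(6));
    // Feature not activated.
    assert_eq!(new_leader_schedule_epoch(None, get_epoch), None);
}
```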
