simd-0180: implement vote-based leader schedule for firedancer #5913

Closed
wants to merge 3 commits
58 changes: 46 additions & 12 deletions src/discof/replay/fd_exec.h
@@ -6,24 +6,23 @@
#include "../../flamenco/runtime/fd_runtime.h"
#include "../../flamenco/stakes/fd_stakes.h"
#include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
#include "../../discof/restore/utils/fd_ssmsg.h"

/* FIXME: SIMD-0180 - set the correct epochs */
#define FD_SIMD0180_ACTIVE_EPOCH_TESTNET (5000)
#define FD_SIMD0180_ACTIVE_EPOCH_MAINNET (5000)

/* Replay tile msg link formatting. The following helpers take a pointer
   into a dcache region and format it as a specific message type. */

static inline ulong
-generate_stake_weight_msg( fd_exec_slot_ctx_t * slot_ctx,
-                           fd_spad_t *          runtime_spad,
-                           ulong                epoch,
+generate_stake_weight_msg( fd_exec_slot_ctx_t *              slot_ctx,
+                           ulong                             epoch,
                            fd_vote_accounts_global_t const * vote_accounts,
-                           ulong *              stake_weight_msg_out ) {
-  /* This function needs to be completely rewritten for SIMD-0180.
-     For now it's a hack that sends old data (pre SIMD-0180) in the new format. */
-
-  fd_stake_weight_msg_t *  stake_weight_msg = (fd_stake_weight_msg_t *)fd_type_pun( stake_weight_msg_out );
-  fd_vote_stake_weight_t * stake_weights    = stake_weight_msg->weights;
-  ulong                    staked_cnt       = fd_stake_weights_by_node( vote_accounts,
-                                                                        stake_weights,
-                                                                        runtime_spad );
+                           ulong *                           stake_weight_msg_out ) {
+  fd_stake_weight_msg_t *  stake_weight_msg = (fd_stake_weight_msg_t *)fd_type_pun( stake_weight_msg_out );
+  fd_vote_stake_weight_t * stake_weights    = stake_weight_msg->weights;
+  ulong                    staked_cnt       = fd_stake_weights_by_node( vote_accounts, stake_weights );
fd_epoch_schedule_t const * epoch_schedule = fd_bank_epoch_schedule_query( slot_ctx->bank );

stake_weight_msg->epoch = epoch;
@@ -36,6 +35,41 @@ generate_stake_weight_msg( fd_exec_slot_ctx_t * slot_ctx,
return fd_stake_weight_msg_sz( staked_cnt );
}
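Both helpers format the caller-supplied dcache region as a stake-weight message: a small header of ulongs followed by a packed array of per-vote-account weights, with fd_stake_weight_msg_sz() giving the total footprint. The sketch below only restates the layout implied by the field accesses in this file and the values logged by the replay tile further down; the sketch_* names are illustrative, the ordering of the header fields past the first four and of the weight-entry members is a guess, and the authoritative fd_stake_weight_msg_t / fd_vote_stake_weight_t definitions live elsewhere in the tree.

/* Layout sketch (assumed from the accesses in this file, not the canonical
   definitions; field order beyond the first four header ulongs is a guess). */
typedef struct {
  uchar id_key[ 32 ];    /* validator identity pubkey (id_key.uc in the real type) */
  uchar vote_key[ 32 ];  /* vote account pubkey (vote_key.uc in the real type)     */
  ulong stake;           /* delegated stake                                        */
} sketch_vote_stake_weight_t;

typedef struct {
  ulong epoch;              /* leader-schedule epoch the weights apply to       */
  ulong staked_cnt;         /* number of entries in weights[]                   */
  ulong start_slot;         /* first slot of that epoch                         */
  ulong slot_cnt;           /* number of slots in that epoch                    */
  ulong excluded_stake;     /* stake not represented in weights[] (0UL here)    */
  ulong vote_keyed_lsched;  /* 1UL if the SIMD-0180 vote-keyed schedule applies */
  sketch_vote_stake_weight_t weights[];  /* staked_cnt entries                  */
} sketch_stake_weight_msg_t;

/* fd_stake_weight_msg_sz( staked_cnt ) is then presumably on the order of
   sizeof(header) + staked_cnt*sizeof(weight entry). */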

static inline ulong
generate_stake_weight_msg_manifest( ulong epoch,
fd_epoch_schedule_t const * epoch_schedule,
fd_snapshot_manifest_epoch_stakes_t const * epoch_stakes,
ulong * stake_weight_msg_out ) {
fd_stake_weight_msg_t * stake_weight_msg = (fd_stake_weight_msg_t *)fd_type_pun( stake_weight_msg_out );
fd_vote_stake_weight_t * stake_weights = stake_weight_msg->weights;

stake_weight_msg->epoch = epoch;
stake_weight_msg->staked_cnt = epoch_stakes->vote_stakes_len;
stake_weight_msg->start_slot = fd_epoch_slot0( epoch_schedule, epoch );
stake_weight_msg->slot_cnt = epoch_schedule->slots_per_epoch;
stake_weight_msg->excluded_stake = 0UL;
stake_weight_msg->vote_keyed_lsched = 1UL;

/* FIXME: SIMD-0180 - hack to (de)activate in testnet vs mainnet.
This code can be removed once the feature is active. */
{
if( ( 1==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_TESTNET )
|| ( 0==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_MAINNET ) ) {
stake_weight_msg->vote_keyed_lsched = 0UL;
}
}

/* epoch_stakes from manifest are already filtered (stake>0), but not sorted */
for( ulong i=0UL; i<epoch_stakes->vote_stakes_len; i++ ) {
stake_weights[ i ].stake = epoch_stakes->vote_stakes[ i ].stake;
memcpy( stake_weights[ i ].id_key.uc, epoch_stakes->vote_stakes[ i ].identity, sizeof(fd_pubkey_t) );
memcpy( stake_weights[ i ].vote_key.uc, epoch_stakes->vote_stakes[ i ].vote, sizeof(fd_pubkey_t) );
}
sort_vote_weights_by_stake_vote_inplace( stake_weights, epoch_stakes->vote_stakes_len );

return fd_stake_weight_msg_sz( epoch_stakes->vote_stakes_len );
}
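As the comment notes, the manifest's vote stakes arrive pre-filtered (stake greater than zero) but in no particular order, and the leader schedule needs a deterministic ordering, so they are sorted in place before publication. The comparator below is only a guess at the ordering sort_vote_weights_by_stake_vote_inplace implies (its name suggests stake as the primary key with the vote key as tie-break); the template-generated sort elsewhere in the tree is authoritative and may differ in direction or tie-break.

/* Hypothetical comparator, shown only to make the intended post-condition
   concrete; not the actual sort used above. Assumes larger stakes sort first
   and ties are broken deterministically on the vote pubkey. */
static inline int
sketch_vote_weight_cmp( fd_vote_stake_weight_t const * a,
                        fd_vote_stake_weight_t const * b ) {
  if( a->stake != b->stake ) return ( a->stake > b->stake ) ? -1 : 1;
  return memcmp( a->vote_key.uc, b->vote_key.uc, sizeof(fd_pubkey_t) );
}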

static inline void
generate_hash_bank_msg( ulong task_infos_gaddr,
ulong lt_hash_gaddr,
36 changes: 31 additions & 5 deletions src/discof/replay/fd_replay_tile.c
@@ -292,6 +292,7 @@ struct fd_replay_tile_ctx {
/* TODO: Remove this and use the parsed manifest generated by snapin
tiles. */
uchar manifest_scratch[ (1UL<<31UL)+(1UL<<28UL) ] __attribute((aligned(FD_SOLANA_MANIFEST_GLOBAL_ALIGN)));
fd_snapshot_manifest_t * manifest;

int read_only; /* The read-only slot is the slot the validator needs
to replay through before it can proceed with any
@@ -363,22 +364,21 @@ publish_stake_weights( fd_replay_tile_ctx_t * ctx,
if( epoch_stakes_root!=NULL ) {
ulong * stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
ulong epoch = fd_slot_to_leader_schedule_epoch( epoch_schedule, fd_bank_slot_get( slot_ctx->bank ) );
-    ulong stake_weights_sz = generate_stake_weight_msg( slot_ctx, ctx->runtime_spad, epoch - 1, epoch_stakes, stake_weights_msg );
+    ulong stake_weights_sz = generate_stake_weight_msg( slot_ctx, epoch - 1, epoch_stakes, stake_weights_msg );
ulong stake_weights_sig = 4UL;
fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
FD_LOG_NOTICE(("sending current epoch stake weights - epoch: %lu, stake_weight_cnt: %lu, start_slot: %lu, slot_cnt: %lu", stake_weights_msg[0], stake_weights_msg[1], stake_weights_msg[2], stake_weights_msg[3]));
}

fd_bank_epoch_stakes_end_locking_query( slot_ctx->bank );

fd_vote_accounts_global_t const * next_epoch_stakes = fd_bank_next_epoch_stakes_locking_query( slot_ctx->bank );
fd_vote_accounts_pair_global_t_mapnode_t * next_epoch_stakes_root = fd_vote_accounts_vote_accounts_root_join( next_epoch_stakes );

if( next_epoch_stakes_root!=NULL ) {
ulong * stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
ulong epoch = fd_slot_to_leader_schedule_epoch( epoch_schedule, fd_bank_slot_get( slot_ctx->bank ) ); /* epoch */
-    ulong stake_weights_sz = generate_stake_weight_msg( slot_ctx, ctx->runtime_spad, epoch, next_epoch_stakes, stake_weights_msg );
+    ulong stake_weights_sz = generate_stake_weight_msg( slot_ctx, epoch, next_epoch_stakes, stake_weights_msg );
ulong stake_weights_sig = 4UL;
fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
@@ -387,6 +387,30 @@
fd_bank_next_epoch_stakes_end_locking_query( slot_ctx->bank );
}

static void
publish_stake_weights_manifest( fd_replay_tile_ctx_t * ctx,
fd_stem_context_t * stem,
fd_snapshot_manifest_t const * manifest ) {
fd_epoch_schedule_t const * schedule = &manifest->epoch_schedule_params;
ulong epoch = fd_slot_to_epoch( schedule, manifest->slot, NULL );

/* current epoch */
ulong * stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
ulong stake_weights_sz = generate_stake_weight_msg_manifest( epoch, schedule, &manifest->epoch_stakes[0], stake_weights_msg );
ulong stake_weights_sig = 4UL;
fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
FD_LOG_NOTICE(("sending current epoch stake weights - epoch: %lu, stake_weight_cnt: %lu, start_slot: %lu, slot_cnt: %lu", stake_weights_msg[0], stake_weights_msg[1], stake_weights_msg[2], stake_weights_msg[3]));

/* next epoch */
stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
stake_weights_sz = generate_stake_weight_msg_manifest( epoch + 1, schedule, &manifest->epoch_stakes[1], stake_weights_msg );
stake_weights_sig = 4UL;
fd_stem_publish( stem, 0UL, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );
FD_LOG_NOTICE(("sending next epoch stake weights - epoch: %lu, stake_weight_cnt: %lu, start_slot: %lu, slot_cnt: %lu", stake_weights_msg[0], stake_weights_msg[1], stake_weights_msg[2], stake_weights_msg[3]));
}
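The two publishes above put the current epoch (manifest epoch_stakes[0]) and the next epoch (epoch_stakes[1]) on the stake_out link with sig 4UL. The FD_LOG_NOTICE calls read the first four ulongs of the frag as epoch, stake_weight_cnt, start_slot and slot_cnt, so a consumer of this link can be expected to parse a frag roughly as sketched below; the function name is illustrative and the actual receiving tile is not part of this change set.

/* Illustrative consumer-side sketch, assuming the header layout implied by
   the logging above; not taken from the real receiving tile. */
static void
sketch_on_stake_weight_frag( ulong const * msg ) {
  ulong epoch      = msg[ 0 ];  /* leader-schedule epoch                    */
  ulong weight_cnt = msg[ 1 ];  /* number of fd_vote_stake_weight_t entries */
  ulong start_slot = msg[ 2 ];  /* first slot the schedule covers           */
  ulong slot_cnt   = msg[ 3 ];  /* number of slots in the epoch             */

  fd_stake_weight_msg_t const *  hdr     = (fd_stake_weight_msg_t const *)(void const *)msg;
  fd_vote_stake_weight_t const * weights = hdr->weights;

  /* ... build the epoch's leader schedule from weights[ 0..weight_cnt ) ... */
  (void)epoch; (void)weight_cnt; (void)start_slot; (void)slot_cnt; (void)weights;
}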

static void
block_finalize_tiles_cb( void * para_arg_1,
void * para_arg_2,
@@ -589,6 +613,8 @@ restore_slot_ctx( fd_replay_tile_ctx_t * ctx,
ctx->slot_ctx->status_cache = ctx->status_cache;

uchar const * data = fd_chunk_to_laddr( mem, chunk );
ctx->manifest = (fd_snapshot_manifest_t*)data;

uchar const * manifest_bytes = data+sizeof(fd_snapshot_manifest_t);

fd_bincode_decode_ctx_t decode = {
@@ -616,8 +642,8 @@

static void
kickoff_repair_orphans( fd_replay_tile_ctx_t * ctx, fd_stem_context_t * stem ) {
-  fd_fseq_update( ctx->published_wmark, fd_bank_slot_get( ctx->slot_ctx->bank ) );
-  publish_stake_weights( ctx, stem, ctx->slot_ctx );
+  fd_fseq_update( ctx->published_wmark, ctx->manifest->slot );
+  publish_stake_weights_manifest( ctx, stem, ctx->manifest );
}

static void