
Send the stake state in the format the rest of the tiles are expecting #5224

Closed (wants to merge 1 commit)
6 changes: 4 additions & 2 deletions src/app/shared_dev/commands/bench/fd_bencho.c
@@ -85,8 +85,10 @@ service_block_hash( fd_bencho_ctx_t * ctx,
return did_work;
}

if( FD_UNLIKELY( response->status!=FD_RPC_CLIENT_SUCCESS ) )
FD_LOG_ERR(( "RPC server returned error %ld", response->status ));
if( FD_UNLIKELY( response->status!=FD_RPC_CLIENT_SUCCESS ) ) {
FD_LOG_WARNING(( "RPC server returned error %ld", response->status ));
return did_work;
}

ctx->blockhash_state = FD_BENCHO_STATE_WAIT;
ctx->blockhash_deadline = fd_log_wallclock() + 400L * 1000L * 1000L; /* 400 millis til we fetch new blockhash */
1 change: 1 addition & 0 deletions src/disco/fd_disco_base.h
@@ -24,6 +24,7 @@
#define REPLAY_FLAG_MICROBLOCK (0x04UL)
#define REPLAY_FLAG_CATCHING_UP (0x08UL)
#define REPLAY_FLAG_INIT (0x10UL)
#define REPLAY_FLAG_PUBLISH_LEADER (0x20UL)

#define EXEC_FLAG_READY_NEW (0x20UL)
#define EXEC_FLAG_EXECUTING_SLICE (0x40UL)
56 changes: 44 additions & 12 deletions src/discof/poh/fd_poh_tile.c
@@ -355,6 +355,8 @@
#define IN_KIND_BANK (0)
#define IN_KIND_PACK (1)
#define IN_KIND_STAKE (2)
#define IN_KIND_REPLAY (3)
#define IN_KIND_PACK_REPLAY (4)


typedef struct {
@@ -371,7 +373,7 @@ typedef struct {
ulong chunk;
} fd_poh_out_ctx_t;

typedef struct {
typedef struct __attribute__((aligned(32UL))) {
fd_stem_context_t * stem;

/* Static configuration determined at genesis creation time. See
@@ -576,7 +578,7 @@ typedef struct {
returned lock value back to zero, and the POH tile continues with its
day. */

static fd_poh_ctx_t * fd_poh_global_ctx;
static fd_poh_ctx_t * fd_poh_global_ctx = NULL;

static volatile ulong fd_poh_waiting_lock __attribute__((aligned(128UL)));
static volatile ulong fd_poh_returned_lock __attribute__((aligned(128UL)));
@@ -750,7 +752,7 @@ extern void fd_ext_poh_register_tick( void const * bank, uchar
It can be used with `fd_ext_poh_signal_leader_change` which
will just issue a nonblocking send on the channel. */

CALLED_FROM_RUST void
void
fd_ext_poh_initialize( ulong tick_duration_ns, /* See clock comments above, will be 6.4 microseconds for mainnet-beta. */
ulong hashcnt_per_tick, /* See clock comments above, will be 62,500 for mainnet-beta. */
ulong ticks_per_slot, /* See clock comments above, will almost always be 64. */
@@ -773,8 +775,8 @@ fd_ext_poh_initialize( ulong tick_duration_ns, /* See clock comments
ctx->reset_slot = ctx->slot;
ctx->reset_slot_start_ns = fd_log_wallclock(); /* safe to call from Rust */

memcpy( ctx->reset_hash, last_entry_hash, 32UL );
memcpy( ctx->hash, last_entry_hash, 32UL );
fd_memcpy( ctx->reset_hash, last_entry_hash, 32UL );
fd_memcpy( ctx->hash, last_entry_hash, 32UL );

ctx->signal_leader_change = signal_leader_change;

@@ -952,7 +954,7 @@ fd_ext_bank_load_account( void const * bank,
uchar * data,
ulong * data_sz );

CALLED_FROM_RUST static void
static void
publish_became_leader( fd_poh_ctx_t * ctx,
ulong slot,
ulong epoch ) {
@@ -1019,8 +1021,8 @@ publish_became_leader( fd_poh_ctx_t * ctx,
leader->limits.slot_max_vote_cost = ctx->limits.slot_max_vote_cost;
leader->limits.slot_max_write_cost_per_acct = ctx->limits.slot_max_write_cost_per_acct;

memcpy( leader->bundle->last_blockhash, ctx->reset_hash, 32UL );
memcpy( leader->bundle->tip_receiver_owner, tip_receiver_owner, 32UL );
fd_memcpy( leader->bundle->last_blockhash, ctx->reset_hash, 32UL );
fd_memcpy( leader->bundle->tip_receiver_owner, tip_receiver_owner, 32UL );

if( FD_UNLIKELY( leader->ticks_per_slot+leader->total_skipped_ticks>=MAX_SKIPPED_TICKS ) )
FD_LOG_ERR(( "Too many skipped ticks %lu for slot %lu, chain must halt", leader->ticks_per_slot+leader->total_skipped_ticks, slot ));
@@ -1036,7 +1038,7 @@ publish_became_leader( fd_poh_ctx_t * ctx,
is by the replay stage. See the notes in the long comment above for
more on how this works. */

CALLED_FROM_RUST void
void
fd_ext_poh_begin_leader( void const * bank,
ulong slot,
ulong epoch,
@@ -1262,8 +1264,8 @@ fd_ext_poh_reset( ulong completed_bank_slot, /* The slot that successful
}
ctx->expect_sequential_leader_slot = ULONG_MAX;

memcpy( ctx->reset_hash, reset_blockhash, 32UL );
memcpy( ctx->hash, reset_blockhash, 32UL );
fd_memcpy( ctx->reset_hash, reset_blockhash, 32UL );
fd_memcpy( ctx->hash, reset_blockhash, 32UL );
if( FD_LIKELY( parent_block_id!=NULL ) ) {
ctx->parent_slot = completed_bank_slot;
memcpy( ctx->parent_block_id, parent_block_id, 32UL );
@@ -1458,6 +1460,8 @@ after_credit( fd_poh_ctx_t * ctx,
int * charge_busy ) {
ctx->stem = stem;

if( FD_UNLIKELY( ctx->reset_slot==ULONG_MAX ) ) return;

FD_COMPILER_MFENCE();
if( FD_UNLIKELY( fd_poh_waiting_lock ) ) {
FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
@@ -1804,6 +1808,8 @@ before_frag( fd_poh_ctx_t * ctx,
return 0;
}

extern void fd_ext_poh_publish_leader_schedule( uchar * data, ulong data_len );

static inline void
during_frag( fd_poh_ctx_t * ctx,
ulong in_idx,
@@ -1838,6 +1844,22 @@ during_frag( fd_poh_ctx_t * ctx,
slot = fd_disco_poh_sig_slot( sig );
break;
}
case IN_KIND_REPLAY: {
// yech... this is sorta now in the right place... These flags really the right thing? Who came up with this?!
if( ctx->reset_slot==ULONG_MAX && fd_disco_replay_old_sig_flags( sig )==REPLAY_FLAG_INIT ) {
FD_LOG_NOTICE(( "init msg rx" ));
fd_poh_init_msg_t * init_msg = (fd_poh_init_msg_t *) fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
fd_ext_poh_initialize( init_msg->tick_duration_ns, init_msg->hashcnt_per_tick, init_msg->ticks_per_slot, init_msg->tick_height, init_msg->last_entry_hash, NULL );
ctx->skip_frag = 1;
} else if( fd_disco_replay_old_sig_flags( sig )==REPLAY_FLAG_PUBLISH_LEADER ) {
FD_LOG_NOTICE(( "fd_ext_poh_publish_leader_schedule %lu", sz ));
fd_ext_poh_publish_leader_schedule ( fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), sz );
}

return;
}
case IN_KIND_PACK_REPLAY:
return;
default:
FD_LOG_ERR(( "unexpected in_kind %d", ctx->in_kind[ in_idx ] ));
}
@@ -2289,15 +2311,20 @@ unprivileged_init( fd_topo_t * topo,
FD_COMPILER_MFENCE();
}

FD_LOG_INFO(( "PoH waiting to be initialized by Agave client... %lu %lu", fd_poh_waiting_lock, fd_poh_returned_lock ));
#if 0
FD_LOG_NOTICE(( "PoH waiting to be initialized by Agave client... %lu %lu", fd_poh_waiting_lock, fd_poh_returned_lock ));
#endif
FD_VOLATILE( fd_poh_global_ctx ) = ctx;
#if 0
FD_COMPILER_MFENCE();
for(;;) {
if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_waiting_lock ) ) ) break;
FD_SPIN_PAUSE();
}
#endif
FD_VOLATILE( fd_poh_waiting_lock ) = 0UL;
FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
#if 0
FD_COMPILER_MFENCE();
for(;;) {
if( FD_UNLIKELY( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
@@ -2306,6 +2333,7 @@ unprivileged_init( fd_topo_t * topo,
FD_COMPILER_MFENCE();

if( FD_UNLIKELY( ctx->reset_slot==ULONG_MAX ) ) FD_LOG_ERR(( "PoH was not initialized by Agave client" ));
#endif

fd_histf_join( fd_histf_new( ctx->begin_leader_delay, FD_MHIST_SECONDS_MIN( POH, BEGIN_LEADER_DELAY_SECONDS ),
FD_MHIST_SECONDS_MAX( POH, BEGIN_LEADER_DELAY_SECONDS ) ) );
@@ -2331,6 +2359,10 @@
ctx->in_kind[ i ] = IN_KIND_PACK;
} else if( !strcmp( link->name, "bank_poh" ) ) {
ctx->in_kind[ i ] = IN_KIND_BANK;
} else if( !strcmp( link->name, "replay_poh" ) ) {
ctx->in_kind[ i ] = IN_KIND_REPLAY;
} else if( !strcmp( link->name, "pack_replay" ) ) {
ctx->in_kind[ i ] = IN_KIND_PACK_REPLAY;
} else {
FD_LOG_ERR(( "unexpected input link name %s", link->name ));
}
21 changes: 20 additions & 1 deletion src/discof/replay/fd_replay_tile.c
@@ -1295,7 +1295,7 @@ prepare_new_block_execution( fd_replay_tile_ctx_t * ctx,

static void
init_poh( fd_replay_tile_ctx_t * ctx ) {
FD_LOG_INFO(( "sending init msg" ));
FD_LOG_NOTICE(( "sending init msg" ));
fd_replay_out_link_t * bank_out = &ctx->bank_out[ 0UL ];
fd_poh_init_msg_t * msg = fd_chunk_to_laddr( bank_out->mem, bank_out->chunk );
fd_epoch_bank_t * epoch_bank = fd_exec_epoch_ctx_epoch_bank( ctx->epoch_ctx );
@@ -2478,6 +2478,22 @@ privileged_init( fd_topo_t * topo,
}
}

static void
replay_publish_leader_schedule( void *_ctx, uchar *memory, ulong len )
{
fd_replay_tile_ctx_t * ctx = (fd_replay_tile_ctx_t *) _ctx;

FD_LOG_NOTICE(( "sending publish leader schedule %lu", len ));
fd_replay_out_link_t * bank_out = &ctx->bank_out[ 0UL ];
uchar * msg = fd_chunk_to_laddr( bank_out->mem, bank_out->chunk );
fd_memcpy(msg, memory, len);

ulong sig = fd_disco_replay_old_sig( ctx->slot_ctx->slot_bank.slot, REPLAY_FLAG_PUBLISH_LEADER );
fd_mcache_publish( bank_out->mcache, bank_out->depth, bank_out->seq, sig, bank_out->chunk, len, 0UL, 0UL, 0UL );
bank_out->chunk = fd_dcache_compact_next( bank_out->chunk, len, bank_out->chunk0, bank_out->wmark );
bank_out->seq = fd_seq_inc( bank_out->seq, 1UL );
}

static void
unprivileged_init( fd_topo_t * topo,
fd_topo_tile_t * tile ) {
@@ -2711,6 +2727,9 @@ unprivileged_init( fd_topo_t * topo,
}
fd_features_enable_one_offs(&ctx->epoch_ctx->features, one_off_features, (uint)tile->replay.enable_features_cnt, 0UL);

ctx->epoch_ctx->hooks.publish_leader_schedule = replay_publish_leader_schedule;
ctx->epoch_ctx->hooks.publish_leader_schedule_ctx = ctx;

ctx->epoch = fd_epoch_join( fd_epoch_new( epoch_mem, FD_VOTER_MAX ) );
ctx->forks = fd_forks_join( fd_forks_new( forks_mem, FD_BLOCK_MAX, 42UL ) );
ctx->ghost = fd_ghost_join( fd_ghost_new( ghost_mem, 42UL, FD_BLOCK_MAX ) );
2 changes: 2 additions & 0 deletions src/flamenco/runtime/context/fd_exec_epoch_ctx.c
@@ -237,6 +237,8 @@ fd_exec_epoch_ctx_from_prev( fd_exec_epoch_ctx_t * self,

self->runtime_public->features = prev->features; /* large memcpy */

fd_memcpy(&self->hooks, &prev->hooks, sizeof(self->hooks));

fd_epoch_bank_t * old_epoch_bank = fd_exec_epoch_ctx_epoch_bank( prev );

FD_SPAD_FRAME_BEGIN( runtime_spad ) {
10 changes: 10 additions & 0 deletions src/flamenco/runtime/context/fd_exec_epoch_ctx.h
@@ -23,6 +23,14 @@ typedef struct fd_exec_epoch_ctx_layout fd_exec_epoch_ctx_layout_t;

typedef struct fd_runtime_public fd_runtime_public_t;

typedef void (*FunctionPublishLeaderSchedule)(void *ctx, uchar *memory, ulong len);

struct fd_runtime_hooks_ctx {
FunctionPublishLeaderSchedule publish_leader_schedule;
void *publish_leader_schedule_ctx;
};
typedef struct fd_runtime_hooks_ctx fd_runtime_hooks_ctx_t;

struct __attribute__((aligned(64UL))) fd_exec_epoch_ctx {
ulong magic; /* ==FD_EXEC_EPOCH_CTX_MAGIC */

@@ -35,6 +43,8 @@ struct __attribute__((aligned(64UL))) fd_exec_epoch_ctx {
fd_runtime_public_t * runtime_public;
int constipate_root; /* Used for constipation in offline replay. */
ulong total_epoch_stake;

fd_runtime_hooks_ctx_t hooks;
};

#define FD_EXEC_EPOCH_CTX_ALIGN (alignof(fd_exec_epoch_ctx_t))
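
For context, a minimal sketch of how a hook of this shape is intended to be wired up and fired, based only on the declarations above and the registration/invocation this PR adds in fd_replay_tile.c and fd_runtime.c; the function and variable names below are illustrative placeholders, not part of the PR.

#include "fd_exec_epoch_ctx.h"

/* Illustrative consumer-side callback: receives the serialized leader-schedule
   message and would forward it downstream (e.g. over a tile link). */
static void
example_publish_leader_schedule( void * ctx, uchar * memory, ulong len ) {
  (void)ctx; (void)memory; (void)len;
}

/* Registration, mirroring what fd_replay_tile.c does in unprivileged_init(). */
static void
example_install_hook( fd_exec_epoch_ctx_t * epoch_ctx, void * tile_ctx ) {
  epoch_ctx->hooks.publish_leader_schedule     = example_publish_leader_schedule;
  epoch_ctx->hooks.publish_leader_schedule_ctx = tile_ctx;
}

/* The producer (fd_runtime.c) then fires the hook only if it has been set:
     if( epoch_ctx->hooks.publish_leader_schedule )
       epoch_ctx->hooks.publish_leader_schedule(
           epoch_ctx->hooks.publish_leader_schedule_ctx, msg, msg_len );       */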
103 changes: 55 additions & 48 deletions src/flamenco/runtime/fd_runtime.c
@@ -148,6 +148,58 @@ fd_runtime_update_slots_per_epoch( fd_exec_slot_ctx_t * slot_ctx,
fd_runtime_repartition_fresh_account_partitions( slot_ctx );
}

#define FIREDANCER_STAKE_WEIGHT_CNT (40200UL)
#define FIREDANCER_PACKET_SZ (40 + (FIREDANCER_STAKE_WEIGHT_CNT * 40))

static void
fd_runtime_send_leader_schedule(fd_exec_slot_ctx_t * slot_ctx FD_PARAM_UNUSED,
fd_spad_t * runtime_spad,
ulong slot FD_PARAM_UNUSED, // really? not needed?
fd_epoch_schedule_t *schedule,
fd_stake_weight_t * epoch_weights,
ulong stake_weight_cnt,
ulong epoch) {
ulong first_slot = schedule->first_normal_slot;
ulong slot_cnt = schedule->slots_per_epoch;

ulong remaining_stake = 0;

if( stake_weight_cnt>FIREDANCER_STAKE_WEIGHT_CNT ) {
fd_stake_weight_t * ptr = &epoch_weights[FIREDANCER_STAKE_WEIGHT_CNT];
fd_stake_weight_t * ePtr = &epoch_weights[stake_weight_cnt];
while (ptr < ePtr) {
remaining_stake += ptr->stake;
ptr++;
}
stake_weight_cnt=FIREDANCER_STAKE_WEIGHT_CNT;
}

uchar *memory = fd_spad_alloc_check( runtime_spad, 1U, FIREDANCER_PACKET_SZ );
if( FD_UNLIKELY( NULL == memory ) ) {
FD_LOG_ERR(("spad allocation failure setting up a firedancer_packet"));
}


*((ulong *) &memory[0]) = epoch;
Review comment (Contributor): @emwang-jump put this in a struct, might be cleaner to use that? src/discof/replay/fd_exec.h

Review comment (Contributor Author): I was unaware somebody had already written this code...

*((ulong *) &memory[8]) = stake_weight_cnt;
*((ulong *) &memory[16]) = first_slot;
*((ulong *) &memory[24]) = slot_cnt;
*((ulong *) &memory[32]) = remaining_stake;

fd_stake_weight_t * ptr = &epoch_weights[0];
fd_stake_weight_t * ePtr = &epoch_weights[stake_weight_cnt];
ulong i = 0;
while ( FD_LIKELY( ptr < ePtr) ) {
ulong offset = 40UL + (i++ * 40UL);
fd_memcpy(&memory[offset], ptr->key.uc, 32);
*((ulong *) &memory[offset + 32]) = ptr->stake;
ptr++;
}

if( FD_LIKELY( NULL != slot_ctx->epoch_ctx->hooks.publish_leader_schedule ) )
slot_ctx->epoch_ctx->hooks.publish_leader_schedule(slot_ctx->epoch_ctx->hooks.publish_leader_schedule_ctx, memory, 40UL + (stake_weight_cnt * 40UL));
}
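
For reference, below is a minimal standalone sketch of the packed layout that fd_runtime_send_leader_schedule() emits, and which the review comment above suggests replacing with the existing struct in src/discof/replay/fd_exec.h. The type and field names are illustrative placeholders, not the names used in fd_exec.h; only the offsets and sizes are taken from the serialization code, and an LP64 target (8-byte ulong) is assumed.

/* Illustrative mirror of the message built above: a 40-byte header followed by
   up to FIREDANCER_STAKE_WEIGHT_CNT (40200) entries of 40 bytes each.  The
   producer writes into an allocation requested with 1-byte alignment, so a
   consumer should memcpy fields out rather than assume 8-byte alignment. */
#include <assert.h>
#include <stdint.h>

typedef unsigned long ulong_t;            /* stand-in for Firedancer's ulong (assumes LP64) */

typedef struct {
  ulong_t epoch;                          /* bytes  0.. 7 */
  ulong_t staked_cnt;                     /* bytes  8..15: entries that follow, capped at 40200 */
  ulong_t start_slot;                     /* bytes 16..23: schedule->first_normal_slot */
  ulong_t slot_cnt;                       /* bytes 24..31: schedule->slots_per_epoch */
  ulong_t excluded_stake;                 /* bytes 32..39: stake summed over truncated entries */
} leader_sched_msg_hdr_t;                 /* placeholder name */

typedef struct {
  uint8_t pubkey[ 32 ];                   /* ptr->key.uc */
  ulong_t stake;                          /* ptr->stake  */
} leader_sched_msg_ent_t;                 /* placeholder name */

static_assert( sizeof(leader_sched_msg_hdr_t)==40, "header must be 40 bytes" );
static_assert( sizeof(leader_sched_msg_ent_t)==40, "each entry must be 40 bytes" );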

void
fd_runtime_update_leaders( fd_exec_slot_ctx_t * slot_ctx,
ulong slot,
@@ -180,6 +232,7 @@ fd_runtime_update_leaders( fd_exec_slot_ctx_t * slot_ctx,
FD_LOG_ERR(( "fd_stake_weights_by_node() failed" ));
}

fd_runtime_send_leader_schedule( slot_ctx, runtime_spad, slot, &schedule, epoch_weights, stake_weight_cnt, epoch );
/* Derive leader schedule */

FD_LOG_INFO(( "stake_weight_cnt=%lu slot_cnt=%lu", stake_weight_cnt, slot_cnt ));
@@ -3390,54 +3443,8 @@ fd_runtime_init_bank_from_genesis( fd_exec_slot_ctx_t * slot_ctx,
fd_vote_accounts_pair_t_mapnode_t *node = fd_vote_accounts_pair_t_map_acquire(vacc_pool);
FD_TEST( node );

/* FIXME: Reimplement when we try to fix genesis. */
// fd_vote_block_timestamp_t last_timestamp = {0};
// fd_pubkey_t node_pubkey = {0};
// FD_SPAD_FRAME_BEGIN( runtime_spad ) {
// /* Deserialize content */
// fd_vote_state_versioned_t vs[1];
// fd_bincode_decode_ctx_t decode = {
// .data = acc->account.data,
// .dataend = acc->account.data + acc->account.data_len,
// .valloc = fd_spad_virtual( runtime_spad )
// };
// int decode_err = fd_vote_state_versioned_decode( vs, &decode );
// if( FD_UNLIKELY( decode_err!=FD_BINCODE_SUCCESS ) ) {
// FD_LOG_WARNING(( "fd_vote_state_versioned_decode failed (%d)", decode_err ));
// return;
// }

// switch( vs->discriminant )
// {
// case fd_vote_state_versioned_enum_current:
// last_timestamp = vs->inner.current.last_timestamp;
// node_pubkey = vs->inner.current.node_pubkey;
// break;
// case fd_vote_state_versioned_enum_v0_23_5:
// last_timestamp = vs->inner.v0_23_5.last_timestamp;
// node_pubkey = vs->inner.v0_23_5.node_pubkey;
// break;
// case fd_vote_state_versioned_enum_v1_14_11:
// last_timestamp = vs->inner.v1_14_11.last_timestamp;
// node_pubkey = vs->inner.v1_14_11.node_pubkey;
// break;
// default:
// __builtin_unreachable();
// }

// } FD_SPAD_FRAME_END;

// fd_memcpy(node->elem.key.key, acc->key.key, sizeof(fd_pubkey_t));
// node->elem.stake = acc->account.lamports;
// node->elem.value = (fd_solana_vote_account_t){
// .lamports = acc->account.lamports,
// .node_pubkey = node_pubkey,
// .last_timestamp_ts = last_timestamp.timestamp,
// .last_timestamp_slot = last_timestamp.slot,
// .owner = acc->account.owner,
// .executable = acc->account.executable,
// .rent_epoch = acc->account.rent_epoch
// };
fd_memcpy(node->elem.key.key, acc->key.key, sizeof(fd_pubkey_t));
node->elem.value = acc->account;

fd_vote_accounts_pair_t_map_insert( vacc_pool, &vacc_root, node );
