Changes from all commits

Commits (18)
1dab670  core/types, params: add `InclusionList` type (jihoonsong, Jul 28, 2025)
7ce64f1  beacon/engine, eth/catalyst, miner: add `GetInclusionListV1` (jihoonsong, Jun 16, 2025)
3e487c0  beacon/engine: add `INCLUSION_LIST_UNSATISFIED` to engine API errors (jihoonsong, Nov 26, 2024)
d03a817  params: support eip7805 fork (jihoonsong, Apr 8, 2025)
169d4a0  eth/catalyst: make `forkchoiceUpdatedV3` and `newPayloadV4` available… (jihoonsong, May 19, 2025)
1986e81  eth/catalyst: add `engine_newPayloadV5` (jihoonsong, Nov 28, 2024)
dffe6d3  beacon/engine, core, eth/catalyst: verify if a block satisfies the in… (jihoonsong, Feb 25, 2025)
ed28133  eth/catalyst: add an unit test for verifying new payload against incl… (jihoonsong, Nov 28, 2024)
3111ae8  miner: add `inclusionList` to `BuildPayloadArgs` and `generateParams` (jihoonsong, Nov 30, 2024)
8ddd60a  miner: add `inclusionListTxs` to `environment` (jihoonsong, Nov 30, 2024)
526baf2  miner: include inclusion list transactions when building a payload (jihoonsong, Nov 30, 2024)
8b1cf69  miner: add a public method to notify the inclusion list to payload (jihoonsong, Nov 30, 2024)
4ef576d  eth/catalyst: add `peek` method to `payloadQueue` to return `Miner.Pa… (jihoonsong, Nov 30, 2024)
5d97794  beacon/engine, eth/catalyst, miner: add `engine_forkchoiceUpdatedV4` (jihoonsong, Nov 30, 2024)
c146cf4  eth/catalyst: add `engine_forkchoiceUpdatedWithWitnessV4` (jihoonsong, Aug 15, 2025)
358685b  eth/catalyst: modify `getPayloadV5` to accept `PayloadV4` (jihoonsong, Oct 1, 2025)
508ba1b  eth/catalyst: add an unit test for updating payload with inclusion list (jihoonsong, Nov 30, 2024)
9d17826  beacon/engine, eth/catalyst: return UnknownParent error when parent d… (jihoonsong, Aug 14, 2025)
5 changes: 5 additions & 0 deletions beacon/engine/errors.go
@@ -74,13 +74,18 @@ var (
// - newPayloadV1: if the payload was accepted, but not processed (side chain)
ACCEPTED = "ACCEPTED"

// INCLUSION_LIST_UNSATISFIED is returned by the engine API in the following calls:
// - newPayloadV5: if the payload failed to satisfy the inclusion list constraints
INCLUSION_LIST_UNSATISFIED = "INCLUSION_LIST_UNSATISFIED"

GenericServerError = &EngineAPIError{code: -32000, msg: "Server error"}
UnknownPayload = &EngineAPIError{code: -38001, msg: "Unknown payload"}
InvalidForkChoiceState = &EngineAPIError{code: -38002, msg: "Invalid forkchoice state"}
InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"}
TooLargeRequest = &EngineAPIError{code: -38004, msg: "Too large request"}
InvalidParams = &EngineAPIError{code: -32602, msg: "Invalid parameters"}
UnsupportedFork = &EngineAPIError{code: -38005, msg: "Unsupported fork"}
UnknownParent = &EngineAPIError{code: -38006, msg: "Unknown parent"}

STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}
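
The new status string sits next to the existing VALID/INVALID/SYNCING/ACCEPTED constants, so a caller of `engine_newPayloadV5` can tell an inclusion-list failure apart from an ordinarily invalid payload. A minimal sketch of such a caller follows; `handleNewPayloadStatus` and its branching are illustrative assumptions, only the status constants and `PayloadStatusV1` come from this file.

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
)

// handleNewPayloadStatus is a hypothetical helper showing how a caller of
// engine_newPayloadV5 might branch on the returned status string.
func handleNewPayloadStatus(status engine.PayloadStatusV1) error {
	switch status.Status {
	case engine.VALID:
		return nil
	case engine.INCLUSION_LIST_UNSATISFIED:
		// The payload executed, but it omits transactions demanded by the
		// EIP-7805 inclusion list.
		return fmt.Errorf("payload does not satisfy the inclusion list")
	case engine.INVALID:
		return fmt.Errorf("invalid payload")
	default:
		// SYNCING, ACCEPTED, etc. are left to the caller's retry logic.
		return fmt.Errorf("payload not yet processed: %s", status.Status)
	}
}
```
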
6 changes: 6 additions & 0 deletions beacon/engine/gen_blockparams.go

Some generated files are not rendered by default.

11 changes: 7 additions & 4 deletions beacon/engine/types.go
@@ -50,6 +50,7 @@ var (
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
// fields: blobGasUsed and excessBlobGas.
PayloadV3 PayloadVersion = 0x3
PayloadV4 PayloadVersion = 0x4
)

//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
@@ -62,6 +63,7 @@ type PayloadAttributes struct {
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
InclusionList types.InclusionList `json:"inclusionListTransactions"`
}

// JSON type overrides for PayloadAttributes.
@@ -237,8 +239,8 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// and that the blockhash of the constructed block matches the parameters. Nil
// Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data.
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests)
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, inclusionListTxs []*types.Transaction) (*types.Block, error) {
block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests, inclusionListTxs)
if err != nil {
return nil, err
}
@@ -251,7 +253,7 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, inclusionListTxs []*types.Transaction) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
@@ -317,7 +319,8 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
WithWitness(data.ExecutionWitness),
WithWitness(data.ExecutionWitness).
WithInclusionListTransactions(inclusionListTxs),
nil
}

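
With the widened signature, the decoded inclusion-list transactions travel together with the payload into block construction and end up attached to the block via `WithInclusionListTransactions` in the hunk above. A short usage sketch under that assumption; `buildBlockFromPayload` is a hypothetical wrapper, not part of the PR.

```go
package example

import (
	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// buildBlockFromPayload is a hypothetical wrapper around the updated
// ExecutableDataToBlock signature.
func buildBlockFromPayload(data engine.ExecutableData, versionedHashes []common.Hash,
	beaconRoot *common.Hash, requests [][]byte, inclusionListTxs []*types.Transaction) (*types.Block, error) {

	block, err := engine.ExecutableDataToBlock(data, versionedHashes, beaconRoot, requests, inclusionListTxs)
	if err != nil {
		return nil, err
	}
	// The inclusion-list transactions are now carried on the block itself,
	// so later stages (core/blockchain.go below) can presumably check
	// whether the block satisfies them.
	return block, nil
}
```
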
66 changes: 34 additions & 32 deletions core/blockchain.go
@@ -1741,7 +1741,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
defer bc.chainmu.Unlock()

_, n, err := bc.insertChain(chain, true, false) // No witness collection for mass inserts (would get super large)
_, n, _, err := bc.insertChain(chain, true, false) // No witness collection for mass inserts (would get super large)
return n, err
}

@@ -1753,10 +1753,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness bool) (*stateless.Witness, int, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness bool) (*stateless.Witness, int, *bool, error) {
// If the chain is terminating, don't even bother starting up.
if bc.insertStopped() {
return nil, 0, nil
return nil, 0, nil, nil
}

if atomic.AddInt32(&bc.blockProcCounter, 1) == 1 {
@@ -1821,7 +1821,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
for block != nil && bc.skipBlock(err, it) {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return nil, it.index, err
return nil, it.index, nil, err
}
lastCanon = block

@@ -1840,15 +1840,15 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// We're post-merge and the parent is pruned, try to recover the parent state
log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
_, err := bc.recoverAncestors(block, makeWitness)
return nil, it.index, err
return nil, it.index, nil, err
}
// Some other error(except ErrKnownBlock) occurred, abort.
// ErrKnownBlock is allowed here since some known blocks
// still need re-execution to generate snapshots that are missing
case err != nil && !errors.Is(err, ErrKnownBlock):
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return nil, it.index, err
return nil, it.index, nil, err
}
// Track the singleton witness from this chain insertion (if any)
var witness *stateless.Witness
@@ -1889,7 +1889,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
"hash", block.Hash(), "number", block.NumberU64())
}
if err := bc.writeKnownBlock(block); err != nil {
return nil, it.index, err
return nil, it.index, nil, err
}
stats.processed++
if bc.logger != nil && bc.logger.OnSkippedBlock != nil {
@@ -1913,7 +1913,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
start := time.Now()
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
if err != nil {
return nil, it.index, err
return nil, it.index, nil, err
}
// Report the import stats before returning the various results
stats.processed++
@@ -1934,7 +1934,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// After merge we expect few side chains. Simply count
// all blocks the CL gives us for GC processing time
bc.gcproc += res.procTime
return witness, it.index, nil // Direct block insertion of a single block
return witness, it.index, res.inclusionListSatisfied, nil // Direct block insertion of a single block
}
switch res.status {
case CanonStatTy:
@@ -1965,16 +1965,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
}

stats.ignored += it.remaining()
return witness, it.index, err
return witness, it.index, nil, err
}

// blockProcessingResult is a summary of block processing
// used for updating the stats.
type blockProcessingResult struct {
usedGas uint64
procTime time.Duration
status WriteStatus
witness *stateless.Witness
usedGas uint64
procTime time.Duration
status WriteStatus
witness *stateless.Witness
inclusionListSatisfied *bool
}

func (bpr *blockProcessingResult) Witness() *stateless.Witness {
@@ -2179,10 +2180,11 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
chainMgaspsMeter.Update(time.Duration(mgasps))

return &blockProcessingResult{
usedGas: res.GasUsed,
procTime: proctime,
status: status,
witness: witness,
usedGas: res.GasUsed,
procTime: proctime,
status: status,
witness: witness,
inclusionListSatisfied: res.InclusionListSatisfied,
}, nil
}

@@ -2193,7 +2195,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
// insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, error) {
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, *bool, error) {
var current = bc.CurrentBlock()

// The first sidechain block error is already verified to be ErrPrunedAncestor.
@@ -2222,13 +2224,13 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return nil, it.index, errors.New("sidechain ghost-state attack")
return nil, it.index, nil, errors.New("sidechain ghost-state attack")
}
}
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block); err != nil {
return nil, it.index, err
return nil, it.index, nil, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -2245,7 +2247,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
for parent != nil && !bc.HasState(parent.Root) {
if bc.stateRecoverable(parent.Root) {
if err := bc.triedb.Recover(parent.Root); err != nil {
return nil, 0, err
return nil, 0, nil, err
}
break
}
@@ -2255,7 +2257,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return nil, it.index, errors.New("missing parent")
return nil, it.index, nil, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -2274,23 +2276,23 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, err := bc.insertChain(blocks, true, false); err != nil {
return nil, 0, err
if _, _, _, err := bc.insertChain(blocks, true, false); err != nil {
return nil, 0, nil, err
}
blocks, memory = blocks[:0], 0

// If the chain is terminating, stop processing blocks
if bc.insertStopped() {
log.Debug("Abort during blocks processing")
return nil, 0, nil
return nil, 0, nil, nil
}
}
}
if len(blocks) > 0 {
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, true, makeWitness)
}
return nil, 0, nil
return nil, 0, nil, nil
}

// recoverAncestors finds the closest ancestor with available state and re-execute
@@ -2337,7 +2339,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
} else {
b = bc.GetBlock(hashes[i], numbers[i])
}
if _, _, err := bc.insertChain(types.Blocks{b}, false, makeWitness && i == 0); err != nil {
if _, _, _, err := bc.insertChain(types.Blocks{b}, false, makeWitness && i == 0); err != nil {
return b.ParentHash(), err
}
}
@@ -2564,14 +2566,14 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error
// The key difference between the InsertChain is it won't do the canonical chain
// updating. It relies on the additional SetCanonical call to finalize the entire
// procedure.
func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block, makeWitness bool) (*stateless.Witness, error) {
func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block, makeWitness bool) (*stateless.Witness, *bool, error) {
if !bc.chainmu.TryLock() {
return nil, errChainStopped
return nil, nil, errChainStopped
}
defer bc.chainmu.Unlock()

witness, _, err := bc.insertChain(types.Blocks{block}, false, makeWitness)
return witness, err
witness, _, inclusionListSatisfied, err := bc.insertChain(types.Blocks{block}, false, makeWitness)
return witness, inclusionListSatisfied, err
}

// SetCanonical rewinds the chain to set the new head block as the specified
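
`insertChain`, `insertSideChain`, `ProcessBlock` and `InsertBlockWithoutSetHead` all gain an extra `*bool` that surfaces the inclusion-list verdict to callers, presumably nil when the check does not apply (e.g. before the fork) and true/false otherwise. The sketch below shows how an Engine API caller might consume it; `insertAndClassify` and its error handling are assumptions, only the `InsertBlockWithoutSetHead` signature and the status constants come from this diff.

```go
package example

import (
	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// insertAndClassify is a hypothetical consumer of the widened
// InsertBlockWithoutSetHead return values.
func insertAndClassify(bc *core.BlockChain, block *types.Block) (string, error) {
	_, satisfied, err := bc.InsertBlockWithoutSetHead(block, false)
	if err != nil {
		// Real code would distinguish error kinds (unknown parent, chain
		// stopped, genuinely invalid block, ...).
		return "", err
	}
	if satisfied != nil && !*satisfied {
		// The block imported cleanly, but it misses transactions required
		// by the EIP-7805 inclusion list.
		return engine.INCLUSION_LIST_UNSATISFIED, nil
	}
	return engine.VALID, nil
}
```
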
2 changes: 1 addition & 1 deletion core/blockchain_test.go
@@ -3456,7 +3456,7 @@ func testSetCanonical(t *testing.T, scheme string) {
gen.AddTx(tx)
})
for _, block := range side {
_, err := chain.InsertBlockWithoutSetHead(block, false)
_, _, err := chain.InsertBlockWithoutSetHead(block, false)
if err != nil {
t.Fatalf("Failed to insert into chain: %v", err)
}