Overview
Currently, we consolidate all blocks from the safe block up to the head block in one go. This can pull a large number of blocks into memory at once, increasing memory overhead. We should reconsider this pattern, potentially processing the blocks in batches instead (see the sketch after the snippet below).
rollup-node/crates/chain-orchestrator/src/lib.rs
Lines 1084 to 1107 in f3674d8
```rust
} else {
    let start_block_number = safe_block_number + 1;
    // TODO: Make fetching parallel but ensure concurrency limits are respected.
    let mut blocks_to_validate = vec![];
    for block_number in start_block_number..=head_block_number {
        let block = self
            .l2_client
            .get_block_by_number(block_number.into())
            .full()
            .await?
            .ok_or(ChainOrchestratorError::L2BlockNotFoundInL2Client(block_number))?
            .into_consensus()
            .map_transactions(|tx| tx.inner.into_inner());
        blocks_to_validate.push(block);
    }
    self.validate_l1_messages(&blocks_to_validate).await?;
    self.database
        .update_l1_messages_from_l2_blocks(
            blocks_to_validate.into_iter().map(|b| (&b).into()).collect(),
        )
        .await?;
};
```
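One possible direction, as a rough sketch only: walk the range in fixed-size chunks so that at most `BATCH_SIZE` blocks are held in memory at a time. The `BATCH_SIZE` constant and the chunked loop below are assumptions for illustration, and this also assumes `validate_l1_messages` and `update_l1_messages_from_l2_blocks` are safe to call per batch rather than once over the whole range, which needs confirming.

```rust
// Sketch: fetch, validate, and persist the safe..=head range in chunks so only
// BATCH_SIZE blocks are resident at once. Names mirror the snippet above;
// BATCH_SIZE is a hypothetical tuning knob, not an existing constant.
const BATCH_SIZE: u64 = 100;

let mut batch_start = safe_block_number + 1;
while batch_start <= head_block_number {
    let batch_end = (batch_start + BATCH_SIZE - 1).min(head_block_number);

    let mut blocks_to_validate = Vec::with_capacity(BATCH_SIZE as usize);
    for block_number in batch_start..=batch_end {
        let block = self
            .l2_client
            .get_block_by_number(block_number.into())
            .full()
            .await?
            .ok_or(ChainOrchestratorError::L2BlockNotFoundInL2Client(block_number))?
            .into_consensus()
            .map_transactions(|tx| tx.inner.into_inner());
        blocks_to_validate.push(block);
    }

    // Validate and persist this batch before fetching the next one, so the
    // previous batch's blocks can be dropped.
    self.validate_l1_messages(&blocks_to_validate).await?;
    self.database
        .update_l1_messages_from_l2_blocks(
            blocks_to_validate.into_iter().map(|b| (&b).into()).collect(),
        )
        .await?;

    batch_start = batch_end + 1;
}
```

This keeps peak memory bounded by the batch size at the cost of more round trips; the batch size could later be tuned or combined with bounded-concurrency fetching per the existing TODO.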