chore: add benchmarks for varied forms of lookups (#2142)
This PR introduces benchmarks for varied methods of database lookups for
blocks and their included transactions, namely:
1. `header_and_tx_lookup`
2. `full_block_lookup`
3. `multi_get_lookup`

We can use these benchmarks to determine which form of lookup is best for this
use case, and then implement it.
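
At a high level, the three strategies differ in how many reads they issue per block. A rough, schematic sketch (the `ToyDb` type and its methods below are purely illustrative stand-ins; the real helpers live in `db_lookup_times_utils` in this PR):

```rust
use std::collections::HashMap;

/// Purely illustrative stand-in for the on-chain RocksDB columns.
struct ToyDb {
    compressed_blocks: HashMap<u32, Vec<u8>>, // height -> compressed block (header + tx ids)
    transactions: HashMap<[u8; 32], Vec<u8>>, // tx id -> serialized transaction
    full_blocks: HashMap<u32, Vec<u8>>,       // height -> fully denormalized block
}

impl ToyDb {
    // 1. header_and_tx_lookup: read the compressed block, then one read per transaction.
    fn header_and_tx(&self, height: u32, tx_ids: &[[u8; 32]]) -> Option<Vec<Vec<u8>>> {
        self.compressed_blocks.get(&height)?;
        tx_ids.iter().map(|id| self.transactions.get(id).cloned()).collect()
    }

    // 2. full_block_lookup: a single read of the denormalized block.
    fn full_block(&self, height: u32) -> Option<Vec<u8>> {
        self.full_blocks.get(&height).cloned()
    }

    // 3. multi_get_lookup: read the compressed block, then fetch all of its
    //    transactions with one batched multi-get instead of per-key reads.
    fn multi_get(&self, height: u32, tx_ids: &[[u8; 32]]) -> Option<Vec<Option<Vec<u8>>>> {
        self.compressed_blocks.get(&height)?;
        Some(tx_ids.iter().map(|id| self.transactions.get(id).cloned()).collect())
    }
}
```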

It is suggested to alter the bench matrix before running, and to use a beefy
machine to do so.

Run it with `cargo bench --bench db_lookup_times`
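
The matrix itself is defined in `benches/benches/db_lookup_times_utils/matrix.rs` (added below). As a sketch, widening it for a heavier run could look like this; the larger values are only illustrative:

```rust
// benches/benches/db_lookup_times_utils/matrix.rs
// (illustrative values; the PR itself ships [10, 100] blocks x [100, 1000] txs)
pub const BLOCK_COUNT_MATRIX: [u32; 3] = [10, 100, 1_000];
pub const TX_COUNT_MATRIX: [u32; 3] = [100, 1_000, 10_000];

pub fn matrix() -> impl Iterator<Item = (u32, u32)> {
    BLOCK_COUNT_MATRIX.iter().flat_map(|&block_count| {
        TX_COUNT_MATRIX
            .iter()
            .map(move |&tx_count| (block_count, tx_count))
    })
}
```

A single group should also be runnable on its own by filtering on its name, e.g. `cargo bench --bench db_lookup_times -- full_block`.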


## Linked Issues/PRs
<!-- List of related issues/PRs -->
- #2023 

## Description
<!-- List of detailed changes -->
- Moved the implementation of `KeyValueMutate` for RocksDB out of the test module.
- All benchmarks live in `benches/benches/db_lookup_times.rs`.
- Stores `FullFuelBlocks` alongside the `CompressedBlocks` and `Transactions`
tables so that single header/tx lookup times in other usages are not affected.
- Implemented various traits for the `FullFuelBlocks` table.
- The [notion
doc](https://www.notion.so/fuellabs/DB-Analysis-for-Lookups-f62aa0cd5cdf4c91b9575b8fb45683f7)
has more information about the analysis we did.
- Each benchmark randomizes the block height that is queried *per iteration*
to ensure randomness and a fair comparison (see the sketch after this list).
- RocksDB caching has been disabled for these benchmarks.
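
To illustrate the per-iteration randomization, here is a minimal, generic Criterion sketch (not this PR's code; it benches a plain `HashMap` instead of the real database, and all names are made up):

```rust
use criterion::{criterion_group, criterion_main, Criterion};
use rand::{thread_rng, Rng};
use std::collections::HashMap;

fn random_key_lookup(c: &mut Criterion) {
    let mut rng = thread_rng();
    // stand-in store: 1000 "blocks" keyed by height
    let store: HashMap<u32, u64> = (0..1_000u32).map(|k| (k, u64::from(k) * 2)).collect();

    c.bench_function("random_key_lookup", |b| {
        b.iter(|| {
            // drawing the key inside the measured closure means every iteration
            // hits a different entry, so no single hot key dominates the timing
            let key = rng.gen_range(0..1_000u32);
            assert!(store.get(&key).is_some());
        });
    });
}

criterion_group!(benches, random_key_lookup);
criterion_main!(benches);
```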

## Checklist
- [x] Breaking changes are clearly marked as such in the PR description
and changelog
- [x] New behavior is reflected in tests
- [x] [The specification](https://github.com/FuelLabs/fuel-specs/)
matches the implemented behavior (link update PR if changes are needed)

### Before requesting review
- [x] I have reviewed the code myself
- [x] I have created follow-up issues caused by this PR and linked them
here

### After merging, notify other teams

[Add or remove entries as needed]

- [ ] [Rust SDK](https://github.com/FuelLabs/fuels-rs/)
- [ ] [Sway compiler](https://github.com/FuelLabs/sway/)
- [ ] [Platform
documentation](https://github.com/FuelLabs/devrel-requests/issues/new?assignees=&labels=new+request&projects=&template=NEW-REQUEST.yml&title=%5BRequest%5D%3A+)
(for out-of-organization contributors, the person merging the PR will do
this)
- [ ] Someone else?

---------

Co-authored-by: Mitchell Turner <[email protected]>
rymnc and MitchTurner authored Sep 3, 2024
1 parent f3537fc commit 09cf975
Showing 12 changed files with 450 additions and 37 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -9,7 +9,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
### Added
- [2135](https://github.com/FuelLabs/fuel-core/pull/2135): Added metrics logging for number of blocks served over the p2p req/res protocol.
- [2155](https://github.com/FuelLabs/fuel-core/pull/2155): Added trait declaration for block committer data

- [2142](https://github.com/FuelLabs/fuel-core/pull/2142): Added benchmarks for varied forms of db lookups to assist in optimizations.

## [Version 0.35.0]

1 change: 1 addition & 0 deletions Cargo.lock


5 changes: 5 additions & 0 deletions benches/Cargo.toml
@@ -34,6 +34,7 @@ p256 = { version = "0.13", default-features = false, features = [
    "digest",
    "ecdsa",
] }
postcard = { workspace = true }
primitive-types = { workspace = true, default-features = false }
quanta = "0.12"
rand = { workspace = true }
@@ -66,3 +67,7 @@ name = "block_target_gas"
[[bench]]
harness = false
name = "transaction_throughput"

[[bench]]
harness = false
name = "db_lookup_times"
96 changes: 96 additions & 0 deletions benches/benches/db_lookup_times.rs
@@ -0,0 +1,96 @@
use crate::db_lookup_times_utils::{
    matrix::matrix,
    utils::{
        get_full_block,
        get_random_block_height,
        multi_get_block,
        open_db,
        open_raw_rocksdb,
    },
};
use criterion::{
    criterion_group,
    criterion_main,
    Criterion,
};
use db_lookup_times_utils::seed::{
    seed_compressed_blocks_and_transactions_matrix,
    seed_full_block_matrix,
};
use fuel_core_storage::transactional::AtomicView;
use rand::thread_rng;

mod db_lookup_times_utils;

pub fn header_and_tx_lookup(c: &mut Criterion) {
    let method = "header_and_tx";
    let mut rng = thread_rng();

    seed_compressed_blocks_and_transactions_matrix(method);
    let mut group = c.benchmark_group(method);

    for (block_count, tx_count) in matrix() {
        let database = open_db(block_count, tx_count, method);
        let view = database.latest_view().unwrap();
        group.bench_function(format!("{block_count}/{tx_count}"), |b| {
            b.iter(|| {
                let height = get_random_block_height(&mut rng, block_count);
                let block = view.get_full_block(&height);
                assert!(block.is_ok());
                assert!(block.unwrap().is_some());
            });
        });
    }

    group.finish();
}

pub fn multi_get_lookup(c: &mut Criterion) {
    let method = "multi_get";
    let mut rng = thread_rng();

    seed_compressed_blocks_and_transactions_matrix(method);
    let mut group = c.benchmark_group(method);

    for (block_count, tx_count) in matrix() {
        let database = open_raw_rocksdb(block_count, tx_count, method);
        group.bench_function(format!("{block_count}/{tx_count}"), |b| {
            b.iter(|| {
                let height = get_random_block_height(&mut rng, block_count);
                assert!(multi_get_block(&database, height).is_ok());
            });
        });
    }

    group.finish();
}

pub fn full_block_lookup(c: &mut Criterion) {
    let method = "full_block";
    let mut rng = thread_rng();

    seed_full_block_matrix();
    let mut group = c.benchmark_group(method);

    for (block_count, tx_count) in matrix() {
        let database = open_db(block_count, tx_count, method);
        let view = database.latest_view().unwrap();
        group.bench_function(format!("{block_count}/{tx_count}"), |b| {
            b.iter(|| {
                let height = get_random_block_height(&mut rng, block_count);
                let full_block = get_full_block(&view, &height);
                assert!(full_block.is_ok());
                assert!(full_block.unwrap().is_some());
            });
        });
    }

    group.finish();
}

criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(100_000).measurement_time(std::time::Duration::from_secs(100));
    targets = header_and_tx_lookup, multi_get_lookup, full_block_lookup
}
criterion_main!(benches);
10 changes: 10 additions & 0 deletions benches/benches/db_lookup_times_utils/matrix.rs
@@ -0,0 +1,10 @@
pub const BLOCK_COUNT_MATRIX: [u32; 2] = [10, 100];
pub const TX_COUNT_MATRIX: [u32; 2] = [100, 1000];

pub fn matrix() -> impl Iterator<Item = (u32, u32)> {
    BLOCK_COUNT_MATRIX.iter().flat_map(|&block_count| {
        TX_COUNT_MATRIX
            .iter()
            .map(move |&tx_count| (block_count, tx_count))
    })
}
3 changes: 3 additions & 0 deletions benches/benches/db_lookup_times_utils/mod.rs
@@ -0,0 +1,3 @@
pub mod matrix;
pub mod seed;
pub mod utils;
151 changes: 151 additions & 0 deletions benches/benches/db_lookup_times_utils/seed.rs
@@ -0,0 +1,151 @@
use crate::db_lookup_times_utils::{
    matrix::matrix,
    utils::{
        chain_id,
        open_raw_rocksdb,
    },
};
use fuel_core::{
    database::database_description::on_chain::OnChain,
    state::rocks_db::RocksDb,
};
use fuel_core_storage::{
    column::Column,
    kv_store::{
        KeyValueMutate,
        Value,
    },
};
use fuel_core_types::{
    blockchain::{
        block::{
            Block,
            PartialFuelBlock,
        },
        header::{
            ConsensusHeader,
            PartialBlockHeader,
        },
        primitives::Empty,
    },
    fuel_tx::{
        Transaction,
        UniqueIdentifier,
    },
    fuel_types::BlockHeight,
};

pub fn seed_compressed_blocks_and_transactions_matrix(method: &str) {
    for (block_count, tx_count) in matrix() {
        let mut database = open_raw_rocksdb(block_count, tx_count, method);
        let _ =
            seed_compressed_blocks_and_transactions(&mut database, block_count, tx_count);
    }
}

pub fn seed_full_block_matrix() {
    for (block_count, tx_count) in matrix() {
        let mut database = open_raw_rocksdb(block_count, tx_count, "full_block");
        seed_full_blocks(&mut database, block_count, tx_count);
    }
}

fn generate_bench_block(height: u32, tx_count: u32) -> Block {
    let header = PartialBlockHeader {
        application: Default::default(),
        consensus: ConsensusHeader::<Empty> {
            height: BlockHeight::from(height),
            ..Default::default()
        },
    };
    let txes = generate_bench_transactions(tx_count);
    let block = PartialFuelBlock::new(header, txes);
    block.generate(&[], Default::default()).unwrap()
}

fn generate_bench_transactions(tx_count: u32) -> Vec<Transaction> {
    let mut txes = vec![];
    for _ in 0..tx_count {
        txes.push(Transaction::default_test_tx());
    }
    txes
}

fn height_key(block_height: u32) -> Vec<u8> {
    BlockHeight::from(block_height).to_bytes().to_vec()
}

fn insert_compressed_block(
    database: &mut RocksDb<OnChain>,
    height: u32,
    tx_count: u32,
) -> Block {
    let block = generate_bench_block(height, tx_count);

    let compressed_block = block.compress(&chain_id());
    let height_key = height_key(height);

    let raw_compressed_block = postcard::to_allocvec(&compressed_block).unwrap().to_vec();
    let raw_transactions = block
        .transactions()
        .iter()
        .map(|tx| {
            (
                tx.id(&chain_id()),
                postcard::to_allocvec(tx).unwrap().to_vec(),
            )
        })
        .collect::<Vec<_>>();

    // 1. insert into CompressedBlocks table
    database
        .put(
            height_key.as_slice(),
            Column::FuelBlocks,
            Value::new(raw_compressed_block),
        )
        .unwrap();
    // 2. insert into Transactions table
    for (tx_id, tx) in raw_transactions {
        database
            .put(tx_id.as_slice(), Column::Transactions, Value::new(tx))
            .unwrap();
    }

    block
}

fn insert_full_block(database: &mut RocksDb<OnChain>, height: u32, tx_count: u32) {
    // we seed compressed blocks and transactions to not affect individual
    // lookup times
    let block = insert_compressed_block(database, height, tx_count);

    let height_key = height_key(height);
    let raw_full_block = postcard::to_allocvec(&block).unwrap().to_vec();

    database
        .put(
            height_key.as_slice(),
            Column::FullFuelBlocks,
            Value::new(raw_full_block),
        )
        .unwrap();
}

fn seed_compressed_blocks_and_transactions(
    database: &mut RocksDb<OnChain>,
    block_count: u32,
    tx_count: u32,
) -> Vec<Block> {
    let mut blocks = vec![];
    for block_number in 0..block_count {
        blocks.push(insert_compressed_block(database, block_number, tx_count));
    }
    blocks
}

fn seed_full_blocks(database: &mut RocksDb<OnChain>, block_count: u32, tx_count: u32) {
    for block_number in 0..block_count {
        insert_full_block(database, block_number, tx_count);
    }
}
100 changes: 100 additions & 0 deletions benches/benches/db_lookup_times_utils/utils.rs
@@ -0,0 +1,100 @@
use anyhow::anyhow;
use fuel_core::{
    database::{
        database_description::on_chain::OnChain,
        Database,
    },
    state::{
        historical_rocksdb::StateRewindPolicy,
        rocks_db::RocksDb,
        IterableKeyValueView,
    },
};
use fuel_core_storage::{
    column::Column,
    kv_store::{
        KeyValueInspect,
        StorageColumn,
    },
    tables::FullFuelBlocks,
    StorageAsRef,
};
use fuel_core_types::{
    blockchain::block::{
        Block,
        CompressedBlock,
    },
    fuel_tx::Transaction,
    fuel_types::{
        BlockHeight,
        ChainId,
    },
};
use rand::{
    rngs::ThreadRng,
    Rng,
};
use std::{
    borrow::Cow,
    path::Path,
};

pub fn get_random_block_height(rng: &mut ThreadRng, block_count: u32) -> BlockHeight {
    BlockHeight::from(rng.gen_range(0..block_count))
}

pub fn open_db(block_count: u32, tx_count: u32, method: &str) -> Database {
    Database::open_rocksdb(
        Path::new(format!("./{block_count}/{method}/{tx_count}").as_str()),
        None, // no caching
        StateRewindPolicy::NoRewind,
    )
    .unwrap()
}

pub fn open_raw_rocksdb(
    block_count: u32,
    tx_count: u32,
    method: &str,
) -> RocksDb<OnChain> {
    RocksDb::default_open(
        Path::new(format!("./{block_count}/{method}/{tx_count}").as_str()),
        None,
    )
    .unwrap()
}

pub fn chain_id() -> ChainId {
    ChainId::default()
}

pub fn get_full_block(
    view: &IterableKeyValueView<Column>,
    height: &BlockHeight,
) -> anyhow::Result<Option<Block>> {
    let db_block = view.storage::<FullFuelBlocks>().get(height)?;
    if let Some(block) = db_block {
        Ok(Some(Cow::into_owned(block)))
    } else {
        Ok(None)
    }
}

pub fn multi_get_block(
    database: &RocksDb<OnChain>,
    height: BlockHeight,
) -> anyhow::Result<()> {
    let height_key = height.to_bytes();

    let raw_block = database
        .get(&height_key, Column::FuelBlocks)?
        .ok_or(anyhow!("empty raw block"))?;
    let block: CompressedBlock = postcard::from_bytes(raw_block.as_slice())?;
    let tx_ids = block.transactions().iter();
    let raw_txs = database.multi_get(Column::Transactions.id(), tx_ids)?;
    for raw_tx in raw_txs.iter().flatten() {
        let _: Transaction = postcard::from_bytes(raw_tx.as_slice())?;
    }

    Ok(())
}
