Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,13 @@ pub struct Config {
/// Must stay within db_write_buffer_size_mb to avoid mid-batch flushes.
pub initial_sync_batch_size: usize,

/// Store index and filter blocks inside the block cache (default: false).
/// When enabled, bounds memory but allows eviction under pressure.
/// When disabled (default), index/filter blocks stay on the heap and may
/// never be evicted, giving better read performance at the cost of ~18 MB
/// per SST file of unbounded memory.
pub db_cache_index_filter_blocks: bool,

#[cfg(feature = "liquid")]
pub parent_network: BNetwork,
#[cfg(feature = "liquid")]
Expand Down Expand Up @@ -256,6 +263,10 @@ impl Config {
.help("Number of blocks per batch during initial sync. Larger values keep more txo rows in the write buffer during indexing, improving lookup_txos cache hit rate for recently-created outputs.")
.takes_value(true)
.default_value("250")
).arg(
Arg::with_name("cache_index_filter_blocks")
.long("cache-index-filter-blocks")
.help("Store index/filter blocks in the block cache instead of on the heap. Bounds memory but allows eviction under cache pressure.")
).arg(
Arg::with_name("zmq_addr")
.long("zmq-addr")
Expand Down Expand Up @@ -496,6 +507,7 @@ impl Config {
db_parallelism: value_t_or_exit!(m, "db_parallelism", usize),
db_write_buffer_size_mb: value_t_or_exit!(m, "db_write_buffer_size_mb", usize),
initial_sync_batch_size: value_t_or_exit!(m, "initial_sync_batch_size", usize),
db_cache_index_filter_blocks: m.is_present("cache_index_filter_blocks"),
zmq_addr,

#[cfg(feature = "liquid")]
Expand Down
26 changes: 12 additions & 14 deletions src/new_index/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,20 +150,18 @@ impl DB {
let mut block_opts = rocksdb::BlockBasedOptions::default();
let cache_size_bytes = config.db_block_cache_mb * 1024 * 1024;
block_opts.set_block_cache(&rocksdb::Cache::new_lru_cache(cache_size_bytes));
// Store index and filter blocks inside the block cache so their memory is
// bounded by --db-block-cache-mb. Without this, RocksDB allocates table-reader
// memory (index + filter blocks) on the heap separately for every open SST file.
// During initial sync, L0 files accumulate up to the compaction trigger (64 by
// default) and this unbounded heap allocation can grow to many GB.
// Note: increase --db-block-cache-mb proportionally (e.g. 4096) so the cache is
// large enough to hold the working set of filter/index blocks without thrashing.
block_opts.set_cache_index_and_filter_blocks(true);
// Pin L0 index and filter blocks in the cache so they are never evicted.
// Without this, data block churn evicts L0 index/filter blocks, causing
// repeated disk reads for every SST lookup — worse than the old heap approach.
// With this, L0 index/filter blocks behave like the old table-reader heap
// allocation but stay within the bounded block cache.
block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
// When --cache-index-filter-blocks is passed, store index and filter blocks
// inside the block cache so their memory is bounded by --db-block-cache-mb.
// Without this (the default), RocksDB keeps them on the heap where they may
// never be evicted — possibly better for read performance compared to needing
// to go to disk, but uses ~18 MB per SST file.
if config.db_cache_index_filter_blocks {
block_opts.set_cache_index_and_filter_blocks(true);
// Pin L0 index and filter blocks in the cache so they are never evicted.
// Without this, data block churn evicts L0 index/filter blocks, causing
// repeated disk reads for every SST lookup.
block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
}
// Bloom filters allow multi_get() to skip SST files that don't contain a key
// without touching the index or data blocks. Without this, every point lookup
// must binary-search the index of every L0 file whose key range overlaps the
Expand Down
1 change: 1 addition & 0 deletions tests/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,7 @@ impl TestRunner {
db_parallelism: 2,
db_write_buffer_size_mb: 256,
initial_sync_batch_size: 250,
db_cache_index_filter_blocks: false,
//#[cfg(feature = "electrum-discovery")]
//electrum_public_hosts: Option<crate::electrum::ServerHosts>,
//#[cfg(feature = "electrum-discovery")]
Expand Down
Loading