diff --git a/p2pool/src/sharechain/in_memory.rs b/p2pool/src/sharechain/in_memory.rs
index bd73578..b09d0a9 100644
--- a/p2pool/src/sharechain/in_memory.rs
+++ b/p2pool/src/sharechain/in_memory.rs
@@ -89,29 +89,13 @@
         let mut p2chain = None;
         if fs::exists(&data_path).map_err(|e| anyhow!("block cache file errored when checking exists: {}", e))? {
-            let bkp_file = config
-                .block_cache_file
-                .as_path()
-                .parent()
-                .ok_or_else(|| anyhow!("Block cache file has no parent"))?
-                .join("block_cache_backup")
-                .join(pow_algo.to_string());
-            info!(target: LOG_TARGET, "Found old block cache file, renaming from {:?} to {:?}", data_path.as_path(), &bkp_file);
-            // First remove the old backup file
-            let _unused = fs::remove_dir_all(bkp_file.as_path())
-                .inspect_err(|e| error!(target: LOG_TARGET, "Could not remove old block cache file:{:?}", e));
-            fs::create_dir_all(bkp_file.parent().unwrap())
-                .map_err(|e| anyhow::anyhow!("Could not create block cache backup directory:{:?}", e))?;
-            fs::rename(data_path.as_path(), bkp_file.as_path())
-                .map_err(|e| anyhow::anyhow!("Could not rename file to old file:{:?}", e))?;
-            let old = LmdbBlockStorage::new_from_path(bkp_file.as_path());
-            let new = LmdbBlockStorage::new_from_path(&data_path);
+            // The cache is now loaded in place; the rename-to-backup dance is no longer needed.
+            let block_cache = LmdbBlockStorage::new_from_path(&data_path);
             match P2Chain::try_load(
                 pow_algo,
                 config.share_window * 2,
                 config.share_window,
                 config.block_time,
-                old,
-                new,
+                block_cache,
                 &squad,
                 config
                     .minimum_randomx_target_difficulty
diff --git a/p2pool/src/sharechain/lmdb_block_storage.rs b/p2pool/src/sharechain/lmdb_block_storage.rs
index 179e86f..b61431f 100644
--- a/p2pool/src/sharechain/lmdb_block_storage.rs
+++ b/p2pool/src/sharechain/lmdb_block_storage.rs
@@ -69,19 +69,11 @@
         info!(target: LOG_TARGET, "Using block storage at {:?}", path);
         if !fs::exists(path).expect("could not get file") {
             fs::create_dir_all(path).unwrap();
-            // fs::File::create(path).unwrap();
         }
         let mut manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
         let file_handle = manager.get_or_create(path, Rkv::new::<SafeModeEnvironment>).unwrap();
         let env = file_handle.read().expect("reader");
-        // let dbs = env.get_dbs().expect("No dbs");
-        // if !dbs.contains(&Some("migrations".to_string())) {
-        //     let store = env.open_integer("migrations", StoreOptions::create()).unwrap();
-        //     let writer = env.write().expect("writer");
-        //     store.put(&writer, 0, &rkv::Value::Str("init")).unwrap();
-        //     writer.commit();
-        // }
         let mut migrations = HashMap::new();
         {
             let store = env.open_single("migrations", StoreOptions::create()).unwrap();
@@ -152,7 +144,18 @@
         }
     }
 
-    fn insert(&self, hash: BlockHash, block: Arc<P2Block>) {
+    fn insert(&self, hash: BlockHash, block: Arc<P2Block>, force: bool) {
+        // Skip the write if the block is already cached; `force` overwrites unconditionally.
+        if !force {
+            let env = self.file_handle.read().expect("reader");
+            let store = env.open_single("block_cache_v2", StoreOptions::create()).unwrap();
+            let reader = env.read().expect("reader");
+            let existing = store.get(&reader, hash.as_bytes()).unwrap();
+            // We don't need to deserialise the block, only check for its presence.
+            if existing.is_some() {
+                return;
+            }
+        }
         // Retry if the map is full
         // This weird pattern of setting a bool is so that the env is closed before resizing, otherwise
         // you can't resize with active transactions.
@@ -225,7 +228,7 @@
 pub trait BlockCache {
     fn get(&self, hash: &BlockHash) -> Option<Arc<P2Block>>;
     fn delete(&self, hash: &BlockHash);
-    fn insert(&self, hash: BlockHash, block: Arc<P2Block>);
+    fn insert(&self, hash: BlockHash, block: Arc<P2Block>, force: bool);
     fn all_blocks(&self) -> Result<Vec<Arc<P2Block>>, Error>;
 }
 
@@ -256,7 +259,7 @@
             self.blocks.write().unwrap().remove(hash);
         }
 
-        fn insert(&self, hash: BlockHash, block: Arc<P2Block>) {
+        fn insert(&self, hash: BlockHash, block: Arc<P2Block>, _force: bool) {
             self.blocks.write().unwrap().insert(hash, block);
         }
 
diff --git a/p2pool/src/sharechain/p2chain.rs b/p2pool/src/sharechain/p2chain.rs
index 9e98504..1e97c5a 100644
--- a/p2pool/src/sharechain/p2chain.rs
+++ b/p2pool/src/sharechain/p2chain.rs
@@ -27,7 +27,8 @@
     ops::{Deref, Sub},
     sync::Arc,
+    time::Instant,
 };
 
 use chrono::{Duration, Utc};
 use itertools::Itertools;
 use log::*;
@@ -204,7 +204,6 @@
         total_size: u64,
         share_window: u64,
         block_time: u64,
-        from_block_cache: T,
         new_block_cache: T,
         squad: &str,
         minimum_randomx_target_difficulty: u64,
@@ -233,7 +232,8 @@
             ))
             .timestamp() as u64)
             .into();
-        for (i, block) in from_block_cache.all_blocks()?.into_iter().enumerate() {
+        let start = Instant::now();
+        for (i, block) in new_chain.block_cache.all_blocks()?.into_iter().enumerate() {
             if block.version != PROTOCOL_VERSION {
                 warn!(target: LOG_TARGET, "Block version mismatch, skipping block");
                 continue;
@@ -258,6 +258,8 @@
                     error!(target: LOG_TARGET, "Failed to load block into chain: {}", e);
                 });
         }
+        let time = start.elapsed();
+        info!(target: LOG_TARGET, "Loaded chain in {:?}", time);
         Ok(new_chain)
     }
 
@@ -706,7 +709,7 @@
         let level = self
             .level_at_height(height)
             .ok_or(ShareChainError::BlockLevelNotFound)?;
-        level.add_block(Arc::new(actual_block))?;
+        level.add_block(Arc::new(actual_block), true)?;
 
         Ok(())
     }
@@ -1083,7 +1086,7 @@
         }
         match self.level_at_height(new_block_height) {
             Some(level) => {
-                level.add_block(block)?;
+                level.add_block(block, false)?;
                 self.verify_chain(new_block_height, block_hash)
             },
             None => {
diff --git a/p2pool/src/sharechain/p2chain_level.rs b/p2pool/src/sharechain/p2chain_level.rs
index 9badbf6..01d38fc 100644
--- a/p2pool/src/sharechain/p2chain_level.rs
+++ b/p2pool/src/sharechain/p2chain_level.rs
@@ -81,7 +81,7 @@
         let mut block_headers = HashMap::new();
         block_headers.insert(block.hash, header);
 
-        block_cache.insert(block.hash, block);
+        block_cache.insert(block.hash, block, false);
 
         Self {
             block_cache,
@@ -136,7 +136,7 @@
         *lock = hash;
     }
 
-    pub fn add_block(&self, block: Arc<P2Block>) -> Result<(), ShareChainError> {
+    pub fn add_block(&self, block: Arc<P2Block>, force: bool) -> Result<(), ShareChainError> {
         if self.height != block.height {
             return Err(ShareChainError::InvalidBlock {
                 reason: "Block height does not match the chain level height".to_string(),
@@ -158,7 +158,7 @@
             .write()
             .expect("could not lock")
             .insert(block.hash, header);
-        self.block_cache.insert(block.hash, block);
+        self.block_cache.insert(block.hash, block, force);
 
         Ok(())
     }