@@ -165,7 +165,7 @@ mod tests {
 
        typed_store.put_one(c.clone(), HashSet::from([d.clone()]))?;
 
-        let mut casper_buffer = CasperBufferKeyValueStorage::new_from_kv_store(typed_store).await?;
+        let casper_buffer = CasperBufferKeyValueStorage::new_from_kv_store(typed_store).await?;
 
        // CasperBufferStorage should be able to restore state on startup
        let c_parents = casper_buffer.get_parents(&c);
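These `mut` removals (here and in the test files below) compile because the storage types mutate their state behind interior mutability, so methods such as `insert` take `&self` rather than `&mut self`. A minimal sketch of the pattern, using hypothetical types rather than the ones in this PR:

use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical storage handle: the map lives behind a Mutex, so
// mutation goes through &self instead of &mut self.
struct Storage {
    inner: Mutex<HashMap<String, String>>,
}

impl Storage {
    fn insert(&self, key: String, value: String) {
        self.inner.lock().unwrap().insert(key, value);
    }
}

fn main() {
    // The binding needs no `mut`, mirroring the changes above.
    let storage = Storage {
        inner: Mutex::new(HashMap::new()),
    };
    storage.insert("parent".into(), "child".into());
}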
10 changes: 5 additions & 5 deletions block-storage/src/rust/util/doubly_linked_dag_operations.rs
@@ -164,7 +164,7 @@ mod tests {
 
    #[test]
    fn test_add_single_edge() {
-        let mut dag = BlockDependencyDag::empty();
+        let dag = BlockDependencyDag::empty();
        let parent = create_block_hash(b"parent");
        let child = create_block_hash(b"child");
 
@@ -193,7 +193,7 @@ mod tests {
 
    #[test]
    fn test_add_multiple_children() {
-        let mut dag = BlockDependencyDag::empty();
+        let dag = BlockDependencyDag::empty();
        let parent = create_block_hash(b"parent");
        let child1 = create_block_hash(b"child1");
        let child2 = create_block_hash(b"child2");
@@ -226,7 +226,7 @@ mod tests {
 
    #[test]
    fn test_remove_leaf_node() {
-        let mut dag = BlockDependencyDag::empty();
+        let dag = BlockDependencyDag::empty();
        let parent = create_block_hash(b"parent");
        let child = create_block_hash(b"child");
 
@@ -249,7 +249,7 @@ mod tests {
 
    #[test]
    fn test_remove_node_with_multiple_children() {
-        let mut dag = BlockDependencyDag::empty();
+        let dag = BlockDependencyDag::empty();
        let parent = create_block_hash(b"parent");
        let child1 = create_block_hash(b"child1");
        let child2 = create_block_hash(b"child2");
@@ -276,7 +276,7 @@ mod tests {
 
    #[test]
    fn test_remove_node_with_remaining_parents() {
-        let mut dag = BlockDependencyDag::empty();
+        let dag = BlockDependencyDag::empty();
        let parent1 = create_block_hash(b"parent1");
        let parent2 = create_block_hash(b"parent2");
        let child = create_block_hash(b"child");
24 changes: 12 additions & 12 deletions block-storage/tests/block_dag_storage_test.rs
@@ -70,7 +70,7 @@ fn genesis_block() -> BlockMessage {
 
 async fn create_dag_storage(genesis: &BlockMessage) -> BlockDagKeyValueStorage {
    let mut kvm = InMemoryStoreManager::new();
-    let mut dag_storage = BlockDagKeyValueStorage::new(&mut kvm).await.unwrap();
+    let dag_storage = BlockDagKeyValueStorage::new(&mut kvm).await.unwrap();
    dag_storage.insert(genesis, false, true).unwrap();
    dag_storage
 }
@@ -272,7 +272,7 @@ fn test_lookup_elements_result(
 fn dag_storage_should_be_able_to_lookup_a_stored_block() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, false, false).unwrap();
@@ -314,7 +314,7 @@ fn dag_storage_should_be_able_to_handle_checking_if_contains_a_block_with_empty_
 fn dag_storage_should_be_able_to_restore_state_on_startup() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, false, false).unwrap();
@@ -329,7 +329,7 @@ fn dag_storage_should_be_able_to_restore_state_on_startup() {
 fn dag_storage_should_be_able_to_restore_latest_messages_with_genesis_with_empty_sender_field() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        let mut block_elements_with_genesis = block_elements.clone();
        if let Some(first) = block_elements_with_genesis.first_mut() {
@@ -350,7 +350,7 @@ fn dag_storage_should_be_able_to_restore_state_from_the_previous_two_instances()
    let genesis = genesis_block();
    proptest!(proptest_config(), |(first_block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10),
                                   second_block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &first_block_elements {
            dag_storage.insert(block_element, false, false).unwrap();
@@ -375,7 +375,7 @@ fn dag_storage_should_be_able_to_restore_after_squashing_latest_messages() {
                                   second_block_elements in block_with_new_hashes_gen(block_elements.clone()),
                                   third_block_elements in block_with_new_hashes_gen(block_elements.clone())
    )| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, false, false).unwrap();
@@ -404,7 +404,7 @@ fn dag_storage_should_be_able_to_restore_equivocations_tracker_on_startup() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10),
                                   equivocator in validator_gen(), block_hash in block_hash_gen())| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, false, false).unwrap();
@@ -427,7 +427,7 @@ fn dag_storage_should_be_able_to_modify_equivocation_records() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(equivocator in validator_gen(), block_hash1 in block_hash_gen(),
                                   block_hash2 in block_hash_gen())| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        let equivocation_record = EquivocationRecord::new(equivocator.clone(), 0, BTreeSet::from([block_hash1.clone()]));
        dag_storage.insert_equivocation_record(equivocation_record.clone()).unwrap();
@@ -444,7 +444,7 @@ fn dag_storage_should_be_able_to_modify_equivocation_records() {
 fn dag_storage_should_be_able_to_restore_invalid_blocks_on_startup() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, true, false).unwrap();
@@ -461,7 +461,7 @@ fn dag_storage_should_be_able_to_restore_invalid_blocks_on_startup() {
 fn dag_storage_should_be_able_to_restore_deploy_index_on_startup() {
    let genesis = genesis_block();
    proptest!(proptest_config(), |(block_elements in block_elements_with_parents_gen(genesis.clone(), 0, 10))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
 
        for block_element in &block_elements {
            dag_storage.insert(block_element, true, false).unwrap();
@@ -491,7 +491,7 @@ fn dag_storage_should_be_able_to_restore_deploy_index_on_startup() {
 fn dag_storage_should_be_able_to_handle_blocks_with_invalid_numbers() {
    proptest!(proptest_config(), |(genesis in block_element_gen(None, None, None, None, None, None, None, None, None, None, None, None, None, None),
                                   block in block_element_gen(None, None, None, None, None, None, None, None, None, None, None, None, None, None))| {
-        let mut dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
+        let dag_storage = RUNTIME.block_on(create_dag_storage(&genesis));
        let mut invalid_block = block.clone();
        invalid_block.body.state.block_number = 1000;
        dag_storage.insert(&genesis, false, false).unwrap();
@@ -503,7 +503,7 @@ fn dag_storage_should_be_able_to_handle_blocks_with_invalid_numbers() {
 async fn recording_of_new_directly_finalized_block_should_record_finalized_all_non_finalized_ancestors_of_lfb(
 ) {
    let genesis = genesis_block();
-    let mut dag_storage = create_dag_storage(&genesis).await;
+    let dag_storage = create_dag_storage(&genesis).await;
    dag_storage.insert(&genesis, false, true).unwrap();
 
    let b1 = get_random_block(
4 changes: 1 addition & 3 deletions casper/tests/helper/secp256k1_sign_contract.rs
@@ -2,7 +2,6 @@
 
 use k256::{
    ecdsa::{signature::hazmat::PrehashSigner, Signature, SigningKey},
-    elliptic_curve::generic_array::GenericArray,
 };
 use models::{
    rhoapi::{ListParWithRandom, Par},
@@ -35,9 +34,8 @@ pub async fn get(
        )));
    }
 
-    let key_bytes = GenericArray::clone_from_slice(&secret_key);
    let signing_key =
-        SigningKey::from_bytes(&key_bytes).expect("Invalid private key");
+        SigningKey::from_slice(&secret_key).expect("Invalid private key");
 
    let signature: Signature = signing_key
        .sign_prehash(&hash)
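The move from `SigningKey::from_bytes` to `SigningKey::from_slice` tracks the k256 0.13-era API: `from_bytes` takes a fixed-size field-bytes array, while `from_slice` accepts a plain `&[u8]` and length-checks it at runtime, removing the `GenericArray` copy. A rough standalone sketch, assuming a k256 version that provides `from_slice`:

use k256::ecdsa::SigningKey;

// from_slice rejects slices that are not exactly 32 bytes or that
// encode an invalid secp256k1 scalar.
fn key_from_raw(secret_key: &[u8]) -> SigningKey {
    SigningKey::from_slice(secret_key).expect("Invalid private key")
}

fn main() {
    // 0x42 repeated 32 times is a valid (nonzero, below-order) scalar.
    let _key = key_from_raw(&[0x42u8; 32]);
}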
13 changes: 0 additions & 13 deletions comm/src/rust/discovery/peer_table.rs
@@ -57,12 +57,6 @@ pub struct PeerTable<T: KademliaRPC> {
    /// but would be used in full Kademlia iterative lookup algorithms
    /// for controlling parallel network requests to avoid overwhelming the network.
    alpha: u32,
-    /// UNIMPLEMENTED: this parameter controls an optimization that can
-    /// reduce the number of hops required to find an address in the network
-    /// by grouping keys in buckets of a size larger than one.
-    /// Currently hardcoded to 1 (standard Kademlia behavior).
-    #[allow(dead_code)]
-    bucket_width: u32,
    kademlia_rpc: Arc<T>,
    width: usize,
    pub table: Vec<Arc<Mutex<Vec<PeerTableEntry>>>>,
@@ -76,7 +70,6 @@ impl<T: KademliaRPC> PeerTable<T> {
            local_key,
            k: k.unwrap_or(20),
            alpha: alpha.unwrap_or(3),
-            bucket_width: 1, // Standard Kademlia behavior - could be configurable in future
            kademlia_rpc,
            width,
            table: (0..8 * width)
@@ -90,12 +83,6 @@ impl<T: KademliaRPC> PeerTable<T> {
        self.alpha
    }
 
-    /// Get the bucket width (currently always 1 - standard Kademlia)
-    /// This would control bucket size optimization if implemented
-    pub fn bucket_width(&self) -> u32 {
-        self.bucket_width
-    }
-
    /** Computes Kademlia XOR distance.
     *
     * Returns the length of the longest common prefix in bits between
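The distance function described in that doc comment is the standard Kademlia metric: XOR the two keys and count the bits of common prefix. A small self-contained sketch of that computation (not the `PeerTable` implementation itself):

// Length in bits of the longest common prefix of two equal-length keys;
// the distance-bucket index in standard Kademlia derives from this.
fn common_prefix_bits(a: &[u8], b: &[u8]) -> usize {
    let mut bits = 0;
    for (x, y) in a.iter().zip(b.iter()) {
        let diff = x ^ y;
        if diff == 0 {
            bits += 8; // whole byte matches, keep scanning
        } else {
            bits += diff.leading_zeros() as usize;
            break;
        }
    }
    bits
}

fn main() {
    // Keys agree on the top 4 bits and differ on the 5th.
    assert_eq!(common_prefix_bits(&[0b1010_0000], &[0b1010_1111]), 4);
}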
12 changes: 4 additions & 8 deletions crypto/src/rust/signatures/secp256k1.rs
@@ -4,11 +4,9 @@ use ed25519_dalek::ed25519::signature::hazmat::PrehashVerifier;
 use k256::ecdsa::VerifyingKey;
 use k256::{
    ecdsa::{signature::hazmat::PrehashSigner, Signature, SigningKey},
-    elliptic_curve::generic_array::GenericArray,
 };
 
 use openssl::pkey::PKey;
-use typenum::U32;
 
 use eyre::{Context, Result};
 use k256::elliptic_curve::{sec1::ToEncodedPoint, SecretKey};
@@ -153,8 +151,7 @@ impl SignaturesAlg for Secp256k1 {
            );
        }
 
-        let key_bytes = GenericArray::clone_from_slice(sec);
-        let signing_key = SigningKey::from_bytes(&key_bytes).expect("Invalid private key");
+        let signing_key = SigningKey::from_slice(sec).expect("Invalid private key");
 
        // Always use prehash signing since we expect hashed data
        let signature: Signature = signing_key
@@ -165,9 +162,8 @@ impl SignaturesAlg for Secp256k1 {
    }
 
    fn to_public(&self, sec: &PrivateKey) -> PublicKey {
-        let key_bytes: GenericArray<u8, U32> = GenericArray::clone_from_slice(&sec.bytes);
        let secret_key: SecretKey<k256::Secp256k1> =
-            SecretKey::from_bytes(&key_bytes).expect("Invalid private key");
+            SecretKey::from_slice(&sec.bytes).expect("Invalid private key");
 
        let public_key = secret_key.public_key();
        let public_key_bytes = public_key.to_encoded_point(false).as_bytes().to_vec();
@@ -210,7 +206,7 @@ mod tests {
    use sha2::{Digest, Sha256};
 
    fn sec_key_verify(seckey: &[u8]) -> bool {
-        seckey.len() == 32 && SigningKey::from_bytes(GenericArray::from_slice(seckey)).is_ok()
+        seckey.len() == 32 && SigningKey::from_slice(seckey).is_ok()
    }
 
    //crypto/src/test/scala/coop/rchain/crypto/signatures/Secp256k1Spec.scala
@@ -482,7 +478,7 @@ mod tests {
        let (private_key, _public_key) = secp256k1.new_key_pair();
 
        // Create a PKCS#8 DER from our private key
-        let signing_key = SigningKey::from_bytes(&GenericArray::from_slice(&private_key.bytes))
+        let signing_key = SigningKey::from_slice(&private_key.bytes)
            .expect("Failed to create signing key");
 
        let pkcs8_der = signing_key
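Since this module signs with `sign_prehash` (the caller supplies an already-hashed message), the matching verification entry point is `verify_prehash`. A minimal round-trip sketch under that assumption, with a hard-coded test-only scalar:

use k256::ecdsa::{
    signature::hazmat::{PrehashSigner, PrehashVerifier},
    Signature, SigningKey,
};
use sha2::{Digest, Sha256};

fn main() {
    // Test-only scalar; nonzero and below the secp256k1 group order.
    let signing_key = SigningKey::from_slice(&[0x42u8; 32]).expect("Invalid private key");

    // Hash first, then sign the 32-byte digest directly.
    let hash = Sha256::digest(b"message");
    let signature: Signature = signing_key
        .sign_prehash(hash.as_slice())
        .expect("signing failed");

    // Verification consumes the same digest, not the original message.
    signing_key
        .verifying_key()
        .verify_prehash(hash.as_slice(), &signature)
        .expect("verification failed");
}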
3 changes: 1 addition & 2 deletions crypto/src/rust/util/key_util.rs
@@ -46,10 +46,9 @@ impl KeyUtil {
    ) -> Result<PKey<openssl::pkey::Private>> {
        // Convert our private key to PKCS#8 DER format
        use k256::ecdsa::SigningKey;
-        use k256::elliptic_curve::generic_array::GenericArray;
        use pkcs8::EncodePrivateKey;
 
-        let signing_key = SigningKey::from_bytes(&GenericArray::from_slice(&private_key.bytes))
+        let signing_key = SigningKey::from_slice(&private_key.bytes)
            .map_err(|e| eyre::eyre!("Failed to create signing key: {}", e))?;
 
        let pkcs8_der = signing_key
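The helper around this hunk converts a raw key into PKCS#8 DER for OpenSSL consumption. The shape of that conversion, sketched independently of `KeyUtil` and assuming the k256 `pkcs8` feature is enabled:

use k256::ecdsa::SigningKey;
use k256::pkcs8::EncodePrivateKey;

// Raw 32-byte secret -> PKCS#8 DER document, the format that
// OpenSSL-style PKCS#8 loaders expect.
fn to_pkcs8_der_bytes(raw: &[u8]) -> Result<Vec<u8>, String> {
    let signing_key =
        SigningKey::from_slice(raw).map_err(|e| format!("Failed to create signing key: {}", e))?;
    let der = signing_key
        .to_pkcs8_der()
        .map_err(|e| format!("Failed to encode PKCS#8: {}", e))?;
    Ok(der.as_bytes().to_vec())
}

fn main() {
    let der = to_pkcs8_der_bytes(&[0x42u8; 32]).unwrap();
    assert!(!der.is_empty());
}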
9 changes: 3 additions & 6 deletions models/tests/scored_term_sort_test.rs
@@ -1,9 +1,7 @@
 // See models/src/test/scala/coop/rchain/models/rholang/SortTest.scala - ScoredTermSpec
 
-use models::rhoapi::var::WildcardMsg;
 use models::rhoapi::{
-    connective, expr, var, Bundle, Connective, EList, EMethod, ENot, Match, MatchCase, New, Par,
-    Receive, ReceiveBind, Send,
+    EList, EMethod, New, Par,
 };
 use models::rust::par_map::ParMap;
 use models::rust::par_map_type_mapper::ParMapTypeMapper;
@@ -14,11 +12,10 @@ use models::rust::rholang::sorter::new_sort_matcher::NewSortMatcher;
 use models::rust::rholang::sorter::par_sort_matcher::ParSortMatcher;
 use models::rust::rholang::sorter::receive_sort_matcher::ReceiveSortMatcher;
 use models::rust::rholang::sorter::send_sort_matcher::SendSortMatcher;
-use models::rust::rholang::sorter::unforgeable_sort_matcher::UnforgeableSortMatcher;
 use models::rust::rholang::sorter::var_sort_matcher::VarSortMatcher;
 use models::rust::test_utils::test_utils::{
-    for_all_similar_a, generate_bundle, generate_connective, generate_expr, generate_match,
-    generate_new, generate_par, generate_receive, generate_send, generate_var, sort,
+    generate_bundle, generate_connective, generate_expr, generate_match,
+    generate_new, generate_par, generate_receive, generate_send, generate_var,
 };
 use models::{
    rhoapi::{expr::ExprInstance, Expr, Var},
4 changes: 2 additions & 2 deletions models/tests/sorted_par_map_test.rs
@@ -277,14 +277,14 @@ fn sorted_par_map_should_preserve_sortedness_for_all_operations() {
        .clone()
        .into_iter()
        .zip(pars2.clone().into_iter())
-        .map(|((k1, v1), (k2, v2))| (k1, v1)) // take pairs from the first list
+        .map(|((k1, v1), _)| (k1, v1)) // take pairs from the first list
        .collect();
 
    let pairs2: Vec<(Par, Par)> = pars2
        .clone()
        .into_iter()
        .zip(pars1.clone().into_iter())
-        .map(|((k1, v1), (k2, v2))| (k1, v1)) // take pairs from the second list
+        .map(|((k1, v1), _)| (k1, v1)) // take pairs from the second list
        .collect();
 
    let map1 = SortedParMap::create_from_vec(pairs1);
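This closure change is a warning fix: the second element of each zipped pair was never read, and `_` discards it without introducing dead `k2`/`v2` bindings. The same pattern in miniature:

fn main() {
    let firsts: Vec<(i32, i32)> = vec![(1, 2), (3, 4)]
        .into_iter()
        .zip(vec![(5, 6), (7, 8)])
        .map(|(pair, _)| pair) // keep the left pair, discard the right one
        .collect();
    assert_eq!(firsts, vec![(1, 2), (3, 4)]);
}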
2 changes: 1 addition & 1 deletion node/src/main.rs
@@ -340,7 +340,7 @@ async fn start_node_runtime(conf: NodeConf, kamon_conf: KamonConf) -> Result<()>
    #[allow(unused_variables)]
    let prometheus_reporter = node::rust::diagnostics::initialize_diagnostics(&conf, &kamon_conf)?;
 
-    node::rust::runtime::node_runtime::start(conf, kamon_conf).await
+    node::rust::runtime::node_runtime::start(conf).await
 }
 
 /// Log configuration (equivalent to Scala's logConfiguration)
9 changes: 0 additions & 9 deletions node/src/rust/diagnostics/new_prometheus_reporter.rs
@@ -1,17 +1,12 @@
 use crate::rust::diagnostics::prometheus_config::PrometheusConfiguration;
 use eyre::Result;
 use metrics_exporter_prometheus::PrometheusHandle;
-use std::collections::HashMap;
 use std::sync::{Arc, OnceLock};
 use tracing::{info, warn};
 
 static GLOBAL_REPORTER: OnceLock<Arc<NewPrometheusReporter>> = OnceLock::new();
 
 pub struct NewPrometheusReporter {
-    #[allow(dead_code)]
-    config: PrometheusConfiguration,
-    #[allow(dead_code)]
-    environment_tags: HashMap<String, String>,
    prometheus_handle: PrometheusHandle,
 }
 
@@ -39,11 +34,7 @@ impl NewPrometheusReporter {
 
        info!("Prometheus metrics exporter initialized");
 
-        let environment_tags = config.environment_tags();
-
        let reporter = Arc::new(Self {
-            config,
-            environment_tags,
            prometheus_handle: handle,
        });
 
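With the dead fields gone, the struct is reduced to the `PrometheusHandle` it actually serves, still published through the `OnceLock` global. The init-once pattern in isolation, with a stand-in reporter type:

use std::sync::{Arc, OnceLock};

struct Reporter; // stand-in for NewPrometheusReporter

static GLOBAL_REPORTER: OnceLock<Arc<Reporter>> = OnceLock::new();

fn init() -> Arc<Reporter> {
    // The first caller constructs the reporter; later callers get the same Arc.
    GLOBAL_REPORTER.get_or_init(|| Arc::new(Reporter)).clone()
}

fn main() {
    let a = init();
    let b = init();
    assert!(Arc::ptr_eq(&a, &b));
}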