Skip to content

Commit

Permalink
add fixes to mempool from catalyst-core
Browse files Browse the repository at this point in the history
Original found at github.com/input-output-hk/catalyst-core

commit b2ac65058dcc2768d5dbbbf7c3074f44e21da524
Author: Alex Pozhylenkov <[email protected]>
Date:   Mon Jan 23 15:49:34 2023 +0200

    Update node metrics (#228)

    # Description

Replace `mempool_total_size`, which shows the current memory usage of
the mempool, with `mempool_tx_count`, which shows the actual number of
transactions inside the mempool

    ## Type of change
    - Refactor
  • Loading branch information
saibatizoku committed Jun 28, 2023
1 parent 47e874c commit 56fe78d
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 26 deletions.
24 changes: 2 additions & 22 deletions jormungandr/src/fragment/pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -320,8 +320,7 @@ impl Pool {
n => self.pool.len() as f64 / n as f64,
};
self.metrics.set_mempool_usage_ratio(mempool_usage_ratio);
self.metrics
.set_mempool_total_size(self.pool.total_size_bytes());
self.metrics.set_mempool_total_size(self.pool.len());
}
}

Expand Down Expand Up @@ -562,7 +561,6 @@ pub(super) mod internal {
entries: IndexedDeqeue<FragmentId, Fragment>,
timeout_queue: BTreeSet<TimeoutQueueItem>,
max_entries: usize,
total_size_bytes: usize,
}

impl Pool {
Expand All @@ -573,7 +571,6 @@ pub(super) mod internal {
// out of their order in a queue. BinaryHeap does not allow that.
timeout_queue: BTreeSet::new(),
max_entries,
total_size_bytes: 0,
}
}

Expand All @@ -589,7 +586,6 @@ pub(super) mod internal {
if self.entries.contains(id) {
false
} else {
self.total_size_bytes += fragment.serialized_size();
self.timeout_queue_insert(fragment, *id);
self.entries.push_front(*id, fragment.clone());
true
Expand All @@ -604,15 +600,13 @@ pub(super) mod internal {
let maybe_fragment = self.entries.remove(fragment_id);
if let Some(fragment) = maybe_fragment {
self.timeout_queue_remove(&fragment, *fragment_id);
self.total_size_bytes -= fragment.serialized_size();
}
}
}

pub fn remove_oldest(&mut self) -> Option<(Fragment, FragmentId)> {
let (id, fragment) = self.entries.pop_back().map(|(id, value)| (id, value))?;
self.timeout_queue_remove(&fragment, id);
self.total_size_bytes -= fragment.serialized_size();
Some((fragment, id))
}

Expand All @@ -622,7 +616,6 @@ pub(super) mod internal {
) {
for (fragment, id) in fragments.into_iter() {
self.timeout_queue_insert(&fragment, id);
self.total_size_bytes += fragment.serialized_size();
self.entries.push_back(id, fragment);
}
}
Expand Down Expand Up @@ -650,9 +643,7 @@ pub(super) mod internal {
.collect();
for item in &to_remove {
self.timeout_queue.remove(item);
if let Some(fragment) = self.entries.remove(&item.id) {
self.total_size_bytes -= fragment.serialized_size();
}
self.entries.remove(&item.id);
}
to_remove.into_iter().map(|x| x.id).collect()
// TODO convert to something like this when .first() and .pop_first() are stabilized. This does not have unnecessary clones.
Expand All @@ -675,10 +666,6 @@ pub(super) mod internal {
self.entries.len()
}

pub fn total_size_bytes(&self) -> usize {
self.total_size_bytes
}

pub fn max_entries(&self) -> usize {
self.max_entries
}
Expand Down Expand Up @@ -729,13 +716,6 @@ pub(super) mod internal {
];
let mut pool = Pool::new(4);
assert_eq!(fragments1, pool.insert_all(fragments1.clone()));
assert_eq!(
pool.total_size_bytes,
fragments1
.iter()
.map(|(f, _)| f.serialized_size())
.sum::<usize>()
);

assert_eq!(fragments2_expected, pool.insert_all(fragments2));
for expected in final_expected.into_iter() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ use assert_fs::{
fixture::{PathChild, PathCreateDir},
TempDir,
};
use chain_core::property::{FromStr, Serialize};
use chain_core::property::FromStr;
use chain_impl_mockchain::{
block::BlockDate,
chaintypes::ConsensusVersion,
Expand Down Expand Up @@ -544,15 +544,13 @@ fn pending_transaction_stats() {
},
);

let mut pending_size = 0;
let mut pending_cnt = 0;

for i in 0..10 {
let transaction = fragment_builder
.transaction(&alice, bob.address(), i.into())
.unwrap();

pending_size += transaction.serialized_size();
pending_cnt += 1;

let status =
Expand All @@ -566,7 +564,7 @@ fn pending_transaction_stats() {
pending_cnt as f64 / mempool_max_entries as f64,
stats.mempool_usage_ratio
);
assert_eq!(pending_size, stats.mempool_total_size as usize);
assert_eq!(pending_cnt, stats.mempool_total_size);
}
}

Expand Down

0 comments on commit 56fe78d

Please sign in to comment.