From afd64c80b9c6b5abe08a32202e014aee1ddfd70a Mon Sep 17 00:00:00 2001 From: James Hodgkinson Date: Wed, 26 Jun 2024 10:02:39 +1000 Subject: [PATCH] applying codespell fixes --- .codespell_ignore | 3 ++- Makefile | 5 ++-- src/arcache/ll.rs | 2 +- src/arcache/mod.rs | 12 ++++----- src/bptree/asynch.rs | 4 +-- src/bptree/impl.rs | 10 ++++---- src/bptree/mod.rs | 4 +-- src/cowcell/asynch.rs | 4 +-- src/cowcell/mod.rs | 4 +-- src/ebrcell/mod.rs | 4 +-- src/hashmap/asynch.rs | 2 +- src/hashmap/impl.rs | 35 ++++++++++----------------- src/hashmap/mod.rs | 4 +-- src/hashtrie/asynch.rs | 2 +- src/hashtrie/impl.rs | 24 ++++++++---------- src/hashtrie/mod.rs | 4 +-- src/internals/bptree/cursor.rs | 27 +++++++++------------ src/internals/bptree/iter.rs | 8 +++--- src/internals/bptree/node.rs | 18 +++++++------- src/internals/hashmap/cursor.rs | 16 ++++++------ src/internals/hashmap/iter.rs | 4 +-- src/internals/hashmap/mod.rs | 2 +- src/internals/hashmap/node.rs | 18 +++++++------- src/internals/hashtrie/cursor.rs | 2 +- src/internals/hashtrie/iter.rs | 4 +-- src/internals/lincowcell/mod.rs | 4 +-- src/internals/lincowcell_async/mod.rs | 4 +-- src/lib.rs | 2 +- src/threadcache/mod.rs | 2 +- 29 files changed, 110 insertions(+), 124 deletions(-) diff --git a/.codespell_ignore b/.codespell_ignore index 1a22196..d84f1c4 100644 --- a/.codespell_ignore +++ b/.codespell_ignore @@ -1 +1,2 @@ -crate \ No newline at end of file +crate +ser diff --git a/Makefile b/Makefile index 5915c02..d35136e 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,8 @@ clippy: cargo clippy --features arcache check: ## check all the things -check: test clippy +check: test clippy codespell + cargo fmt --check cargo outdated -R cargo audit @@ -59,4 +60,4 @@ codespell: ## spell-check things. 
codespell: codespell -c \ --ignore-words .codespell_ignore \ - --skip='./target' + --skip='./target,dhat-heap.json' diff --git a/src/arcache/ll.rs b/src/arcache/ll.rs index 8da1fb1..44d8ed0 100644 --- a/src/arcache/ll.rs +++ b/src/arcache/ll.rs @@ -369,7 +369,7 @@ where while n != tail { unsafe { let next = (*n).next; - // For sanity - we want to check that the node preceeding us is the correct link. + // For sanity - we want to check that the node preceding us is the correct link. debug_assert!((*next).prev == n); // K is not a null pointer. diff --git a/src/arcache/mod.rs b/src/arcache/mod.rs index 534c9e2..7c1de3a 100644 --- a/src/arcache/mod.rs +++ b/src/arcache/mod.rs @@ -1547,8 +1547,8 @@ impl< // Inserts are accepted. } - /// Attempt to retieve a k-v pair from the cache. If it is present in the main cache OR - /// the thread local cache, a `Some` is returned, else you will recieve a `None`. On a + /// Attempt to retrieve a k-v pair from the cache. If it is present in the main cache OR + /// the thread local cache, a `Some` is returned, else you will receive a `None`. On a /// `None`, you must then consult the external data source that this structure is acting /// as a cache for. pub fn get(&mut self, k: &Q) -> Option<&V> @@ -1890,8 +1890,8 @@ impl< S: ARCacheReadStat + Clone, > ARCacheReadTxn<'_, K, V, S> { - /// Attempt to retieve a k-v pair from the cache. If it is present in the main cache OR - /// the thread local cache, a `Some` is returned, else you will recieve a `None`. On a + /// Attempt to retrieve a k-v pair from the cache. If it is present in the main cache OR + /// the thread local cache, a `Some` is returned, else you will receive a `None`. On a /// `None`, you must then consult the external data source that this structure is acting /// as a cache for. 
pub fn get(&mut self, k: &Q) -> Option<&V> @@ -2022,7 +2022,7 @@ impl< /// Note that is invalid to insert an item who's key already exists in this thread local cache, /// and this is asserted IE will panic if you attempt this. It is also invalid for you to insert /// a value that does not match the source-of-truth state, IE inserting a different - /// value than another thread may percieve. This is a *read* thread, so you should only be adding + /// value than another thread may perceive. This is a *read* thread, so you should only be adding /// values that are relevant to this read transaction and this point in time. If you do not /// heed this warning, you may alter the fabric of time and space and have some interesting /// distortions in your data over time. @@ -2846,7 +2846,7 @@ mod tests { ); wr_txn.commit(); - // Now once commited, the proper sizes kick in. + // Now once committed, the proper sizes kick in. let wr_txn = arc.write(); eprintln!("{:?}", wr_txn.peek_stat()); diff --git a/src/bptree/asynch.rs b/src/bptree/asynch.rs index 5235d19..b5c0d5c 100644 --- a/src/bptree/asynch.rs +++ b/src/bptree/asynch.rs @@ -35,7 +35,7 @@ impl { /// Commit the changes from this write transaction. Readers after this point - /// will be able to percieve these changes. + /// will be able to perceive these changes. /// /// To abort (unstage changes), just do not call this function. pub fn commit(self) { @@ -267,7 +267,7 @@ mod tests { async fn test_bptree2_map_acb_order() { // Need to ensure that txns are dropped in order. - // Add data, enouugh to cause a split. All data should be *2 + // Add data, enough to cause a split. 
All data should be *2 let map = BptreeMap::new(); // add values { diff --git a/src/bptree/impl.rs b/src/bptree/impl.rs index 846977a..e763c28 100644 --- a/src/bptree/impl.rs +++ b/src/bptree/impl.rs @@ -1,6 +1,6 @@ use crate::internals::bptree::cursor::CursorReadOps; use crate::internals::bptree::cursor::{CursorRead, CursorWrite, SuperBlock}; -use crate::internals::bptree::iter::{Iter, RangeIter, KeyIter, ValueIter}; +use crate::internals::bptree::iter::{Iter, KeyIter, RangeIter, ValueIter}; use crate::internals::bptree::mutiter::RangeMutIter; use crate::internals::lincowcell::LinCowCellCapable; use std::borrow::Borrow; @@ -24,7 +24,7 @@ use std::ops::RangeBounds; /// This is achieved through the use of [COW](https://en.wikipedia.org/wiki/Copy-on-write) /// or [MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control). /// As a write occurs, subsets of the tree are cloned into the writer thread -/// and then commited later. This may cause memory usage to increase in exchange +/// and then committed later. This may cause memory usage to increase in exchange /// for a gain in concurrent behaviour. /// /// Transactions can be rolled-back (aborted) without penalty by dropping @@ -70,7 +70,7 @@ unsafe impl where K: Ord + Clone + Debug + Sync + Send + 'static, @@ -89,7 +89,7 @@ where } /// A point-in-time snapshot of the tree from within a read OR write. This is -/// useful for building other transactional types ontop of this structure, as +/// useful for building other transactional types on top of this structure, as /// you need a way to downcast both BptreeMapReadTxn or BptreeMapWriteTxn to /// a singular reader type for a number of get_inner() style patterns. /// @@ -239,7 +239,7 @@ impl { /// Commit the changes from this write transaction. Readers after this point - /// will be able to percieve these changes. + /// will be able to perceive these changes. /// /// To abort (unstage changes), just do not call this function. 
pub fn commit(self) { @@ -270,7 +270,7 @@ mod tests { fn test_bptree2_map_acb_order() { // Need to ensure that txns are dropped in order. - // Add data, enouugh to cause a split. All data should be *2 + // Add data, enough to cause a split. All data should be *2 let map = BptreeMap::new(); // add values { diff --git a/src/cowcell/asynch.rs b/src/cowcell/asynch.rs index de3794f..39dcae2 100644 --- a/src/cowcell/asynch.rs +++ b/src/cowcell/asynch.rs @@ -121,9 +121,9 @@ where } fn commit(&self, newdata: Option) { - if let Some(nd) = newdata { + if let Some(new_data) = newdata { // now over-write the last value in the `ArcSwap`. - self.active.store(Arc::new(nd)); + self.active.store(Arc::new(new_data)); } // If not some, we do nothing. // Done diff --git a/src/cowcell/mod.rs b/src/cowcell/mod.rs index 9f0d2af..cbc6c28 100644 --- a/src/cowcell/mod.rs +++ b/src/cowcell/mod.rs @@ -154,9 +154,9 @@ where } fn commit(&self, newdata: Option) { - if let Some(nd) = newdata { + if let Some(new_data) = newdata { // now over-write the last value in the `ArcSwap`. - self.active.store(Arc::new(nd)); + self.active.store(Arc::new(new_data)); } // If not some, we do nothing. // Done diff --git a/src/ebrcell/mod.rs b/src/ebrcell/mod.rs index 8a5f92e..d55b48d 100644 --- a/src/ebrcell/mod.rs +++ b/src/ebrcell/mod.rs @@ -187,7 +187,7 @@ where }) } - /// This is an internal compontent of the commit cycle. It takes ownership + /// This is an internal component of the commit cycle. It takes ownership /// of the value stored in the writetxn, and commits it to the main EbrCell /// safely. /// @@ -257,7 +257,7 @@ pub struct EbrCellReadTxn { impl Deref for EbrCellReadTxn { type Target = T; - /// Derference and access the value within the read transaction. + /// De-reference and access the value within the read transaction. 
fn deref(&self) -> &T { unsafe { &(*self.data) } } diff --git a/src/hashmap/asynch.rs b/src/hashmap/asynch.rs index 895ea3a..b1be7de 100644 --- a/src/hashmap/asynch.rs +++ b/src/hashmap/asynch.rs @@ -55,7 +55,7 @@ impl { /// Commit the changes from this write transaction. Readers after this point - /// will be able to percieve these changes. + /// will be able to perceive these changes. /// /// To abort (unstage changes), just do not call this function. pub fn commit(self) { diff --git a/src/hashmap/impl.rs b/src/hashmap/impl.rs index 9f43600..7d576a4 100644 --- a/src/hashmap/impl.rs +++ b/src/hashmap/impl.rs @@ -21,7 +21,7 @@ use std::iter::FromIterator; /// in time. /// /// This is achieved through the use of COW or MVCC. As a write occurs -/// subsets of the tree are cloned into the writer thread and then commited +/// subsets of the tree are cloned into the writer thread and then committed /// later. This may cause memory usage to increase in exchange for a gain /// in concurrent behaviour. /// @@ -59,7 +59,7 @@ where /// may be modified exclusively through this transaction without affecting /// readers. The write may be rolledback/aborted by dropping this guard /// without calling `commit()`. Once `commit()` is called, readers will be -/// able to access and percieve changes in new transactions. +/// able to access and perceive changes in new transactions. pub struct HashMapWriteTxn<'a, K, V> where K: Hash + Eq + Clone + Debug + Sync + Send + 'static, @@ -78,7 +78,7 @@ where } /// A point-in-time snapshot of the tree from within a read OR write. This is -/// useful for building other transactional types ontop of this structure, as +/// useful for building other transactional types on top of this structure, as /// you need a way to downcast both HashMapReadTxn or HashMapWriteTxn to /// a singular reader type for a number of get_inner() style patterns. 
/// @@ -118,20 +118,16 @@ impl Extend<(K, V)> for HashMapWriteTxn<'_, K, V> +impl + Extend<(K, V)> for HashMapWriteTxn<'_, K, V> { fn extend>(&mut self, iter: I) { self.inner.as_mut().extend(iter); } } -impl< - K: Hash + Eq + Clone + Debug + Sync + Send + 'static, - V: Clone + Sync + Send + 'static, - > HashMapWriteTxn<'_, K, V> +impl + HashMapWriteTxn<'_, K, V> { /* pub(crate) fn prehash(&self, k: &Q) -> u64 @@ -197,8 +193,8 @@ impl< } /// Reset this map to an empty state. As this is within the transaction this - /// change only takes effect once commited. Once cleared, you can begin adding - /// new writes and changes, again, that will only be visible once commited. + /// change only takes effect once committed. Once cleared, you can begin adding + /// new writes and changes, again, that will only be visible once committed. pub fn clear(&mut self) { self.inner.as_mut().clear() } @@ -236,10 +232,8 @@ impl< } } -impl< - K: Hash + Eq + Clone + Debug + Sync + Send + 'static, - V: Clone + Sync + Send + 'static, - > HashMapReadTxn<'_, K, V> +impl + HashMapReadTxn<'_, K, V> { pub(crate) fn get_prehashed(&self, k: &Q, k_hash: u64) -> Option<&V> where @@ -303,10 +297,8 @@ impl< } } -impl< - K: Hash + Eq + Clone + Debug + Sync + Send + 'static, - V: Clone + Sync + Send + 'static, - > HashMapReadSnapshot<'_, K, V> +impl + HashMapReadSnapshot<'_, K, V> { /// Retrieve a value from the tree. If the value exists, a reference is returned /// as `Some(&V)`, otherwise if not present `None` is returned. @@ -375,4 +367,3 @@ impl< } } } - diff --git a/src/hashmap/mod.rs b/src/hashmap/mod.rs index d5f4964..c9fdcaf 100644 --- a/src/hashmap/mod.rs +++ b/src/hashmap/mod.rs @@ -11,7 +11,7 @@ //! //! This structure is very different to the `im` crate. The `im` crate is //! sync + send over individual operations. This means that multiple writes can -//! be interleaved atomicly and safely, and the readers always see the latest +//! 
be interleaved atomically and safely, and the readers always see the latest //! data. While this is potentially useful to a set of problems, transactional //! structures are suited to problems where readers have to maintain consistent //! data views for a duration of time, cpu cache friendly behaviours and @@ -99,7 +99,7 @@ impl { /// Commit the changes from this write transaction. Readers after this point - /// will be able to percieve these changes. + /// will be able to perceive these changes. /// /// To abort (unstage changes), just do not call this function. pub fn commit(self) { diff --git a/src/hashtrie/impl.rs b/src/hashtrie/impl.rs index a444c0c..aae10da 100644 --- a/src/hashtrie/impl.rs +++ b/src/hashtrie/impl.rs @@ -19,7 +19,7 @@ use std::iter::FromIterator; /// in time. /// /// This is achieved through the use of COW or MVCC. As a write occurs -/// subsets of the tree are cloned into the writer thread and then commited +/// subsets of the tree are cloned into the writer thread and then committed /// later. This may cause memory usage to increase in exchange for a gain /// in concurrent behaviour. /// @@ -57,7 +57,7 @@ where /// may be modified exclusively through this transaction without affecting /// readers. The write may be rolledback/aborted by dropping this guard /// without calling `commit()`. Once `commit()` is called, readers will be -/// able to access and percieve changes in new transactions. +/// able to access and perceive changes in new transactions. pub struct HashTrieWriteTxn<'a, K, V> where K: Hash + Eq + Clone + Debug + Sync + Send + 'static, @@ -76,7 +76,7 @@ where } /// A point-in-time snapshot of the tree from within a read OR write. 
This is -/// useful for building other transactional types ontop of this structure, as +/// useful for building other transactional types on top of this structure, as /// you need a way to downcast both HashTrieReadTxn or HashTrieWriteTxn to /// a singular reader type for a number of get_inner() style patterns. /// @@ -116,20 +116,16 @@ impl Extend<(K, V)> for HashTrieWriteTxn<'_, K, V> +impl + Extend<(K, V)> for HashTrieWriteTxn<'_, K, V> { fn extend>(&mut self, iter: I) { self.inner.as_mut().extend(iter); } } -impl< - K: Hash + Eq + Clone + Debug + Sync + Send + 'static, - V: Clone + Sync + Send + 'static, - > HashTrieWriteTxn<'_, K, V> +impl + HashTrieWriteTxn<'_, K, V> { /* pub(crate) fn prehash(&self, k: &Q) -> u64 @@ -195,8 +191,8 @@ impl< } /// Reset this map to an empty state. As this is within the transaction this - /// change only takes effect once commited. Once cleared, you can begin adding - /// new writes and changes, again, that will only be visible once commited. + /// change only takes effect once committed. Once cleared, you can begin adding + /// new writes and changes, again, that will only be visible once committed. pub fn clear(&mut self) { self.inner.as_mut().clear() } @@ -250,7 +246,7 @@ impl(&self, k: &Q) -> Option<&V> where K: Borrow, - Q: Hash + Eq+ ?Sized, + Q: Hash + Eq + ?Sized, { let k_hash = self.inner.as_ref().hash_key(k); self.get_prehashed(k, k_hash) diff --git a/src/hashtrie/mod.rs b/src/hashtrie/mod.rs index 13d80fd..0308aff 100644 --- a/src/hashtrie/mod.rs +++ b/src/hashtrie/mod.rs @@ -16,7 +16,7 @@ //! //! This structure is very different to the `im` crate. The `im` crate is //! sync + send over individual operations. This means that multiple writes can -//! be interleaved atomicly and safely, and the readers always see the latest +//! be interleaved atomically and safely, and the readers always see the latest //! data. While this is potentially useful to a set of problems, transactional //! 
structures are suited to problems where readers have to maintain consistent //! data views for a duration of time, cpu cache friendly behaviours and @@ -105,7 +105,7 @@ impl { #[cfg(test)] fn get_tree_density(&self) -> (usize, usize) { - // Walk the tree and calculate the packing effeciency. + // Walk the tree and calculate the packing efficiency. let rref = self.get_root_ref(); rref.tree_density() } @@ -498,7 +498,7 @@ impl CursorWrite { */ /* - // Now work up the tree and clean up the remaining path inbetween + // Now work up the tree and clean up the remaining path in between let result = clone_and_split_off_prune_lt(&mut self.root, self.txid, k); // println!("clone_and_split_off_prune_lt -> {:?}", result); match result { @@ -785,7 +785,7 @@ fn clone_and_insert( } CRInsertState::NoClone(_res) => { // If our descendant did not clone, then we don't have to either. - unreachable!("Shoud never be possible."); + unreachable!("Should never be possible."); // CRInsertState::NoClone(res) } CRInsertState::Split(_rnode) => { @@ -970,7 +970,6 @@ fn clone_and_remove( match clone_and_remove(anode, txid, k, last_seen, first_seen) { CRRemoveState::NoClone(_res) => { unreachable!("Should never occur"); - // CRRemoveState::NoClone(res) } CRRemoveState::Clone(res, lnode) => { nmref.replace_by_idx(anode_idx, lnode); @@ -997,15 +996,13 @@ fn clone_and_remove( BranchShrinkState::Merge(dnode) => { // Right was merged to left, and we remain // valid - // println!("ls push 20 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); last_seen.push(dnode); CRRemoveState::Clone(res, cnode) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values. 
- // println!("ls push 21 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); last_seen.push(dnode); CRRemoveState::CloneShrink(res, cnode) @@ -1048,7 +1045,7 @@ fn clone_and_remove( CRRemoveState::NoClone(res) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values, so we begin to shrink up. // println!("ls push 23 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); @@ -1080,7 +1077,7 @@ fn clone_and_remove( CRRemoveState::NoClone(res) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values. // println!("ls push 25 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); @@ -1424,7 +1421,7 @@ mod tests { // ^ // \----- nnode // - // Check leaf split inbetween l/sl (new txn) + // Check leaf split in between l/sl (new txn) let lnode = create_leaf_node_full(10); let rnode = create_leaf_node_full(20); let root = Node::new_branch(0, lnode, rnode); @@ -1481,7 +1478,7 @@ mod tests { // ^ // \----- nnode // - // Check leaf split inbetween l/sl (same txn) + // Check leaf split in between l/sl (same txn) // let lnode = create_leaf_node(10); let rnode = create_leaf_node(20); @@ -1785,7 +1782,7 @@ mod tests { // Check that a single value can be removed correctly without change. // Check that a missing value is removed as "None". // Check that emptying the root is ok. - // BOTH of these need new txns to check clone, and then re-use txns. + // BOTH of these need new txns to check clone, and then reuse txns. // // let lnode = create_leaf_node_full(0); @@ -2541,7 +2538,7 @@ mod tests { #[test] fn test_bptree2_cursor_split_off_lt_01() { - // Make a tree witth just a leaf + // Make a tree with just a leaf // Do a split_off_lt. 
let node = create_leaf_node(0); let sb = SuperBlock::new_test(1, node); @@ -2559,7 +2556,7 @@ mod tests { #[test] fn test_bptree2_cursor_split_off_lt_02() { - // Make a tree witth just a leaf + // Make a tree with just a leaf // Do a split_off_lt. let node = create_leaf_node_full(10); let sb = SuperBlock::new_test(1, node); @@ -2577,7 +2574,7 @@ mod tests { #[test] fn test_bptree2_cursor_split_off_lt_03() { - // Make a tree witth just a leaf + // Make a tree with just a leaf // Do a split_off_lt. let node = create_leaf_node_full(10); let sb = SuperBlock::new_test(1, node); diff --git a/src/internals/bptree/iter.rs b/src/internals/bptree/iter.rs index ee94bda..92f64cc 100644 --- a/src/internals/bptree/iter.rs +++ b/src/internals/bptree/iter.rs @@ -85,7 +85,7 @@ where if let Some((bref, bpidx)) = self.stack.back_mut() { let wbranch = branch_ref!(*bref, K, V); // We were currently looking at bpidx in bref. Increment and - // check whats next. + // check what's next. *bpidx += 1; if let Some(node) = wbranch.get_idx_checked(*bpidx) { @@ -230,7 +230,7 @@ where if let Some((bref, bpidx)) = self.stack.back_mut() { let wbranch = branch_ref!(*bref, K, V); // We were currently looking at bpidx in bref. Increment and - // check whats next. + // check what's next. // NOTE: If this underflows, it's okay because idx_checked won't // return the Some case! let (nidx, oflow) = (*bpidx).overflowing_sub(1); @@ -349,7 +349,7 @@ impl DoubleEndedIterator for Iter<'_, '_, K, V } } -/// Iterater over references to Keys stored in the map. +/// Iterator over references to Keys stored in the map. pub struct KeyIter<'n, 'a, K, V> where K: Ord + Clone + Debug, @@ -386,7 +386,7 @@ impl DoubleEndedIterator for KeyIter<'_, '_, K } } -/// Iterater over references to Values stored in the map. +/// Iterator over references to Values stored in the map. 
pub struct ValueIter<'n, 'a, K, V> where K: Ord + Clone + Debug, diff --git a/src/internals/bptree/node.rs b/src/internals/bptree/node.rs index e00215c..45ce7a1 100644 --- a/src/internals/bptree/node.rs +++ b/src/internals/bptree/node.rs @@ -659,7 +659,7 @@ impl Leaf { if self.count() >= L_CAPACITY { // Overflow to a new node if idx >= self.count() { - // Greate than all else, split right + // Greater than all else, split right let rnode = Node::new_leaf_ins(self.meta.0, k, v); LeafInsertState::Split(rnode) } else if idx == 0 { @@ -1115,7 +1115,7 @@ impl Branch { // leaf when it grows, it splits to the right. That importantly // means that we only need to insert to replace the min and it's // right leaf, or anything higher. As a result, we are always - // targetting ins_idx and leaf_ins_idx = ins_idx + 1. + // targeting ins_idx and leaf_ins_idx = ins_idx + 1. // // We have a situation like: // @@ -1343,7 +1343,7 @@ impl Branch { // / \ / // l1 l2 r1 // - // This means rbranch issues a cloneshrink to root. clone shrink must contain the remainer + // This means rbranch issues a cloneshrink to root. clone shrink must contain the remainder // so that it can be reparented: // // root @@ -1360,7 +1360,7 @@ impl Branch { // // So, we have to analyse the situation. // * Have left or right been emptied? (how to handle when branches) - // * Is left or right belowe a reasonable threshold? + // * Is left or right below a reasonable threshold? // * Does the opposite have capacity to remain valid? debug_assert_branch!(self); @@ -1509,7 +1509,7 @@ impl Branch { debug_assert!(self.count() > right.count()); // Starting index of where we move from. We work normally from a branch // with only zero (but the base) branch item, but we do the math anyway - // to be sure incase we change later. + // to be sure in case we change later. // // So, self.len must be larger, so let's give a few examples here. 
// 4 = 7 - (7 + 0) / 2 (will move 4, 5, 6) @@ -1756,7 +1756,7 @@ impl Branch { BranchTrimState::Promote(rnode) } else { // * A key is between two values. We can remove everything less, but not - // the assocated. For example, remove 6 would cause n1, n2 to be removed, but + // the associated. For example, remove 6 would cause n1, n2 to be removed, but // the prune/walk will have to examine n3 to know about further changes. debug_assert!(idx > 0); @@ -2108,7 +2108,7 @@ mod tests { } assert!(leaf.count() == 1); assert!(leaf.max() == &(L_CAPACITY - 1)); - // Remove a non-existant value. + // Remove a non-existent value. let r = leaf.remove(&(L_CAPACITY + 20)); if let LeafRemoveState::Ok(None) = r { // Ok! @@ -2124,7 +2124,7 @@ mod tests { assert!(false); } assert!(leaf.count() == 0); - // Remove non-existant post shrink. Should never happen + // Remove non-existent post shrink. Should never happen // but safety first! let r = leaf.remove(&0); if let LeafRemoveState::Shrink(None) = r { @@ -2285,7 +2285,7 @@ mod tests { assert!(branch_ref.verify()); // Test .min works on our descendants assert!(branch_ref.min() == &10); - // Test .max works on our descendats. + // Test .max works on our descendants. assert!(branch_ref.max() == &(20 + L_CAPACITY - 1)); // Get some k within the leaves. assert!(branch_ref.get_ref(&11) == Some(&11)); diff --git a/src/internals/hashmap/cursor.rs b/src/internals/hashmap/cursor.rs index bc83096..8355330 100644 --- a/src/internals/hashmap/cursor.rs +++ b/src/internals/hashmap/cursor.rs @@ -197,7 +197,7 @@ pub(crate) trait CursorReadOps { #[cfg(test)] fn get_tree_density(&self) -> (usize, usize, usize) { - // Walk the tree and calculate the packing effeciency. + // Walk the tree and calculate the packing efficiency. let rref = self.get_root_ref(); rref.tree_density() } @@ -731,7 +731,7 @@ fn clone_and_insert( } CRInsertState::NoClone(_res) => { // If our descendant did not clone, then we don't have to either. 
- unreachable!("Shoud never be possible."); + unreachable!("Should never be possible."); // CRInsertState::NoClone(res) } CRInsertState::Split(_rnode) => { @@ -950,7 +950,7 @@ fn clone_and_remove( CRRemoveState::Clone(res, cnode) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values. // println!("ls push 21 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); @@ -995,7 +995,7 @@ fn clone_and_remove( CRRemoveState::NoClone(res) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values, so we begin to shrink up. // println!("ls push 23 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); @@ -1027,7 +1027,7 @@ fn clone_and_remove( CRRemoveState::NoClone(res) } BranchShrinkState::Shrink(dnode) => { - // Right was merged to left, but we have now falled under the needed + // Right was merged to left, but we have now fallen under the needed // amount of values. // println!("ls push 25 {:?}", dnode); debug_assert!(!last_seen.contains(&dnode)); @@ -1221,7 +1221,7 @@ mod tests { // ^ // \----- nnode // - // Check leaf split inbetween l/sl (new txn) + // Check leaf split in between l/sl (new txn) let lnode = create_leaf_node_full(10); let rnode = create_leaf_node_full(20); let root = Node::new_branch(0, lnode, rnode); @@ -1278,7 +1278,7 @@ mod tests { // ^ // \----- nnode // - // Check leaf split inbetween l/sl (same txn) + // Check leaf split in between l/sl (same txn) // let lnode = create_leaf_node(10); let rnode = create_leaf_node(20); @@ -1584,7 +1584,7 @@ mod tests { // Check that a single value can be removed correctly without change. // Check that a missing value is removed as "None". // Check that emptying the root is ok. 
- // BOTH of these need new txns to check clone, and then re-use txns. + // BOTH of these need new txns to check clone, and then reuse txns. // // let lnode = create_leaf_node_full(0); diff --git a/src/internals/hashmap/iter.rs b/src/internals/hashmap/iter.rs index cb96844..837b4b1 100644 --- a/src/internals/hashmap/iter.rs +++ b/src/internals/hashmap/iter.rs @@ -187,7 +187,7 @@ impl<'a, K: Clone + Hash + Eq + Debug, V: Clone> Iterator for Iter<'a, K, V> { } } -/// Iterater over references to Keys stored in the map. +/// Iterator over references to Keys stored in the map. pub struct KeyIter<'a, K, V> where K: Hash + Eq + Clone + Debug, @@ -217,7 +217,7 @@ impl<'a, K: Clone + Hash + Eq + Debug, V: Clone> Iterator for KeyIter<'a, K, V> } } -/// Iterater over references to Values stored in the map. +/// Iterator over references to Values stored in the map. pub struct ValueIter<'a, K, V> where K: Hash + Eq + Clone + Debug, diff --git a/src/internals/hashmap/mod.rs b/src/internals/hashmap/mod.rs index a72fc8d..e675a8e 100644 --- a/src/internals/hashmap/mod.rs +++ b/src/internals/hashmap/mod.rs @@ -11,7 +11,7 @@ //! //! This structure is very different to the `im` crate. The `im` crate is //! sync + send over individual operations. This means that multiple writes can -//! be interleaved atomicly and safely, and the readers always see the latest +//! be interleaved atomically and safely, and the readers always see the latest //! data. While this is potentially useful to a set of problems, transactional //! structures are suited to problems where readers have to maintain consistent //! 
data views for a duration of time, cpu cache friendly behaviours and diff --git a/src/internals/hashmap/node.rs b/src/internals/hashmap/node.rs index c69abdd..1a5f69f 100644 --- a/src/internals/hashmap/node.rs +++ b/src/internals/hashmap/node.rs @@ -683,7 +683,7 @@ impl Leaf { if self.slots() >= H_CAPACITY { // Overflow to a new node if idx >= self.slots() { - // Greate than all else, split right + // Greater than all else, split right let rnode = Node::new_leaf_ins(unsafe { self.ctrl.a.0 .0 }, h, k, v); LeafInsertState::Split(rnode) } else if idx == 0 { @@ -1199,7 +1199,7 @@ impl Branch { // leaf when it grows, it splits to the right. That importantly // means that we only need to insert to replace the min and it's // right leaf, or anything higher. As a result, we are always - // targetting ins_idx and leaf_ins_idx = ins_idx + 1. + // targeting ins_idx and leaf_ins_idx = ins_idx + 1. // // We have a situation like: // @@ -1441,7 +1441,7 @@ impl Branch { // / \ / // l1 l2 r1 // - // This means rbranch issues a cloneshrink to root. clone shrink must contain the remainer + // This means rbranch issues a cloneshrink to root. clone shrink must contain the remainder // so that it can be reparented: // // root // @@ -1458,7 +1458,7 @@ impl Branch { // // So, we have to analyse the situation. // * Have left or right been emptied? (how to handle when branches) - // * Is left or right belowe a reasonable threshold? + // * Is left or right below a reasonable threshold? // * Does the opposite have capacity to remain valid? debug_assert_branch!(self); @@ -1616,7 +1616,7 @@ impl Branch { debug_assert!(self.slots() > right.slots()); // Starting index of where we move from. We work normally from a branch // with only zero (but the base) branch item, but we do the math anyway - // to be sure incase we change later. + // to be sure in case we change later. // // So, self.len must be larger, so let's give a few examples here. 
// 4 = 7 - (7 + 0) / 2 (will move 4, 5, 6) @@ -1885,7 +1885,7 @@ impl Branch { BranchTrimState::Promote(rnode) } else { // * A key is between two values. We can remove everything less, but not - // the assocated. For example, remove 6 would cause n1, n2 to be removed, but + // the associated. For example, remove 6 would cause n1, n2 to be removed, but // the prune/walk will have to examine n3 to know about further changes. debug_assert!(idx > 0); @@ -2291,7 +2291,7 @@ mod tests { } assert!(leaf.slots() == 1); assert!(leaf.max() == (H_CAPACITY - 1) as u64); - // Remove a non-existant value. + // Remove a non-existent value. let r = leaf.remove((H_CAPACITY + 20) as u64, &(H_CAPACITY + 20)); if let LeafRemoveState::Ok(None) = r { // Ok! @@ -2307,7 +2307,7 @@ mod tests { assert!(false); } assert!(leaf.slots() == 0); - // Remove non-existant post shrink. Should never happen + // Remove non-existent post shrink. Should never happen // but safety first! let r = leaf.remove(0, &0); if let LeafRemoveState::Shrink(None) = r { @@ -2505,7 +2505,7 @@ mod tests { assert!(branch_ref.verify()); // Test .min works on our descendants assert!(branch_ref.min() == 10); - // Test .max works on our descendats. + // Test .max works on our descendants. assert!(branch_ref.max() == (20 + H_CAPACITY - 1) as u64); // Get some k within the leaves. assert!(branch_ref.get_ref(11, &11) == Some(&11)); diff --git a/src/internals/hashtrie/cursor.rs b/src/internals/hashtrie/cursor.rs index f93337b..ae8f5ce 100644 --- a/src/internals/hashtrie/cursor.rs +++ b/src/internals/hashtrie/cursor.rs @@ -851,7 +851,7 @@ impl CursorWrite { let v = if tgt_ptr.is_dirty() { let tgt_bkt_mut = tgt_ptr.as_bucket_mut::(); let Datum { v, .. } = tgt_bkt_mut.remove(0); - // Keep any pointer that ISNT the one we are oob freeing. + // Keep any pointer that ISN'T the one we are oob freeing. 
self.first_seen.retain(|e| *e != tgt_ptr); tgt_ptr.free::(); v diff --git a/src/internals/hashtrie/iter.rs b/src/internals/hashtrie/iter.rs index c99a56d..a7e33c6 100644 --- a/src/internals/hashtrie/iter.rs +++ b/src/internals/hashtrie/iter.rs @@ -82,7 +82,7 @@ impl<'a, K: Clone + Hash + Eq + Debug, V: Clone> Iterator for Iter<'a, K, V> { } } -/// Iterater over references to Keys stored in the map. +/// Iterator over references to Keys stored in the map. pub struct KeyIter<'a, K, V> where K: Hash + Eq + Clone + Debug, @@ -112,7 +112,7 @@ impl<'a, K: Clone + Hash + Eq + Debug, V: Clone> Iterator for KeyIter<'a, K, V> } } -/// Iterater over references to Values stored in the map. +/// Iterator over references to Values stored in the map. pub struct ValueIter<'a, K, V> where K: Hash + Eq + Clone + Debug, diff --git a/src/internals/lincowcell/mod.rs b/src/internals/lincowcell/mod.rs index 1526f1d..a2726e3 100644 --- a/src/internals/lincowcell/mod.rs +++ b/src/internals/lincowcell/mod.rs @@ -1,6 +1,6 @@ //! A CowCell with linear drop behaviour //! -//! YOU SHOULD NOT USE THIS TYPE! Normaly concurrent cells do NOT require the linear dropping +//! YOU SHOULD NOT USE THIS TYPE! Normally concurrent cells do NOT require the linear dropping //! behaviour that this implements, and it will only make your application //! worse for it. Consider `CowCell` and `EbrCell` instead. @@ -351,7 +351,7 @@ mod tests { // Now commit cc_wrtxn.commit(); } - // Should not be percieved by the old txn. + // Should not be perceived by the old txn. assert_eq!(cc_rotxn_a.work.data.x, 0); let cc_rotxn_c = cc.read(); // Is visible to the new one though. diff --git a/src/internals/lincowcell_async/mod.rs b/src/internals/lincowcell_async/mod.rs index cf92de6..2438ebe 100644 --- a/src/internals/lincowcell_async/mod.rs +++ b/src/internals/lincowcell_async/mod.rs @@ -1,6 +1,6 @@ //! A CowCell with linear drop behaviour, and async locking. //! -//! YOU SHOULD NOT USE THIS TYPE! 
Normaly concurrent cells do NOT require the linear dropping +//! YOU SHOULD NOT USE THIS TYPE! Normally concurrent cells do NOT require the linear dropping //! behaviour that this implements, and it will only make your application //! worse for it. Consider `CowCell` and `EbrCell` instead. @@ -341,7 +341,7 @@ mod tests { // Now commit cc_wrtxn.commit(); } - // Should not be percieved by the old txn. + // Should not be perceived by the old txn. assert_eq!(cc_rotxn_a.work.data.x, 0); let cc_rotxn_c = cc.read(); // Is visible to the new one though. diff --git a/src/lib.rs b/src/lib.rs index deed9d1..2bad767 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,7 +64,7 @@ pub mod threadcache; // This is where the scary rust lives. #[cfg(feature = "maps")] pub mod internals; -// This is where the gud rust lives. +// This is where the good rust lives. #[cfg(feature = "maps")] mod utils; diff --git a/src/threadcache/mod.rs b/src/threadcache/mod.rs index 91e1eb7..9618cd8 100644 --- a/src/threadcache/mod.rs +++ b/src/threadcache/mod.rs @@ -216,7 +216,7 @@ where /// Commit the changes to this cache so they are visible to others. If you do NOT call /// commit, all changes to this cache are rolled back to prevent invalidate states. pub fn commit(mut self) { - // We are commiting, so lets get ready. + // We are committing, so let's get ready. // First, anything that we touched in the rollback set will need // to be invalidated from other caches. It doesn't matter if we // removed or inserted, it has the same effect on them.