From b5046c8ccbd49587a604adeea8da48a1599888fa Mon Sep 17 00:00:00 2001
From: Bruce Mitchener
Date: Tue, 1 Oct 2024 00:21:49 +0700
Subject: [PATCH] Fix typos (#521)

---
 dogsdogsdogs/src/operators/half_join.rs   | 2 +-
 doop/src/main.rs                          | 2 +-
 examples/projekt.rs                       | 4 ++--
 examples/spines.rs                        | 2 +-
 interactive/src/plan/sfw.rs               | 2 +-
 server/src/lib.rs                         | 2 +-
 src/algorithms/identifiers.rs             | 2 +-
 src/consolidation.rs                      | 2 +-
 src/dynamic/pointstamp.rs                 | 4 ++--
 src/logging.rs                            | 2 +-
 src/operators/arrange/arrangement.rs      | 2 +-
 src/operators/arrange/mod.rs              | 2 +-
 src/operators/reduce.rs                   | 4 ++--
 src/trace/implementations/mod.rs          | 6 +++---
 src/trace/implementations/rhh.rs          | 2 +-
 src/trace/implementations/spine_fueled.rs | 2 +-
 src/trace/mod.rs                          | 4 ++--
 17 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/dogsdogsdogs/src/operators/half_join.rs b/dogsdogsdogs/src/operators/half_join.rs
index 5eb5a872e..71a92696a 100644
--- a/dogsdogsdogs/src/operators/half_join.rs
+++ b/dogsdogsdogs/src/operators/half_join.rs
@@ -26,7 +26,7 @@
 //! collections.
 //!
 //! There are some caveats about ties, and we should treat each `time` for
-//! each input as occuring at distinct times, one after the other (so that
+//! each input as occurring at distinct times, one after the other (so that
 //! ties are resolved by the index of the input). There is also the matter
 //! of logical compaction, which should not be done in a way that prevents
 //! the correct determination of the total order comparison.
diff --git a/doop/src/main.rs b/doop/src/main.rs
index d6ad19c19..c0f0f03df 100644
--- a/doop/src/main.rs
+++ b/doop/src/main.rs
@@ -1199,7 +1199,7 @@ fn main() {

         while probe.less_than(inputs.0[0].time()) {
             worker.step();
         }
-        println!("{:?}\tcomputation initalized", timer.elapsed());
+        println!("{:?}\tcomputation initialized", timer.elapsed());

         if batch > 0 {
diff --git a/examples/projekt.rs b/examples/projekt.rs
index 3a6303ecc..f9929bc3a 100644
--- a/examples/projekt.rs
+++ b/examples/projekt.rs
@@ -109,7 +109,7 @@ fn main() {

         });

-        // Introduce XY projektion.
+        // Introduce XY projection.
         xy_goal.insert((0, 0));
         xy_goal.insert((0, 1));
         xy_goal.insert((0, 3));
@@ -125,7 +125,7 @@ fn main() {
         xy_goal.insert((4, 1));
         xy_goal.insert((4, 2));

-        // Introduce XZ projektion.
+        // Introduce XZ projection.
         xz_goal.insert((0, 2));
         xz_goal.insert((0, 3));
         xz_goal.insert((0, 4));
diff --git a/examples/spines.rs b/examples/spines.rs
index 934ec6696..7fc726783 100644
--- a/examples/spines.rs
+++ b/examples/spines.rs
@@ -72,7 +72,7 @@ fn main() {
                     .probe_with(&mut probe);
             }
             _ => {
-                println!("unreconized mode: {:?}", mode)
+                println!("unrecognized mode: {:?}", mode)
             }
         }

diff --git a/interactive/src/plan/sfw.rs b/interactive/src/plan/sfw.rs
index 0965227ec..711b71ce5 100644
--- a/interactive/src/plan/sfw.rs
+++ b/interactive/src/plan/sfw.rs
@@ -35,7 +35,7 @@ use differential_dataflow::{Collection, ExchangeData};
 use crate::plan::{Plan, Render};
 use crate::{TraceManager, Time, Diff, Datum};

-/// A multiway join of muliple relations.
+/// A multiway join of multiple relations.
 ///
 /// By expressing multiple relations and required equivalances between their attributes,
 /// we can more efficiently design incremental update strategies without materializing
diff --git a/server/src/lib.rs b/server/src/lib.rs
index d90568714..2dbe570d9 100644
--- a/server/src/lib.rs
+++ b/server/src/lib.rs
@@ -32,7 +32,7 @@ pub type Environment<'a, 'b> = (
 ///
 /// This type is meant to be a smart pointer for a type `T` that needs to keep
 /// a `Library` alive, perhaps because its methods would call in to the library.
-/// The type should have a specified drop order (viz RFC 1857) which guarentees
+/// The type should have a specified drop order (viz RFC 1857) which guarantees
 /// that the shared library reference drops only after the element itself is
 /// dropped. It also implements `Deref` and `DerefMut` to provide the experience
 /// of a `T` itself.
diff --git a/src/algorithms/identifiers.rs b/src/algorithms/identifiers.rs
index 19653d8d3..91e18647c 100644
--- a/src/algorithms/identifiers.rs
+++ b/src/algorithms/identifiers.rs
@@ -52,7 +52,7 @@ where
         // additions and subtractions of losers, rather than reproducing
         // the winners. This is done under the premise that losers are
         // very rare, and maintaining winners in both the input and output
-        // of `reduce` is an unneccesary duplication.
+        // of `reduce` is an unnecessary duplication.

         use crate::collection::AsCollection;

diff --git a/src/consolidation.rs b/src/consolidation.rs
index b9495d104..310628f4a 100644
--- a/src/consolidation.rs
+++ b/src/consolidation.rs
@@ -322,7 +322,7 @@ pub fn consolidate_container(container: &mut C, target: &m
     for item in permutation.drain(..) {
         let (key, diff) = C::into_parts(item);
         match &mut previous {
-            // Initial iteration, remeber key and diff.
+            // Initial iteration, remember key and diff.
             // TODO: Opportunity for GatCow for diff.
             None => previous = Some((key, diff.into_owned())),
             Some((prevkey, d)) => {
diff --git a/src/dynamic/pointstamp.rs b/src/dynamic/pointstamp.rs
index 0199fe85d..c68ad4fc6 100644
--- a/src/dynamic/pointstamp.rs
+++ b/src/dynamic/pointstamp.rs
@@ -68,7 +68,7 @@ impl PointStamp {
     /// Returns the wrapped vector.
     ///
     /// This method is the support way to mutate the contents of `self`, by extracting
-    /// the vector and then re-introducting it with `PointStamp::new` to re-establish
+    /// the vector and then re-introducing it with `PointStamp::new` to re-establish
     /// the invariant that the vector not end with `T::minimum`.
     pub fn into_vec(self) -> Vec<T> {
         self.vector
@@ -196,7 +196,7 @@ impl PathSummary> for PointStampSummary
 impl PartialOrder for PointStampSummary {
     fn less_equal(&self, other: &Self) -> bool {
         // If the `retain`s are not the same, there is some coordinate which
-        // could either be bigger or smaller as the timestamp or the replacemnt.
+        // could either be bigger or smaller as the timestamp or the replacement.
        // In principle, a `T::minimum()` extension could break this rule, and
        // we could tighten this logic if needed; I think it is fine not to though.
        self.retain == other.retain
diff --git a/src/logging.rs b/src/logging.rs
index 493c4e4e1..4d7de783d 100644
--- a/src/logging.rs
+++ b/src/logging.rs
@@ -95,7 +95,7 @@ impl From for DifferentialEvent { fn from(e: MergeEvent) -> Self { D
 /// A merge failed to complete in time.
 #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]
 pub struct MergeShortfall {
-    /// Operator identifer.
+    /// Operator identifier.
     pub operator: usize,
     /// Which order of magnitude.
     pub scale: usize,
diff --git a/src/operators/arrange/arrangement.rs b/src/operators/arrange/arrangement.rs
index ecebb2068..fd9e7fd08 100644
--- a/src/operators/arrange/arrangement.rs
+++ b/src/operators/arrange/arrangement.rs
@@ -480,7 +480,7 @@ where
                 //
                 // 1. If any held capabilities are not in advance of the new input frontier,
                 //    we must carve out updates now in advance of the new input frontier and
-                //    transmit them as batches, which requires appropriate *single* capabilites;
+                //    transmit them as batches, which requires appropriate *single* capabilities;
                 //    Until timely dataflow supports multiple capabilities on messages, at least.
                 //
                 // 2. If there are no held capabilities in advance of the new input frontier,
diff --git a/src/operators/arrange/mod.rs b/src/operators/arrange/mod.rs
index 591822797..2ea9319ca 100644
--- a/src/operators/arrange/mod.rs
+++ b/src/operators/arrange/mod.rs
@@ -35,7 +35,7 @@
 //!
 //! Importantly, the `Trace` type has no connection to the timely dataflow runtime.
 //! This means a trace can be used in a variety of contexts where a `Stream` would not be
-//! appropriate, for example outside of the dataflow in which the arragement is performed.
+//! appropriate, for example outside of the dataflow in which the arrangement is performed.
 //! Traces may be directly inspected by any code with access to them, and they can even be
 //! used to introduce the batches to other dataflows with the `import` method.

diff --git a/src/operators/reduce.rs b/src/operators/reduce.rs
index 495a77a61..a26f0072e 100644
--- a/src/operators/reduce.rs
+++ b/src/operators/reduce.rs
@@ -400,7 +400,7 @@ where
                     let mut batch_cursors = Vec::new();
                     let mut batch_storage = Vec::new();

-                    // Downgrate previous upper limit to be current lower limit.
+                    // Downgrade previous upper limit to be current lower limit.
                     lower_limit.clear();
                     lower_limit.extend(upper_limit.borrow().iter().cloned());

@@ -603,7 +603,7 @@ where
                    }
                    capabilities = new_capabilities;

-                    // ensure that observed progres is reflected in the output.
+                    // ensure that observed progress is reflected in the output.
                    output_writer.seal(upper_limit.clone());

                } else {
diff --git a/src/trace/implementations/mod.rs b/src/trace/implementations/mod.rs
index bb6dc7a6a..168f5427c 100644
--- a/src/trace/implementations/mod.rs
+++ b/src/trace/implementations/mod.rs
@@ -24,7 +24,7 @@
 //!
 //! Each of these representations is best suited for different data, but they can be combined to get the
 //! benefits of each, as appropriate. There are several `Cursor` combiners, `CursorList` and `CursorPair`,
-//! for homogenous and inhomogenous cursors, respectively.
+//! for homogeneous and inhomogeneous cursors, respectively.
 //!
 //! #Musings
 //!
@@ -527,7 +527,7 @@ pub mod containers {
        /// Indicates if the length is zero.
        fn is_empty(&self) -> bool { self.len() == 0 }

-        /// Reports the number of elements satisfing the predicate.
+        /// Reports the number of elements satisfying the predicate.
        ///
        /// This methods *relies strongly* on the assumption that the predicate
        /// stays false once it becomes false, a joint property of the predicate
@@ -537,7 +537,7 @@ pub mod containers {

            let small_limit = 8;

-            // Exponential seach if the answer isn't within `small_limit`.
+            // Exponential search if the answer isn't within `small_limit`.
            if end > start + small_limit && function(self.index(start + small_limit)) {

                // start with no advance
diff --git a/src/trace/implementations/rhh.rs b/src/trace/implementations/rhh.rs
index 61ee975aa..01f314650 100644
--- a/src/trace/implementations/rhh.rs
+++ b/src/trace/implementations/rhh.rs
@@ -683,7 +683,7 @@ mod val_batch {
            // We may have already passed `key`, and confirmed its absence, but our goal is to
            // find the next key afterwards so that users can, for example, alternately iterate.
            while self.key_valid(storage) && storage.storage.advance_key(self.key_cursor, key) {
-                // TODO: Based on our encoding, we could skip logarithmically over empy regions by galloping
+                // TODO: Based on our encoding, we could skip logarithmically over empty regions by galloping
                // through `storage.keys_offs`, which stays put for dead space.
                self.key_cursor += 1;
            }
diff --git a/src/trace/implementations/spine_fueled.rs b/src/trace/implementations/spine_fueled.rs
index eb6b44585..e6e014aa6 100644
--- a/src/trace/implementations/spine_fueled.rs
+++ b/src/trace/implementations/spine_fueled.rs
@@ -815,7 +815,7 @@ impl MergeState where B::Time: Eq {
    /// between Vacant entries and structurally empty batches, which should be done
    /// with the `is_complete()` method.
    ///
-    /// There is the addional option of input batches.
+    /// There is the additional option of input batches.
    fn complete(&mut self) -> Option<(B, Option<(B, B)>)> {
        match std::mem::replace(self, MergeState::Vacant) {
            MergeState::Vacant => None,
diff --git a/src/trace/mod.rs b/src/trace/mod.rs
index f7aadcb99..6ccf23bd4 100644
--- a/src/trace/mod.rs
+++ b/src/trace/mod.rs
@@ -102,7 +102,7 @@ pub trait TraceReader {
    /// Logical compaction is important, as it allows the trace to forget historical distinctions between update
    /// times, and maintain a compact memory footprint over an unbounded update history.
    ///
-    /// By advancing the logical compaction frontier, the caller unblocks merging of otherwise equivalent udates,
+    /// By advancing the logical compaction frontier, the caller unblocks merging of otherwise equivalent updates,
    /// but loses the ability to observe historical detail that is not beyond `frontier`.
    ///
    /// It is an error to call this method with a frontier not equal to or beyond the most recent arguments to
@@ -153,7 +153,7 @@ pub trait TraceReader {

    /// Reports the physical compaction frontier.
    ///
-    /// All batches containing updates beyond this frontier will not be merged with ohter batches. This allows
+    /// All batches containing updates beyond this frontier will not be merged with other batches. This allows
    /// the caller to create a cursor through any frontier beyond the physical compaction frontier, with the
    /// `cursor_through()` method. This functionality is primarily of interest to the `join` operator, and any
    /// other operators who need to take notice of the physical structure of update batches.
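
The doc comment corrected in `src/trace/implementations/mod.rs` describes counting the elements that satisfy a predicate which, once false, stays false, using exponential ("galloping") search so the cost is logarithmic in the answer. Below is a minimal, self-contained sketch of that idea over a plain slice; the name `advance_by` and its signature are illustrative inventions for this note, not the crate's API.

```rust
/// Counts the prefix of `slice` whose elements satisfy `pred`, assuming that
/// once `pred` returns false it remains false for the rest of the slice.
///
/// Illustrative sketch only: gallop by doubling, then binary search the last
/// doubling interval.
fn advance_by<T, F: Fn(&T) -> bool>(slice: &[T], pred: F) -> usize {
    // Gallop: double the probe index until an element fails the predicate
    // or we run off the end of the slice.
    let mut bound = 1;
    while bound < slice.len() && pred(&slice[bound]) {
        bound *= 2;
    }
    // Binary search within [bound / 2, min(bound, len)): everything before
    // `lo` is known (or assumed, by the prefix property) to satisfy `pred`.
    let mut lo = bound / 2;
    let mut hi = std::cmp::min(bound, slice.len());
    while lo < hi {
        let mid = lo + (hi - lo) / 2;
        if pred(&slice[mid]) { lo = mid + 1; } else { hi = mid; }
    }
    lo
}

fn main() {
    let data = [1, 2, 3, 10, 20, 30];
    // The predicate `x < 10` holds for a prefix of the sorted data only.
    assert_eq!(advance_by(&data, |x| *x < 10), 3);
    println!("prefix length: {}", advance_by(&data, |x| *x < 10));
}
```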