
chore: fix typos
Signed-off-by: Robert Pack <[email protected]>
roeap committed Jan 17, 2025
1 parent af3102e commit 0850bef
Showing 36 changed files with 135 additions and 132 deletions.
9 changes: 6 additions & 3 deletions crates/aws/helpers.rs
@@ -39,7 +39,10 @@ pub async fn setup_s3_context() -> TestContext {
     config.insert("AWS_ACCESS_KEY_ID".to_owned(), "deltalake".to_owned());
     config.insert("AWS_SECRET_ACCESS_KEY".to_owned(), "weloverust".to_owned());
     config.insert("AWS_S3_LOCKING_PROVIDER".to_owned(), "dynamodb".to_owned());
-    config.insert(constants::LOCK_TABLE_KEY_NAME.to_owned(), lock_table.clone());
+    config.insert(
+        constants::LOCK_TABLE_KEY_NAME.to_owned(),
+        lock_table.clone(),
+    );
     config.insert("AWS_ALLOW_HTTP".to_owned(), "TRUE".to_string());

     TestContext {
@@ -72,7 +75,7 @@ impl S3Cli {
         child.wait().unwrap();
     }

-    pub fn rm_recurive(&self, prefix: &str, endpoint: &str) {
+    pub fn rm_recursive(&self, prefix: &str, endpoint: &str) {
         let mut child = Command::new("aws")
             .args([
                 "s3",
@@ -140,7 +143,7 @@ struct S3 {
 impl Drop for S3 {
     fn drop(&mut self) {
         let cli = S3Cli::default();
-        cli.rm_recurive(&self.uri, &self.endpoint);
+        cli.rm_recursive(&self.uri, &self.endpoint);
         cli.delete_table(&self.lock_table, &self.endpoint);
     }
 }
4 changes: 2 additions & 2 deletions crates/aws/src/credentials.rs
@@ -211,7 +211,7 @@ mod options_tests {
 }

 /// Generate a random session name for assuming IAM roles
-fn assume_role_sessio_name() -> String {
+fn assume_role_session_name() -> String {
     let now = chrono::Utc::now();

     format!("delta-rs_{}", now.timestamp_millis())
@@ -256,7 +256,7 @@ fn assume_session_name(options: &StorageOptions) -> String {
     )
     .cloned();

-    assume_session.unwrap_or_else(assume_role_sessio_name)
+    assume_session.unwrap_or_else(assume_role_session_name)
 }

 /// Take a set of [StorageOptions] and produce an appropriate AWS SDK [SdkConfig]
2 changes: 1 addition & 1 deletion crates/aws/src/errors.rs
@@ -128,7 +128,7 @@ impl From<PutItemError> for LockClientError {
     fn from(err: PutItemError) -> Self {
         match err {
             PutItemError::ConditionalCheckFailedException(_) => {
-                unreachable!("error must be handled explicitely")
+                unreachable!("error must be handled explicitly")
             }
             PutItemError::ProvisionedThroughputExceededException(_) => {
                 LockClientError::ProvisionedThroughputExceeded
2 changes: 1 addition & 1 deletion crates/aws/src/lib.rs
@@ -249,7 +249,7 @@ impl DynamoDbLockClient {
     /// Transparently handles the case where that table already exists, so it's safe to call.
     /// After `create_table` operation is executed, the table state in DynamoDb is `creating`, and
     /// it's not immediately useable. This method does not wait for the table state to become
-    /// `active`, so transient failures might occurr when immediately using the lock client.
+    /// `active`, so transient failures might occur when immediately using the lock client.
     pub async fn try_create_lock_table(&self) -> Result<CreateLockTableResult, LockClientError> {
         let attribute_definitions = vec![
             AttributeDefinition::builder()
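Note: since `try_create_lock_table` can return while DynamoDB still reports the table as `creating`, a caller that needs the lock immediately can poll until the table is `active`. A minimal sketch, assuming a recent `aws-sdk-dynamodb` and `tokio`; the helper name, retry budget, and delay are illustrative assumptions, not delta-rs API:

use std::time::Duration;

use aws_sdk_dynamodb::types::TableStatus;
use aws_sdk_dynamodb::Client;

// Hypothetical helper: poll `describe_table` until the lock table reports
// ACTIVE, so the first lock operations do not hit transient failures.
async fn wait_for_table_active(
    client: &Client,
    table_name: &str,
) -> Result<bool, aws_sdk_dynamodb::Error> {
    for _ in 0..20 {
        // 20 attempts at 500 ms are arbitrary assumptions
        let out = client.describe_table().table_name(table_name).send().await?;
        if out.table().and_then(|t| t.table_status()) == Some(&TableStatus::Active) {
            return Ok(true);
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    Ok(false) // still creating; the caller can retry or give up
}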
2 changes: 1 addition & 1 deletion crates/aws/src/logstore/dynamodb_logstore.rs
@@ -329,7 +329,7 @@ pub enum RepairLogEntryResult {
     MovedFileAndFixedEntry,
     /// The database entry has been rewritten, but the file was already moved.
     FixedEntry,
-    /// Moved file, but the database entry was alrady updated.
+    /// Moved file, but the database entry was already updated.
     MovedFile,
     /// Both parts of the repair process where already carried.
     AlreadyCompleted,
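Note: the four variants encode which half of the two-phase repair (move the temporary commit file, then rewrite the DynamoDB entry) was still outstanding. A hedged sketch of consuming them; the import path is an assumption:

use deltalake_aws::logstore::RepairLogEntryResult;

// Illustrative only: summarize each repair outcome for logging.
fn describe_repair(result: &RepairLogEntryResult) -> &'static str {
    match result {
        RepairLogEntryResult::MovedFileAndFixedEntry => "moved commit file and rewrote the entry",
        RepairLogEntryResult::FixedEntry => "rewrote the entry; the file was already moved",
        RepairLogEntryResult::MovedFile => "moved the commit file; the entry was already updated",
        RepairLogEntryResult::AlreadyCompleted => "both repair steps were already done",
    }
}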
4 changes: 2 additions & 2 deletions crates/azure/src/config.rs
@@ -124,7 +124,7 @@ impl AzureConfigHelper {
         .all(|key| self.config.contains_key(key) || self.env_config.contains_key(key))
     }

-    /// Generate a cofiguration augmented with options from the environment
+    /// Generate a configuration augmented with options from the environment
     pub fn build(mut self) -> Result<HashMap<AzureConfigKey, String>> {
         let mut has_credential = false;

@@ -142,7 +142,7 @@ impl AzureConfigHelper {
             }
         }

-        // try partially avaialbe credentials augmented by environment
+        // try partially available credentials augmented by environment
        if !has_credential {
             for cred in &self.priority {
                 if self.has_any_config(cred) && self.has_full_config_with_env(cred) {
4 changes: 2 additions & 2 deletions crates/core/src/delta_datafusion/mod.rs
@@ -118,7 +118,7 @@ impl From<DataFusionError> for DeltaTableError {
     }
 }

-/// Convience trait for calling common methods on snapshot heirarchies
+/// Convenience trait for calling common methods on snapshot hierarchies
 pub trait DataFusionMixins {
     /// The physical datafusion schema of a table
     fn arrow_schema(&self) -> DeltaResult<ArrowSchemaRef>;
@@ -2659,7 +2659,7 @@ mod tests {
     #[tokio::test]
     async fn passes_sanity_checker_when_all_files_filtered() {
         // Run a query that filters out all files and sorts.
-        // Verify that it returns an empty set of rows without panicing.
+        // Verify that it returns an empty set of rows without panicking.
         //
         // Historically, we had a bug that caused us to emit a query plan with 0 partitions, which
         // datafusion rejected.
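Note: the `DataFusionMixins` trait touched above lets callers read the Arrow schema straight off a loaded table's state. A minimal, hedged sketch, assuming `table` is an already-loaded `DeltaTable`:

use deltalake_core::delta_datafusion::DataFusionMixins;
use deltalake_core::{DeltaResult, DeltaTable};

// Illustrative: fetch the physical arrow schema from the current snapshot.
fn print_schema(table: &DeltaTable) -> DeltaResult<()> {
    let schema = table.snapshot()?.arrow_schema()?;
    println!("{schema:#?}");
    Ok(())
}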
4 changes: 2 additions & 2 deletions crates/core/src/kernel/arrow/extract.rs
@@ -1,4 +1,4 @@
-//! Utilties to extract columns from a record batch or nested / complex arrays.
+//! Utilities to extract columns from a record batch or nested / complex arrays.
 use std::sync::Arc;

@@ -70,7 +70,7 @@ pub(crate) fn extract_column<'a>(
     if let Some(next_path_step) = remaining_path_steps.next() {
         match child.data_type() {
             DataType::Map(_, _) => {
-                // NOTE a map has exatly one child, but we wnat to be agnostic of its name.
+                // NOTE a map has exactly one child, but we want to be agnostic of its name.
                 // so we case the current array as map, and use the entries accessor.
                 let maparr = cast_column_as::<MapArray>(path_step, &Some(child))?;
                 if let Some(next_path) = remaining_path_steps.next() {
8 changes: 4 additions & 4 deletions crates/core/src/kernel/models/actions.rs
@@ -188,19 +188,19 @@ impl Protocol {
         mut self,
         writer_features: impl IntoIterator<Item = impl Into<WriterFeatures>>,
     ) -> Self {
-        let all_writer_feautures = writer_features
+        let all_writer_features = writer_features
             .into_iter()
             .map(|c| c.into())
             .collect::<HashSet<_>>();
-        if !all_writer_feautures.is_empty() {
+        if !all_writer_features.is_empty() {
             self.min_writer_version = 7;

             match self.writer_features {
                 Some(mut features) => {
-                    features.extend(all_writer_feautures);
+                    features.extend(all_writer_features);
                     self.writer_features = Some(features);
                 }
-                None => self.writer_features = Some(all_writer_feautures),
+                None => self.writer_features = Some(all_writer_features),
             };
         }
         self
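Note: a hedged usage sketch of the builder above; `Protocol::new` and the feature variant names are assumptions based on the surrounding API. The point is that supplying any writer feature bumps `min_writer_version` to 7:

use deltalake_core::kernel::{Protocol, WriterFeatures};

fn main() {
    // Illustrative: request two writer features; the builder collects them
    // into a HashSet and forces min_writer_version = 7.
    let protocol = Protocol::new(1, 2)
        .with_writer_features([WriterFeatures::AppendOnly, WriterFeatures::ChangeDataFeed]);
    assert_eq!(protocol.min_writer_version, 7);
}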
2 changes: 1 addition & 1 deletion crates/core/src/kernel/models/schema.rs
@@ -46,7 +46,7 @@ impl DataCheck for Invariant {
     }
 }

-/// Trait to add convenince functions to struct type
+/// Trait to add convenience functions to struct type
 pub trait StructTypeExt {
     /// Get all invariants in the schemas
     fn get_invariants(&self) -> Result<Vec<Invariant>, Error>;
2 changes: 1 addition & 1 deletion crates/core/src/kernel/snapshot/log_data.rs
@@ -158,7 +158,7 @@ impl LogicalFile<'_> {
     /// An object store [`Path`] to the file.
     ///
     /// this tries to parse the file string and if that fails, it will return the string as is.
-    // TODO assert consisent handling of the paths encoding when reading log data so this logic can be removed.
+    // TODO assert consistent handling of the paths encoding when reading log data so this logic can be removed.
     pub fn object_store_path(&self) -> Path {
         let path = self.path();
         // Try to preserve percent encoding if possible
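Note: the fallback described in the doc comment reduces to roughly the following, sketched on the assumption that `object_store`'s strict `Path::parse` is the percent-encoding-preserving route and `Path::from` the lenient one:

use object_store::path::Path;

// Illustrative reduction: keep percent-encoding when the raw string parses
// cleanly, otherwise fall back to treating it as a plain path.
fn to_object_store_path(raw: &str) -> Path {
    Path::parse(raw).unwrap_or_else(|_| Path::from(raw))
}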
6 changes: 3 additions & 3 deletions crates/core/src/kernel/snapshot/log_segment.rs
@@ -136,7 +136,7 @@ impl LogSegment {

     /// Try to create a new [`LogSegment`] from a slice of the log.
     ///
-    /// Ths will create a new [`LogSegment`] from the log with all relevant log files
+    /// This will create a new [`LogSegment`] from the log with all relevant log files
     /// starting at `start_version` and ending at `end_version`.
     pub async fn try_new_slice(
         table_root: &Path,
@@ -190,7 +190,7 @@ impl LogSegment {
         Ok(())
     }

-    /// Returns the highes commit version number in the log segment
+    /// Returns the highest commit version number in the log segment
     pub fn file_version(&self) -> Option<i64> {
         self.commit_files
             .iter()
@@ -358,7 +358,7 @@ impl LogSegment {
     /// Advance the log segment with new commits
     ///
     /// Returns an iterator over record batches, as if the commits were read from the log.
-    /// The input commits should be in order in which they would be commited to the table.
+    /// The input commits should be in order in which they would be committed to the table.
     pub(super) fn advance<'a>(
         &mut self,
         commits: impl IntoIterator<Item = &'a CommitData>,
2 changes: 1 addition & 1 deletion crates/core/src/kernel/snapshot/mod.rs
@@ -447,7 +447,7 @@ impl EagerSnapshot {

         schema_actions.insert(ActionType::Add);
         let checkpoint_stream = if new_slice.checkpoint_files.is_empty() {
-            // NOTE: we don't need to add the visitor relevant data here, as it is repüresented in teh state already
+            // NOTE: we don't need to add the visitor relevant data here, as it is repüresented in the state already
             futures::stream::iter(files.into_iter().map(Ok)).boxed()
         } else {
             let read_schema =
2 changes: 1 addition & 1 deletion crates/core/src/kernel/snapshot/replay.rs
@@ -513,7 +513,7 @@ impl LogReplayScanner {
                 self.seen.insert(seen_key(&r));
                 keep.push(false);
             }
-            // NOTE: there sould always be only one action per row.
+            // NOTE: there should always be only one action per row.
             (None, None) => debug!("WARNING: no action found for row"),
             (Some(a), Some(r)) => {
                 debug!(
2 changes: 1 addition & 1 deletion crates/core/src/kernel/snapshot/visitors.rs
@@ -144,7 +144,7 @@ mod tests {
             Some(Some(123))
         );

-        // test that only the first encountered txn ist tacked for every app id.
+        // test that only the first encountered txn is tacked for every app id.
         data_app.extend([None, Some("my-app")]);
         data_version.extend([None, Some(2)]);
         data_last_updated.extend([None, Some(124)]);
6 changes: 3 additions & 3 deletions crates/core/src/operations/cast/merge_schema.rs
@@ -336,9 +336,9 @@ fn merge_arrow_vec_fields(
                 // field.try_merge
                 f.set_metadata(right_field.metadata().clone());

-                let mut field_matadata = f.metadata().clone();
-                try_merge_metadata(&mut field_matadata, right_field.metadata())?;
-                f.set_metadata(field_matadata);
+                let mut field_metadata = f.metadata().clone();
+                try_merge_metadata(&mut field_metadata, right_field.metadata())?;
+                f.set_metadata(field_metadata);
                 Ok(f)
             }
         }
2 changes: 1 addition & 1 deletion crates/core/src/operations/constraints.rs
@@ -121,7 +121,7 @@ impl std::future::IntoFuture for ConstraintBuilder {

         let expr = this
             .expr
-            .ok_or_else(|| DeltaTableError::Generic("No Expresion provided".to_string()))?;
+            .ok_or_else(|| DeltaTableError::Generic("No Expression provided".to_string()))?;

         let mut metadata = this.snapshot.metadata().clone();
         let configuration_key = format!("delta.constraints.{}", name);
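Note: the expression checked above is normally supplied through `ConstraintBuilder::with_constraint`; a hedged usage sketch (the constraint name and predicate are placeholders):

use deltalake_core::{DeltaOps, DeltaResult, DeltaTable};

// Illustrative: add a CHECK constraint. Skipping `with_constraint` is what
// surfaces the "No Expression provided" error above.
async fn add_check(table: DeltaTable) -> DeltaResult<DeltaTable> {
    DeltaOps(table)
        .add_constraint()
        .with_constraint("value_gt_0", "value > 0")
        .await
}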
6 changes: 3 additions & 3 deletions crates/core/src/operations/convert_to_delta.rs
@@ -75,7 +75,7 @@ impl From<Error> for DeltaTableError {
 }

 /// The partition strategy used by the Parquet table
-/// Currently only hive-partitioning is supproted for Parquet paths
+/// Currently only hive-partitioning is supported for Parquet paths
 #[non_exhaustive]
 #[derive(Default)]
 pub enum PartitionStrategy {
@@ -187,7 +187,7 @@ impl ConvertToDeltaBuilder {
     }

     /// Specify the partition strategy of the Parquet table
-    /// Currently only hive-partitioning is supproted for Parquet paths
+    /// Currently only hive-partitioning is supported for Parquet paths
     pub fn with_partition_strategy(mut self, strategy: PartitionStrategy) -> Self {
         self.partition_strategy = strategy;
         self
@@ -402,7 +402,7 @@ impl ConvertToDeltaBuilder {

         let mut arrow_schema = batch_builder.schema().as_ref().clone();

-        // Arrow schema of Parquet files may have conflicting metatdata
+        // Arrow schema of Parquet files may have conflicting metadata
         // Since Arrow schema metadata is not used to generate Delta table schema, we set the metadata field to an empty HashMap
         arrow_schema.metadata = HashMap::new();
         arrow_schemas.push(arrow_schema);
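Note: a hedged end-to-end sketch of `ConvertToDeltaBuilder` (the location is a placeholder; hive-style partition directories are assumed to exist under it):

use deltalake_core::operations::convert_to_delta::{ConvertToDeltaBuilder, PartitionStrategy};
use deltalake_core::{DeltaResult, DeltaTable};

// Illustrative: register an existing hive-partitioned Parquet directory
// as a Delta table in place.
async fn convert(location: &str) -> DeltaResult<DeltaTable> {
    ConvertToDeltaBuilder::new()
        .with_location(location)
        .with_partition_strategy(PartitionStrategy::Hive)
        .await
}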
2 changes: 1 addition & 1 deletion crates/core/src/operations/create.rs
@@ -224,7 +224,7 @@ impl CreateBuilder {
         self
     }

-    /// Specify whether to raise an error if the table properties in the configuration are not TablePropertys
+    /// Specify whether to raise an error if the table properties in the configuration are not TableProperties
     pub fn with_raise_if_key_not_exists(mut self, raise_if_key_not_exists: bool) -> Self {
         self.raise_if_key_not_exists = raise_if_key_not_exists;
         self
8 changes: 4 additions & 4 deletions crates/core/src/operations/delete.rs
@@ -1,4 +1,4 @@
-//! Delete records from a Delta Table that statisfy a predicate
+//! Delete records from a Delta Table that satisfy a predicate
 //!
 //! When a predicate is not provided then all records are deleted from the Delta
 //! Table. Otherwise a scan of the Delta table is performed to mark any files
@@ -134,7 +134,7 @@ impl DeleteBuilder {
         self
     }

-    /// Additonal information to write to the commit
+    /// Additional information to write to the commit
     pub fn with_commit_properties(mut self, commit_properties: CommitProperties) -> Self {
         self.commit_properties = commit_properties;
         self
@@ -184,7 +184,7 @@ impl ExtensionPlanner for DeleteMetricExtensionPlanner {
 }

 #[allow(clippy::too_many_arguments)]
-async fn excute_non_empty_expr(
+async fn execute_non_empty_expr(
     snapshot: &DeltaTableState,
     log_store: LogStoreRef,
     state: &SessionState,
@@ -330,7 +330,7 @@ async fn execute(

     let mut actions = {
         let write_start = Instant::now();
-        let add = excute_non_empty_expr(
+        let add = execute_non_empty_expr(
             &snapshot,
             log_store.clone(),
             &state,
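Note: a hedged usage sketch of `DeleteBuilder` as documented above (the predicate string is a placeholder):

use deltalake_core::{DeltaOps, DeltaResult, DeltaTable};

// Illustrative: delete matching rows; with no predicate every record is
// deleted, per the module doc above.
async fn delete_rows(table: DeltaTable) -> DeltaResult<DeltaTable> {
    let (table, _metrics) = DeltaOps(table)
        .delete()
        .with_predicate("value > 100")
        .await?;
    Ok(table)
}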
2 changes: 1 addition & 1 deletion crates/core/src/operations/filesystem_check.rs
@@ -104,7 +104,7 @@ impl FileSystemCheckBuilder {
         self
     }

-    /// Additonal information to write to the commit
+    /// Additional information to write to the commit
     pub fn with_commit_properties(mut self, commit_properties: CommitProperties) -> Self {
         self.commit_properties = commit_properties;
         self
4 changes: 2 additions & 2 deletions crates/core/src/operations/merge/barrier.rs
@@ -547,8 +547,8 @@ mod tests {
     }

     #[tokio::test]
-    async fn test_barrier_changing_indicies() {
-        // Validate implementation can handle different dictionary indicies between batches
+    async fn test_barrier_changing_indices() {
+        // Validate implementation can handle different dictionary indices between batches

         let schema = get_schema();
         let mut batches = vec![];
4 changes: 2 additions & 2 deletions crates/core/src/operations/merge/mod.rs
@@ -1195,12 +1195,12 @@ async fn execute(
         LogicalPlanBuilder::from(plan).project(fields)?.build()?
     };

-    let distrbute_expr = col(file_column.as_str());
+    let distribute_expr = col(file_column.as_str());

     let merge_barrier = LogicalPlan::Extension(Extension {
         node: Arc::new(MergeBarrier {
             input: new_columns.clone(),
-            expr: distrbute_expr,
+            expr: distribute_expr,
             file_column,
         }),
     });
2 changes: 1 addition & 1 deletion crates/core/src/operations/optimize.rs
@@ -267,7 +267,7 @@ impl<'a> OptimizeBuilder<'a> {
         self
     }

-    /// Additonal information to write to the commit
+    /// Additional information to write to the commit
     pub fn with_commit_properties(mut self, commit_properties: CommitProperties) -> Self {
         self.commit_properties = commit_properties;
         self
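Note: a hedged sketch pairing `with_commit_properties` with a compaction run (the metadata key and value are placeholders):

use deltalake_core::operations::transaction::CommitProperties;
use deltalake_core::{DeltaOps, DeltaResult, DeltaTable};

// Illustrative: compact small files and stamp the commit with custom metadata.
async fn compact(table: DeltaTable) -> DeltaResult<DeltaTable> {
    let props = CommitProperties::default()
        .with_metadata([("job".to_string(), serde_json::json!("nightly-compaction"))]);
    let (table, _metrics) = DeltaOps(table)
        .optimize()
        .with_commit_properties(props)
        .await?;
    Ok(table)
}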
6 changes: 3 additions & 3 deletions crates/core/src/operations/transaction/conflict_checker.rs
@@ -411,7 +411,7 @@ impl<'a> ConflictChecker<'a> {
         );
         if curr_read < win_read || win_write < curr_write {
             return Err(CommitConflictError::ProtocolChanged(
-                format!("reqired read/write {win_read}/{win_write}, current read/write {curr_read}/{curr_write}"),
+                format!("required read/write {win_read}/{win_write}, current read/write {curr_read}/{curr_write}"),
             ));
         };
     }
@@ -638,7 +638,7 @@ pub(super) fn can_downgrade_to_snapshot_isolation<'a>(
     match isolation_level {
         IsolationLevel::Serializable => !data_changed,
         IsolationLevel::WriteSerializable => !data_changed && !operation.changes_data(),
-        IsolationLevel::SnapshotIsolation => false, // this case should never happen, since spanpshot isolation canot be configured on table
+        IsolationLevel::SnapshotIsolation => false, // this case should never happen, since spanpshot isolation cannot be configured on table
     }
 }

@@ -857,7 +857,7 @@ mod tests {
         setup_actions.push(file_part1);
         let result = execute_test(
             Some(setup_actions),
-            // filter matches neither exisiting nor added files
+            // filter matches neither existing nor added files
             Some(col("value").lt(lit::<i32>(0))),
             vec![file_part2],
             vec![file_part3],