
Commit

fix clippy warnings
JanKaul committed Feb 8, 2024
1 parent 781ba0d commit a489550
Showing 11 changed files with 28 additions and 32 deletions.
12 changes: 5 additions & 7 deletions datafusion_iceberg/src/materialized_view.rs
@@ -48,7 +48,7 @@ pub async fn refresh_materialized_view(
         storage_table.source_tables(branch.clone()).await?
     } {
         Some(x) => x.clone(),
-        None => find_relations(&sql)?
+        None => find_relations(sql)?
             .into_iter()
             .map(|x| {
                 Ok(SourceTable::new(
@@ -68,18 +68,16 @@ pub async fn refresh_materialized_view(
         let identifier = base_table.identifier();
         let catalog_name = identifier.catalog();
         let catalog = catalog_list
-            .catalog(&catalog_name)
+            .catalog(catalog_name)
             .await
             .ok_or(Error::NotFound(
                 "Catalog".to_owned(),
                 catalog_name.to_owned(),
             ))?;

         let tabular = match catalog
-            .load_table(&Identifier::try_new(&vec![
-                identifier.namespace().clone(),
-                identifier.table_name().clone(),
-            ])?)
+            .load_table(&Identifier::try_new(&[identifier.namespace().clone(),
+                identifier.table_name().clone()])?)
             .await?
         {
             Tabular::View(_) => {
@@ -123,7 +121,7 @@ pub async fn refresh_materialized_view(
         .into_iter()
         .flat_map(|(catalog_name, base_table, _, last_snapshot_id)| {
             let identifier = base_table.identifier().to_string().to_owned();
-            let uuid = base_table.metadata().uuid().clone();
+            let uuid = *base_table.metadata().uuid();

             let table = Arc::new(DataFusionTable::new(
                 base_table,
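The changes in this file target clippy's needless_borrow and clone_on_copy lints: `sql` and `catalog_name` are already `&str`, so the extra `&` is redundant, and `Uuid` is `Copy`, so dereferencing replaces the clone. A minimal self-contained sketch of both lints (the `Uuid` struct and `find_relations` body here are stand-ins for illustration, not the crate's own):

    // Stand-in for a `Copy` type like `Uuid`; hypothetical, for illustration only.
    #[derive(Debug, Clone, Copy, PartialEq)]
    struct Uuid([u8; 16]);

    fn find_relations(sql: &str) -> Vec<String> {
        // Dummy body; the real `find_relations` extracts table names from SQL.
        sql.split_whitespace().map(str::to_owned).collect()
    }

    fn main() {
        let sql: &str = "SELECT * FROM my_table";
        // clippy::needless_borrow: `find_relations(&sql)` would pass `&&str`;
        // auto-deref makes it compile, but the extra borrow is redundant.
        let relations = find_relations(sql);
        println!("{relations:?}");

        let id = Uuid([0; 16]);
        let id_ref: &Uuid = &id;
        // clippy::clone_on_copy: the type is `Copy`, so `*id_ref` copies it;
        // calling `.clone()` on a `Copy` value is flagged as noise.
        let owned: Uuid = *id_ref;
        assert_eq!(owned, id);
    }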
2 changes: 1 addition & 1 deletion datafusion_iceberg/src/statistics.rs
@@ -22,7 +22,7 @@ impl DataFusionTable
             Tabular::View(_) => Err(Error::NotSupported("Statistics for views".to_string())),
             Tabular::MaterializedView(mv) => {
                 let table = Table::new(
-                    Identifier::try_new(&vec!["temp".to_owned()]).map_err(Error::from)?,
+                    Identifier::try_new(&["temp".to_owned()]).map_err(Error::from)?,
                     mv.catalog(),
                     mv.storage_table()
                         .await
2 changes: 1 addition & 1 deletion datafusion_iceberg/src/table.rs
@@ -194,7 +194,7 @@ impl TableProvider for DataFusionTable
             }
             Tabular::MaterializedView(mv) => {
                 let table = Table::new(
-                    Identifier::try_new(&vec!["temp".to_owned()]).map_err(Error::from)?,
+                    Identifier::try_new(&["temp".to_owned()]).map_err(Error::from)?,
                     mv.catalog(),
                     mv.storage_table()
                         .await
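The `&vec![…]` to `&[…]` changes here and in statistics.rs address clippy's useless_vec lint: `Identifier::try_new` only needs a slice, so allocating a `Vec` just to borrow it is wasted work, and a borrowed array literal coerces to the same `&[String]`. A small sketch under that assumption (`try_new` below is a hypothetical stand-in, not the crate's actual `Identifier::try_new`):

    // Hypothetical stand-in for `Identifier::try_new`, taking a slice.
    fn try_new(parts: &[String]) -> Result<String, String> {
        if parts.is_empty() {
            return Err("identifier must not be empty".to_owned());
        }
        Ok(parts.join("."))
    }

    fn main() {
        // clippy::useless_vec: the Vec is heap-allocated only to be borrowed.
        let _ = try_new(&vec!["temp".to_owned()]);
        // Preferred: a borrowed array coerces to `&[String]` with no
        // intermediate Vec allocation.
        let id = try_new(&["temp".to_owned()]);
        println!("{id:?}");
    }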
6 changes: 3 additions & 3 deletions iceberg-catalog-sql/src/lib.rs
@@ -247,7 +247,7 @@ impl Catalog for SqlCatalog
         // Create metadata

         // Write metadata to object_store
-        let bucket = parse_bucket(&metadata.location())?;
+        let bucket = parse_bucket(metadata.location())?;
         let object_store = self.object_store(bucket);

         let location = &metadata.location();
@@ -351,7 +351,7 @@ impl Catalog for SqlCatalog
         let mut metadata = metadata.clone();
         let metadata_location = match &mut metadata {
             TabularMetadata::View(metadata) => {
-                if !check_view_requirements(&commit.requirements, &metadata) {
+                if !check_view_requirements(&commit.requirements, metadata) {
                     return Err(IcebergError::InvalidFormat(
                         "View requirements not valid".to_owned(),
                     ));
@@ -372,7 +372,7 @@
                 Ok(metadata_location)
             }
             TabularMetadata::MaterializedView(metadata) => {
-                if !check_view_requirements(&commit.requirements, &metadata) {
+                if !check_view_requirements(&commit.requirements, metadata) {
                     return Err(IcebergError::InvalidFormat(
                         "Materialized view requirements not valid".to_owned(),
                     ));
10 changes: 5 additions & 5 deletions iceberg-rust-spec/src/spec/manifest.rs
@@ -684,7 +684,7 @@ impl DataFile
             file_format: value.file_format,
             partition: value
                 .partition
-                .cast(schema.fields(), &partition_spec.fields())?,
+                .cast(schema.fields(), partition_spec.fields())?,
             record_count: value.record_count,
             file_size_in_bytes: value.file_size_in_bytes,
             column_sizes: value.column_sizes,
@@ -718,7 +718,7 @@ impl DataFile
             file_format: value.file_format,
             partition: value
                 .partition
-                .cast(schema.fields(), &partition_spec.fields())?,
+                .cast(schema.fields(), partition_spec.fields())?,
             record_count: value.record_count,
             file_size_in_bytes: value.file_size_in_bytes,
             column_sizes: value.column_sizes,
@@ -1576,7 +1576,7 @@ mod tests
         };

         let partition_schema = partition_value_schema(
-            &table_metadata.default_partition_spec().unwrap().fields(),
+            table_metadata.default_partition_spec().unwrap().fields(),
             table_metadata.current_schema(None).unwrap(),
         )
         .unwrap();
@@ -1706,7 +1706,7 @@ mod tests
         };

         let partition_schema = partition_value_schema(
-            &table_metadata.default_partition_spec().unwrap().fields(),
+            table_metadata.default_partition_spec().unwrap().fields(),
             table_metadata.current_schema(None).unwrap(),
         )
         .unwrap();
@@ -1795,7 +1795,7 @@ mod tests
             .unwrap();

         let raw_schema =
-            partition_value_schema(&spec.fields(), &table_schema.try_into().unwrap()).unwrap();
+            partition_value_schema(spec.fields(), &table_schema.try_into().unwrap()).unwrap();

         let schema = apache_avro::Schema::parse_str(&raw_schema).unwrap();

10 changes: 4 additions & 6 deletions iceberg-rust-spec/src/spec/snapshot.rs
@@ -234,8 +234,10 @@ pub(crate) mod _serde
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
 #[serde(rename_all = "lowercase")]
 /// The operation field is used by some operations, like snapshot expiration, to skip processing certain snapshots.
+#[derive(Default)]
 pub enum Operation {
     /// Only data files were added and no files were removed.
+    #[default]
     Append,
     /// Data and delete files were added and removed without changing table data;
     /// i.e., compaction, changing the data file format, or relocating data files.
@@ -246,11 +248,7 @@ pub enum Operation {
     Delete,
 }

-impl Default for Operation {
-    fn default() -> Self {
-        Operation::Append
-    }
-}


 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)]
 /// Summarises the changes in the snapshot.
@@ -365,7 +363,7 @@ impl FullIdentifier
     }

     pub fn parse(input: &str) -> Result<Self, Error> {
-        let mut parts = input.split(".");
+        let mut parts = input.split('.');
         let catalog_name = parts
             .next()
             .ok_or(Error::InvalidFormat("Input is empty".to_string()))?
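Two further lints show up in snapshot.rs: derivable_impls, because a manual `impl Default` that returns one fixed variant can be replaced by `#[derive(Default)]` plus a `#[default]` attribute on that variant (stable since Rust 1.62), and single_char_pattern, because `split('.')` with a `char` pattern is cheaper than the one-character string `split(".")`. A condensed, self-contained sketch of both:

    #[allow(dead_code)]
    #[derive(Debug, Default, PartialEq)]
    enum Operation {
        // clippy::derivable_impls: instead of a hand-written `impl Default`
        // returning one fixed variant, derive Default and mark the variant.
        #[default]
        Append,
        Overwrite,
        Delete,
    }

    fn main() {
        assert_eq!(Operation::default(), Operation::Append);

        // clippy::single_char_pattern: a `char` pattern avoids the substring
        // search machinery used for `&str` patterns.
        let parts: Vec<&str> = "catalog.namespace.table".split('.').collect();
        assert_eq!(parts, ["catalog", "namespace", "table"]);
    }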
2 changes: 1 addition & 1 deletion iceberg-rust-spec/src/spec/values.rs
@@ -141,7 +141,7 @@ impl Struct
                 .ok_or(Error::InvalidFormat("partition spec".to_string()))?;
             Ok((
                 field.name.clone(),
-                field.field_type.tranform(&partition_field.transform())?,
+                field.field_type.tranform(partition_field.transform())?,
             ))
         })
         .collect::<Result<HashMap<_, _>, Error>>()?;
2 changes: 1 addition & 1 deletion iceberg-rust/src/arrow/write.rs
@@ -166,7 +166,7 @@ async fn write_parquet_files(
                     size,
                     &metadata,
                     schema,
-                    &partition_spec.fields(),
+                    partition_spec.fields(),
                 )?)
             }
         })
4 changes: 2 additions & 2 deletions iceberg-rust/src/materialized_view/mod.rs
@@ -98,12 +98,12 @@ impl MaterializedView
     /// Get the storage table of the materialized view
     pub async fn storage_table(&self) -> Result<StorageTable, Error> {
         let storage_table_location = &self.metadata.materialization;
-        let bucket = parse_bucket(&storage_table_location)?;
+        let bucket = parse_bucket(storage_table_location)?;
         if let TabularMetadata::Table(metadata) = serde_json::from_str(std::str::from_utf8(
             &self
                 .catalog()
                 .object_store(bucket)
-                .get(&strip_prefix(&storage_table_location).into())
+                .get(&strip_prefix(storage_table_location).into())
                 .await?
                 .bytes()
                 .await?,
2 changes: 1 addition & 1 deletion iceberg-rust/src/table/transaction/mod.rs
@@ -96,7 +96,7 @@ impl<'table> TableTransaction<'table> {
         // Execute the table operations
         let (mut requirements, mut updates) = (Vec::new(), Vec::new());
         for operation in self.operations.into_values() {
-            let (requirement, update) = operation.execute(&self.table).await?;
+            let (requirement, update) = operation.execute(self.table).await?;

             if let Some(requirement) = requirement {
                 requirements.push(requirement);
8 changes: 4 additions & 4 deletions iceberg-rust/src/table/transaction/operation.rs
@@ -142,7 +142,7 @@ impl Operation
                     let partition_values = partition_values_in_bounds(
                         summary,
                         datafiles.keys(),
-                        &partition_spec.fields(),
+                        partition_spec.fields(),
                         schema,
                     );
                     if !partition_values.is_empty() {
@@ -377,7 +377,7 @@ pub(crate) async fn write_manifest(
     branch: Option<String>,
 ) -> Result<ManifestListEntry, Error> {
     let manifest_schema = ManifestEntry::schema(
-        &partition_value_schema(&table_metadata.default_partition_spec()?.fields(), schema)?,
+        &partition_value_schema(table_metadata.default_partition_spec()?.fields(), schema)?,
         &table_metadata.format_version,
     )?;

@@ -429,8 +429,8 @@ pub(crate) async fn write_manifest(
             added_rows_count += datafile.record_count();
             update_partitions(
                 manifest.partitions.as_mut().unwrap(),
-                &datafile.partition(),
-                &partition_columns,
+                datafile.partition(),
+                partition_columns,
             )?;

             let manifest_entry = ManifestEntry::builder()
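The remaining `fields()` and `partition()` changes in manifest.rs, write.rs, and operation.rs are all the same needless_borrow case: the getter already returns a reference, so wrapping the call in `&` produces a double reference (`&&[T]`) that auto-deref only papers over. A stripped-down sketch of the pattern, with hypothetical types standing in for the spec structs:

    // Hypothetical stand-ins for PartitionSpec / PartitionField from the spec crate.
    struct PartitionField {
        name: String,
    }

    struct PartitionSpec {
        fields: Vec<PartitionField>,
    }

    impl PartitionSpec {
        // The getter already hands out a reference, like `partition_spec.fields()`.
        fn fields(&self) -> &[PartitionField] {
            &self.fields
        }
    }

    fn partition_value_schema(fields: &[PartitionField]) -> String {
        fields
            .iter()
            .map(|f| f.name.as_str())
            .collect::<Vec<_>>()
            .join(",")
    }

    fn main() {
        let spec = PartitionSpec {
            fields: vec![PartitionField { name: "day".to_owned() }],
        };
        // clippy::needless_borrow: `&spec.fields()` would be `&&[PartitionField]`;
        // passing the getter's result directly is the idiomatic form.
        let schema = partition_value_schema(spec.fields());
        println!("{schema}");
    }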
