Commit c8c07bd

chore: update to iox 6689e6cf3e6296ab0ba1a1b9a18f243f7e881e85

* chore: update rust toolchain to 1.90.0

1 parent f1bc156

165 files changed: +15893 additions, -1601 deletions

Cargo.toml

Lines changed: 9 additions & 7 deletions
```diff
@@ -15,9 +15,11 @@ members = [
     "influxdb2_client",
     "iox_http",
     "iox_query_influxql",
+    "iox_query_influxql_rewrite",
     "iox_query",
     "iox_system_tables",
     "iox_time",
+    "iox_v1_query_api",
     "logfmt",
     "meta_data_cache",
     "metric_exporters",
@@ -74,26 +76,26 @@ arrow-schema = { version = "55" }
 bincode = { version = "2", default-features = false, features = ["alloc", "derive"] }
 # Use DataFusion fork
 # See https://github.com/influxdata/arrow-datafusion/pull/73 for contents
-datafusion = { git = "https://github.com/influxdata/arrow-datafusion.git", rev = "a9cf9aca9ebf0d6c04e0861d2baebffa0ba77dbc" }
-datafusion-proto = { git = "https://github.com/influxdata/arrow-datafusion.git", rev = "a9cf9aca9ebf0d6c04e0861d2baebffa0ba77dbc" }
+datafusion = { git = "https://github.com/influxdata/arrow-datafusion.git", rev = "ee81b1cc652bde6c131973d091b178836692112d" }
+datafusion-proto = { git = "https://github.com/influxdata/arrow-datafusion.git", rev = "ee81b1cc652bde6c131973d091b178836692112d" }
 hashbrown = { version = "0.14.5" }
 http = { version = "1" }
 http-body = { version = "1" }
 http-body-util = { version = "0.1" }
 hyper = { version = "1" }
 hyper-util = { version = "0.1" }
-object_store = { version = "0.12.3", features = ["aws", "azure", "gcp"] }
+object_store = { version = "0.12.4", features = ["aws", "azure", "gcp"] }
 parquet = { version = "55", features = ["object_store"] }
-pbjson = { version = "0.7" }
-pbjson-build = { version = "0.7" }
+pbjson = { version = "0.8" }
+pbjson-build = { version = "0.8" }
 pbjson-types = { version = "0.7" }
 proptest = { version = "1", default-features = false, features = ["std"] }
 prost = { version = "0.13" }
 prost-build = { version = "0.13" }
 prost-types = { version = "0.13" }
 reqwest = { version = "0.12", default-features = false }
-rstest = { version = "0.21" }
-sqlx = { version = "0.8.6", features = ["sqlite"] }
+rstest = { version = "0.26" }
+sqlx = { version = "0.8.6" }
 tower = { version = "0.5" }
 tracing = { version = "0.1", features = ["log", "max_level_trace"] }
 tracing-log = { version = "0.2" }
```

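Most of the version bumps in this commit flow through Cargo's workspace dependency inheritance: the root `[workspace.dependencies]` table is the single source of truth, and any member crate that declares a dependency with `{ workspace = true }` picks up the new version or git rev automatically. A minimal sketch of the mechanism, with a hypothetical member crate name (the root entries are copied from this diff):

```toml
# Root Cargo.toml -- pins the version/rev once for the whole workspace.
[workspace.dependencies]
tracing = { version = "0.1", features = ["log", "max_level_trace"] }
datafusion = { git = "https://github.com/influxdata/arrow-datafusion.git", rev = "ee81b1cc652bde6c131973d091b178836692112d" }

# some_member/Cargo.toml (hypothetical crate) -- inherits whatever the root pins.
[dependencies]
tracing = { workspace = true }
datafusion = { workspace = true }
```

Crates that spell out a version directly, like the `tokio = { version = "1.47", ... }` entries below, do not inherit from the root, which is why this commit also has to touch each of those Cargo.toml files individually.
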
arrow_util/Cargo.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -22,7 +22,7 @@ comfy-table = { version = "7.2", default-features = false }
 hashbrown = { workspace = true }
 num-traits = "0.2"
 parquet = { workspace = true }
-regex = "1.11.2"
+regex = "1.12.2"
 snafu = "0.8"
 uuid = "1"
 workspace-hack = { version = "0.1", path = "../workspace-hack" }
```

authz/Cargo.toml

Lines changed: 2 additions & 2 deletions
```diff
@@ -25,10 +25,10 @@ snafu = "0.8"
 
 [dev-dependencies]
 assert_matches = "1.5.0"
-parking_lot = "0.12.4"
+parking_lot = "0.12.5"
 paste = "1.0.15"
 test_helpers_authz = { path = "../test_helpers_authz" }
-tokio = "1.47.1"
+tokio = "1.48.0"
 
 [features]
 http = ["dep:http"]
```

backoff/Cargo.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -9,7 +9,7 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-tokio = { version = "1.47", features = ["macros", "time"] }
+tokio = { version = "1.48", features = ["macros", "time"] }
 tracing = { workspace = true }
 rand = "0.9"
 snafu = "0.8"
```

catalog_cache/Cargo.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -19,7 +19,7 @@ iox_http_util = { path = "../iox_http_util" }
 tracing = { workspace = true }
 reqwest = { workspace = true }
 snafu = "0.8"
-tokio = { version = "1.47", default-features = false, features = [
+tokio = { version = "1.48", default-features = false, features = [
     "macros",
     "rt",
 ] }
```

catalog_cache/benches/list_encode.rs

Lines changed: 17 additions & 26 deletions
```diff
@@ -103,12 +103,25 @@ fn encode_partition_snapshot(i: usize) -> Bytes {
     let partition_key = PartitionKey::from(format!("arbitrary_{i}"));
     let expected_partition_hash_id = PartitionHashId::new(table_id, &partition_key);
     let generation = 6;
-    let parquet_file_defaults = ParquetFile {
+
+    let partition = Partition::new_catalog_only(
+        partition_id,
+        table_id,
+        partition_key.clone(),
+        Default::default(),
+        Default::default(),
+        Default::default(),
+        Default::default(),
+        None, // max_time
+        Default::default(),
+    );
+    // Create associated Parquet file
+    let parquet_files = vec![ParquetFile {
         id: ParquetFileId::new(7 + i as i64),
         namespace_id,
         table_id,
         partition_id,
-        partition_hash_id: Some(expected_partition_hash_id.clone()),
+        partition_hash_id: expected_partition_hash_id.clone(),
         object_store_id: ObjectStoreId::from_str("00000000-0000-0001-0000-000000000000").unwrap(),
         min_time: Timestamp::new(2),
         max_time: Timestamp::new(3),
@@ -120,31 +133,9 @@ fn encode_partition_snapshot(i: usize) -> Bytes {
         column_set: ColumnSet::empty(),
         max_l0_created_at: Timestamp::new(6),
         source: None,
-    };
+    }];
 
-    let partition = Partition::new_catalog_only(
-        partition_id,
-        Some(expected_partition_hash_id.clone()),
-        table_id,
-        partition_key.clone(),
-        Default::default(),
-        Default::default(),
-        Default::default(),
-        Default::default(),
-        None, // max_time
-    );
-    // Create associated Parquet files:
-    let parquet_files = vec![
-        // one addressed by numeric ID,
-        ParquetFile {
-            partition_hash_id: None,
-            ..parquet_file_defaults.clone()
-        },
-        // one addressed by hash ID.
-        parquet_file_defaults.clone(),
-    ];
-
-    // Encode the partition and its Parquet files,
+    // Encode the partition and its Parquet file
     let snapshot = PartitionSnapshot::encode(
         namespace_id,
         partition,
```

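This bench rewrite tracks the `data_types` change further down: `ParquetFile::partition_hash_id` is no longer an `Option`, so the bench drops the second file that was addressed by numeric ID only. The property that makes the field safe to require is that the hash ID is computed deterministically from the table ID and partition key. A small sketch of that invariant, assuming the same `data_types` imports the bench uses (the concrete IDs here are illustrative, not from the commit):

```rust
use data_types::{PartitionHashId, PartitionKey, TableId};

fn main() {
    let table_id = TableId::new(5);
    let key = PartitionKey::from("arbitrary_0");

    // Same inputs always yield the same hash ID, which is why the bench can
    // compute an "expected_partition_hash_id" up front and reuse it for every
    // file it encodes -- there is no longer a "no hash ID" case to cover.
    let expected = PartitionHashId::new(table_id, &key);
    let recomputed = PartitionHashId::new(table_id, &key);
    assert_eq!(expected, recomputed);
}
```
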
client_util/Cargo.toml

Lines changed: 2 additions & 2 deletions
```diff
@@ -16,13 +16,13 @@ reqwest = { workspace = true, features = ["stream", "rustls-tls-native-roots"] }
 # This direct dependency on rustls can probably be removed when tonic is upgraded to 0.13+.
 # See <https://github.com/influxdata/influxdb_iox/issues/14683> for more details.
 rustls = { version = "0.23", default-features = false }
-thiserror = "2.0.16"
+thiserror = "2.0.17"
 tonic = { version = "0.12", features = ["gzip", "tls", "tls-native-roots", "zstd"] }
 tower = { workspace = true }
 workspace-hack = { version = "0.1", path = "../workspace-hack" }
 
 [dev-dependencies]
-tokio = { version = "1.47", features = [
+tokio = { version = "1.48", features = [
     "macros",
     "parking_lot",
     "rt-multi-thread",
```

data_types/Cargo.toml

Lines changed: 2 additions & 2 deletions
```diff
@@ -14,7 +14,7 @@ arrow = { workspace = true }
 arrow-buffer = { workspace = true }
 bytes = "1.10"
 chrono = { version = "0.4", default-features = false }
-croaring = "2.4.0"
+croaring = "2.5.1"
 influxdb-line-protocol = { path = "../influxdb_line_protocol" }
 iox_time = { path = "../iox_time" }
 generated_types = { path = "../generated_types" }
@@ -33,7 +33,7 @@ sqlx = { workspace = true, features = [
     "postgres",
     "uuid",
 ] }
-thiserror = "2.0.16"
+thiserror = "2.0.17"
 uuid = { version = "1", features = ["v4"] }
 workspace-hack = { version = "0.1", path = "../workspace-hack" }
```

data_types/src/lib.rs

Lines changed: 27 additions & 53 deletions
```diff
@@ -810,15 +810,30 @@ pub struct TableSchema {
 
     /// the table's columns by their name
     pub columns: ColumnsByName,
+
+    /// Whether or not iceberg is enabled for this table
+    pub iceberg_enabled: bool,
 }
 
 impl TableSchema {
-    /// Initialize new `TableSchema` from the information in the given `Table`.
+    /// Initialize new [`TableSchema`] from the information in the given [`Table`].
     pub fn new_empty_from(table: &Table) -> Self {
         Self {
             id: table.id,
             partition_template: table.partition_template.clone(),
             columns: ColumnsByName::default(),
+            iceberg_enabled: table.iceberg_enabled,
+        }
+    }
+
+    /// Initialize a new [`TableSchema`] with the given id, no columns, default partition, and
+    /// iceberg disabled.
+    pub fn new_with(id: TableId) -> Self {
+        Self {
+            id,
+            partition_template: TablePartitionTemplateOverride::default(),
+            columns: ColumnsByName::default(),
+            iceberg_enabled: false,
         }
     }
 
@@ -1077,8 +1092,8 @@ pub struct ParquetFile {
     pub table_id: TableId,
     /// the partition identifier
     pub partition_id: PartitionId,
-    /// the optional partition hash id
-    pub partition_hash_id: Option<PartitionHashId>,
+    /// the partition hash id
+    pub partition_hash_id: PartitionHashId,
     /// the uuid used in the object store path for this file
     pub object_store_id: ObjectStoreId,
     /// the min timestamp of data in this file
@@ -1178,11 +1193,7 @@ impl ParquetFile {
 
     /// Estimate the memory consumption of this object and its contents
     pub fn size(&self) -> usize {
-        let hash_id = self
-            .partition_hash_id
-            .as_ref()
-            .map(|x| x.size())
-            .unwrap_or_default();
+        let hash_id = self.partition_hash_id.size();
 
         size_of_val(self) + hash_id + self.column_set.size() - size_of_val(&self.column_set)
     }
@@ -1211,7 +1222,7 @@
 
     /// Temporary to aid incremental migration
     pub fn transition_partition_id(&self) -> TransitionPartitionId {
-        TransitionPartitionId::from_parts(self.partition_id, self.partition_hash_id.clone())
+        TransitionPartitionId::from_parts(self.partition_id, Some(self.partition_hash_id.clone()))
     }
 }
 
@@ -1222,10 +1233,7 @@ impl From<ParquetFile> for catalog_proto::ParquetFile {
             namespace_id: v.namespace_id.get(),
             table_id: v.table_id.get(),
             partition_id: v.partition_id.get(),
-            partition_hash_id: v
-                .partition_hash_id
-                .map(|x| x.as_bytes().to_vec())
-                .unwrap_or_default(),
+            partition_hash_id: v.partition_hash_id.as_bytes().to_vec(),
             object_store_id: v.object_store_id.to_string(),
             min_time: v.min_time.get(),
             max_time: v.max_time.get(),
@@ -1266,11 +1274,7 @@ impl TryFrom<catalog_proto::ParquetFile> for ParquetFile {
             namespace_id: NamespaceId::new(v.namespace_id),
             table_id: TableId::new(v.table_id),
             partition_id: PartitionId::new(v.partition_id),
-            partition_hash_id: if v.partition_hash_id.is_empty() {
-                None
-            } else {
-                Some(v.partition_hash_id[..].try_into()?)
-            },
+            partition_hash_id: v.partition_hash_id[..].try_into()?,
             object_store_id: ObjectStoreId::from_str(&v.object_store_id)?,
             min_time: Timestamp::new(v.min_time),
             max_time: Timestamp::new(v.max_time),
@@ -1346,7 +1350,7 @@ pub struct ParquetFileParams {
     /// the partition identifier
     pub partition_id: PartitionId,
     /// the partition hash ID
-    pub partition_hash_id: Option<PartitionHashId>,
+    pub partition_hash_id: PartitionHashId,
     /// the uuid used in the object store path for this file
     pub object_store_id: ObjectStoreId,
     /// the min timestamp of data in this file
@@ -3329,6 +3333,7 @@ mod tests {
             id: TableId::new(1),
             partition_template: Default::default(),
             columns: ColumnsByName::default(),
+            iceberg_enabled: false,
         };
         let schema2 = TableSchema {
             id: TableId::new(2),
@@ -3339,6 +3344,7 @@
                 name: String::from("foo"),
                 column_type: ColumnType::Bool,
             }]),
+            iceberg_enabled: false,
         };
         assert!(schema1.size() < schema2.size());
     }
@@ -3361,11 +3367,7 @@
             id: NamespaceId::new(1),
             active_tables: BTreeMap::from([(
                 String::from("foo"),
-                TableSchema {
-                    id: TableId::new(1),
-                    columns: ColumnsByName::default(),
-                    partition_template: Default::default(),
-                },
+                TableSchema::new_with(TableId::new(1)),
             )]),
             deleted_tables: BTreeSet::new(),
             partition_template: Default::default(),
@@ -3412,41 +3414,13 @@
 
     #[test]
     fn catalog_service_parquet_file_serde_roundtrip() {
-        // This part of the test can be removed when all partitions have hash IDs.
-        let old_style_parquet_file = ParquetFile {
-            id: ParquetFileId::new(3),
-            namespace_id: NamespaceId::new(4),
-            table_id: TableId::new(5),
-            partition_id: PartitionId::new(6),
-            partition_hash_id: None, // this is the important part for this test
-            object_store_id: ObjectStoreId::new(),
-            min_time: Timestamp::new(30),
-            max_time: Timestamp::new(50),
-            to_delete: None,
-            file_size_bytes: 1024,
-            row_count: 42,
-            compaction_level: CompactionLevel::Initial,
-            created_at: Timestamp::new(70),
-            column_set: ColumnSet::empty(),
-            max_l0_created_at: Timestamp::new(70),
-            source: None,
-        };
-        let catalog_proto_old_style_parquet_file =
-            catalog_proto::ParquetFile::from(old_style_parquet_file.clone());
-        let round_trip_old_style_parquet_file =
-            ParquetFile::try_from(catalog_proto_old_style_parquet_file).unwrap();
-        assert_eq!(old_style_parquet_file, round_trip_old_style_parquet_file);
-
         let table_id = TableId::new(5);
         let parquet_file = ParquetFile {
             id: ParquetFileId::new(3),
             namespace_id: NamespaceId::new(4),
             table_id,
             partition_id: PartitionId::new(6),
-            partition_hash_id: Some(PartitionHashId::new(
-                table_id,
-                &PartitionKey::from("arbitrary"),
-            )),
+            partition_hash_id: PartitionHashId::new(table_id, &PartitionKey::from("arbitrary")),
             object_store_id: ObjectStoreId::new(),
             min_time: Timestamp::new(30),
             max_time: Timestamp::new(50),
```

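The thread running through this file is the same `Option` removal: `ParquetFile` and `ParquetFileParams` now always carry a `PartitionHashId`, so size accounting and the protobuf conversions shed their `None`/empty-bytes branches, and the old-style round-trip test for files without hash IDs goes away. A self-contained sketch of the simplification pattern, using hypothetical stand-in types rather than the real `data_types` definitions:

```rust
// Stand-in types for illustration only; the real PartitionHashId and
// ParquetFile live in the data_types crate.
#[derive(Clone, Debug, PartialEq)]
struct HashId(Vec<u8>);

impl HashId {
    fn size(&self) -> usize {
        self.0.len()
    }
}

struct FileV1 {
    hash_id: Option<HashId>, // before: optional
}

struct FileV2 {
    hash_id: HashId, // after: required
}

impl FileV1 {
    fn size(&self) -> usize {
        // Every read site pays for the None case...
        std::mem::size_of_val(self) + self.hash_id.as_ref().map(|x| x.size()).unwrap_or_default()
    }
}

impl FileV2 {
    fn size(&self) -> usize {
        // ...which collapses once the field is required.
        std::mem::size_of_val(self) + self.hash_id.size()
    }
}

fn main() {
    let v1 = FileV1 { hash_id: Some(HashId(vec![0; 32])) };
    let v2 = FileV2 { hash_id: HashId(vec![0; 32]) };

    // Both account for the same 32 hash bytes on top of the struct itself.
    assert_eq!(v1.size() - std::mem::size_of_val(&v1), v2.size() - std::mem::size_of_val(&v2));
}
```

The decode direction tightens as well: `v.partition_hash_id[..].try_into()?` now treats an empty or malformed byte field as an error, where the old code mapped empty bytes to `None`.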