diff --git a/Cargo.lock b/Cargo.lock index ed705adf7a..8cdb2875fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2372,7 +2372,7 @@ dependencies = [ [[package]] name = "fluvio" -version = "0.24.0" +version = "0.25.0" dependencies = [ "anyhow", "async-channel 1.9.0", diff --git a/Cargo.toml b/Cargo.toml index 6b5830b726..27ca32db56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -166,7 +166,7 @@ k8-diff = { version = "0.1.2" } trybuild = { branch = "check_option", git = "https://github.com/infinyon/trybuild" } # Internal fluvio dependencies -fluvio = { version = "0.24.0", path = "crates/fluvio" } +fluvio = { version = "0.25.0", path = "crates/fluvio" } fluvio-auth = { path = "crates/fluvio-auth" } fluvio-channel = { path = "crates/fluvio-channel" } fluvio-cli-common = { path = "crates/fluvio-cli-common"} diff --git a/crates/fluvio/Cargo.toml b/crates/fluvio/Cargo.toml index fbc3bed577..0bb1bd4373 100644 --- a/crates/fluvio/Cargo.toml +++ b/crates/fluvio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fluvio" -version = "0.24.0" +version = "0.25.0" edition = "2021" license = "Apache-2.0" authors = ["Fluvio Contributors "] diff --git a/crates/fluvio/src/producer/error.rs b/crates/fluvio/src/producer/error.rs index 4bedac61e2..fe23c82e89 100644 --- a/crates/fluvio/src/producer/error.rs +++ b/crates/fluvio/src/producer/error.rs @@ -9,8 +9,8 @@ use crate::producer::PartitionId; #[derive(thiserror::Error, Debug, Clone)] #[non_exhaustive] pub enum ProducerError { - #[error("the given record is larger than the max_request_size ({0} bytes)")] - RecordTooLarge(usize), + #[error("record size ({0} bytes), exceeded maximum request size ({1} bytes)")] + RecordTooLarge(usize, usize), #[error("failed to send record metadata: {0}")] SendRecordMetadata(#[from] async_channel::SendError), #[error("failed to get record metadata")] diff --git a/crates/fluvio/src/producer/memory_batch.rs b/crates/fluvio/src/producer/memory_batch.rs index 286459a3f7..317974bddd 100644 --- 
a/crates/fluvio/src/producer/memory_batch.rs +++ b/crates/fluvio/src/producer/memory_batch.rs @@ -59,7 +59,10 @@ impl MemoryBatch { // Error if the record is too large if actual_batch_size > self.write_limit { self.is_full = true; - return Err(ProducerError::RecordTooLarge(actual_batch_size)); + return Err(ProducerError::RecordTooLarge( + record_size, + self.write_limit, + )); } // is full, but is first record, add to the batch and then we will send it directly diff --git a/rfc/produce-message-size.md b/rfc/produce-message-size.md index ed5cac2445..b114a26d21 100644 --- a/rfc/produce-message-size.md +++ b/rfc/produce-message-size.md @@ -57,7 +57,7 @@ fluvio produce large-data-topic --max-request-size 16384 --file large-data-file. Will be displayed the following error: ```bash -the given record is larger than the max_request_size (16384 bytes). +record size (xyz bytes), exceeded maximum request size (16384 bytes) ``` ### Compression diff --git a/tests/cli/fluvio_smoke_tests/produce-error.bats b/tests/cli/fluvio_smoke_tests/produce-error.bats index ad48b5130a..5fd5ba97d2 100644 --- a/tests/cli/fluvio_smoke_tests/produce-error.bats +++ b/tests/cli/fluvio_smoke_tests/produce-error.bats @@ -49,7 +49,7 @@ teardown_file() { skip "don't check output on stable version" fi - assert_output --partial "the given record is larger than the max_request_size" + assert_output --partial "exceeded maximum request size" } # This should fail due to wrong compression algorithm