From 9b3aab18a3065c870d3b21157a98e37cf78438f3 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Fri, 11 Oct 2024 19:12:27 +0800 Subject: [PATCH 01/11] Update the building workflow (#404) --- .github/workflows/rust.yml | 7 +++++-- src/build.rs | 9 ++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d91b60fc..b2ef09ef 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -144,8 +144,11 @@ jobs: toolchain: stable target: 'aarch64-unknown-linux-ohos,armv7-unknown-linux-ohos,x86_64-unknown-linux-ohos' - - name: Run cargo build - run: cargo install ohrs && ohrs build -- --verbose --features ffi --release + - name: Install ohrs + run: cargo install ohrs + + - name: Run ohrs build + run: ohrs build -- --verbose --features ffi --release static_analysis: name: Static analysis diff --git a/src/build.rs b/src/build.rs index 3e7dc5b2..98f5ba68 100644 --- a/src/build.rs +++ b/src/build.rs @@ -115,11 +115,7 @@ fn new_boringssl_cmake_config() -> cmake::Config { for (name, value) in *params { boringssl_cmake.define(name, value); } - // common arguments for ohos help us to ignore some error - boringssl_cmake - .define("CMAKE_C_FLAGS", "-Wno-unused-command-line-argument"); - boringssl_cmake - .define("CMAKE_CXX_FLAGS", "-Wno-unused-command-line-argument"); + break; } } @@ -129,6 +125,9 @@ fn new_boringssl_cmake_config() -> cmake::Config { let toolchain_file = ohos_ndk_home.join("native/build/cmake/ohos.toolchain.cmake"); let toolchain_file = toolchain_file.to_str().unwrap(); boringssl_cmake.define("CMAKE_TOOLCHAIN_FILE", toolchain_file); + // common arguments for ohos help us to ignore some error + boringssl_cmake.define("CMAKE_C_FLAGS", "-Wno-unused-command-line-argument"); + boringssl_cmake.define("CMAKE_CXX_FLAGS", "-Wno-unused-command-line-argument"); } } From ca009e2fe1dbb50111ec84ff64f7fb4eec16104a Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Sat, 12 Oct 2024 09:09:39 +0800 Subject: [PATCH 02/11] Update integration test workflow (#405) --- .github/workflows/tquic-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tquic-integration.yml b/.github/workflows/tquic-integration.yml index b901d89e..0e61a9c8 100644 --- a/.github/workflows/tquic-integration.yml +++ b/.github/workflows/tquic-integration.yml @@ -26,5 +26,5 @@ jobs: - name: Run integration tests for disable_1rtt_encryption run: | cd tools/tests/ - bash ./tquic_tools_test.sh -b ../../target/debug/ -t multipath_roundrobin -c '~~disable-encryption' -s '~~disable-encryption' + bash ./tquic_tools_test.sh -b ../../target/release/ -t multipath_roundrobin -c '~~disable-encryption' -s '~~disable-encryption' From 05828002c236980e0b62c2131ac169ff4081f7d7 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Thu, 17 Oct 2024 11:24:09 +0800 Subject: [PATCH 03/11] Temporarily disable some integration tests (#410) --- .github/workflows/tquic-integration.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/tquic-integration.yml b/.github/workflows/tquic-integration.yml index 0e61a9c8..aba03618 100644 --- a/.github/workflows/tquic-integration.yml +++ b/.github/workflows/tquic-integration.yml @@ -23,8 +23,3 @@ jobs: run: | cd tools/tests/ bash ./tquic_tools_test.sh -b ../../target/release/ -t multipath_redundant,multipath_minrtt,multipath_roundrobin -f 1000M -p 5 - - name: Run integration tests for disable_1rtt_encryption - run: | - cd tools/tests/ - bash ./tquic_tools_test.sh -b 
../../target/release/ -t multipath_roundrobin -c '~~disable-encryption' -s '~~disable-encryption' - From 4ad434c9dad9c32a626dd8efb8e85409dbb8db33 Mon Sep 17 00:00:00 2001 From: a-andre <13609565+a-andre@users.noreply.github.com> Date: Thu, 17 Oct 2024 08:03:36 +0200 Subject: [PATCH 04/11] Some fixes for tquic_tools_test.sh (#407) - Fix handling of options 'l' and 's' - Return error code if argument is missing or invalid option was passed --- tools/tests/tquic_tools_test.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/tests/tquic_tools_test.sh b/tools/tests/tquic_tools_test.sh index 50d1959b..182fbaae 100755 --- a/tools/tests/tquic_tools_test.sh +++ b/tools/tests/tquic_tools_test.sh @@ -38,9 +38,6 @@ cleanup() { exit $EXIT_CODE } -# Ensure that all child processes have exited. -trap 'cleanup' EXIT - show_help() { echo "Usage: $0 [options]" echo " -b, Set the directory of tquic_client/tquic_server." @@ -55,7 +52,7 @@ show_help() { echo " -h, Display this help and exit." } -while getopts ":b:w:t:f:p:g:l:c:sh" opt; do +while getopts ":b:w:t:f:p:g:c:s:lh" opt; do case $opt in b) BIN_DIR="$OPTARG" @@ -101,6 +98,9 @@ while getopts ":b:w:t:f:p:g:l:c:sh" opt; do esac done +# Ensure that all child processes have exited. +trap 'cleanup' EXIT + if [[ ! -f "$BIN_DIR/tquic_client" || ! -f "$BIN_DIR/tquic_server" ]]; then echo "Not found tquic_client/tquic_server. Please specify the directory for them by '-b' option." show_help From 2622b4c3c60713a2a7c6ad9b0606b45e7dd21421 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Thu, 17 Oct 2024 15:07:53 +0800 Subject: [PATCH 05/11] Minor tweaks for tquic_time_offset.py (#412) --- tools/script/tquic_time_offset.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) mode change 100644 => 100755 tools/script/tquic_time_offset.py diff --git a/tools/script/tquic_time_offset.py b/tools/script/tquic_time_offset.py old mode 100644 new mode 100755 index 15a712cf..d24d3991 --- a/tools/script/tquic_time_offset.py +++ b/tools/script/tquic_time_offset.py @@ -65,15 +65,22 @@ def plot_offsets(timestamps, offsets, connection_trace_id, stream_id): description="Analyze TQUIC logs to get the relationship between stream offset and time." ) parser.add_argument( - "--log_file", type=str, help="Path to the TQUIC debug log file", required=True + "-l", + "--log_file", + type=str, + help="Path to the TQUIC debug log file", + required=True, ) parser.add_argument( + "-c", "--connection_trace_id", type=str, help="Connection trace id, eg. SERVER-c6d45bc005585f42", required=True, ) - parser.add_argument("--stream_id", type=int, help="Stream id, eg. 0", required=True) + parser.add_argument( + "-s", "--stream_id", type=int, help="Stream id, eg. 
0", required=True + ) args = parser.parse_args() # Calling with command-line arguments From 215b5db02ab8e5c1f73c05f227b0bd0e282a2995 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Fri, 18 Oct 2024 12:33:51 +0800 Subject: [PATCH 06/11] Fix checking packet header under disable_1rtt_encryption mode (#413) --- .github/workflows/tquic-integration.yml | 4 +++ src/connection/connection.rs | 8 +++++- src/packet.rs | 36 ++++++++++++++++--------- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/.github/workflows/tquic-integration.yml b/.github/workflows/tquic-integration.yml index aba03618..0b2d9687 100644 --- a/.github/workflows/tquic-integration.yml +++ b/.github/workflows/tquic-integration.yml @@ -23,3 +23,7 @@ jobs: run: | cd tools/tests/ bash ./tquic_tools_test.sh -b ../../target/release/ -t multipath_redundant,multipath_minrtt,multipath_roundrobin -f 1000M -p 5 + - name: Run integration tests for disable_1rtt_encryption + run: | + cd tools/tests/ + bash ./tquic_tools_test.sh -b ../../target/release/ -t multipath_minrtt -c '~~disable-encryption' -s '~~disable-encryption' diff --git a/src/connection/connection.rs b/src/connection/connection.rs index ebf84f08..2af19cee 100644 --- a/src/connection/connection.rs +++ b/src/connection/connection.rs @@ -1680,7 +1680,11 @@ impl Connection { .tls_session .get_overhead(level) .ok_or(Error::InternalError)?; - let total_overhead = pkt_num_offset + pkt_num_len + crypto_overhead; + let total_overhead = if !self.is_encryption_disabled(hdr.pkt_type) { + pkt_num_offset + pkt_num_len + crypto_overhead + } else { + pkt_num_offset + pkt_num_len + }; match left.checked_sub(total_overhead) { Some(val) => left = val, @@ -1727,6 +1731,8 @@ impl Connection { // fields) in bytes let payload_len = write_status.written; if pkt_type != PacketType::OneRTT { + // Note: This type of packet is always encrypted, even if the disable_1rtt_encryption + // transport parameter is successfully negotiated. let len = pkt_num_len + payload_len + crypto_overhead; let mut out = &mut out[hdr_offset..]; out.write_varint_with_len(len as u64, crate::LENGTH_FIELD_LEN)?; diff --git a/src/packet.rs b/src/packet.rs index d346adee..f2d9f998 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -541,33 +541,43 @@ pub(crate) fn decrypt_header( aead: &Open, plaintext_mode: bool, ) -> Result<()> { - if pkt_buf.len() < pkt_num_offset + MAX_PKT_NUM_LEN + SAMPLE_LEN { + let pkt_buf_min = if !plaintext_mode { + pkt_num_offset + MAX_PKT_NUM_LEN + SAMPLE_LEN + } else { + // All aspects of encryption on 1-RTT packets are removed and it is no + // longer including an AEAD tag. 
+ pkt_num_offset + MAX_PKT_NUM_LEN + }; + if pkt_buf.len() < pkt_buf_min { return Err(Error::BufferTooShort); } + // Decrypt packet header if needed let mut first = pkt_buf[0]; - let sample_start = pkt_num_offset + MAX_PKT_NUM_LEN; - let sample = &pkt_buf[sample_start..sample_start + SAMPLE_LEN]; - let mask = aead.new_mask(sample)?; - - // Remove protection of bits in the first byte - if !plaintext_mode { + let (pkt_num_len, pkt_num_buf) = if !plaintext_mode { + // Remove protection of bits in the first byte + let sample_start = pkt_num_offset + MAX_PKT_NUM_LEN; + let sample = &pkt_buf[sample_start..sample_start + SAMPLE_LEN]; + let mask = aead.new_mask(sample)?; if PacketHeader::long_header(first) { first ^= mask[0] & 0x0f; } else { first ^= mask[0] & 0x1f; } - } let pkt_num_len = usize::from((first & PKT_NUM_LEN_MASK) + 1); let pkt_num_buf = &mut pkt_buf[pkt_num_offset..pkt_num_offset + pkt_num_len]; - // Remove protection of packet number field - if !plaintext_mode { + let pkt_num_len = usize::from((first & PKT_NUM_LEN_MASK) + 1); + let pkt_num_buf = &mut pkt_buf[pkt_num_offset..pkt_num_offset + pkt_num_len]; + // Remove protection of packet number field for i in 0..pkt_num_len { pkt_num_buf[i] ^= mask[i + 1]; } - } + (pkt_num_len, pkt_num_buf) + } else { + let pkt_num_len = usize::from((first & PKT_NUM_LEN_MASK) + 1); + let pkt_num_buf = &mut pkt_buf[pkt_num_offset..pkt_num_offset + pkt_num_len]; + (pkt_num_len, pkt_num_buf) + }; // Extract packet number corresponding to the length. let pkt_num = { From 78c8908095e7959ad33598a17aabd265aa240928 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Mon, 21 Oct 2024 18:34:42 +0800 Subject: [PATCH 07/11] Fix the length of truncated packet number (#414) --- src/packet.rs | 55 ++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/src/packet.rs b/src/packet.rs index f2d9f998..22297fc4 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -674,7 +674,7 @@ pub(crate) fn packet_num_len(pkt_num: u64, largest_acked: Option<u64>) -> usize pkt_num.saturating_add(1) }; - let min_bits = u64::BITS - num_unacked.leading_zeros(); // log(num_unacked, 2) + 1 + let min_bits = u64::BITS - num_unacked.leading_zeros() + 1; // ceil(log(num_unacked, 2)) + 1 ((min_bits + 7) / 8) as usize // ceil(min_bits / 8) } @@ -1164,29 +1164,60 @@ mod tests { fn packet_num() -> Result<()> { let test_cases = [ (0, None, 1), - (254, Some(0), 1), - (255, Some(0), 1), - (256, Some(0), 2), - (65534, Some(0), 2), - (65535, Some(0), 2), - (65536, Some(0), 3), - (16777214, Some(0), 3), - (16777215, Some(0), 3), - (16777216, Some(0), 4), - (4294967295, Some(0), 4), - (4294967296, Some(1), 4), + (127, Some(0), 1), + (128, Some(0), 2), + (129, Some(0), 2), + (1174, Some(996), 2), + (32767, Some(0), 2), + (32768, Some(0), 3), + (8388607, Some(0), 3), + (8388608, Some(0), 4), + (2147483647, Some(0), 4), + (2147483648, Some(1), 4), (4294967296, Some(4294967295), 1), (4611686018427387903, Some(4611686018427387902), 1), ]; + const U8_MAX: u64 = (1 << 8) - 1; + const U16_MAX: u64 = (1 << 16) - 1; + const U24_MAX: u64 = (1 << 24) - 1; + const U32_MAX: u64 = (1 << 32) - 1; + let mut buf = [0; 4]; for case in test_cases { let pkt_num = case.0; let largest_acked = case.1; let pkt_num_len = packet_num_len(pkt_num, largest_acked); assert_eq!(pkt_num_len, case.2); + // The sender MUST use a packet number size able to represent more + // than twice as large a range as the difference between the largest + // acknowledged packet number and the packet 
number being sent. + let range = (pkt_num - largest_acked.unwrap_or(0)) * 2 as u64; + if range <= U8_MAX { + assert!(pkt_num_len == 1); + } else if range <= U16_MAX { + assert!(pkt_num_len == 2); + } else if range <= U24_MAX { + assert!(pkt_num_len == 3); + } else if range <= U32_MAX { + assert!(pkt_num_len == 4); + } else { + unreachable!(); + } let len = encode_packet_num(pkt_num, pkt_num_len, &mut buf[..])?; assert_eq!(len, pkt_num_len); + + let mut b = &buf[..]; + let pkt_num_truncated = match len { + 1 => u64::from(b.read_u8()?), + 2 => u64::from(b.read_u16()?), + 3 => u64::from(b.read_u24()?), + 4 => u64::from(b.read_u32()?), + _ => unreachable!(), + }; + let pkt_num_decoded = + decode_packet_num(largest_acked.unwrap_or(0), pkt_num_truncated, len); + assert_eq!(pkt_num, pkt_num_decoded); } // Test case in A.2. Sample Packet Number Encoding Algorithm From 9cc6a629776ae72efee0971b85744ff5c6b72459 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Tue, 22 Oct 2024 14:45:33 +0800 Subject: [PATCH 08/11] Add `--recv` option for tquic_time_offset.py (#415) --- tools/script/tquic_time_offset.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tools/script/tquic_time_offset.py b/tools/script/tquic_time_offset.py index d24d3991..c0bd8f0d 100755 --- a/tools/script/tquic_time_offset.py +++ b/tools/script/tquic_time_offset.py @@ -9,8 +9,10 @@ import argparse import matplotlib.pyplot as plt +STREAM_SEND_FORMAT=r"{} sent packet OneRTT.*?STREAM id={} off=(\d+) len=\d+ fin=(?:true|false)" +STREAM_RECV_FORMAT=r"{} recv frame STREAM id={} off=(\d+) len=\d+ fin=(?:true|false)" -def parse_log(log_file, cid, stream_id): +def parse_log(log_file, cid, stream_id, recv): with open(log_file, "r") as file: log_data = file.readlines() @@ -19,11 +21,8 @@ def parse_log(log_file, cid, stream_id): # Refine the regular expression to match timestamps and stream offsets timestamp_pattern = re.compile(r"\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3})Z") - connection_stream_pattern = re.compile( - r"{} sent packet OneRTT.*?STREAM id={} off=(\d+) len=\d+ fin=(?:true|false)".format( - cid, stream_id - ) - ) + stream_format = STREAM_RECV_FORMAT if recv else STREAM_SEND_FORMAT + connection_stream_pattern = re.compile(stream_format.format(cid, stream_id)) for line in log_data: timestamp_match = timestamp_pattern.search(line) @@ -57,6 +56,7 @@ def plot_offsets(timestamps, offsets, connection_trace_id, stream_id): plt.matplotlib.dates.DateFormatter("%H:%M:%S.%f") ) plt.savefig(output_file_name) + print("Found %d items, figure %s" % (len(timestamps), output_file_name)) if __name__ == "__main__": @@ -81,10 +81,13 @@ def plot_offsets(timestamps, offsets, connection_trace_id, stream_id): parser.add_argument( "-s", "--stream_id", type=int, help="Stream id, eg. 
0", required=True ) + parser.add_argument( + "-r", "--recv", type=bool, help="Recv side instead of send side", default=False + ) args = parser.parse_args() # Calling with command-line arguments timestamps, offsets = parse_log( - args.log_file, args.connection_trace_id, args.stream_id + args.log_file, args.connection_trace_id, args.stream_id, args.recv ) plot_offsets(timestamps, offsets, args.connection_trace_id, args.stream_id) From 50df594d80fea22948521f02fbb8a42b8fbdfcaa Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Thu, 24 Oct 2024 14:10:28 +0800 Subject: [PATCH 09/11] Add qlog feature flag to reduce the size of compiled library (#416) --- .github/workflows/tquic-features.yml | 30 +++++++++++++ Cargo.toml | 17 ++++++-- src/connection/connection.rs | 24 ++++++++++ src/connection/recovery.rs | 65 +++++++++++++++++++++++----- src/ffi.rs | 3 ++ src/frame.rs | 6 ++- src/lib.rs | 2 + src/packet.rs | 2 + src/trans_param.rs | 3 ++ 9 files changed, 137 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/tquic-features.yml diff --git a/.github/workflows/tquic-features.yml b/.github/workflows/tquic-features.yml new file mode 100644 index 00000000..54270377 --- /dev/null +++ b/.github/workflows/tquic-features.yml @@ -0,0 +1,30 @@ +name: Features + +on: + push: + branches: [ "develop" ] + pull_request: + branches: [ "develop" ] + +env: + CARGO_TERM_COLOR: always + +jobs: + features: + name: Build and test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: 'recursive' + - name: Update rust + run: rustup update + - name: Build without default features + run: cargo build --all --no-default-features && cargo test --no-default-features + - name: Build with feature(s) ffi + run: cargo build --all --no-default-features -F ffi && cargo test --no-default-features -F ffi + - name: Build with feature(s) qlog + run: cargo build --all --no-default-features -F qlog && cargo test --no-default-features -F qlog + - name: Build with feature(s) ffi,qlog + run: cargo build --all --no-default-features -F ffi,qlog && cargo test --no-default-features -F ffi,qlog + diff --git a/Cargo.toml b/Cargo.toml index 21133693..e72ff607 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,10 +25,19 @@ include = [ "/deps/boringssl/LICENSE", ] +[package.metadata.docs.rs] +no-default-features = true +features = ["qlog"] + [features] +default = ["qlog"] + # build the FFI API ffi = [] +# enable support for the qlog +qlog = ["dep:serde", "dep:serde_json", "dep:serde_derive", "dep:serde_with"] + [dependencies] bytes = "1" rustc-hash = "1.1" @@ -43,10 +52,10 @@ strum_macros = "0.24" rand = "0.8.5" smallvec = { version = "1.10", features = ["serde", "union"] } lru = "0.12" -serde = { version = "1.0.139", features = ["derive"] } -serde_json = { version = "1.0", features = ["preserve_order"] } -serde_derive = "1.0" -serde_with = "3.0.0" +serde = { version = "1.0.139", features = ["derive"], optional=true } +serde_json = { version = "1.0", features = ["preserve_order"], optional=true } +serde_derive = { version = "1.0", optional=true } +serde_with = { version="3.0.0", optional=true } hex = "0.4" priority-queue = "1.3.2" sfv = { version = "0.9" } diff --git a/src/connection/connection.rs b/src/connection/connection.rs index 2af19cee..cee0ed8c 100644 --- a/src/connection/connection.rs +++ b/src/connection/connection.rs @@ -52,7 +52,9 @@ use crate::multipath_scheduler::*; use crate::packet; use crate::packet::PacketHeader; use crate::packet::PacketType; +#[cfg(feature = "qlog")] use crate::qlog; 
+#[cfg(feature = "qlog")] use crate::qlog::events; use crate::tls; use crate::tls::Keys; @@ -160,6 +162,7 @@ pub struct Connection { context: Option>, /// Qlog writer + #[cfg(feature = "qlog")] qlog: Option, /// Unique trace id for deubg logging @@ -272,6 +275,7 @@ impl Connection { events: EventQueue::default(), queues: None, context: None, + #[cfg(feature = "qlog")] qlog: None, trace_id, }; @@ -357,6 +361,7 @@ impl Connection { /// Set qlog output to the given [`writer`] /// /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html + #[cfg(feature = "qlog")] pub fn set_qlog( &mut self, writer: Box, @@ -603,6 +608,7 @@ impl Connection { // Process each QUIC frame in the QUIC packet let mut ack_eliciting_pkt = false; let mut probing_pkt = true; + #[cfg(feature = "qlog")] let mut qframes = vec![]; while !payload.is_empty() { @@ -613,6 +619,7 @@ impl Connection { if !frame.probing() { probing_pkt = false; } + #[cfg(feature = "qlog")] if self.qlog.is_some() { qframes.push(frame.to_qlog()); } @@ -622,6 +629,7 @@ impl Connection { } // Write events to qlog. + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { // Write TransportPacketReceived event to qlog. Self::qlog_quic_packet_received(qlog, &hdr, pkt_num, read, payload_len, qframes); @@ -750,6 +758,7 @@ impl Connection { space_id, &mut self.spaces, handshake_status, + #[cfg(feature = "qlog")] self.qlog.as_mut(), now, )?; @@ -1205,6 +1214,7 @@ impl Connection { self.flags.insert(AppliedPeerTransportParams); // Write TransportParametersSet event to qlog. + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { Self::qlog_quic_params_set( qlog, @@ -1427,6 +1437,7 @@ impl Connection { .on_stream_frame_acked(stream_id, offset, length); // Write QuicStreamDataMoved event to qlog + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { Self::qlog_quic_data_acked(qlog, stream_id, offset, length); } @@ -1788,6 +1799,7 @@ impl Connection { ); // Write events to qlog. + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { // Write TransportPacketSent event to qlog. let mut qframes = Vec::with_capacity(sent_pkt.frames.len()); @@ -3231,6 +3243,7 @@ impl Connection { path.space_id, &mut self.spaces, handshake_status, + #[cfg(feature = "qlog")] self.qlog.as_mut(), now, ); @@ -3238,6 +3251,7 @@ impl Connection { self.stats.lost_bytes += lost_bytes; // Write RecoveryMetricsUpdate event to qlog. + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { path.recovery.qlog_recovery_metrics_updated(qlog); } @@ -3832,6 +3846,7 @@ impl Connection { match self.streams.stream_read(stream_id, out) { Ok((read, fin)) => { // Write QuicStreamDataMoved event to qlog + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { Self::qlog_transport_data_read(qlog, stream_id, read_off.unwrap_or(0), read); } @@ -3850,6 +3865,7 @@ impl Connection { match self.streams.stream_write(stream_id, buf, fin) { Ok(written) => { // Write QuicStreamDataMoved event to qlog + #[cfg(feature = "qlog")] if let Some(qlog) = &mut self.qlog { Self::qlog_transport_data_write( qlog, @@ -4072,6 +4088,7 @@ impl Connection { } /// Write a QuicParametersSet event to the qlog. + #[cfg(feature = "qlog")] fn qlog_quic_params_set( qlog: &mut qlog::QlogWriter, params: &TransportParams, @@ -4083,6 +4100,7 @@ impl Connection { } /// Write a QuicPacketReceived event to the qlog. 
+ #[cfg(feature = "qlog")] fn qlog_quic_packet_received( qlog: &mut qlog::QlogWriter, hdr: &PacketHeader, @@ -4118,6 +4136,7 @@ impl Connection { } /// Write a QuicPacketSent event to the qlog. + #[cfg(feature = "qlog")] fn qlog_quic_packet_sent( qlog: &mut qlog::QlogWriter, hdr: &PacketHeader, @@ -4156,6 +4175,7 @@ impl Connection { } /// Write a QuicStreamDataMoved event to the qlog. + #[cfg(feature = "qlog")] fn qlog_quic_data_acked( qlog: &mut qlog::QlogWriter, stream_id: u64, @@ -4174,6 +4194,7 @@ impl Connection { } /// Write a QuicStreamDataMoved event to the qlog. + #[cfg(feature = "qlog")] fn qlog_transport_data_read( qlog: &mut qlog::QlogWriter, stream_id: u64, @@ -4192,6 +4213,7 @@ impl Connection { } /// Write a QuicStreamDataMoved event to the qlog. + #[cfg(feature = "qlog")] fn qlog_transport_data_write( qlog: &mut qlog::QlogWriter, stream_id: u64, @@ -6277,6 +6299,7 @@ pub(crate) mod tests { } #[test] + #[cfg(feature = "qlog")] fn ping() -> Result<()> { let mut client_config = TestPair::new_test_config(false)?; client_config.enable_dplpmtud(false); @@ -7638,6 +7661,7 @@ pub(crate) mod tests { } #[test] + #[cfg(feature = "qlog")] fn conn_write_qlog() -> Result<()> { let clog = NamedTempFile::new().unwrap(); let mut cfile = clog.reopen().unwrap(); diff --git a/src/connection/recovery.rs b/src/connection/recovery.rs index bb4c9669..64ec2bcf 100644 --- a/src/connection/recovery.rs +++ b/src/connection/recovery.rs @@ -35,7 +35,9 @@ use crate::congestion_control::CongestionController; use crate::congestion_control::Pacer; use crate::connection::Timer; use crate::frame; +#[cfg(feature = "qlog")] use crate::qlog; +#[cfg(feature = "qlog")] use crate::qlog::events::EventData; use crate::ranges::RangeSet; use crate::Error; @@ -114,6 +116,7 @@ pub struct Recovery { /// It tracks the last metrics used for emitting qlog RecoveryMetricsUpdated /// event. + #[cfg(feature = "qlog")] last_metrics: RecoveryMetrics, /// Trace id. @@ -140,6 +143,7 @@ impl Recovery { cache_pkt_size: conf.max_datagram_size, last_cwnd_limited_time: None, stats: PathStats::default(), + #[cfg(feature = "qlog")] last_metrics: RecoveryMetrics::default(), trace_id: String::from(""), } @@ -228,7 +232,7 @@ impl Recovery { space_id: SpaceId, spaces: &mut PacketNumSpaceMap, handshake_status: HandshakeStatus, - qlog: Option<&mut qlog::QlogWriter>, + #[cfg(feature = "qlog")] qlog: Option<&mut qlog::QlogWriter>, now: Instant, ) -> Result<(u64, u64)> { let space = spaces.get_mut(space_id).ok_or(Error::InternalError)?; @@ -279,7 +283,12 @@ impl Recovery { } // Detect lost packets - let (lost_packets, lost_bytes) = self.detect_lost_packets(space, qlog, now); + let (lost_packets, lost_bytes) = self.detect_lost_packets( + space, + #[cfg(feature = "qlog")] + qlog, + now, + ); // Remove acked or lost packets from sent queue in batch. 
self.drain_sent_packets(space, now, self.rtt.smoothed_rtt()); @@ -415,7 +424,7 @@ impl Recovery { fn detect_lost_packets( &mut self, space: &mut PacketNumSpace, - mut qlog: Option<&mut qlog::QlogWriter>, + #[cfg(feature = "qlog")] mut qlog: Option<&mut qlog::QlogWriter>, now: Instant, ) -> (u64, u64) { space.loss_time = None; @@ -467,6 +476,7 @@ impl Recovery { if !unacked.pmtu_probe { latest_lost_packet = Some(unacked.clone()); } + #[cfg(feature = "qlog")] if let Some(qlog) = qlog.as_mut() { self.qlog_recovery_packet_lost(qlog, unacked); } @@ -584,7 +594,7 @@ impl Recovery { space_id: SpaceId, spaces: &mut PacketNumSpaceMap, handshake_status: HandshakeStatus, - qlog: Option<&mut qlog::QlogWriter>, + #[cfg(feature = "qlog")] qlog: Option<&mut qlog::QlogWriter>, now: Instant, ) -> (u64, u64) { let (earliest_loss_time, sid) = self.get_loss_time_and_space(space_id, spaces); @@ -596,7 +606,12 @@ impl Recovery { // Loss timer mode if earliest_loss_time.is_some() { // Time threshold loss detection. - let (lost_packets, lost_bytes) = self.detect_lost_packets(space, qlog, now); + let (lost_packets, lost_bytes) = self.detect_lost_packets( + space, + #[cfg(feature = "qlog")] + qlog, + now, + ); self.drain_sent_packets(space, now, self.rtt.smoothed_rtt()); self.set_loss_detection_timer(space_id, spaces, handshake_status, now); return (lost_packets, lost_bytes); @@ -937,6 +952,7 @@ impl Recovery { } /// Write a qlog RecoveryMetricsUpdated event if any recovery metric is updated. + #[cfg(feature = "qlog")] pub(crate) fn qlog_recovery_metrics_updated(&mut self, qlog: &mut qlog::QlogWriter) { let mut updated = false; @@ -1009,6 +1025,7 @@ impl Recovery { } /// Write a qlog RecoveryPacketLost event. + #[cfg(feature = "qlog")] pub(crate) fn qlog_recovery_packet_lost( &mut self, qlog: &mut qlog::QlogWriter, @@ -1029,6 +1046,7 @@ impl Recovery { } /// Metrics used for emitting qlog RecoveryMetricsUpdated event. 
+#[cfg(feature = "qlog")] #[derive(Default)] struct RecoveryMetrics { /// The minimum RTT observed on the path, ignoring ack delay @@ -1141,6 +1159,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; @@ -1150,8 +1169,14 @@ mod tests { // Advance ticks until loss timeout now = recovery.loss_detection_timer().unwrap(); - let (lost_pkts, lost_bytes) = - recovery.on_loss_detection_timeout(SpaceId::Handshake, &mut spaces, status, None, now); + let (lost_pkts, lost_bytes) = recovery.on_loss_detection_timeout( + SpaceId::Handshake, + &mut spaces, + status, + #[cfg(feature = "qlog")] + None, + now, + ); assert_eq!(lost_pkts, 1); assert_eq!(lost_bytes, 1001); assert_eq!(spaces.get(space_id).unwrap().ack_eliciting_in_flight, 0); @@ -1213,6 +1238,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; @@ -1228,6 +1254,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; @@ -1281,6 +1308,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; @@ -1291,8 +1319,14 @@ mod tests { // Advance ticks until pto timeout now = recovery.loss_detection_timer().unwrap(); - let (lost_pkts, lost_bytes) = - recovery.on_loss_detection_timeout(SpaceId::Handshake, &mut spaces, status, None, now); + let (lost_pkts, lost_bytes) = recovery.on_loss_detection_timeout( + SpaceId::Handshake, + &mut spaces, + status, + #[cfg(feature = "qlog")] + None, + now, + ); assert_eq!(recovery.pto_count, 1); assert_eq!(lost_pkts, 0); assert_eq!(lost_bytes, 0); @@ -1351,6 +1385,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; @@ -1454,7 +1489,16 @@ mod tests { vec![500..950], )); // Fake receiving duplicated ACK. - recovery.on_ack_received(&ack, 0, SpaceId::Data, &mut spaces, status, None, now)?; + recovery.on_ack_received( + &ack, + 0, + SpaceId::Data, + &mut spaces, + status, + #[cfg(feature = "qlog")] + None, + now, + )?; assert!(check_acked_packets( &spaces.get(SpaceId::Data).unwrap().sent, vec![500..950], @@ -1536,6 +1580,7 @@ mod tests { SpaceId::Handshake, &mut spaces, status, + #[cfg(feature = "qlog")] None, now, )?; diff --git a/src/ffi.rs b/src/ffi.rs index d971f249..e178ad59 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -114,6 +114,7 @@ use crate::h3::Http3Config; use crate::h3::Http3Event; use crate::h3::Http3Headers; use crate::h3::NameValue; +#[cfg(feature = "qlog")] use crate::qlog::events; use crate::tls::SslCtx; use crate::tls::TlsConfig; @@ -1390,6 +1391,7 @@ pub extern "C" fn quic_conn_set_keylog_fd(conn: &mut Connection, fd: c_int) { /// `data` is a qlog message and `argp` is user-defined data that will be passed to the callback. /// `title` and `desc` respectively refer to the "title" and "description" sections of qlog. #[no_mangle] +#[cfg(feature = "qlog")] pub extern "C" fn quic_conn_set_qlog( conn: &mut Connection, cb: extern "C" fn(data: *const u8, data_len: size_t, argp: *mut c_void), @@ -1412,6 +1414,7 @@ pub extern "C" fn quic_conn_set_qlog( /// Set qlog file. /// Note: The API is not applicable for Windows. 
#[no_mangle] +#[cfg(feature = "qlog")] #[cfg(unix)] pub extern "C" fn quic_conn_set_qlog_fd( conn: &mut Connection, diff --git a/src/frame.rs b/src/frame.rs index 52814dc7..a5ec9657 100644 --- a/src/frame.rs +++ b/src/frame.rs @@ -19,11 +19,14 @@ use crate::codec::Decoder; use crate::codec::Encoder; use crate::error::Error; use crate::packet::PacketType; +#[cfg(feature = "qlog")] use crate::qlog; +#[cfg(feature = "qlog")] use crate::qlog::events::ErrorSpace; +#[cfg(feature = "qlog")] use crate::qlog::events::QuicFrame; +#[cfg(feature = "qlog")] use crate::qlog::events::StreamType; -use crate::qlog::events::TokenType; use crate::ranges::RangeSet; use crate::token::ResetToken; use crate::ConnectionId; @@ -767,6 +770,7 @@ impl Frame { } } + #[cfg(feature = "qlog")] pub fn to_qlog(&self) -> QuicFrame { match self { Frame::Paddings { .. } => QuicFrame::Padding, diff --git a/src/lib.rs b/src/lib.rs index 8f483b9c..a533ed70 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,6 +47,7 @@ //! and dependencies: //! //! * `ffi`: Build and expose the FFI API. +//! * `qlog`: Enable support for the qlog. #![allow(unused_imports)] #![allow(dead_code)] @@ -1238,6 +1239,7 @@ mod tls; #[path = "h3/h3.rs"] pub mod h3; +#[cfg(feature = "qlog")] #[path = "qlog/qlog.rs"] mod qlog; diff --git a/src/packet.rs b/src/packet.rs index 22297fc4..bb09395d 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -26,6 +26,7 @@ use self::PacketType::*; use crate::codec::Decoder; use crate::codec::Encoder; use crate::connection::space::SpaceId; +#[cfg(feature = "qlog")] use crate::qlog; use crate::ranges; use crate::tls; @@ -132,6 +133,7 @@ impl PacketType { } /// Get the packet type for Qlog. + #[cfg(feature = "qlog")] pub fn to_qlog(self) -> qlog::events::PacketType { match self { VersionNegotiation => qlog::events::PacketType::VersionNegotiation, diff --git a/src/trans_param.rs b/src/trans_param.rs index 3ad26988..b2db8262 100644 --- a/src/trans_param.rs +++ b/src/trans_param.rs @@ -22,7 +22,9 @@ use crate::codec; use crate::codec::Decoder; use crate::codec::Encoder; use crate::error::Error; +#[cfg(feature = "qlog")] use crate::qlog; +#[cfg(feature = "qlog")] use crate::qlog::events::EventData; use crate::tls; use crate::token::ResetToken; @@ -406,6 +408,7 @@ impl TransportParams { } /// Create TransportParametersSet event data for Qlog. 
+ #[cfg(feature = "qlog")] pub fn to_qlog(&self, owner: qlog::events::Owner, cipher: Option) -> EventData { let original_destination_connection_id = Some(format!( "{:?}", From 59e18d337b9c6d4265dcfdbed63b0c94d71f1329 Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Fri, 25 Oct 2024 16:21:38 +0800 Subject: [PATCH 10/11] Optimize pacing for acknowledgement packets (#417) --- src/connection/connection.rs | 5 +++++ src/connection/recovery.rs | 37 +++++++++++++++++++++++++++--------- src/connection/space.rs | 4 ++++ 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/src/connection/connection.rs b/src/connection/connection.rs index cee0ed8c..08efa977 100644 --- a/src/connection/connection.rs +++ b/src/connection/connection.rs @@ -1786,6 +1786,7 @@ impl Connection { in_flight: write_status.in_flight, has_data: write_status.has_data, pmtu_probe: write_status.is_pmtu_probe, + pacing: write_status.pacing, frames: write_status.frames, rate_sample_state: Default::default(), buffer_flags: write_status.buffer_flags, @@ -1930,6 +1931,7 @@ impl Connection { if !st.is_probe && !r.can_send() { return Err(Error::Done); } + st.pacing = true; // Write PMTU probe frames // Note: To probe the path MTU, the write size will exceed `left` but @@ -4483,6 +4485,9 @@ struct FrameWriteStatus { /// Whether it is a PMTU probe packet is_pmtu_probe: bool, + /// Whether it consumes the pacer's tokens + pacing: bool, + /// Packet overhead (i.e. packet header and crypto overhead) in bytes overhead: usize, diff --git a/src/connection/recovery.rs b/src/connection/recovery.rs index 64ec2bcf..3d5699cd 100644 --- a/src/connection/recovery.rs +++ b/src/connection/recovery.rs @@ -167,6 +167,7 @@ impl Recovery { ) { let in_flight = pkt.in_flight; let ack_eliciting = pkt.ack_eliciting; + let pacing = pkt.pacing; let sent_size = pkt.sent_size; pkt.time_sent = now; @@ -218,7 +219,9 @@ impl Recovery { } // Update pacing tokens number. - self.pacer.on_sent(sent_size as u64); + if pacing { + self.pacer.on_sent(sent_size as u64); + } } /// Handle packet acknowledgment event. @@ -845,8 +848,29 @@ impl Recovery { /// Check whether this path can still send packets. 
pub(crate) fn can_send(&mut self) -> bool { - self.bytes_in_flight < self.congestion.congestion_window() as usize - && (!self.pacer.enabled() || self.can_pacing()) + // Check congestion controller + if self.bytes_in_flight >= self.congestion.congestion_window() as usize { + trace!( + "{} sending is limited by congestion controller, inflight {}, window {}", + self.trace_id, + self.bytes_in_flight, + self.congestion.congestion_window() + ); + return false; + } + + // Check pacer + if self.pacer.enabled() && !self.can_pacing() { + trace!( + "{} sending is limited by pacer, pacing timer {:?}", + self.trace_id, + self.pacer_timer + .map(|t| t.saturating_duration_since(Instant::now())) + ); + return false; + } + + true } fn can_pacing(&mut self) -> bool { @@ -865,12 +889,7 @@ impl Recovery { ); } - if self.pacer_timer.is_none() { - true - } else { - trace!("{} pacing timer is {:?}", self.trace_id, self.pacer_timer); - false - } + self.pacer_timer.is_none() } /// Update statistics for the packet sent event diff --git a/src/connection/space.rs b/src/connection/space.rs index 6ec9f577..154cc95c 100644 --- a/src/connection/space.rs +++ b/src/connection/space.rs @@ -369,6 +369,9 @@ pub struct SentPacket { /// Whether it is a PMUT probe packet pub pmtu_probe: bool, + /// Whether it consumes the pacer's tokens + pub pacing: bool, + /// The number of bytes sent in the packet, not including UDP or IP overhead, /// but including QUIC framing overhead. pub sent_size: usize, @@ -393,6 +396,7 @@ impl Default for SentPacket { in_flight: false, has_data: false, pmtu_probe: false, + pacing: false, sent_size: 0, rate_sample_state: RateSamplePacketState::default(), buffer_flags: BufferFlags::default(), From 75138e998764d40750e2a931695072798b1bd79d Mon Sep 17 00:00:00 2001 From: Sijie Yang Date: Sun, 20 Oct 2024 06:32:50 +0000 Subject: [PATCH 11/11] Update CHANGELOG and VERSION --- CHANGELOG.md | 14 ++++++++++++++ Cargo.toml | 2 +- tools/Cargo.toml | 4 ++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 597b7882..7dc934cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [v1.4.0] - 2024-10-28 + +### Added +- Add `qlog` feature flag to reduce the size of compiled library +- Optimize pacing for acknowledgement packets +- Minor tweaks for tquic_time_offset.py + +### Fixed +- Fix checking packet header under disable_1rtt_encryption mode +- Fix the length of truncated packet number +- Some fixes for tquic_tools_test.sh + + ## [v1.3.1] - 2024-10-11 ### Added @@ -326,6 +339,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Provide example clients and servers. 
+[v1.4.0]: https://github.com/tencent/tquic/compare/v1.3.1...v1.4.0 [v1.3.1]: https://github.com/tencent/tquic/compare/v1.3.0...v1.3.1 [v1.3.0]: https://github.com/tencent/tquic/compare/v1.2.0...v1.3.0 [v1.2.0]: https://github.com/tencent/tquic/compare/v1.1.0...v1.2.0 diff --git a/Cargo.toml b/Cargo.toml index e72ff607..eae6b676 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tquic" -version = "1.3.1" +version = "1.4.0" edition = "2021" rust-version = "1.70.0" license = "Apache-2.0" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index ebb48ab8..d02d49b4 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tquic_tools" -version = "1.3.1" +version = "1.4.0" edition = "2021" rust-version = "1.70.0" license = "Apache-2.0" @@ -22,7 +22,7 @@ slab = "0.4" rand = "0.8.5" statrs = "0.16" signal-hook = "0.3.17" -tquic = { path = "..", version = "1.3.1"} +tquic = { path = "..", version = "1.4.0"} [target."cfg(unix)".dependencies] jemallocator = { version = "0.5", package = "tikv-jemallocator" }