diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs index 6ef7e0837f81..da2d107b5488 100644 --- a/compute_tools/src/extension_server.rs +++ b/compute_tools/src/extension_server.rs @@ -107,7 +107,7 @@ pub fn get_pg_version(pgbin: &str) -> String { // pg_config --version returns a (platform specific) human readable string // such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc. let human_version = get_pg_config("--version", pgbin); - return parse_pg_version(&human_version).to_string(); + parse_pg_version(&human_version).to_string() } fn parse_pg_version(human_version: &str) -> &str { diff --git a/libs/pageserver_api/src/models/partitioning.rs b/libs/pageserver_api/src/models/partitioning.rs index f6644be6359c..69832b9a0dee 100644 --- a/libs/pageserver_api/src/models/partitioning.rs +++ b/libs/pageserver_api/src/models/partitioning.rs @@ -16,7 +16,7 @@ impl serde::Serialize for Partitioning { { pub struct KeySpace<'a>(&'a crate::keyspace::KeySpace); - impl<'a> serde::Serialize for KeySpace<'a> { + impl serde::Serialize for KeySpace<'_> { fn serialize(&self, serializer: S) -> std::result::Result where S: serde::Serializer, @@ -44,7 +44,7 @@ impl serde::Serialize for Partitioning { pub struct WithDisplay<'a, T>(&'a T); -impl<'a, T: std::fmt::Display> serde::Serialize for WithDisplay<'a, T> { +impl serde::Serialize for WithDisplay<'_, T> { fn serialize(&self, serializer: S) -> std::result::Result where S: serde::Serializer, @@ -55,7 +55,7 @@ impl<'a, T: std::fmt::Display> serde::Serialize for WithDisplay<'a, T> { pub struct KeyRange<'a>(&'a std::ops::Range); -impl<'a> serde::Serialize for KeyRange<'a> { +impl serde::Serialize for KeyRange<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 085540e7b97b..9d274b25e6c3 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -921,12 
+921,11 @@ impl PostgresBackendReader { /// A futures::AsyncWrite implementation that wraps all data written to it in CopyData /// messages. /// - pub struct CopyDataWriter<'a, IO> { pgb: &'a mut PostgresBackend<IO>, } -impl<'a, IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for CopyDataWriter<'a, IO> { +impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for CopyDataWriter<'_, IO> { fn poll_write( self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index a01191bd5de3..9ffaaba584c2 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -727,7 +727,7 @@ pub const SQLSTATE_INTERNAL_ERROR: &[u8; 5] = b"XX000"; pub const SQLSTATE_ADMIN_SHUTDOWN: &[u8; 5] = b"57P01"; pub const SQLSTATE_SUCCESSFUL_COMPLETION: &[u8; 5] = b"00000"; -impl<'a> BeMessage<'a> { +impl BeMessage<'_> { /// Serialize `message` to the given `buf`. /// Apart from smart memory managemet, BytesMut is good here as msg len /// precedes its body and it is handy to write it down first and then fill diff --git a/libs/tenant_size_model/src/svg.rs b/libs/tenant_size_model/src/svg.rs index 0de2890bb414..25ebb1c3d8c9 100644 --- a/libs/tenant_size_model/src/svg.rs +++ b/libs/tenant_size_model/src/svg.rs @@ -97,7 +97,7 @@ pub fn draw_svg( Ok(result) } -impl<'a> SvgDraw<'a> { +impl SvgDraw<'_> { fn calculate_svg_layout(&mut self) { // Find x scale let segments = &self.storage.segments; diff --git a/libs/tracing-utils/src/http.rs b/libs/tracing-utils/src/http.rs index e6fdf9be45ff..2168beee8809 100644 --- a/libs/tracing-utils/src/http.rs +++ b/libs/tracing-utils/src/http.rs @@ -82,7 +82,7 @@ where fn extract_remote_context(headers: &HeaderMap) -> opentelemetry::Context { struct HeaderExtractor<'a>(&'a HeaderMap); - impl<'a> opentelemetry::propagation::Extractor for HeaderExtractor<'a> { + impl opentelemetry::propagation::Extractor for HeaderExtractor<'_> { fn get(&self, key: &str) -> Option<&str> { self.0.get(key).and_then(|value| value.to_str().ok()) } diff --git 
a/libs/utils/src/lsn.rs b/libs/utils/src/lsn.rs index 06d5c27ebf50..3ec2c130bdb4 100644 --- a/libs/utils/src/lsn.rs +++ b/libs/utils/src/lsn.rs @@ -37,7 +37,7 @@ impl<'de> Deserialize<'de> for Lsn { is_human_readable_deserializer: bool, } - impl<'de> Visitor<'de> for LsnVisitor { + impl Visitor<'_> for LsnVisitor { type Value = Lsn; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { diff --git a/libs/utils/src/poison.rs b/libs/utils/src/poison.rs index c3e2fba20c3c..ab9ebb3c5a8f 100644 --- a/libs/utils/src/poison.rs +++ b/libs/utils/src/poison.rs @@ -73,7 +73,7 @@ impl Poison { /// and subsequent calls to [`Poison::check_and_arm`] will fail with an error. pub struct Guard<'a, T>(&'a mut Poison<T>); -impl<'a, T> Guard<'a, T> { +impl<T> Guard<'_, T> { pub fn data(&self) -> &T { &self.0.data } @@ -94,7 +94,7 @@ impl<'a, T> Guard<'a, T> { } } -impl<'a, T> Drop for Guard<'a, T> { +impl<T> Drop for Guard<'_, T> { fn drop(&mut self) { match self.0.state { State::Clean => { diff --git a/libs/utils/src/shard.rs b/libs/utils/src/shard.rs index d146010b417a..782cddc599b0 100644 --- a/libs/utils/src/shard.rs +++ b/libs/utils/src/shard.rs @@ -164,7 +164,7 @@ impl TenantShardId { } } -impl<'a> std::fmt::Display for ShardSlug<'a> { +impl std::fmt::Display for ShardSlug<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, diff --git a/libs/utils/src/simple_rcu.rs b/libs/utils/src/simple_rcu.rs index 01750b2aef12..6700f86e4aea 100644 --- a/libs/utils/src/simple_rcu.rs +++ b/libs/utils/src/simple_rcu.rs @@ -152,7 +152,7 @@ pub struct RcuWriteGuard<'a, V> { inner: RwLockWriteGuard<'a, RcuInner<V>>, } -impl<'a, V> Deref for RcuWriteGuard<'a, V> { +impl<V> Deref for RcuWriteGuard<'_, V> { type Target = V; fn deref(&self) -> &V { @@ -160,7 +160,7 @@ impl<'a, V> Deref for RcuWriteGuard<'a, V> { } } -impl<'a, V> RcuWriteGuard<'a, V> { +impl<V> RcuWriteGuard<'_, V> { /// /// Store a new value. 
The new value will be written to the Rcu immediately, /// and will be immediately seen by any `read` calls that start afterwards. diff --git a/libs/utils/src/sync/heavier_once_cell.rs b/libs/utils/src/sync/heavier_once_cell.rs index dc711fb0284e..66c20655547b 100644 --- a/libs/utils/src/sync/heavier_once_cell.rs +++ b/libs/utils/src/sync/heavier_once_cell.rs @@ -219,7 +219,7 @@ impl<'a, T> CountWaitingInitializers<'a, T> { } } -impl<'a, T> Drop for CountWaitingInitializers<'a, T> { +impl Drop for CountWaitingInitializers<'_, T> { fn drop(&mut self) { self.0.initializers.fetch_sub(1, Ordering::Relaxed); } @@ -250,7 +250,7 @@ impl std::ops::DerefMut for Guard<'_, T> { } } -impl<'a, T> Guard<'a, T> { +impl Guard<'_, T> { /// Take the current value, and a new permit for it's deinitialization. /// /// The permit will be on a semaphore part of the new internal value, and any following diff --git a/libs/utils/src/tracing_span_assert.rs b/libs/utils/src/tracing_span_assert.rs index d24c81ad0bcc..add2fa79202c 100644 --- a/libs/utils/src/tracing_span_assert.rs +++ b/libs/utils/src/tracing_span_assert.rs @@ -184,23 +184,23 @@ mod tests { struct MemoryIdentity<'a>(&'a dyn Extractor); - impl<'a> MemoryIdentity<'a> { + impl MemoryIdentity<'_> { fn as_ptr(&self) -> *const () { self.0 as *const _ as *const () } } - impl<'a> PartialEq for MemoryIdentity<'a> { + impl PartialEq for MemoryIdentity<'_> { fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } } - impl<'a> Eq for MemoryIdentity<'a> {} - impl<'a> Hash for MemoryIdentity<'a> { + impl Eq for MemoryIdentity<'_> {} + impl Hash for MemoryIdentity<'_> { fn hash(&self, state: &mut H) { self.as_ptr().hash(state); } } - impl<'a> fmt::Debug for MemoryIdentity<'a> { + impl fmt::Debug for MemoryIdentity<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:p}: {}", self.as_ptr(), self.0.id()) } diff --git a/pageserver/compaction/src/helpers.rs b/pageserver/compaction/src/helpers.rs index 
8ed1d1608201..9dbb6ecedf23 100644 --- a/pageserver/compaction/src/helpers.rs +++ b/pageserver/compaction/src/helpers.rs @@ -133,7 +133,7 @@ enum LazyLoadLayer<'a, E: CompactionJobExecutor> { Loaded(VecDeque<>::DeltaEntry<'a>>), Unloaded(&'a E::DeltaLayer), } -impl<'a, E: CompactionJobExecutor> LazyLoadLayer<'a, E> { +impl LazyLoadLayer<'_, E> { fn min_key(&self) -> E::Key { match self { Self::Loaded(entries) => entries.front().unwrap().key(), @@ -147,23 +147,23 @@ impl<'a, E: CompactionJobExecutor> LazyLoadLayer<'a, E> { } } } -impl<'a, E: CompactionJobExecutor> PartialOrd for LazyLoadLayer<'a, E> { +impl PartialOrd for LazyLoadLayer<'_, E> { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl<'a, E: CompactionJobExecutor> Ord for LazyLoadLayer<'a, E> { +impl Ord for LazyLoadLayer<'_, E> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { // reverse order so that we get a min-heap (other.min_key(), other.min_lsn()).cmp(&(self.min_key(), self.min_lsn())) } } -impl<'a, E: CompactionJobExecutor> PartialEq for LazyLoadLayer<'a, E> { +impl PartialEq for LazyLoadLayer<'_, E> { fn eq(&self, other: &Self) -> bool { self.cmp(other) == std::cmp::Ordering::Equal } } -impl<'a, E: CompactionJobExecutor> Eq for LazyLoadLayer<'a, E> {} +impl Eq for LazyLoadLayer<'_, E> {} type LoadFuture<'a, E> = BoxFuture<'a, anyhow::Result>>; diff --git a/pageserver/src/consumption_metrics/upload.rs b/pageserver/src/consumption_metrics/upload.rs index 0325ee403a4c..1eb25d337b48 100644 --- a/pageserver/src/consumption_metrics/upload.rs +++ b/pageserver/src/consumption_metrics/upload.rs @@ -198,7 +198,7 @@ fn serialize_in_chunks<'a>( } } - impl<'a> ExactSizeIterator for Iter<'a> {} + impl ExactSizeIterator for Iter<'_> {} let buffer = bytes::BytesMut::new(); let inner = input.chunks(chunk_size); diff --git a/pageserver/src/disk_usage_eviction_task.rs b/pageserver/src/disk_usage_eviction_task.rs index 7ab2ba87420f..ca44fbe6ae6e 100644 --- 
a/pageserver/src/disk_usage_eviction_task.rs +++ b/pageserver/src/disk_usage_eviction_task.rs @@ -654,7 +654,7 @@ impl std::fmt::Debug for EvictionCandidate { let ts = chrono::DateTime::::from(self.last_activity_ts); let ts = ts.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true); struct DisplayIsDebug<'a, T>(&'a T); - impl<'a, T: std::fmt::Display> std::fmt::Debug for DisplayIsDebug<'a, T> { + impl std::fmt::Debug for DisplayIsDebug<'_, T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index b76efa5b487b..3e824b59fb86 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1189,7 +1189,7 @@ struct GlobalAndPerTimelineHistogramTimer<'a, 'c> { op: SmgrQueryType, } -impl<'a, 'c> Drop for GlobalAndPerTimelineHistogramTimer<'a, 'c> { +impl Drop for GlobalAndPerTimelineHistogramTimer<'_, '_> { fn drop(&mut self) { let elapsed = self.start.elapsed(); let ex_throttled = self @@ -1560,7 +1560,7 @@ impl BasebackupQueryTime { } } -impl<'a, 'c> BasebackupQueryTimeOngoingRecording<'a, 'c> { +impl BasebackupQueryTimeOngoingRecording<'_, '_> { pub(crate) fn observe(self, res: &Result) { let elapsed = self.start.elapsed(); let ex_throttled = self diff --git a/pageserver/src/statvfs.rs b/pageserver/src/statvfs.rs index 205605bc86a8..4e8be58d583b 100644 --- a/pageserver/src/statvfs.rs +++ b/pageserver/src/statvfs.rs @@ -90,7 +90,7 @@ pub mod mock { let used_bytes = walk_dir_disk_usage(tenants_dir, name_filter.as_deref()).unwrap(); // round it up to the nearest block multiple - let used_blocks = (used_bytes + (blocksize - 1)) / blocksize; + let used_blocks = used_bytes.div_ceil(*blocksize); if used_blocks > *total_blocks { panic!( diff --git a/pageserver/src/tenant/block_io.rs b/pageserver/src/tenant/block_io.rs index 3afa3a86b948..1c82e5454de9 100644 --- a/pageserver/src/tenant/block_io.rs +++ b/pageserver/src/tenant/block_io.rs @@ -50,13 +50,13 @@ 
impl From> for BlockLease<'static> { } #[cfg(test)] -impl<'a> From> for BlockLease<'a> { +impl From> for BlockLease<'_> { fn from(value: std::sync::Arc<[u8; PAGE_SZ]>) -> Self { BlockLease::Arc(value) } } -impl<'a> Deref for BlockLease<'a> { +impl Deref for BlockLease<'_> { type Target = [u8; PAGE_SZ]; fn deref(&self) -> &Self::Target { diff --git a/pageserver/src/tenant/disk_btree.rs b/pageserver/src/tenant/disk_btree.rs index 0107b0ac7e9c..b302cbc97559 100644 --- a/pageserver/src/tenant/disk_btree.rs +++ b/pageserver/src/tenant/disk_btree.rs @@ -131,7 +131,7 @@ struct OnDiskNode<'a, const L: usize> { values: &'a [u8], } -impl<'a, const L: usize> OnDiskNode<'a, L> { +impl OnDiskNode<'_, L> { /// /// Interpret a PAGE_SZ page as a node. /// diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 450084aca249..14b894d17c01 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -2182,7 +2182,7 @@ pub(crate) struct UploadQueueAccessor<'a> { inner: std::sync::MutexGuard<'a, UploadQueue>, } -impl<'a> UploadQueueAccessor<'a> { +impl UploadQueueAccessor<'_> { pub(crate) fn latest_uploaded_index_part(&self) -> &IndexPart { match &*self.inner { UploadQueue::Initialized(x) => &x.clean.0, diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs index 0aad5bf392d5..e680fd705b42 100644 --- a/pageserver/src/tenant/secondary/heatmap_uploader.rs +++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs @@ -108,7 +108,6 @@ impl scheduler::Completion for WriteComplete { /// when we last did a write. We only populate this after doing at least one /// write for a tenant -- this avoids holding state for tenants that have /// uploads disabled. - struct UploaderTenantState { // This Weak only exists to enable culling idle instances of this type // when the Tenant has been deallocated. 
diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 99bd0ece5763..a229b5956059 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -705,7 +705,7 @@ pub mod tests { /// Useful with `Key`, which has too verbose `{:?}` for printing multiple layers. struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range); -impl<'a, T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'a, T> { +impl std::fmt::Debug for RangeDisplayDebug<'_, T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}..{}", self.0.start, self.0.end) } diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 8be7d7876f82..d1079876f8fe 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -529,8 +529,7 @@ impl DeltaLayerWriterInner { key_end: Key, ctx: &RequestContext, ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> { - let index_start_blk = - ((self.blob_writer.size() + PAGE_SZ as u64 - 1) / PAGE_SZ as u64) as u32; + let index_start_blk = self.blob_writer.size().div_ceil(PAGE_SZ as u64) as u32; let mut file = self.blob_writer.into_inner(ctx).await?; diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index de8155f455d1..6c1a943470c2 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -828,8 +828,7 @@ impl ImageLayerWriterInner { ctx: &RequestContext, end_key: Option, ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> { - let index_start_blk = - ((self.blob_writer.size() + PAGE_SZ as u64 - 1) / PAGE_SZ as u64) as u32; + let index_start_blk = self.blob_writer.size().div_ceil(PAGE_SZ as u64) as u32; // Calculate compression ratio let compressed_size = self.blob_writer.size() - PAGE_SZ as u64; // 
Subtract PAGE_SZ for header diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index f29a33bae6de..38a7cd09aff3 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -978,7 +978,7 @@ impl LayerInner { let timeline = self .timeline .upgrade() - .ok_or_else(|| DownloadError::TimelineShutdown)?; + .ok_or(DownloadError::TimelineShutdown)?; // count cancellations, which currently remain largely unexpected let init_cancelled = scopeguard::guard((), |_| LAYER_IMPL_METRICS.inc_init_cancelled()); diff --git a/pageserver/src/tenant/storage_layer/layer_name.rs b/pageserver/src/tenant/storage_layer/layer_name.rs index ffe7ca5f3e40..8e750e1187a3 100644 --- a/pageserver/src/tenant/storage_layer/layer_name.rs +++ b/pageserver/src/tenant/storage_layer/layer_name.rs @@ -339,7 +339,7 @@ impl<'de> serde::Deserialize<'de> for LayerName { struct LayerNameVisitor; -impl<'de> serde::de::Visitor<'de> for LayerNameVisitor { +impl serde::de::Visitor<'_> for LayerNameVisitor { type Value = LayerName; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { diff --git a/pageserver/src/tenant/storage_layer/merge_iterator.rs b/pageserver/src/tenant/storage_layer/merge_iterator.rs index 0831fd9530da..f91e27241ddc 100644 --- a/pageserver/src/tenant/storage_layer/merge_iterator.rs +++ b/pageserver/src/tenant/storage_layer/merge_iterator.rs @@ -99,21 +99,21 @@ impl<'a> PeekableLayerIterRef<'a> { } } -impl<'a> std::cmp::PartialEq for IteratorWrapper<'a> { +impl std::cmp::PartialEq for IteratorWrapper<'_> { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } -impl<'a> std::cmp::Eq for IteratorWrapper<'a> {} +impl std::cmp::Eq for IteratorWrapper<'_> {} -impl<'a> std::cmp::PartialOrd for IteratorWrapper<'a> { +impl std::cmp::PartialOrd for IteratorWrapper<'_> { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl<'a> 
std::cmp::Ord for IteratorWrapper<'a> { +impl std::cmp::Ord for IteratorWrapper<'_> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { use std::cmp::Ordering; let a = self.peek_next_key_lsn_value(); diff --git a/pageserver/src/tenant/vectored_blob_io.rs b/pageserver/src/tenant/vectored_blob_io.rs index 792c769b4fc0..0c037910344c 100644 --- a/pageserver/src/tenant/vectored_blob_io.rs +++ b/pageserver/src/tenant/vectored_blob_io.rs @@ -73,7 +73,7 @@ impl<'a> BufView<'a> { } } -impl<'a> Deref for BufView<'a> { +impl Deref for BufView<'_> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -84,7 +84,7 @@ impl<'a> Deref for BufView<'a> { } } -impl<'a> AsRef<[u8]> for BufView<'a> { +impl AsRef<[u8]> for BufView<'_> { fn as_ref(&self) -> &[u8] { match self { BufView::Slice(slice) => slice, @@ -196,11 +196,6 @@ pub(crate) struct ChunkedVectoredReadBuilder { max_read_size: Option, } -/// Computes x / d rounded up. -fn div_round_up(x: usize, d: usize) -> usize { - (x + (d - 1)) / d -} - impl ChunkedVectoredReadBuilder { const CHUNK_SIZE: usize = virtual_file::get_io_buffer_alignment(); /// Start building a new vectored read. 
@@ -220,7 +215,7 @@ impl ChunkedVectoredReadBuilder { .expect("First insertion always succeeds"); let start_blk_no = start_offset as usize / Self::CHUNK_SIZE; - let end_blk_no = div_round_up(end_offset as usize, Self::CHUNK_SIZE); + let end_blk_no = (end_offset as usize).div_ceil(Self::CHUNK_SIZE); Self { start_blk_no, end_blk_no, @@ -248,7 +243,7 @@ impl ChunkedVectoredReadBuilder { pub(crate) fn extend(&mut self, start: u64, end: u64, meta: BlobMeta) -> VectoredReadExtended { tracing::trace!(start, end, "trying to extend"); let start_blk_no = start as usize / Self::CHUNK_SIZE; - let end_blk_no = div_round_up(end as usize, Self::CHUNK_SIZE); + let end_blk_no = (end as usize).div_ceil(Self::CHUNK_SIZE); let not_limited_by_max_read_size = { if let Some(max_read_size) = self.max_read_size { @@ -975,12 +970,4 @@ mod tests { round_trip_test_compressed(&blobs, true).await?; Ok(()) } - - #[test] - fn test_div_round_up() { - const CHUNK_SIZE: usize = 512; - assert_eq!(1, div_round_up(200, CHUNK_SIZE)); - assert_eq!(1, div_round_up(CHUNK_SIZE, CHUNK_SIZE)); - assert_eq!(2, div_round_up(CHUNK_SIZE + 1, CHUNK_SIZE)); - } } diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index d260116b3892..5a364b7aaf03 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -724,9 +724,9 @@ impl VirtualFileInner { *handle_guard = handle; - return Ok(FileGuard { + Ok(FileGuard { slot_guard: slot_guard.downgrade(), - }); + }) } pub fn remove(self) { diff --git a/proxy/src/auth/credentials.rs b/proxy/src/auth/credentials.rs index fa6bc4c6f5b7..465e427f7c69 100644 --- a/proxy/src/auth/credentials.rs +++ b/proxy/src/auth/credentials.rs @@ -193,7 +193,7 @@ impl<'de> serde::de::Deserialize<'de> for IpPattern { D: serde::Deserializer<'de>, { struct StrVisitor; - impl<'de> serde::de::Visitor<'de> for StrVisitor { + impl serde::de::Visitor<'_> for StrVisitor { type Value = IpPattern; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) 
-> std::fmt::Result { diff --git a/proxy/src/config.rs b/proxy/src/config.rs index 0d5ebd88f99e..3baa7ec751eb 100644 --- a/proxy/src/config.rs +++ b/proxy/src/config.rs @@ -558,7 +558,7 @@ pub struct RetryConfig { } impl RetryConfig { - /// Default options for RetryConfig. + // Default options for RetryConfig. /// Total delay for 5 retries with 200ms base delay and 2 backoff factor is about 6s. pub const CONNECT_TO_COMPUTE_DEFAULT_VALUES: &'static str = diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index b0ad0e45662b..3432ac5ff660 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -104,7 +104,7 @@ struct Options<'a> { options: &'a StartupMessageParams, } -impl<'a> serde::Serialize for Options<'a> { +impl serde::Serialize for Options<'_> { fn serialize(&self, s: S) -> Result where S: serde::Serializer, diff --git a/proxy/src/intern.rs b/proxy/src/intern.rs index 09fd9657d076..49aab917e4ee 100644 --- a/proxy/src/intern.rs +++ b/proxy/src/intern.rs @@ -55,7 +55,7 @@ impl std::ops::Deref for InternedString { impl<'de, Id: InternId> serde::de::Deserialize<'de> for InternedString { fn deserialize>(d: D) -> Result { struct Visitor(PhantomData); - impl<'de, Id: InternId> serde::de::Visitor<'de> for Visitor { + impl serde::de::Visitor<'_> for Visitor { type Value = InternedString; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index 74bc778a36c4..a7b3d45c95e6 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -76,11 +76,7 @@ ) )] // List of temporarily allowed lints to unblock beta/nightly. -#![allow( - unknown_lints, - // TODO: 1.82: Add `use` where necessary and remove from this list. 
- impl_trait_overcaptures, -)] +#![allow(unknown_lints)] use std::convert::Infallible; diff --git a/proxy/src/proxy/tests/mod.rs b/proxy/src/proxy/tests/mod.rs index 88175d73b163..3f54b0661b4e 100644 --- a/proxy/src/proxy/tests/mod.rs +++ b/proxy/src/proxy/tests/mod.rs @@ -73,11 +73,11 @@ impl ClientConfig<'_> { self, ) -> anyhow::Result< impl tokio_postgres::tls::TlsConnect< - S, - Error = impl std::fmt::Debug, - Future = impl Send, - Stream = RustlsStream, - >, + S, + Error = impl std::fmt::Debug + use, + Future = impl Send + use, + Stream = RustlsStream, + > + use, > { let mut mk = MakeRustlsConnect::new(self.config); let tls = MakeTlsConnect::::make_tls_connect(&mut mk, self.hostname)?; diff --git a/proxy/src/scram/exchange.rs b/proxy/src/scram/exchange.rs index 493295c938f6..6a13f645a5ab 100644 --- a/proxy/src/scram/exchange.rs +++ b/proxy/src/scram/exchange.rs @@ -218,16 +218,12 @@ impl sasl::Mechanism for Exchange<'_> { self.state = ExchangeState::SaltSent(sent); Ok(Step::Continue(self, msg)) } - #[allow(unreachable_patterns)] // TODO: 1.82: simply drop this match - Step::Success(x, _) => match x {}, Step::Failure(msg) => Ok(Step::Failure(msg)), } } ExchangeState::SaltSent(sent) => { match sent.transition(self.secret, &self.tls_server_end_point, input)? 
{ Step::Success(keys, msg) => Ok(Step::Success(keys, msg)), - #[allow(unreachable_patterns)] // TODO: 1.82: simply drop this match - Step::Continue(x, _) => match x {}, Step::Failure(msg) => Ok(Step::Failure(msg)), } } diff --git a/proxy/src/serverless/conn_pool.rs b/proxy/src/serverless/conn_pool.rs index b97c6565101e..8401e3a1c9a5 100644 --- a/proxy/src/serverless/conn_pool.rs +++ b/proxy/src/serverless/conn_pool.rs @@ -11,13 +11,6 @@ use tokio_postgres::tls::NoTlsStream; use tokio_postgres::{AsyncMessage, Socket}; use tokio_util::sync::CancellationToken; use tracing::{error, info, info_span, warn, Instrument}; - -use crate::context::RequestMonitoring; -use crate::control_plane::messages::MetricsAuxInfo; -use crate::metrics::Metrics; - -use super::conn_pool_lib::{Client, ClientInnerExt, ConnInfo, GlobalConnPool}; - #[cfg(test)] use { super::conn_pool_lib::GlobalConnPoolOptions, @@ -25,6 +18,11 @@ use { std::{sync::atomic, time::Duration}, }; +use super::conn_pool_lib::{Client, ClientInnerExt, ConnInfo, GlobalConnPool}; +use crate::context::RequestMonitoring; +use crate::control_plane::messages::MetricsAuxInfo; +use crate::metrics::Metrics; + #[derive(Debug, Clone)] pub(crate) struct ConnInfoWithAuth { pub(crate) conn_info: ConnInfo, diff --git a/proxy/src/serverless/conn_pool_lib.rs b/proxy/src/serverless/conn_pool_lib.rs index 6e964ce8789f..844730194d76 100644 --- a/proxy/src/serverless/conn_pool_lib.rs +++ b/proxy/src/serverless/conn_pool_lib.rs @@ -1,25 +1,23 @@ +use std::collections::HashMap; +use std::ops::Deref; +use std::sync::atomic::{self, AtomicUsize}; +use std::sync::{Arc, Weak}; +use std::time::Duration; + use dashmap::DashMap; use parking_lot::RwLock; use rand::Rng; -use std::{collections::HashMap, sync::Arc, sync::Weak, time::Duration}; -use std::{ - ops::Deref, - sync::atomic::{self, AtomicUsize}, -}; use tokio_postgres::ReadyForQueryStatus; +use tracing::{debug, info, Span}; +use super::backend::HttpConnError; +use 
super::conn_pool::ClientInnerRemote; +use crate::auth::backend::ComputeUserInfo; +use crate::context::RequestMonitoring; use crate::control_plane::messages::ColdStartInfo; use crate::metrics::{HttpEndpointPoolsGuard, Metrics}; use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS}; -use crate::{ - auth::backend::ComputeUserInfo, context::RequestMonitoring, DbName, EndpointCacheKey, RoleName, -}; - -use super::conn_pool::ClientInnerRemote; -use tracing::info; -use tracing::{debug, Span}; - -use super::backend::HttpConnError; +use crate::{DbName, EndpointCacheKey, RoleName}; #[derive(Debug, Clone)] pub(crate) struct ConnInfo { @@ -482,7 +480,7 @@ impl Client { }) } - pub(crate) fn do_drop(&mut self) -> Option { + pub(crate) fn do_drop(&mut self) -> Option> { let conn_info = self.conn_info.clone(); let client = self .inner diff --git a/proxy/src/serverless/http_conn_pool.rs b/proxy/src/serverless/http_conn_pool.rs index 79bb19328ffb..363e3979761c 100644 --- a/proxy/src/serverless/http_conn_pool.rs +++ b/proxy/src/serverless/http_conn_pool.rs @@ -10,12 +10,11 @@ use rand::Rng; use tokio::net::TcpStream; use tracing::{debug, error, info, info_span, Instrument}; +use super::conn_pool_lib::{ClientInnerExt, ConnInfo}; use crate::context::RequestMonitoring; use crate::control_plane::messages::{ColdStartInfo, MetricsAuxInfo}; use crate::metrics::{HttpEndpointPoolsGuard, Metrics}; use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS}; - -use super::conn_pool_lib::{ClientInnerExt, ConnInfo}; use crate::EndpointCacheKey; pub(crate) type Send = http2::SendRequest; diff --git a/proxy/src/serverless/json.rs b/proxy/src/serverless/json.rs index 8c56d317ccc4..569e2da5715a 100644 --- a/proxy/src/serverless/json.rs +++ b/proxy/src/serverless/json.rs @@ -155,10 +155,10 @@ fn pg_text_to_json(pg_value: Option<&str>, pg_type: &Type) -> Result Result { - _pg_array_parse(pg_array, elem_type, false).map(|(v, _)| v) + pg_array_parse_inner(pg_array, elem_type, false).map(|(v, 
_)| v) } -fn _pg_array_parse( +fn pg_array_parse_inner( pg_array: &str, elem_type: &Type, nested: bool, @@ -211,7 +211,7 @@ fn _pg_array_parse( '{' if !quote => { level += 1; if level > 1 { - let (res, off) = _pg_array_parse(&pg_array[i..], elem_type, true)?; + let (res, off) = pg_array_parse_inner(&pg_array[i..], elem_type, true)?; entries.push(res); for _ in 0..off - 1 { pg_array_chr.next(); diff --git a/proxy/src/serverless/local_conn_pool.rs b/proxy/src/serverless/local_conn_pool.rs index c4fdd00f7859..a01afd28208f 100644 --- a/proxy/src/serverless/local_conn_pool.rs +++ b/proxy/src/serverless/local_conn_pool.rs @@ -25,7 +25,6 @@ use crate::context::RequestMonitoring; use crate::control_plane::messages::{ColdStartInfo, MetricsAuxInfo}; use crate::metrics::Metrics; use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS}; - use crate::{DbName, RoleName}; struct ConnPoolEntry { @@ -530,7 +529,7 @@ impl LocalClient { }) } - fn do_drop(&mut self) -> Option { + fn do_drop(&mut self) -> Option> { let conn_info = self.conn_info.clone(); let client = self .inner diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index bb5eb390a6bc..6fbb044669ab 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -38,7 +38,6 @@ use crate::error::{ErrorKind, ReportableError, UserFacingError}; use crate::metrics::{HttpDirection, Metrics}; use crate::proxy::{run_until_cancelled, NeonOptions}; use crate::serverless::backend::HttpConnError; - use crate::usage_metrics::{MetricCounter, MetricCounterRecorder}; use crate::{DbName, RoleName}; diff --git a/proxy/src/usage_metrics.rs b/proxy/src/usage_metrics.rs index c5384c0b0ec0..f944d5aec34b 100644 --- a/proxy/src/usage_metrics.rs +++ b/proxy/src/usage_metrics.rs @@ -375,7 +375,7 @@ pub async fn task_backup( let now = Utc::now(); collect_metrics_backup_iteration( &USAGE_METRICS.backup_endpoints, - &storage, + storage.as_ref(), &hostname, prev, now, @@ 
-395,7 +395,7 @@ pub async fn task_backup( #[instrument(skip_all)] async fn collect_metrics_backup_iteration( endpoints: &DashMap, FastHasher>, - storage: &Option, + storage: Option<&GenericRemoteStorage>, hostname: &str, prev: DateTime, now: DateTime, @@ -446,7 +446,7 @@ async fn collect_metrics_backup_iteration( } async fn upload_events_chunk( - storage: &Option, + storage: Option<&GenericRemoteStorage>, chunk: EventChunk<'_, Event>, remote_path: &RemotePath, cancel: &CancellationToken, @@ -577,10 +577,10 @@ mod tests { // counter is unregistered assert!(metrics.endpoints.is_empty()); - collect_metrics_backup_iteration(&metrics.backup_endpoints, &None, "foo", now, now, 1000) + collect_metrics_backup_iteration(&metrics.backup_endpoints, None, "foo", now, now, 1000) .await; assert!(!metrics.backup_endpoints.is_empty()); - collect_metrics_backup_iteration(&metrics.backup_endpoints, &None, "foo", now, now, 1000) + collect_metrics_backup_iteration(&metrics.backup_endpoints, None, "foo", now, now, 1000) .await; // backup counter is unregistered after the second iteration assert!(metrics.backup_endpoints.is_empty()); diff --git a/proxy/src/waiters.rs b/proxy/src/waiters.rs index 7e07f6a2affe..330e73f02f92 100644 --- a/proxy/src/waiters.rs +++ b/proxy/src/waiters.rs @@ -73,7 +73,7 @@ struct DropKey<'a, T> { registry: &'a Waiters, } -impl<'a, T> Drop for DropKey<'a, T> { +impl Drop for DropKey<'_, T> { fn drop(&mut self) { self.registry.0.lock().remove(&self.key); } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 3494b0b764f7..41b9490088fe 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -122,7 +122,7 @@ impl<'a> WriteGuardSharedState<'a> { } } -impl<'a> Deref for WriteGuardSharedState<'a> { +impl Deref for WriteGuardSharedState<'_> { type Target = SharedState; fn deref(&self) -> &Self::Target { @@ -130,13 +130,13 @@ impl<'a> Deref for WriteGuardSharedState<'a> { } } -impl<'a> DerefMut for WriteGuardSharedState<'a> { 
+impl DerefMut for WriteGuardSharedState<'_> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.guard } } -impl<'a> Drop for WriteGuardSharedState<'a> { +impl Drop for WriteGuardSharedState<'_> { fn drop(&mut self) { let term_flush_lsn = TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn()));