Skip to content

Commit

Permalink
pageserver: make RemoteTimelineClient::schedule_index_upload infallible (#10155)
Browse files Browse the repository at this point in the history

Remove an unnecessary `Result` and address a `FIXME`.
  • Loading branch information
erikgrinaker authored Dec 16, 2024
1 parent 6565fd4 commit 3d30a7a
Showing 1 changed file with 17 additions and 22 deletions.
39 changes: 17 additions & 22 deletions pageserver/src/tenant/remote_timeline_client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -749,7 +749,7 @@ impl RemoteTimelineClient {
// ahead of what's _actually_ on the remote during index upload.
upload_queue.dirty.metadata = metadata.clone();

self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);

Ok(())
}
Expand All @@ -770,7 +770,7 @@ impl RemoteTimelineClient {

upload_queue.dirty.metadata.apply(update);

self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);

Ok(())
}
Expand Down Expand Up @@ -809,7 +809,7 @@ impl RemoteTimelineClient {
if let Some(archived_at_set) = need_upload_scheduled {
let intended_archived_at = archived_at_set.then(|| Utc::now().naive_utc());
upload_queue.dirty.archived_at = intended_archived_at;
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
}

let need_wait = need_change(&upload_queue.clean.0.archived_at, state).is_some();
Expand All @@ -824,7 +824,7 @@ impl RemoteTimelineClient {
let mut guard = self.upload_queue.lock().unwrap();
let upload_queue = guard.initialized_mut()?;
upload_queue.dirty.import_pgdata = state;
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
Ok(())
}

Expand All @@ -843,17 +843,14 @@ impl RemoteTimelineClient {
let upload_queue = guard.initialized_mut()?;

if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 {
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
}

Ok(())
}

/// Launch an index-file upload operation in the background (internal function)
fn schedule_index_upload(
self: &Arc<Self>,
upload_queue: &mut UploadQueueInitialized,
) -> Result<(), NotInitialized> {
fn schedule_index_upload(self: &Arc<Self>, upload_queue: &mut UploadQueueInitialized) {
let disk_consistent_lsn = upload_queue.dirty.metadata.disk_consistent_lsn();
// fix up the duplicated field
upload_queue.dirty.disk_consistent_lsn = disk_consistent_lsn;
Expand All @@ -880,7 +877,6 @@ impl RemoteTimelineClient {

// Launch the task immediately, if possible
self.launch_queued_tasks(upload_queue);
Ok(())
}

/// Reparent this timeline to a new parent.
Expand Down Expand Up @@ -909,7 +905,7 @@ impl RemoteTimelineClient {
upload_queue.dirty.metadata.reparent(new_parent);
upload_queue.dirty.lineage.record_previous_ancestor(&prev);

self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);

Some(self.schedule_barrier0(upload_queue))
}
Expand Down Expand Up @@ -948,7 +944,7 @@ impl RemoteTimelineClient {
assert!(prev.is_none(), "copied layer existed already {layer}");
}

self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);

Some(self.schedule_barrier0(upload_queue))
}
Expand Down Expand Up @@ -1004,7 +1000,7 @@ impl RemoteTimelineClient {
upload_queue.dirty.gc_blocking = current
.map(|x| x.with_reason(reason))
.or_else(|| Some(index::GcBlocking::started_now_for(reason)));
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
Some(self.schedule_barrier0(upload_queue))
}
}
Expand Down Expand Up @@ -1057,8 +1053,7 @@ impl RemoteTimelineClient {
upload_queue.dirty.gc_blocking =
current.as_ref().and_then(|x| x.without_reason(reason));
assert!(wanted(upload_queue.dirty.gc_blocking.as_ref()));
// FIXME: bogus ?
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
Some(self.schedule_barrier0(upload_queue))
}
}
Expand Down Expand Up @@ -1125,8 +1120,8 @@ impl RemoteTimelineClient {
let mut guard = self.upload_queue.lock().unwrap();
let upload_queue = guard.initialized_mut()?;

let with_metadata = self
.schedule_unlinking_of_layers_from_index_part0(upload_queue, names.iter().cloned())?;
let with_metadata =
self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names.iter().cloned());

self.schedule_deletion_of_unlinked0(upload_queue, with_metadata);

Expand All @@ -1153,7 +1148,7 @@ impl RemoteTimelineClient {

let names = gc_layers.iter().map(|x| x.layer_desc().layer_name());

self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names)?;
self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names);

self.launch_queued_tasks(upload_queue);

Expand All @@ -1166,7 +1161,7 @@ impl RemoteTimelineClient {
self: &Arc<Self>,
upload_queue: &mut UploadQueueInitialized,
names: I,
) -> Result<Vec<(LayerName, LayerFileMetadata)>, NotInitialized>
) -> Vec<(LayerName, LayerFileMetadata)>
where
I: IntoIterator<Item = LayerName>,
{
Expand Down Expand Up @@ -1208,10 +1203,10 @@ impl RemoteTimelineClient {
// index_part update, because that needs to be uploaded before we can actually delete the
// files.
if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 {
self.schedule_index_upload(upload_queue)?;
self.schedule_index_upload(upload_queue);
}

Ok(with_metadata)
with_metadata
}

/// Schedules deletion for layer files which have previously been unlinked from the
Expand Down Expand Up @@ -1302,7 +1297,7 @@ impl RemoteTimelineClient {

let names = compacted_from.iter().map(|x| x.layer_desc().layer_name());

self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names)?;
self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names);
self.launch_queued_tasks(upload_queue);

Ok(())
Expand Down

1 comment on commit 3d30a7a

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

7095 tests run: 6796 passed, 2 failed, 297 skipped (full report)


Failures on Postgres 17

# Run all failed tests locally:
scripts/pytest -vv -n $(nproc) -k "test_pageserver_small_inmemory_layers[debug-pg17-False] or test_pageserver_small_inmemory_layers[debug-pg17-True]"
Flaky tests (5)

Postgres 17

Test coverage report is not available

The comment gets automatically updated with the latest test results
3d30a7a at 2024-12-16T16:51:14.393Z :recycle:

Please sign in to comment.