add test_region_allocation_after_delete
jmpesp committed Apr 3, 2024
1 parent a083bb8 commit 73073a9
1 changed file with 77 additions and 0 deletions: nexus/tests/integration_tests/disks.rs
@@ -2389,6 +2389,83 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools(
assert_eq!(datasets_and_regions.len(), REGION_REDUNDANCY_THRESHOLD);
}

// Confirm that a region set can start at N, a region can be deleted, and the
// allocation CTE can bring the redundancy back to N.
#[nexus_test]
async fn test_region_allocation_after_delete(
cptestctx: &ControlPlaneTestContext,
) {
let nexus = &cptestctx.server.apictx().nexus;
let datastore = nexus.datastore();
let opctx =
OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone());

// Create three 10 GiB zpools, each with one dataset.
let _disk_test = DiskTest::new(&cptestctx).await;

// Assert the default zpool size is still 10 GiB
assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB);

// Create a disk
let client = &cptestctx.external_client;
let _project_id = create_project_and_pool(client).await;

let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await;

// Assert disk has three allocated regions
let disk_id = disk.identity.id;
let (.., db_disk) = LookupPath::new(&opctx, &datastore)
.disk_id(disk_id)
.fetch()
.await
.unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id));

let allocated_regions =
datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD);

// Delete one of the regions
let region_to_delete: &nexus_db_model::Region = &allocated_regions[0].1;
datastore
.regions_hard_delete(&opctx.log, vec![region_to_delete.id()])
.await
.unwrap();

// Assert disk's volume has one less allocated region
let allocated_regions =
datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD - 1);

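// The replacement region must match the size of the deleted region, so
// compute that region's total size from its block size, blocks per extent,
// and extent count.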
let region_total_size: ByteCount = ByteCount::try_from(
region_to_delete.block_size().to_bytes()
* region_to_delete.blocks_per_extent()
* region_to_delete.extent_count(),
)
.unwrap();

// Rerun disk region allocation; the allocation CTE should notice that the
// region set is below REGION_REDUNDANCY_THRESHOLD and allocate a replacement
// region.
datastore
.disk_region_allocate(
&opctx,
db_disk.volume_id,
&params::DiskSource::Blank {
block_size: params::BlockSize::try_from(
region_to_delete.block_size().to_bytes() as u32,
)
.unwrap(),
},
region_total_size,
&RegionAllocationStrategy::Random { seed: None },
)
.await
.unwrap();

// Assert redundancy was restored
let allocated_regions =
datastore.get_allocated_regions(db_disk.volume_id).await.unwrap();
assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD);
}

async fn disk_get(client: &ClientTestContext, disk_url: &str) -> Disk {
NexusRequest::object_get(client, disk_url)
.authn_as(AuthnMode::PrivilegedUser)
