diff --git a/qa/workunits/ceph-disk/ceph-disk-test.py b/qa/workunits/ceph-disk/ceph-disk-test.py
index 68847b16f27a4..03266ee506660 100644
--- a/qa/workunits/ceph-disk/ceph-disk-test.py
+++ b/qa/workunits/ceph-disk/ceph-disk-test.py
@@ -44,6 +44,7 @@
 
 LOG = logging.getLogger('CephDisk')
 
+
 class CephDisk:
 
     def __init__(self):
@@ -64,10 +65,12 @@ def sh(command):
         return output.strip()
 
     def unused_disks(self, pattern='[vs]d.'):
-        names = filter(lambda x: re.match(pattern, x), os.listdir("/sys/block"))
+        names = filter(
+            lambda x: re.match(pattern, x), os.listdir("/sys/block"))
         if not names:
             return []
-        disks = json.loads(self.sh("ceph-disk list --format json " + " ".join(names)))
+        disks = json.loads(
+            self.sh("ceph-disk list --format json " + " ".join(names)))
         unused = []
         for disk in disks:
             if 'partitions' not in disk:
@@ -106,9 +109,11 @@ def get_journal_partition(self, uuid):
             for partition in disk['partitions']:
                 if partition['path'] == journal_dev:
                     if 'journal_for' in partition:
-                        assert partition['journal_for'] == data_partition['path']
+                        assert partition[
+                            'journal_for'] == data_partition['path']
                     return partition
-        raise Exception("journal for uuid = " + uuid + " not found in " + str(disks))
+        raise Exception(
+            "journal for uuid = " + uuid + " not found in " + str(disks))
 
     def destroy_osd(self, uuid):
         id = self.sh("ceph osd create " + uuid)
@@ -160,6 +165,7 @@ def check_osd_status(self, uuid, have_journal=False):
             journal_partition = self.get_journal_partition(uuid)
             assert journal_partition
 
+
 class TestCephDisk(object):
 
     def setup_class(self):
@@ -179,7 +185,7 @@ def setup(self):
 
     def test_deactivate_reactivate_osd(self):
         c = CephDisk()
-        have_journal=True
+        have_journal = True
         disk = c.unused_disks()[0]
         osd_uuid = str(uuid.uuid1())
         c.sh("ceph-disk zap " + disk)
@@ -258,7 +264,6 @@ def activate_reactivate_dmcrypt(self, type):
         c.check_osd_status(osd_uuid, have_journal)
         c.destroy_osd(osd_uuid)
 
-
     def test_activate_dmcrypt_plain(self):
         c = CephDisk()
         c.conf['global']['osd dmcrypt type'] = 'plain'
@@ -321,16 +326,59 @@ def test_activate_with_journal(self):
         c.helper("pool_read_write")
         c.destroy_osd(osd_uuid)
 
+    def test_activate_with_journal_dev_is_symlink(self):
+        c = CephDisk()
+        disk = c.unused_disks()[0]
+        osd_uuid = str(uuid.uuid1())
+        tempdir = tempfile.mkdtemp()
+        symlink = os.path.join(tempdir, 'osd')
+        os.symlink(disk, symlink)
+        c.sh("ceph-disk zap " + symlink)
+        c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+             " " + symlink)
+        c.wait_for_osd_up(osd_uuid)
+        device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
+        assert len(device['partitions']) == 2
+        data_partition = c.get_osd_partition(osd_uuid)
+        assert data_partition['type'] == 'data'
+        assert data_partition['state'] == 'active'
+        journal_partition = c.get_journal_partition(osd_uuid)
+        assert journal_partition
+        c.helper("pool_read_write")
+        c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk zap " + symlink)
+        os.unlink(symlink)
+        os.rmdir(tempdir)
+
     def test_activate_separated_journal(self):
         c = CephDisk()
         disks = c.unused_disks()
         data_disk = disks[0]
         journal_disk = disks[1]
         osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
-        c.helper("pool_read_write 1") # 1 == pool size
+        c.helper("pool_read_write 1")  # 1 == pool size
         c.destroy_osd(osd_uuid)
         c.sh("ceph-disk zap " + data_disk + " " + journal_disk)
 
+    def test_activate_separated_journal_dev_is_symlink(self):
+        c = CephDisk()
+        disks = c.unused_disks()
+        data_disk = disks[0]
+        journal_disk = disks[1]
+        tempdir = tempfile.mkdtemp()
+        data_symlink = os.path.join(tempdir, 'osd')
+        os.symlink(data_disk, data_symlink)
+        journal_symlink = os.path.join(tempdir, 'journal')
+        os.symlink(journal_disk, journal_symlink)
+        osd_uuid = self.activate_separated_journal(
+            data_symlink, journal_symlink)
+        c.helper("pool_read_write 1")  # 1 == pool size
+        c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk zap " + data_symlink + " " + journal_symlink)
+        os.unlink(data_symlink)
+        os.unlink(journal_symlink)
+        os.rmdir(tempdir)
+
     def activate_separated_journal(self, data_disk, journal_disk):
         c = CephDisk()
         have_journal = True
@@ -338,7 +386,8 @@ def activate_separated_journal(self, data_disk, journal_disk):
         c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_disk)
         c.wait_for_osd_up(osd_uuid)
-        device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
+        device = json.loads(
+            c.sh("ceph-disk list --format json " + data_disk))[0]
         assert len(device['partitions']) == 1
         c.check_osd_status(osd_uuid, have_journal)
         return osd_uuid
@@ -357,15 +406,17 @@ def test_activate_two_separated_journal(self):
         other_data_disk = disks[1]
         journal_disk = disks[2]
         osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
-        other_osd_uuid = self.activate_separated_journal(other_data_disk, journal_disk)
+        other_osd_uuid = self.activate_separated_journal(
+            other_data_disk, journal_disk)
         #
         # read/write can only succeed if the two osds are up because
         # the pool needs two OSD
         #
-        c.helper("pool_read_write 2") # 2 == pool size
+        c.helper("pool_read_write 2")  # 2 == pool size
         c.destroy_osd(osd_uuid)
         c.destroy_osd(other_osd_uuid)
-        c.sh("ceph-disk zap " + data_disk + " " + journal_disk + " " + other_data_disk)
+        c.sh("ceph-disk zap " + data_disk + " " +
+             journal_disk + " " + other_data_disk)
     #
     # Create an OSD and reuse an existing journal partition
    #
@@ -389,9 +440,10 @@ def test_activate_reuse_journal(self):
         #
         c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_path)
-        c.helper("pool_read_write 1") # 1 == pool size
+        c.helper("pool_read_write 1")  # 1 == pool size
         c.wait_for_osd_up(osd_uuid)
-        device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
+        device = json.loads(
+            c.sh("ceph-disk list --format json " + data_disk))[0]
         assert len(device['partitions']) == 1
         c.check_osd_status(osd_uuid)
         journal_partition = c.get_journal_partition(osd_uuid)
@@ -405,7 +457,8 @@ def test_activate_reuse_journal(self):
     def test_activate_multipath(self):
         c = CephDisk()
         if c.sh("lsb_release -si") != 'CentOS':
-            pytest.skip("see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
+            pytest.skip(
+                "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
         c.ensure_sd()
         #
         # Figure out the name of the multipath device
@@ -413,7 +466,8 @@ def test_activate_multipath(self):
         disk = c.unused_disks('sd.')[0]
         c.sh("mpathconf --enable || true")
         c.sh("multipath " + disk)
-        holders = os.listdir("/sys/block/" + os.path.basename(disk) + "/holders")
+        holders = os.listdir(
+            "/sys/block/" + os.path.basename(disk) + "/holders")
         assert 1 == len(holders)
         name = open("/sys/block/" + holders[0] + "/dm/name").read()
         multipath = "/dev/mapper/" + name
@@ -425,7 +479,8 @@ def test_activate_multipath(self):
         c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
             " " + multipath)
         c.wait_for_osd_up(osd_uuid)
-        device = json.loads(c.sh("ceph-disk list --format json " + multipath))[0]
+        device = json.loads(
+            c.sh("ceph-disk list --format json " + multipath))[0]
         assert len(device['partitions']) == 2
         data_partition = c.get_osd_partition(osd_uuid)
         assert data_partition['type'] == 'data'
@@ -438,6 +493,7 @@ def test_activate_multipath(self):
         c.sh("multipath -F")
         c.unload_scsi_debug()
 
+
 class CephDiskTest(CephDisk):
 
     def main(self, argv):
diff --git a/src/ceph-disk b/src/ceph-disk
index 39a235683023c..7f4a00988f314 100755
--- a/src/ceph-disk
+++ b/src/ceph-disk
@@ -1220,9 +1220,9 @@ def get_free_partition_index(dev):
             'BYT;' not in lines):
         raise Error('parted output expected to contain one of ' +
                     'CHH; CYL; or BYT; : ' + lines)
-    if dev not in lines:
+    if os.path.realpath(dev) not in lines:
        raise Error('parted output expected to contain ' + dev + ': ' + lines)
-    _, partitions = lines.split(dev)
+    _, partitions = lines.split(os.path.realpath(dev))
     partition_numbers = extract_parted_partition_numbers(partitions)
     if partition_numbers:
         return max(partition_numbers) + 1
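Note on the src/ceph-disk hunk: the parted output parsed by get_free_partition_index contains the resolved device path rather than the symlink the caller passed, so when the new *_dev_is_symlink tests hand ceph-disk a symlink the literal dev string is never found and the function raises; dereferencing the path first makes the match work. A minimal Python sketch of that idea, not code from the patch (the helper name and sample output are hypothetical):

    import os

    def parted_output_mentions(dev, parted_output):
        # parted reports the resolved path (e.g. /dev/sdb), so dereference
        # any symlink before searching for the device in its output.
        return os.path.realpath(dev) in parted_output

    # With a symlink /tmp/osd -> /dev/sdb, parted_output_mentions('/tmp/osd',
    # 'BYT;\n/dev/sdb:...') is True only because realpath resolves the link.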