Skip to content

Commit

Permalink
Merge pull request ceph#6219 from joejulian/master_fix_13438_parted_must_contain_dev
Browse files Browse the repository at this point in the history

Compare parted output with the dereferenced path

Reviewed-by: Loic Dachary <[email protected]>
  • Loading branch information
Loic Dachary committed Nov 22, 2015
2 parents 09cd89e + 9cbe132 commit 90a37c7
Show file tree
Hide file tree
Showing 2 changed files with 74 additions and 18 deletions.
88 changes: 72 additions & 16 deletions qa/workunits/ceph-disk/ceph-disk-test.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@

LOG = logging.getLogger('CephDisk')


class CephDisk:

def __init__(self):
Expand All @@ -64,10 +65,12 @@ def sh(command):
return output.strip()

def unused_disks(self, pattern='[vs]d.'):
names = filter(lambda x: re.match(pattern, x), os.listdir("/sys/block"))
names = filter(
lambda x: re.match(pattern, x), os.listdir("/sys/block"))
if not names:
return []
disks = json.loads(self.sh("ceph-disk list --format json " + " ".join(names)))
disks = json.loads(
self.sh("ceph-disk list --format json " + " ".join(names)))
unused = []
for disk in disks:
if 'partitions' not in disk:
Expand Down Expand Up @@ -106,9 +109,11 @@ def get_journal_partition(self, uuid):
for partition in disk['partitions']:
if partition['path'] == journal_dev:
if 'journal_for' in partition:
assert partition['journal_for'] == data_partition['path']
assert partition[
'journal_for'] == data_partition['path']
return partition
raise Exception("journal for uuid = " + uuid + " not found in " + str(disks))
raise Exception(
"journal for uuid = " + uuid + " not found in " + str(disks))

def destroy_osd(self, uuid):
id = self.sh("ceph osd create " + uuid)
Expand Down Expand Up @@ -160,6 +165,7 @@ def check_osd_status(self, uuid, have_journal=False):
journal_partition = self.get_journal_partition(uuid)
assert journal_partition


class TestCephDisk(object):

def setup_class(self):
Expand All @@ -179,7 +185,7 @@ def setup(self):

def test_deactivate_reactivate_osd(self):
c = CephDisk()
have_journal=True
have_journal = True
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
Expand Down Expand Up @@ -258,7 +264,6 @@ def activate_reactivate_dmcrypt(self, type):
c.check_osd_status(osd_uuid, have_journal)
c.destroy_osd(osd_uuid)


def test_activate_dmcrypt_plain(self):
c = CephDisk()
c.conf['global']['osd dmcrypt type'] = 'plain'
Expand Down Expand Up @@ -321,24 +326,68 @@ def test_activate_with_journal(self):
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)

def test_activate_with_journal_dev_is_symlink(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
tempdir = tempfile.mkdtemp()
symlink = os.path.join(tempdir, 'osd')
os.symlink(disk, symlink)
c.sh("ceph-disk zap " + symlink)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + symlink)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
assert len(device['partitions']) == 2
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
assert data_partition['state'] == 'active'
journal_partition = c.get_journal_partition(osd_uuid)
assert journal_partition
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + symlink)
os.unlink(symlink)
os.rmdir(tempdir)

def test_activate_separated_journal(self):
c = CephDisk()
disks = c.unused_disks()
data_disk = disks[0]
journal_disk = disks[1]
osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
c.helper("pool_read_write 1") # 1 == pool size
c.helper("pool_read_write 1") # 1 == pool size
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_disk + " " + journal_disk)

def test_activate_separated_journal_dev_is_symlink(self):
c = CephDisk()
disks = c.unused_disks()
data_disk = disks[0]
journal_disk = disks[1]
tempdir = tempfile.mkdtemp()
data_symlink = os.path.join(tempdir, 'osd')
os.symlink(data_disk, data_symlink)
journal_symlink = os.path.join(tempdir, 'journal')
os.symlink(journal_disk, journal_symlink)
osd_uuid = self.activate_separated_journal(
data_symlink, journal_symlink)
c.helper("pool_read_write 1") # 1 == pool size
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_symlink + " " + journal_symlink)
os.unlink(data_symlink)
os.unlink(journal_symlink)
os.rmdir(tempdir)

def activate_separated_journal(self, data_disk, journal_disk):
c = CephDisk()
have_journal = True
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
device = json.loads(
c.sh("ceph-disk list --format json " + data_disk))[0]
assert len(device['partitions']) == 1
c.check_osd_status(osd_uuid, have_journal)
return osd_uuid
Expand All @@ -357,15 +406,17 @@ def test_activate_two_separated_journal(self):
other_data_disk = disks[1]
journal_disk = disks[2]
osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
other_osd_uuid = self.activate_separated_journal(other_data_disk, journal_disk)
other_osd_uuid = self.activate_separated_journal(
other_data_disk, journal_disk)
#
# read/write can only succeed if the two osds are up because
# the pool needs two OSD
#
c.helper("pool_read_write 2") # 2 == pool size
c.helper("pool_read_write 2") # 2 == pool size
c.destroy_osd(osd_uuid)
c.destroy_osd(other_osd_uuid)
c.sh("ceph-disk zap " + data_disk + " " + journal_disk + " " + other_data_disk)
c.sh("ceph-disk zap " + data_disk + " " +
journal_disk + " " + other_data_disk)

#
# Create an OSD and reuse an existing journal partition
Expand All @@ -389,9 +440,10 @@ def test_activate_reuse_journal(self):
#
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_path)
c.helper("pool_read_write 1") # 1 == pool size
c.helper("pool_read_write 1") # 1 == pool size
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
device = json.loads(
c.sh("ceph-disk list --format json " + data_disk))[0]
assert len(device['partitions']) == 1
c.check_osd_status(osd_uuid)
journal_partition = c.get_journal_partition(osd_uuid)
Expand All @@ -405,15 +457,17 @@ def test_activate_reuse_journal(self):
def test_activate_multipath(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'CentOS':
pytest.skip("see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
pytest.skip(
"see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
c.ensure_sd()
#
# Figure out the name of the multipath device
#
disk = c.unused_disks('sd.')[0]
c.sh("mpathconf --enable || true")
c.sh("multipath " + disk)
holders = os.listdir("/sys/block/" + os.path.basename(disk) + "/holders")
holders = os.listdir(
"/sys/block/" + os.path.basename(disk) + "/holders")
assert 1 == len(holders)
name = open("/sys/block/" + holders[0] + "/dm/name").read()
multipath = "/dev/mapper/" + name
Expand All @@ -425,7 +479,8 @@ def test_activate_multipath(self):
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + multipath)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + multipath))[0]
device = json.loads(
c.sh("ceph-disk list --format json " + multipath))[0]
assert len(device['partitions']) == 2
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
Expand All @@ -438,6 +493,7 @@ def test_activate_multipath(self):
c.sh("multipath -F")
c.unload_scsi_debug()


class CephDiskTest(CephDisk):

def main(self, argv):
Expand Down
4 changes: 2 additions & 2 deletions src/ceph-disk
Original file line number Diff line number Diff line change
Expand Up @@ -1220,9 +1220,9 @@ def get_free_partition_index(dev):
'BYT;' not in lines):
raise Error('parted output expected to contain one of ' +
'CHH; CYL; or BYT; : ' + lines)
if dev not in lines:
if os.path.realpath(dev) not in lines:
raise Error('parted output expected to contain ' + dev + ': ' + lines)
_, partitions = lines.split(dev)
_, partitions = lines.split(os.path.realpath(dev))
partition_numbers = extract_parted_partition_numbers(partitions)
if partition_numbers:
return max(partition_numbers) + 1
Expand Down

0 comments on commit 90a37c7

Please sign in to comment.