From b7d4f9625bfe642f739fa8a098d9364469f5ce3b Mon Sep 17 00:00:00 2001 From: Leonid Usov Date: Sun, 18 Feb 2024 12:09:53 +0200 Subject: [PATCH] mds/quiesce: use LocalLock for iquiesce and a caps mask for a lock-free quiesce --- qa/tasks/cephfs/test_quiesce.py | 23 +----- src/include/ceph_fs.h | 29 ++++---- src/mds/CInode.cc | 25 +++++-- src/mds/CInode.h | 4 +- src/mds/Locker.cc | 107 +++++++++++++++++++-------- src/mds/Locker.h | 2 +- src/mds/MDCache.cc | 125 ++++++++++++++++---------------- src/mds/Mutation.h | 1 + src/mds/SimpleLock.h | 2 +- 9 files changed, 183 insertions(+), 135 deletions(-) diff --git a/qa/tasks/cephfs/test_quiesce.py b/qa/tasks/cephfs/test_quiesce.py index 91a5812b9307c3..c981c67c3a638d 100644 --- a/qa/tasks/cephfs/test_quiesce.py +++ b/qa/tasks/cephfs/test_quiesce.py @@ -188,14 +188,6 @@ def _verify_quiesce(self, rank=0, root=None, splitauth=False): visited = set() locks_expected = set([ "iquiesce", - "isnap", - "ipolicy", - "ifile", - "inest", - "idft", - "iauth", - "ilink", - "ixattr", ]) for inode in cache: ino = inode['ino'] @@ -212,12 +204,8 @@ def _verify_quiesce(self, rank=0, root=None, splitauth=False): for lock in op['type_data']['locks']: lock_type = lock['lock']['type'] if lock_type == "iquiesce": - if ino == root_ino: - self.assertEqual(lock['flags'], 1) - self.assertEqual(lock['lock']['state'], 'sync') - else: - self.assertEqual(lock['flags'], 4) - self.assertEqual(lock['lock']['state'], 'xlock') + self.assertEqual(lock['flags'], 4) + self.assertEqual(lock['lock']['state'], 'xlock') elif lock_type == "isnap": self.assertEqual(lock['flags'], 1) self.assertEqual(lock['lock']['state'][:4], 'sync') @@ -228,14 +216,9 @@ def _verify_quiesce(self, rank=0, root=None, splitauth=False): else: self.assertEqual(lock['flags'], 1) self.assertEqual(lock['lock']['state'][:4], 'sync') - elif lock_type in ("ipolicy", "inest", "idft", "iauth", "ilink", "ixattr"): + elif lock_type in ("iauth", "ilink", "ixattr"): self.assertEqual(lock['flags'], 1) 
self.assertEqual(lock['lock']['state'][:4], 'sync') - elif lock_type == "iversion": - # only regular files acquire xlock (on ifile) - self.assertEqual((mode & S_IFMT), S_IFREG) - self.assertEqual(lock['flags'], 2) - self.assertEqual(lock['lock']['state'][:4], 'lock') else: # no iflock self.assertFalse(lock_type.startswith("i")) diff --git a/src/include/ceph_fs.h b/src/include/ceph_fs.h index 49b45f26eb35b7..993236355de223 100644 --- a/src/include/ceph_fs.h +++ b/src/include/ceph_fs.h @@ -333,22 +333,23 @@ extern const char *ceph_mds_state_name(int s); */ #define CEPH_LOCK_DN (1 << 0) #define CEPH_LOCK_DVERSION (1 << 1) -#define CEPH_LOCK_IQUIESCE (1 << 4) /* mds internal */ -#define CEPH_LOCK_ISNAP (1 << 5) /* snapshot lock. MDS internal */ -#define CEPH_LOCK_IPOLICY (1 << 6) /* policy lock on dirs. MDS internal */ -#define CEPH_LOCK_IFILE (1 << 7) -#define CEPH_LOCK_INEST (1 << 8) /* mds internal */ -#define CEPH_LOCK_IDFT (1 << 9) /* dir frag tree */ -#define CEPH_LOCK_IAUTH (1 << 10) -#define CEPH_LOCK_ILINK (1 << 11) -#define CEPH_LOCK_IXATTR (1 << 12) -#define CEPH_LOCK_IFLOCK (1 << 13) /* advisory file locks */ -#define CEPH_LOCK_IVERSION (1 << 14) /* mds internal */ - -#define CEPH_LOCK_IFIRST CEPH_LOCK_IQUIESCE +#define CEPH_LOCK_ISNAP (1 << 4) /* snapshot lock. MDS internal */ +#define CEPH_LOCK_IPOLICY (1 << 5) /* policy lock on dirs. 
MDS internal */ +#define CEPH_LOCK_IFILE (1 << 6) +#define CEPH_LOCK_INEST (1 << 7) /* mds internal */ +#define CEPH_LOCK_IDFT (1 << 8) /* dir frag tree */ +#define CEPH_LOCK_IAUTH (1 << 9) +#define CEPH_LOCK_ILINK (1 << 10) +#define CEPH_LOCK_IXATTR (1 << 11) +#define CEPH_LOCK_IFLOCK (1 << 12) /* advisory file locks */ +#define CEPH_LOCK_IVERSION (1 << 13) /* mds internal */ +#define CEPH_LOCK_IQUIESCE (1 << 14) /* mds internal */ + +#define CEPH_LOCK_IFIRST CEPH_LOCK_ISNAP #define CEPH_LOCK_ILAST CEPH_LOCK_IVERSION -static inline bool is_inode_lock(int l) { +static inline bool is_inode_lock(int l) +{ return (CEPH_LOCK_IFIRST <= l && l <= CEPH_LOCK_ILAST); } diff --git a/src/mds/CInode.cc b/src/mds/CInode.cc index 047a2dbfda6f44..8edf33c89596a2 100644 --- a/src/mds/CInode.cc +++ b/src/mds/CInode.cc @@ -2882,6 +2882,7 @@ bool CInode::freeze_inode(int auth_pin_allowance) const static int lock_types[] = { CEPH_LOCK_IVERSION, CEPH_LOCK_IFILE, CEPH_LOCK_IAUTH, CEPH_LOCK_ILINK, CEPH_LOCK_IDFT, CEPH_LOCK_IXATTR, CEPH_LOCK_ISNAP, CEPH_LOCK_INEST, CEPH_LOCK_IFLOCK, CEPH_LOCK_IPOLICY, 0 + //TODO: add iquiesce here? 
}; for (int i = 0; lock_types[i]; ++i) { auto lock = get_lock(lock_types[i]); @@ -3532,6 +3533,15 @@ void CInode::export_client_caps(map& cl) } } +int CInode::get_caps_quiesce_mask() const +{ + if (quiescelock.can_wrlock()) { + return CEPH_CAP_ANY; + } else { + return CEPH_CAP_ANY_SHARED | CEPH_CAP_PIN /*?*/; + } +} + // caps allowed int CInode::get_caps_liked() const { @@ -3558,30 +3568,33 @@ int CInode::get_caps_allowed_ever() const int CInode::get_caps_allowed_by_type(int type) const { - return + return get_caps_quiesce_mask() & ( CEPH_CAP_PIN | (filelock.gcaps_allowed(type) << filelock.get_cap_shift()) | (authlock.gcaps_allowed(type) << authlock.get_cap_shift()) | (xattrlock.gcaps_allowed(type) << xattrlock.get_cap_shift()) | - (linklock.gcaps_allowed(type) << linklock.get_cap_shift()); + (linklock.gcaps_allowed(type) << linklock.get_cap_shift()) + ); } int CInode::get_caps_careful() const { - return + return get_caps_quiesce_mask() & ( (filelock.gcaps_careful() << filelock.get_cap_shift()) | (authlock.gcaps_careful() << authlock.get_cap_shift()) | (xattrlock.gcaps_careful() << xattrlock.get_cap_shift()) | - (linklock.gcaps_careful() << linklock.get_cap_shift()); + (linklock.gcaps_careful() << linklock.get_cap_shift()) + ); } int CInode::get_xlocker_mask(client_t client) const { - return + return get_caps_quiesce_mask() & ( (filelock.gcaps_xlocker_mask(client) << filelock.get_cap_shift()) | (authlock.gcaps_xlocker_mask(client) << authlock.get_cap_shift()) | (xattrlock.gcaps_xlocker_mask(client) << xattrlock.get_cap_shift()) | - (linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift()); + (linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift()) + ); } int CInode::get_caps_allowed_for_client(Session *session, Capability *cap, diff --git a/src/mds/CInode.h b/src/mds/CInode.h index 7589597a2412f8..d043aa95024d41 100644 --- a/src/mds/CInode.h +++ b/src/mds/CInode.h @@ -1105,7 +1105,7 @@ class CInode : public MDSCacheObject, public InodeStoreBase, 
public Counter quiesce_op_flags; + // xlocks bool need_quiescelock = !skip_quiesce; for (size_t i = 0; i < lov.size(); ++i) { @@ -255,7 +259,16 @@ bool Locker::acquire_locks(const MDRequestRef& mdr, SimpleLock *lock = p.lock; MDSCacheObject *object = lock->get_parent(); auto t = lock->get_type(); - + + if (t == CEPH_LOCK_IQUIESCE) { + quiesce_op_flags[object] = p.flags; + need_quiescelock = false; + // remove this lock from the vector, + // we'll add it back below + lov.erase(lov.begin() + i); + --i; + continue; + } if (p.is_xlock()) { if ((lock->get_type() == CEPH_LOCK_ISNAP || lock->get_type() == CEPH_LOCK_IPOLICY) && @@ -316,10 +329,10 @@ bool Locker::acquire_locks(const MDRequestRef& mdr, case CEPH_LOCK_IQUIESCE: break; default: - CInode *in = static_cast(object); - if (need_quiescelock) { - need_quiescelock = false; - lov.add_rdlock(&in->quiescelock, i + 1); + CInode* in = static_cast(object); + if (need_quiescelock && (lock->get_cap_shift() > 0)) { + dout(15) << "need shared quiesce lock for " << p << " on " << SimpleLock::get_lock_type_name(t) << " of " << in << dendl; + quiesce_op_flags[in] |= LockOp::WRLOCK; } if (!in->is_auth()) continue; @@ -347,19 +360,17 @@ bool Locker::acquire_locks(const MDRequestRef& mdr, << " in case we need to request a scatter" << dendl; mustpin.insert(object); } - if (need_quiescelock && is_inode_lock(t) && t != CEPH_LOCK_IQUIESCE) { - CInode *in = static_cast(object); - lov.add_rdlock(&in->quiescelock, i + 1); - need_quiescelock = false; + if (need_quiescelock && (lock->get_cap_shift() > 0)) { + dout(15) << "need shared quiesce lock for " << p << " on " << SimpleLock::get_lock_type_name(t) << " of " << object << dendl; + quiesce_op_flags[object] |= LockOp::WRLOCK; } } else if (p.is_remote_wrlock()) { dout(20) << " must remote_wrlock on mds." 
<< p.wrlock_target << " " << *lock << " " << *object << dendl; mustpin.insert(object); - if (need_quiescelock && is_inode_lock(t) && t != CEPH_LOCK_IQUIESCE) { - CInode *in = static_cast(object); - lov.add_rdlock(&in->quiescelock, i + 1); - need_quiescelock = false; + if (need_quiescelock && (lock->get_cap_shift() > 0)) { + dout(15) << "need shared quiesce lock for " << p << " on " << SimpleLock::get_lock_type_name(t) << " of " << object << dendl; + quiesce_op_flags[object] |= LockOp::WRLOCK; } } else if (p.is_rdlock()) { dout(20) << " must rdlock " << *lock << " " << *object << dendl; @@ -385,26 +396,46 @@ bool Locker::acquire_locks(const MDRequestRef& mdr, * xlocked, then all locks are dropped (s.f. * Locker::handle_quiesce_failure). So adding the quiescelock can never * contribute to deadlock. + * + * UPD - we can further simplify and say that no read operation + * should be taking the quiesce lock. We'll have to handle it a bit + * differently when we decide to implement an exclusive quiesce */ - if (need_quiescelock && !mdr->is_rdlocked(lock)) { - /* Can we get the lock without waiting? */ - if (!lock->can_rdlock(client)) { - /* To prevent deadlock where an op holds a parent snaplock - * (Locker::try_rdlock_snap_layout), add quiescelock. - */ - CInode *in = static_cast(object); - lov.add_rdlock(&in->quiescelock, i + 1); - need_quiescelock = false; - } - } + // if (need_quiescelock && !mdr->is_rdlocked(lock) && (lock->get_cap_shift() > 0)) { + // /* Can we get the lock without waiting? */ + // if (!lock->can_rdlock(client)) { + // /* To prevent deadlock where an op holds a parent snaplock + // * (Locker::try_rdlock_snap_layout), add quiescelock. 
+ // */ + // quiesce_op_flags[object] |= LockOp::WRLOCK; + // } + // } } else { ceph_assert(0 == "locker unknown lock operation"); } } lov.sort_and_merge(); - + + bool injected_quiesce_lock = false; + for (auto &[co, flags]: quiesce_op_flags){ + CInode *in = static_cast(co); + if (flags & LockOp::XLOCK) { + dout(15) << "injecting an exclusive quiesce lock for inode " << in << " under mdr: " << mdr << dendl; + lov.add_xlock(&in->quiescelock, 0); + injected_quiesce_lock = true; + } else if (flags & LockOp::WRLOCK) { + dout(15) << "injecting a shared quiesce lock for inode " << in << " under mdr: " << mdr << dendl; + lov.add_wrlock(&in->quiescelock, 0); + injected_quiesce_lock = true; + } + } + + if (!injected_quiesce_lock) { + dout(20) << "no quiesce lock taken for mdr: " << mdr << dendl; + } + // AUTH PINS map > mustpin_remote; // mds -> (object set) @@ -576,7 +607,7 @@ bool Locker::acquire_locks(const MDRequestRef& mdr, cancel_locking(mdr.get(), &issue_set); if (!xlock_start(lock, mdr)) { if (t == CEPH_LOCK_IQUIESCE) { - handle_quiesce_failure(mdr, marker.message); + marker.message = "failed to xlock quiesce, waiting"; } else { marker.message = "failed to xlock, waiting"; } @@ -865,7 +896,7 @@ void Locker::drop_rdlocks_for_early_reply(MutationImpl *mut) issue_caps_set(need_issue); } -void Locker::drop_rdlock(MutationImpl* mut, SimpleLock* what) +void Locker::drop_lock(MutationImpl* mut, SimpleLock* what) { dout(20) << __func__ << ": " << *what << dendl; @@ -873,9 +904,14 @@ void Locker::drop_rdlock(MutationImpl* mut, SimpleLock* what) auto* lock = it->lock; if (lock == what) { dout(20) << __func__ << ": found lock " << *lock << dendl; - ceph_assert(it->is_rdlock()); bool ni = false; - rdlock_finish(it, mut, &ni); + if (it->is_xlock()) { + xlock_finish(it, mut, &ni); + } else if (it->is_wrlock()) { + wrlock_finish(it, mut, &ni); + } else if (it->is_rdlock()) { + rdlock_finish(it, mut, &ni); + } if (ni) { set need_issue; 
need_issue.insert(static_cast(lock->get_parent())); @@ -1841,6 +1877,7 @@ void Locker::wrlock_force(SimpleLock *lock, MutationRef& mut) switch (lock->get_type()) { case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: return local_wrlock_grab(static_cast(lock), mut); default: break; @@ -1892,6 +1929,7 @@ bool Locker::wrlock_start(const MutationImpl::LockOp &op, const MDRequestRef& mu switch (lock->get_type()) { case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: return local_wrlock_start(static_cast(lock), mut); default: break; @@ -1957,6 +1995,7 @@ void Locker::wrlock_finish(const MutationImpl::lock_iterator& it, MutationImpl * switch (lock->get_type()) { case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: return local_wrlock_finish(it, mut); default: break; @@ -2045,6 +2084,7 @@ bool Locker::xlock_start(SimpleLock *lock, const MDRequestRef& mut) switch (lock->get_type()) { case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: return local_xlock_start(static_cast(lock), mut); default: break; @@ -2171,6 +2211,7 @@ void Locker::xlock_finish(const MutationImpl::lock_iterator& it, MutationImpl *m switch (lock->get_type()) { case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: return local_xlock_finish(it, mut); default: break; @@ -5548,6 +5589,14 @@ void Locker::local_xlock_finish(const MutationImpl::lock_iterator& it, MutationI lock->put_xlock(); mut->locks.erase(it); + if (lock->get_type() == CEPH_LOCK_IQUIESCE) { + auto in = static_cast(lock->get_parent()); + set issue_set { in }; + // reevaluate everything related to caps + eval_cap_gather(in, &issue_set); + issue_caps_set(issue_set); + } + lock->finish_waiters(SimpleLock::WAIT_STABLE | SimpleLock::WAIT_WR | SimpleLock::WAIT_RD); diff --git a/src/mds/Locker.h b/src/mds/Locker.h index cfc0d9ace9a1ec..0a500f09be15be 100644 --- a/src/mds/Locker.h +++ b/src/mds/Locker.h @@ -67,7
class Locker { void set_xlocks_done(MutationImpl *mut, bool skip_dentry=false); void drop_non_rdlocks(MutationImpl *mut, std::set *pneed_issue=0); void drop_rdlocks_for_early_reply(MutationImpl *mut); - void drop_rdlock(MutationImpl* mut, SimpleLock* what); + void drop_lock(MutationImpl* mut, SimpleLock* what); void drop_locks_for_fragment_unfreeze(MutationImpl *mut); int get_cap_bit_for_lock_cache(int op); diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index f2b6b078275b80..a659fa69ac83b2 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -13531,17 +13531,10 @@ void MDCache::dispatch_quiesce_inode(const MDRequestRef& mdr) dout(20) << __func__ << " " << *mdr << " quiescing " << *in << dendl; - { /* Acquire authpins on `in` to prevent migrations after this rank considers * it (and its children) quiesced. */ - MutationImpl::LockOpVec lov; - if (!mds->locker->acquire_locks(mdr, lov, nullptr, {in}, false, true)) { - return; - } - } - /* TODO: Consider: * * rank0 is auth for /foo @@ -13554,43 +13547,31 @@ void MDCache::dispatch_quiesce_inode(const MDRequestRef& mdr) * The solution is probably to have rank1 mark /foo has STATE_QUIESCED and reject export ops from rank0. */ - if (in->is_auth()) { - /* Acquire rdlocks on anything which prevents writing. - * - * Because files are treated specially allowing multiple reader/writers, we - * need an xlock here to recall all write caps. This unfortunately means - * there can be no readers. + if (in->is_auth() || splitauth) { + /* Acquire all caps-related locks to revoke unwanted caps * * The xlock on the quiescelock is important to prevent future requests * from blocking on other inode locks while holding path traversal locks. * See dev doc doc/dev/mds_internals/quiesce.rst for more details. 
*/ - MutationImpl::LockOpVec lov; - lov.add_rdlock(&in->authlock); - lov.add_rdlock(&in->dirfragtreelock); - if (in->is_file()) { - lov.add_xlock(&in->filelock); - } else { - lov.add_rdlock(&in->filelock); - } - lov.add_rdlock(&in->linklock); - lov.add_rdlock(&in->nestlock); - lov.add_rdlock(&in->policylock); - // N.B.: NO xlock/wrlock on quiescelock; we need to allow access to mksnap/lookup - // This is an unfortunate inconsistency. It may be possible to circumvent - // this issue by having those ops acquire the quiscelock only if necessary. - if (is_root) { - lov.add_rdlock(&in->quiescelock); - } else { - lov.add_xlock(&in->quiescelock); /* !! */ - } - lov.add_rdlock(&in->snaplock); - lov.add_rdlock(&in->xattrlock); + // take the quiesce lock and the first of the four capability related locks + lov.add_xlock(&in->quiescelock); /* !! */ + + /* + * Because files are treated specially allowing multiple reader/writers, we + * need an xlock here to recall all write caps. This unfortunately means + * there can be no readers. 
+ */ + lov.add_xlock(&in->filelock); + lov.add_xlock(&in->authlock); + lov.add_xlock(&in->linklock); + lov.add_xlock(&in->xattrlock); + if (!mds->locker->acquire_locks(mdr, lov, nullptr, {in}, false, true)) { return; } - } else if (!splitauth) { + } else { dout(5) << "auth is split and splitauth is false: " << *in << dendl; qs.add_failed(mdr, -EPERM); mds->server->respond_to_request(mdr, -EPERM); @@ -13608,6 +13589,16 @@ void MDCache::dispatch_quiesce_inode(const MDRequestRef& mdr) return; } + // since we're holding onto the quiesce lock, + // we don't need the other capabilities related locks anymore + // as the quiesce mask will prevent any non-shared capabilities from being issued + // see CInode::get_caps_quiesce_mask() + + mds->locker->drop_lock(mdr.get(), &in->filelock); + mds->locker->drop_lock(mdr.get(), &in->authlock); + mds->locker->drop_lock(mdr.get(), &in->linklock); + mds->locker->drop_lock(mdr.get(), &in->xattrlock); + if (in->is_dir()) { for (auto& dir : in->get_dirfrags()) { if (!dir->is_auth() && !splitauth) { @@ -13622,36 +13613,44 @@ void MDCache::dispatch_quiesce_inode(const MDRequestRef& mdr) auto& qops = qrmdr->more()->quiesce_ops; for (auto& dir : in->get_dirfrags()) { for (auto& [dnk, dn] : *dir) { - auto* in = dn->get_projected_inode(); - if (!in) { - continue; - } - - if (auto it = qops.find(in); it != qops.end()) { - dout(25) << __func__ << ": existing quiesce metareqid: " << it->second << dendl; - if (auto reqit = active_requests.find(it->second); reqit != active_requests.end()) { - auto& qimdr = reqit->second; - dout(25) << __func__ << ": found in-progress " << qimdr << dendl; - continue; + auto recurse = [this, mds=mds, &qops, &count, delay, &qfinisher, &qs, &gather, func=__func__](CInode* in) { + if (auto it = qops.find(in); it != qops.end()) { + dout(25) << func << ": existing quiesce metareqid: " << it->second << dendl; + if (auto reqit = active_requests.find(it->second); reqit != active_requests.end()) { + auto& qimdr = 
reqit->second; + dout(25) << func << ": found in-progress " << qimdr << dendl; + return; + } } + dout(10) << func << ": scheduling op to quiesce " << *in << dendl; + + MDRequestRef qimdr = request_start_internal(CEPH_MDS_OP_QUIESCE_INODE); + qimdr->set_filepath(filepath(in->ino())); + qimdr->internal_op_finish = gather.new_sub(); + qimdr->internal_op_private = qfinisher; + qops[in] = qimdr->reqid; + qs.inc_inodes(); + if (delay > 0ms) { + mds->timer.add_event_after(delay, new LambdaContext([cache=this,qimdr](int r) { + cache->dispatch_request(qimdr); + })); + } else { + dispatch_request(qimdr); + } + if (!(++count % mds->heartbeat_reset_grace())) { + mds->heartbeat_reset(); + } + }; + + + auto inode = dn->get_linkage()->inode; + if (inode) { + recurse(inode); } - dout(10) << __func__ << ": scheduling op to quiesce " << *in << dendl; - - MDRequestRef qimdr = request_start_internal(CEPH_MDS_OP_QUIESCE_INODE); - qimdr->set_filepath(filepath(in->ino())); - qimdr->internal_op_finish = gather.new_sub(); - qimdr->internal_op_private = qfinisher; - qops[in] = qimdr->reqid; - qs.inc_inodes(); - if (delay > 0ms) { - mds->timer.add_event_after(delay, new LambdaContext([cache=this,qimdr](int r) { - cache->dispatch_request(qimdr); - })); - } else { - dispatch_request(qimdr); - } - if (!(++count % mds->heartbeat_reset_grace())) { - mds->heartbeat_reset(); + auto pinode = dn->get_projected_inode(); + if (pinode && pinode != inode) { + dout(10) << __func__ << " descending into a projected dentry" << dendl; + recurse(pinode); } } } diff --git a/src/mds/Mutation.h b/src/mds/Mutation.h index 4b9d0d7528c898..ce2a6eb32eac73 100644 --- a/src/mds/Mutation.h +++ b/src/mds/Mutation.h @@ -51,6 +51,7 @@ struct MutationImpl : public TrackedOp { mds_rank_t remote_auth_pinned = MDS_RANK_NONE; }; + // held locks struct LockOp { enum { diff --git a/src/mds/SimpleLock.h b/src/mds/SimpleLock.h index c61bc4b2cd04bf..d2c40895512a21 100644 --- a/src/mds/SimpleLock.h +++ b/src/mds/SimpleLock.h @@ -41,7
+41,6 @@ struct LockType { explicit LockType(int t) : type(t) { switch (type) { case CEPH_LOCK_DN: - case CEPH_LOCK_IQUIESCE: case CEPH_LOCK_IAUTH: case CEPH_LOCK_ILINK: case CEPH_LOCK_IXATTR: @@ -59,6 +58,7 @@ struct LockType { break; case CEPH_LOCK_DVERSION: case CEPH_LOCK_IVERSION: + case CEPH_LOCK_IQUIESCE: sm = &sm_locallock; break; default: