D40600.diff
Size: 5 KB
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -933,7 +933,6 @@
int lkflags_save;
int ni_dvp_unlocked;
int crosslkflags;
- bool crosslock;
/*
* Setup: break out flag bits into variables.
@@ -1301,39 +1300,24 @@
mp = dp->v_mountedhere;
KASSERT(mp != NULL,
("%s: NULL mountpoint for VIRF_MOUNTPOINT vnode", __func__));
- crosslock = (dp->v_vflag & VV_CROSSLOCK) != 0;
crosslkflags = compute_cn_lkflags(mp, cnp->cn_lkflags,
cnp->cn_flags);
- if (__predict_false(crosslock)) {
- /*
- * We are going to be holding the vnode lock, which
- * in this case is shared by the root vnode of the
- * filesystem mounted at mp, across the call to
- * VFS_ROOT(). Make the situation clear to the
- * filesystem by passing LK_CANRECURSE if the
- * lock is held exclusive, or by clearinng
- * LK_NODDLKTREAT to allow recursion on the shared
- * lock in the presence of an exclusive waiter.
- */
- if (VOP_ISLOCKED(dp) == LK_EXCLUSIVE) {
- crosslkflags &= ~LK_SHARED;
- crosslkflags |= LK_EXCLUSIVE | LK_CANRECURSE;
- } else if ((crosslkflags & LK_EXCLUSIVE) != 0) {
- vn_lock(dp, LK_UPGRADE | LK_RETRY);
- if (VN_IS_DOOMED(dp)) {
- error = ENOENT;
- goto bad2;
- }
- if (dp->v_mountedhere != mp) {
- continue;
- }
- } else
- crosslkflags &= ~LK_NODDLKTREAT;
+ error = vfs_busy(mp, MBF_NOWAIT);
+ if (__predict_false(error != 0)) {
+ int lkflags = compute_cn_lkflags(dp->v_mount,
+ cnp->cn_lkflags, cnp->cn_flags);
+ VOP_UNLOCK(dp);
+ error = vfs_busy(mp, 0);
+ vn_lock(dp, lkflags | LK_RETRY);
+ if (error != 0) {
+ error = 0;
+ continue;
+ } else if (dp->v_mountedhere != mp) {
+ vfs_unbusy(mp);
+ continue;
+ }
}
- if (vfs_busy(mp, 0) != 0)
- continue;
- if (__predict_true(!crosslock))
- vput(dp);
+ vput(dp);
if (dp != ndp->ni_dvp)
vput(ndp->ni_dvp);
else
@@ -1342,8 +1326,6 @@
ndp->ni_dvp = vp_crossmp;
error = VFS_ROOT(mp, crosslkflags, &tdp);
vfs_unbusy(mp);
- if (__predict_false(crosslock))
- vput(dp);
if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT))
panic("vp_crossmp exclusively locked or reclaimed");
if (error != 0) {
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1814,7 +1814,7 @@
}
static void
-dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
+dounmount_cleanup(struct mount *mp, int mntkflags)
{
mtx_assert(MNT_MTX(mp), MA_OWNED);
@@ -1825,10 +1825,6 @@
}
vfs_op_exit_locked(mp);
MNT_IUNLOCK(mp);
- if (coveredvp != NULL) {
- VOP_UNLOCK(coveredvp);
- vdrop(coveredvp);
- }
vn_finished_write(mp);
vfs_rel(mp);
}
@@ -2125,7 +2121,6 @@
struct vnode *coveredvp, *rootvp;
int error;
uint64_t async_flag;
- int mnt_gen_r;
unsigned int retries;
KASSERT((flags & MNT_DEFERRED) == 0 ||
@@ -2235,24 +2230,6 @@
if ((flags & MNT_DEFERRED) != 0)
vfs_ref(mp);
- if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
- mnt_gen_r = mp->mnt_gen;
- VI_LOCK(coveredvp);
- vholdl(coveredvp);
- vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
- /*
- * Check for mp being unmounted while waiting for the
- * covered vnode lock.
- */
- if (coveredvp->v_mountedhere != mp ||
- coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
- VOP_UNLOCK(coveredvp);
- vdrop(coveredvp);
- vfs_rel(mp);
- return (EBUSY);
- }
- }
-
vfs_op_enter(mp);
vn_start_write(NULL, &mp, V_WAIT);
@@ -2260,12 +2237,12 @@
if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
(mp->mnt_flag & MNT_UPDATE) != 0 ||
!TAILQ_EMPTY(&mp->mnt_uppers)) {
- dounmount_cleanup(mp, coveredvp, 0);
+ dounmount_cleanup(mp, 0);
return (EBUSY);
}
mp->mnt_kern_flag |= MNTK_UNMOUNT;
rootvp = vfs_cache_root_clear(mp);
- if (coveredvp != NULL)
+ if ((coveredvp = mp->mnt_vnodecovered) != NULL)
vn_seqc_write_begin(coveredvp);
if (flags & MNT_NONBUSY) {
MNT_IUNLOCK(mp);
@@ -2273,7 +2250,7 @@
MNT_ILOCK(mp);
if (error != 0) {
vn_seqc_write_end(coveredvp);
- dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
+ dounmount_cleanup(mp, MNTK_UNMOUNT);
if (rootvp != NULL) {
vn_seqc_write_end(rootvp);
vrele(rootvp);
@@ -2297,6 +2274,9 @@
mp->mnt_kern_flag |= MNTK_DRAINING;
error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
"mount drain", 0);
+ KASSERT((mp->mnt_kern_flag & MNTK_DRAINING) == 0,
+ ("%s: MNTK_DRAINING not cleared on mp %p @ %s:%d",
+ __func__, mp, __FILE__, __LINE__));
}
MNT_IUNLOCK(mp);
KASSERT(mp->mnt_lockref == 0,
@@ -2306,6 +2286,16 @@
("%s: invalid return value for msleep in the drain path @ %s:%d",
__func__, __FILE__, __LINE__));
+ if (coveredvp != NULL) {
+ vn_lock(coveredvp, LK_EXCLUSIVE | LK_RETRY);
+ KASSERT(coveredvp->v_mountedhere == mp,
+ ("%s: coveredvp->v_mountedhere(%p) != mp(%p) with MNTK_UNMOUNT set",
+ __func__, coveredvp->v_mountedhere, mp));
+ KASSERT(mp->mnt_vnodecovered == coveredvp,
+ ("%s: mp->mnt_vnodecovered(%p) != coveredvp(%p) with MNTK_UNMOUNT set",
+ __func__, mp->mnt_vnodecovered, coveredvp));
+ }
+
/*
* We want to keep the vnode around so that we can vn_seqc_write_end
* after we are done with unmount. Downgrade our reference to a mere
@@ -2356,7 +2346,6 @@
if (coveredvp) {
vn_seqc_write_end(coveredvp);
VOP_UNLOCK(coveredvp);
- vdrop(coveredvp);
}
if (rootvp != NULL) {
vn_seqc_write_end(rootvp);
@@ -2376,7 +2365,6 @@
vn_seqc_write_end_locked(coveredvp);
VI_UNLOCK(coveredvp);
VOP_UNLOCK(coveredvp);
- vdrop(coveredvp);
}
mount_devctl_event("UNMOUNT", mp, false);
if (rootvp != NULL) {
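Note on the vfs_lookup.c hunk: it replaces the VV_CROSSLOCK special-casing with an unconditional vfs_busy() of the mount being crossed. The pattern is: try a non-sleeping busy first, and only if that fails drop the vnode lock, busy the mount with sleeping allowed, re-lock, and re-check that the vnode is still covered by the same mount. The following is a condensed sketch of that pattern, not the committed code; the helper name cross_mount_busy() and its ERESTART-style "retry" return (standing in for the "continue" in the real loop) are illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Illustrative sketch: busy the mount 'mp' covered by the locked vnode
 * 'dp' before crossing into it.  'lkflags' is what the caller would
 * compute via compute_cn_lkflags() for dp's own mount.  Returns 0 with
 * mp busied and dp still locked, or ERESTART when the caller should
 * retry the lookup iteration.
 */
static int
cross_mount_busy(struct vnode *dp, struct mount *mp, int lkflags)
{
        int error;

        error = vfs_busy(mp, MBF_NOWAIT);
        if (__predict_true(error == 0))
                return (0);     /* fast path: no sleep needed, lock kept */

        /*
         * Slow path: vfs_busy() would have to sleep (e.g. an unmount is
         * in progress), and we may not sleep while holding the covered
         * vnode lock.  Drop it, busy the mount, then re-lock.
         */
        VOP_UNLOCK(dp);
        error = vfs_busy(mp, 0);                /* may sleep */
        vn_lock(dp, lkflags | LK_RETRY);
        if (error != 0)
                return (ERESTART);      /* mount went away while we slept */
        if (dp->v_mountedhere != mp) {
                /* Something else was mounted here meanwhile; start over. */
                vfs_unbusy(mp);
                return (ERESTART);
        }
        return (0);
}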
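Note on the vfs_mount.c hunks: dounmount() no longer locks (and holds) the covered vnode up front with an mnt_gen re-check. It records mp->mnt_vnodecovered after setting MNTK_UNMOUNT and takes the lock only once the mount's lock references have drained; since a competing unmount now fails with EBUSY, the vnode is guaranteed to still cover mp, as the new KASSERTs state. Below is a condensed, hypothetical helper capturing that ordering; the real dounmount() also drains mnt_lockref first and handles rootvp, MNT_NONBUSY, write suspension, and error unwinding.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Illustrative sketch: lock the covered vnode late in unmount, after
 * MNTK_UNMOUNT is set and (in the real code) mnt_lockref has drained.
 * Called with the mount interlock held; returns the locked covered
 * vnode, or NULL for the root filesystem.
 */
static struct vnode *
unmount_lock_coveredvp(struct mount *mp)
{
        struct vnode *coveredvp;

        mtx_assert(MNT_MTX(mp), MA_OWNED);
        KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0,
            ("%s: MNTK_UNMOUNT not set on %p", __func__, mp));

        coveredvp = mp->mnt_vnodecovered;
        MNT_IUNLOCK(mp);

        if (coveredvp != NULL) {
                /*
                 * Safe to sleep for the lock here: MNTK_UNMOUNT makes a
                 * competing unmount fail with EBUSY, so the vnode still
                 * covers mp once we get the lock; no vhold()/mnt_gen
                 * re-check is needed.
                 */
                vn_lock(coveredvp, LK_EXCLUSIVE | LK_RETRY);
                KASSERT(coveredvp->v_mountedhere == mp,
                    ("%s: covered vnode re-mounted under MNTK_UNMOUNT",
                    __func__));
        }
        return (coveredvp);
}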
Attached To: D40600: vfs_lookup(): remove VV_CROSSLOCK logic