From 8d3635c4dbe64a3fbc5694cc4df287bc660a55b4 Mon Sep 17 00:00:00 2001
From: Attilio Rao
Date: Wed, 2 Sep 2009 17:33:51 +0000
Subject: [PATCH] Fix some bugs related to adaptive spinning:

In the lockmgr support:
- GIANT_RESTORE() is only called when the sleep finishes, so the
  current code can end up with a Giant unlock problem.  Fix it by
  calling GIANT_RESTORE() when needed.
  Note that this is not ideal because on every iteration of the
  adaptive spinning we drop and restore Giant, but the overhead
  should not be a factor.
- In the case where the lock is held in exclusive mode, after the
  adaptive spinning completes, we should retry acquiring the lock
  instead of falling through.  Fix that.
- Fix a style nit.

In the sx support:
- Call GIANT_SAVE() before looping.  This saves some overhead
  because in the current code GIANT_SAVE() is called several times
  (once per spin iteration).

Tested by:	Giovanni Trematerra
---
 sys/kern/kern_lock.c | 13 +++++++++++--
 sys/kern/kern_sx.c   |  2 +-
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 29ae4accfa7..e6f2f536249 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -467,7 +467,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
-		 * changes.
+		 * changes.  We need a double-state handle here
+		 * because for a failed acquisition the lock can be
+		 * either held in exclusive mode or shared mode
+		 * (for the writer starvation avoidance technique).
 		 */
 		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
 		    LK_HOLDER(x) != LK_KERNPROC) {
@@ -491,8 +494,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			GIANT_RESTORE();
+			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
-		    (x & LK_SHARE) !=0 && LK_SHARERS(x) &&
+		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
@@ -511,6 +516,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
 		}
@@ -704,6 +710,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			GIANT_RESTORE();
+			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
@@ -727,6 +735,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
 		}
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 4a78444c09c..c00b267e5d2 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -531,13 +531,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 				continue;
 			}
 		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			GIANT_SAVE();
 			spintries++;
 			for (i = 0; i < asx_loops; i++) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
 				    "%s: shared spinning on %p with %u and %u",
 					    __func__, sx, spintries, i);
-				GIANT_SAVE();
 				x = sx->sx_lock;
 				if ((x & SX_LOCK_SHARED) == 0 ||
 				    SX_SHARERS(x) == 0)
-- 
2.45.2
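
Editor's note: purely as an illustration of the pattern this patch enforces
(not part of the patch itself), here is a minimal user-space C sketch of an
adaptive-spin acquire loop in which Giant is dropped once before spinning
and restored on every exit path before retrying.  GIANT_SAVE(),
GIANT_RESTORE(), cpu_spinwait(), try_acquire() and owner_is_running() are
stand-in stubs, not the kernel's real definitions.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Stand-ins for the kernel primitives referenced by the patch. */
#define	GIANT_SAVE()	do { /* drop Giant if held, remember depth */ } while (0)
#define	GIANT_RESTORE()	do { /* re-acquire Giant if it was dropped */ } while (0)
#define	cpu_spinwait()	sched_yield()

static atomic_int lock_word;		/* 0 == free, nonzero == held */

static bool
try_acquire(void)
{
	int expected = 0;

	return (atomic_compare_exchange_strong(&lock_word, &expected, 1));
}

static bool
owner_is_running(void)
{

	/* Stub: the kernel checks TD_IS_RUNNING(owner) here. */
	return (atomic_load(&lock_word) != 0);
}

void
adaptive_acquire(void)
{

	for (;;) {
		if (try_acquire())
			break;

		/*
		 * Drop Giant once, spin while the owner is running,
		 * then restore Giant *before* retrying the acquisition,
		 * mirroring the GIANT_RESTORE()/continue pairs this
		 * patch adds to __lockmgr_args().
		 */
		GIANT_SAVE();
		while (owner_is_running())
			cpu_spinwait();
		GIANT_RESTORE();
	}
}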