svn commit: r327399 - head/sys/kern

Mateusz Guzik mjg at FreeBSD.org
Sun Dec 31 00:47:06 UTC 2017


Author: mjg
Date: Sun Dec 31 00:47:04 2017
New Revision: 327399
URL: https://svnweb.freebsd.org/changeset/base/327399

Log:
  locks: re-check the reason to go to sleep after locking sleepq/turnstile
  
  In both rw and sx locks we always go to sleep if the lock owner is not
  running.
  
  We do spin for some time if the lock is read-locked.
  
  However, if we decide to go to sleep because the lock owner is off CPU,
  and the lock becomes read-locked after the sleepq/turnstile is acquired,
  we should fall back to the aforementioned wait.

Modified:
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c	Sun Dec 31 00:46:41 2017	(r327398)
+++ head/sys/kern/kern_rwlock.c	Sun Dec 31 00:47:04 2017	(r327399)
@@ -872,6 +872,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
 #ifdef ADAPTIVE_RWLOCKS
 	int spintries = 0;
 	int i, n;
+	int sleep_reason = 0;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -952,6 +953,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
 		 * running on another CPU, spin until the owner stops
 		 * running or the state of the lock changes.
 		 */
+		sleep_reason = 1;
 		owner = lv_rw_wowner(v);
 		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -995,6 +997,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
 #endif
 			if (i != rowner_loops)
 				continue;
+			sleep_reason = 2;
 		}
 #endif
 		ts = turnstile_trywait(&rw->lock_object);
@@ -1015,6 +1018,9 @@ retry_ts:
 				turnstile_cancel(ts);
 				continue;
 			}
+		} else if (RW_READERS(v) > 0 && sleep_reason == 1) {
+			turnstile_cancel(ts);
+			continue;
 		}
 #endif
 		/*

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Sun Dec 31 00:46:41 2017	(r327398)
+++ head/sys/kern/kern_sx.c	Sun Dec 31 00:47:04 2017	(r327399)
@@ -534,6 +534,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
 	bool adaptive;
+	int sleep_reason = 0;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -647,6 +648,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 				    sched_tdname(curthread), "running");
 				continue;
 			}
+			sleep_reason = 1;
 		} else if (SX_SHARERS(x) && spintries < asx_retries) {
 			KTR_STATE1(KTR_SCHED, "thread",
 			    sched_tdname(curthread), "spinning",
@@ -671,6 +673,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 			    sched_tdname(curthread), "running");
 			if (i != asx_loops)
 				continue;
+			sleep_reason = 2;
 		}
 #endif
 sleepq:
@@ -695,9 +698,14 @@ retry_sleepq:
 		 * chain lock.  If so, drop the sleep queue lock and try
 		 * again.
 		 */
-		if (!(x & SX_LOCK_SHARED) && adaptive) {
-			owner = (struct thread *)SX_OWNER(x);
-			if (TD_IS_RUNNING(owner)) {
+		if (adaptive) {
+			if (!(x & SX_LOCK_SHARED)) {
+				owner = (struct thread *)SX_OWNER(x);
+				if (TD_IS_RUNNING(owner)) {
+					sleepq_release(&sx->lock_object);
+					continue;
+				}
+			} else if (SX_SHARERS(x) > 0 && sleep_reason == 1) {
 				sleepq_release(&sx->lock_object);
 				continue;
 			}


More information about the svn-src-all mailing list