PERFORCE change 129598 for review
John Birrell <jb at FreeBSD.org>
Mon Nov 26 15:04:27 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=129598
Change 129598 by jb at jb_freebsd1 on 2007/11/26 23:03:40
IFC (integrate from CURRENT)
Affected files ...
.. //depot/projects/dtrace/src/share/mk/sys.mk#17 integrate
.. //depot/projects/dtrace/src/sys/boot/i386/gptboot/gptldr.S#2 integrate
.. //depot/projects/dtrace/src/sys/boot/i386/pmbr/pmbr.s#2 integrate
.. //depot/projects/dtrace/src/sys/kern/kern_mutex.c#15 integrate
.. //depot/projects/dtrace/src/sys/kern/kern_rwlock.c#10 integrate
.. //depot/projects/dtrace/src/sys/net80211/ieee80211_scan_sta.c#5 integrate
Differences ...
==== //depot/projects/dtrace/src/share/mk/sys.mk#17 (text+ko) ====
@@ -1,5 +1,5 @@
# from: @(#)sys.mk 8.2 (Berkeley) 3/21/94
-# $FreeBSD: src/share/mk/sys.mk,v 1.96 2007/11/22 23:21:12 jb Exp $
+# $FreeBSD: src/share/mk/sys.mk,v 1.97 2007/11/26 21:46:21 jb Exp $
unix ?= We run FreeBSD, not UNIX.
.FreeBSD ?= true
@@ -36,9 +36,9 @@
.else
CC ?= cc
.if ${MACHINE_ARCH} == "arm"
-CFLAGS ?= -O -pipe
+CFLAGS ?= -O -fno-strict-aliasing -pipe
.else
-CFLAGS ?= -O2 -pipe
+CFLAGS ?= -O2 -fno-strict-aliasing -pipe
.endif
.if defined(NO_STRICT_ALIASING)
CFLAGS += -fno-strict-aliasing
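
The sys.mk change makes -fno-strict-aliasing part of the default CFLAGS rather than opt-in, since GCC's optimizer may exploit the C aliasing rules and parts of the tree still type-pun through casts. A minimal user-space sketch of the pattern involved (illustrative, not code from the tree; it assumes sizeof(int) == sizeof(float) and IEEE 754 floats):

#include <stdio.h>

/*
 * Reading float storage through an int pointer violates the strict
 * aliasing rules, so an optimizer may assume *f and *i never overlap
 * and reorder or cache the accesses; -fno-strict-aliasing tells the
 * compiler to keep such code behaving as naively expected.
 */
static int
punned_bits(float *f)
{
	int *i = (int *)f;	/* type pun: legal layout, illegal aliasing */

	*f = 1.0f;
	return (*i);		/* reads the bits just stored, absent reordering */
}

int
main(void)
{
	float x = 0.0f;

	/* With -fno-strict-aliasing this prints 0x3f800000 (IEEE 754 1.0f). */
	printf("%#x\n", punned_bits(&x));
	return (0);
}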
==== //depot/projects/dtrace/src/sys/boot/i386/gptboot/gptldr.S#2 (text+ko) ====
@@ -27,7 +27,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/boot/i386/gptboot/gptldr.S,v 1.1 2007/10/24 21:32:59 jhb Exp $
+ * $FreeBSD: src/sys/boot/i386/gptboot/gptldr.S,v 1.2 2007/11/26 21:29:58 jhb Exp $
+ *
+ * Partly from: src/sys/boot/i386/boot2/boot1.S 1.31
*/
/* Memory Locations */
==== //depot/projects/dtrace/src/sys/boot/i386/pmbr/pmbr.s#2 (text+ko) ====
@@ -27,7 +27,9 @@
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
-# $FreeBSD: src/sys/boot/i386/pmbr/pmbr.s,v 1.1 2007/10/24 21:32:59 jhb Exp $
+# $FreeBSD: src/sys/boot/i386/pmbr/pmbr.s,v 1.2 2007/11/26 21:29:59 jhb Exp $
+#
+# Partly from: src/sys/boot/i386/mbr/mbr.s 1.7
# A 512 byte PMBR boot manager that looks for a FreeBSD boot GPT partition
# and boots it.
==== //depot/projects/dtrace/src/sys/kern/kern_mutex.c#15 (text+ko) ====
@@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_mutex.c,v 1.200 2007/11/18 14:43:52 attilio Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_mutex.c,v 1.201 2007/11/26 22:37:35 attilio Exp $");
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
@@ -335,6 +335,31 @@
m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
while (!_obtain_lock(m, tid)) {
+#ifdef ADAPTIVE_MUTEXES
+ /*
+ * If the owner is running on another CPU, spin until the
+ * owner stops running or the state of the lock changes.
+ */
+ v = m->mtx_lock;
+ if (v != MTX_UNOWNED) {
+ owner = (struct thread *)(v & ~MTX_FLAGMASK);
+#ifdef ADAPTIVE_GIANT
+ if (TD_IS_RUNNING(owner)) {
+#else
+ if (m != &Giant && TD_IS_RUNNING(owner)) {
+#endif
+ if (LOCK_LOG_TEST(&m->lock_object, 0))
+ CTR3(KTR_LOCK,
+ "%s: spinning on %p held by %p",
+ __func__, m, owner);
+ while (mtx_owner(m) == owner &&
+ TD_IS_RUNNING(owner))
+ cpu_spinwait();
+ continue;
+ }
+ }
+#endif
+
ts = turnstile_trywait(&m->lock_object);
v = m->mtx_lock;
@@ -350,37 +375,34 @@
MPASS(v != MTX_CONTESTED);
+#ifdef ADAPTIVE_MUTEXES
/*
- * If the mutex isn't already contested and a failure occurs
- * setting the contested bit, the mutex was either released
- * or the state of the MTX_RECURSED bit changed.
+ * If the current owner of the lock is executing on another
+ * CPU quit the hard path and try to spin.
*/
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
+ owner = (struct thread *)(v & ~MTX_FLAGMASK);
+#ifdef ADAPTIVE_GIANT
+ if (TD_IS_RUNNING(owner)) {
+#else
+ if (m != &Giant && TD_IS_RUNNING(owner)) {
+#endif
turnstile_cancel(ts);
cpu_spinwait();
continue;
}
+#endif
-#ifdef ADAPTIVE_MUTEXES
/*
- * If the current owner of the lock is executing on another
- * CPU, spin instead of blocking.
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
*/
- owner = (struct thread *)(v & ~MTX_FLAGMASK);
-#ifdef ADAPTIVE_GIANT
- if (TD_IS_RUNNING(owner))
-#else
- if (m != &Giant && TD_IS_RUNNING(owner))
-#endif
- {
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
turnstile_cancel(ts);
- while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
- cpu_spinwait();
- }
+ cpu_spinwait();
continue;
}
-#endif /* ADAPTIVE_MUTEXES */
/*
* We definitely must sleep for this lock.
@@ -589,17 +611,7 @@
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
-#ifdef ADAPTIVE_MUTEXES
- if (ts == NULL) {
- _release_lock_quick(m);
- if (LOCK_LOG_TEST(&m->lock_object, opts))
- CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
- turnstile_chain_unlock(&m->lock_object);
- return;
- }
-#else
MPASS(ts != NULL);
-#endif
turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
_release_lock_quick(m);
/*
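
The net effect of the kern_mutex.c reorganization: the adaptive-spin check now runs before turnstile_trywait(), so a spinning thread never registers with the turnstile, and the unlock path can once again assert that a contested mutex has a turnstile (the old ts == NULL escape hatch is removed). A condensed user-space sketch of that "spin first, queue second" ordering using C11 atomics; amtx, owner_is_running(), and the sched_yield() stand-in for cpu_spinwait() are hypothetical, not kernel API:

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

struct amtx {
	_Atomic uintptr_t owner;	/* 0 == unowned, else owner's id */
};

/* Hypothetical stand-in for the scheduler's TD_IS_RUNNING() check. */
static int
owner_is_running(uintptr_t id)
{
	(void)id;
	return (1);
}

static void
amtx_lock(struct amtx *m, uintptr_t tid)
{
	uintptr_t v;

	for (;;) {
		v = 0;
		if (atomic_compare_exchange_weak(&m->owner, &v, tid))
			return;		/* fast path: lock was unowned */
		/*
		 * Adaptive path, checked before any queueing work:
		 * while the owner stays on a CPU, just spin.
		 */
		if (v != 0 && owner_is_running(v)) {
			while (atomic_load(&m->owner) == v &&
			    owner_is_running(v))
				sched_yield();	/* cpu_spinwait() stand-in */
			continue;
		}
		/* Slow path: queue on a turnstile and sleep -- elided. */
		sched_yield();
	}
}

static void
amtx_unlock(struct amtx *m)
{
	atomic_store(&m->owner, 0);
}

int
main(void)
{
	struct amtx m = { 0 };

	amtx_lock(&m, (uintptr_t)1);
	amtx_unlock(&m);
	return (0);
}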
==== //depot/projects/dtrace/src/sys/kern/kern_rwlock.c#10 (text+ko) ====
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.31 2007/11/18 14:43:52 attilio Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.32 2007/11/26 22:37:35 attilio Exp $");
#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"
@@ -290,6 +290,28 @@
continue;
}
+#ifdef ADAPTIVE_RWLOCKS
+ /*
+ * If the owner is running on another CPU, spin until
+ * the owner stops running or the state of the lock
+ * changes.
+ */
+ owner = (struct thread *)RW_OWNER(x);
+ if (TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&rw->lock_object, 0))
+ CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
+ __func__, rw, owner);
+#ifdef LOCK_PROFILING_SHARED
+ lock_profile_obtain_lock_failed(&rw->lock_object,
+ &contested, &waittime);
+#endif
+ while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
+ TD_IS_RUNNING(owner))
+ cpu_spinwait();
+ continue;
+ }
+#endif
+
/*
* Okay, now it's the hard case. Some other thread already
* has a write lock, so acquire the turnstile lock so we can
@@ -309,7 +331,20 @@
continue;
}
+#ifdef ADAPTIVE_RWLOCKS
/*
+ * If the current owner of the lock is executing on another
+ * CPU quit the hard path and try to spin.
+ */
+ owner = (struct thread *)RW_OWNER(x);
+ if (TD_IS_RUNNING(owner)) {
+ turnstile_cancel(ts);
+ cpu_spinwait();
+ continue;
+ }
+#endif
+
+ /*
* Ok, it's still a write lock. If the RW_LOCK_READ_WAITERS
* flag is already set, then we can go ahead and block. If
* it is not set then try to set it. If we fail to set it
@@ -327,30 +362,7 @@
__func__, rw);
}
-#ifdef ADAPTIVE_RWLOCKS
/*
- * If the owner is running on another CPU, spin until
- * the owner stops running or the state of the lock
- * changes.
- */
- owner = (struct thread *)RW_OWNER(x);
- if (TD_IS_RUNNING(owner)) {
- turnstile_cancel(ts);
- if (LOCK_LOG_TEST(&rw->lock_object, 0))
- CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
- __func__, rw, owner);
-#ifdef LOCK_PROFILING_SHARED
- lock_profile_obtain_lock_failed(&rw->lock_object,
- &contested, &waittime);
-#endif
- while ((struct thread*)RW_OWNER(rw->rw_lock)== owner &&
- TD_IS_RUNNING(owner))
- cpu_spinwait();
- continue;
- }
-#endif
-
- /*
* We were unable to acquire the lock and the read waiters
* flag is set, so we must block on the turnstile.
*/
@@ -532,6 +544,27 @@
rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
while (!_rw_write_lock(rw, tid)) {
+#ifdef ADAPTIVE_RWLOCKS
+ /*
+ * If the lock is write locked and the owner is
+ * running on another CPU, spin until the owner stops
+ * running or the state of the lock changes.
+ */
+ v = rw->rw_lock;
+ owner = (struct thread *)RW_OWNER(v);
+ if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&rw->lock_object, 0))
+ CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
+ __func__, rw, owner);
+ lock_profile_obtain_lock_failed(&rw->lock_object,
+ &contested, &waittime);
+ while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
+ TD_IS_RUNNING(owner))
+ cpu_spinwait();
+ continue;
+ }
+#endif
+
ts = turnstile_trywait(&rw->lock_object);
v = rw->rw_lock;
@@ -545,6 +578,21 @@
continue;
}
+#ifdef ADAPTIVE_RWLOCKS
+ /*
+ * If the current owner of the lock is executing on another
+ * CPU quit the hard path and try to spin.
+ */
+ if (!(v & RW_LOCK_READ)) {
+ owner = (struct thread *)RW_OWNER(v);
+ if (TD_IS_RUNNING(owner)) {
+ turnstile_cancel(ts);
+ cpu_spinwait();
+ continue;
+ }
+ }
+#endif
+
/*
* If the lock was released by a writer with both readers
* and writers waiting and a reader hasn't woken up and
@@ -586,27 +634,6 @@
__func__, rw);
}
-#ifdef ADAPTIVE_RWLOCKS
- /*
- * If the lock is write locked and the owner is
- * running on another CPU, spin until the owner stops
- * running or the state of the lock changes.
- */
- owner = (struct thread *)RW_OWNER(v);
- if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
- turnstile_cancel(ts);
- if (LOCK_LOG_TEST(&rw->lock_object, 0))
- CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
- __func__, rw, owner);
- lock_profile_obtain_lock_failed(&rw->lock_object,
- &contested, &waittime);
- while ((struct thread*)RW_OWNER(rw->rw_lock)== owner &&
- TD_IS_RUNNING(owner))
- cpu_spinwait();
- continue;
- }
-#endif
-
/*
* We were unable to acquire the lock and the write waiters
* flag is set, so we must block on the turnstile.
@@ -654,22 +681,7 @@
turnstile_chain_lock(&rw->lock_object);
ts = turnstile_lookup(&rw->lock_object);
-#ifdef ADAPTIVE_RWLOCKS
- /*
- * There might not be a turnstile for this lock if all of
- * the waiters are adaptively spinning. In that case, just
- * reset the lock to the unlocked state and return.
- */
- if (ts == NULL) {
- atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
- if (LOCK_LOG_TEST(&rw->lock_object, 0))
- CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
- turnstile_chain_unlock(&rw->lock_object);
- return;
- }
-#else
MPASS(ts != NULL);
-#endif
/*
* Use the same algo as sx locks for now. Prefer waking up shared
@@ -686,42 +698,14 @@
* above. There is probably a potential priority inversion in
* there that could be worked around either by waking both queues
* of waiters or doing some complicated lock handoff gymnastics.
- *
- * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
- * set, there might not be any actual writers on the turnstile
- * as they might all be spinning. In that case, we don't want
- * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
- * is going to go away once we wakeup all the readers.
*/
v = RW_UNLOCKED;
if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
queue = TS_SHARED_QUEUE;
-#ifdef ADAPTIVE_RWLOCKS
- if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
- !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
- v |= RW_LOCK_WRITE_WAITERS;
-#else
v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
-#endif
} else
queue = TS_EXCLUSIVE_QUEUE;
-#ifdef ADAPTIVE_RWLOCKS
- /*
- * We have to make sure that we actually have waiters to
- * wakeup. If they are all spinning, then we just need to
- * disown the turnstile and return.
- */
- if (turnstile_empty(ts, queue)) {
- if (LOCK_LOG_TEST(&rw->lock_object, 0))
- CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
- atomic_store_rel_ptr(&rw->rw_lock, v);
- turnstile_disown(ts);
- turnstile_chain_unlock(&rw->lock_object);
- return;
- }
-#endif
-
/* Wake up all waiters for the specific queue. */
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
@@ -772,19 +756,12 @@
* Try to switch from one reader to a writer again. This time
* we honor the current state of the RW_LOCK_WRITE_WAITERS
* flag. If we obtain the lock with the flag set, then claim
- * ownership of the turnstile. In the ADAPTIVE_RWLOCKS case
- * it is possible for there to not be an associated turnstile
- * even though there are waiters if all of the waiters are
- * spinning.
+ * ownership of the turnstile.
*/
v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
tid | v);
-#ifdef ADAPTIVE_RWLOCKS
- if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
-#else
if (success && v)
-#endif
turnstile_claim(ts);
else
turnstile_cancel(ts);
@@ -837,26 +814,9 @@
* Downgrade from a write lock while preserving
* RW_LOCK_WRITE_WAITERS and give up ownership of the
* turnstile. If there are any read waiters, wake them up.
- *
- * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
- * all of the read waiters might be spinning. In that case,
- * act as if RW_LOCK_READ_WAITERS is not set. Also, only
- * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
- * writer is blocked on the turnstile.
*/
ts = turnstile_lookup(&rw->lock_object);
-#ifdef ADAPTIVE_RWLOCKS
- if (ts == NULL)
- v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
- else if (v & RW_LOCK_READ_WAITERS &&
- turnstile_empty(ts, TS_SHARED_QUEUE))
- v &= ~RW_LOCK_READ_WAITERS;
- else if (v & RW_LOCK_WRITE_WAITERS &&
- turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
- v &= ~RW_LOCK_WRITE_WAITERS;
-#else
MPASS(ts != NULL);
-#endif
if (v & RW_LOCK_READ_WAITERS)
turnstile_broadcast(ts, TS_SHARED_QUEUE);
atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
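
The kern_rwlock.c changes follow the same restructuring as kern_mutex.c, with one rwlock-specific wrinkle: the read path can only spin adaptively while the lock is write-owned, because a read-locked rwlock has no single owner thread to watch. A small sketch of that owner test; the macros mirror the kernel's rw_lock word encoding but are simplified here:

#include <stdint.h>
#include <stdio.h>

#define	RW_LOCK_READ	0x01UL		/* low flag bits, simplified */
#define	RW_FLAGMASK	0x0fUL
#define	RW_OWNER(v)	((v) & ~RW_FLAGMASK)

/*
 * Spinning is only sensible when a unique writer owns the lock: its
 * thread pointer sits in the upper bits, so TD_IS_RUNNING() can be
 * asked about it. Readers merely bump a count, leaving nobody to watch.
 */
static int
can_spin(uintptr_t v)
{
	return ((v & RW_LOCK_READ) == 0 && RW_OWNER(v) != 0);
}

int
main(void)
{
	uintptr_t writer = 0xdeadbee0UL;

	printf("write-owned: %d\n", can_spin(writer));			/* 1 */
	printf("read-owned:  %d\n", can_spin(writer | RW_LOCK_READ));	/* 0 */
	return (0);
}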
==== //depot/projects/dtrace/src/sys/net80211/ieee80211_scan_sta.c#5 (text+ko) ====
@@ -24,7 +24,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/net80211/ieee80211_scan_sta.c,v 1.7 2007/11/23 05:58:37 sam Exp $");
+__FBSDID("$FreeBSD: src/sys/net80211/ieee80211_scan_sta.c,v 1.8 2007/11/26 21:28:18 sam Exp $");
/*
* IEEE 802.11 station scanning support.
@@ -290,11 +290,12 @@
*/
c = ieee80211_find_channel_byieee(ic, sp->bchan,
sp->curchan->ic_flags);
- if (c == NULL && ise->se_chan == NULL) {
+ if (c != NULL) {
+ ise->se_chan = c;
+ } else if (ise->se_chan == NULL) {
/* should not happen, pick something */
- c = sp->curchan;
+ ise->se_chan = sp->curchan;
}
- ise->se_chan = c;
} else
ise->se_chan = sp->curchan;
ise->se_fhdwell = sp->fhdwell;
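
The ieee80211_scan_sta.c fix repairs a precedence bug: the old code assigned c to ise->se_chan unconditionally, so a failed ieee80211_find_channel_byieee() lookup would clobber a previously recorded channel with NULL. The corrected fallback order, restated as a helper for clarity (an illustrative rewrite, not code from the tree):

#include <stddef.h>

struct ieee80211_channel;	/* opaque here; defined in net80211 */

/*
 * Prefer the freshly mapped channel, then whatever the scan entry
 * already recorded, then the channel we happened to be scanning on.
 */
static struct ieee80211_channel *
pick_se_chan(struct ieee80211_channel *found,
    struct ieee80211_channel *recorded, struct ieee80211_channel *cur)
{
	if (found != NULL)
		return (found);
	if (recorded != NULL)
		return (recorded);
	return (cur);		/* should not happen; pick something */
}

int
main(void)
{
	return (pick_se_chan(NULL, NULL, NULL) == NULL ? 0 : 1);
}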