svn commit: r196334 - in head/sys: kern sys
Attilio Rao
attilio at FreeBSD.org
Mon Aug 17 16:17:21 UTC 2009
Author: attilio
Date: Mon Aug 17 16:17:21 2009
New Revision: 196334
URL: http://svn.freebsd.org/changeset/base/196334
Log:
* Change the scope of ASSERT_ATOMIC_LOAD() from a generic check to a
check specific to pointer-fetching operations. Consequently, rename
the macro to ASSERT_ATOMIC_LOAD_PTR().
* Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by checking
alignment directly against the word boundary on all supported
architectures. That is a bit too strict for some common cases, but
it assures safety.
* Add a comment explaining the scope of the macro.
* Add a new stub in the lockmgr-specific implementation.
Tested by: marcel (initial version), marius
Reviewed by: rwatson, jhb (comment specific review)
Approved by: re (kib)
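For illustration, here is a minimal userland sketch (not part of the
commit) of the check the renamed macro performs: the lock word must be
exactly pointer-sized and its address word-aligned. The name
ATOMIC_LOAD_PTR_OK and the two-slot storage trick are hypothetical,
chosen only to exercise both outcomes without KASSERT():

	#include <stdint.h>
	#include <stdio.h>

	#define ATOMIC_LOAD_PTR_OK(var)				\
		(sizeof(var) == sizeof(void *) &&		\
		 ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

	int
	main(void)
	{
		uintptr_t storage[2];
		char *base = (char *)storage;
		uintptr_t *aligned = (uintptr_t *)base;
		uintptr_t *off = (uintptr_t *)(base + 1); /* misaligned */

		/*
		 * sizeof() and & do not dereference their operand, so
		 * no unaligned access actually occurs here.
		 */
		printf("aligned:    %d\n", ATOMIC_LOAD_PTR_OK(*aligned)); /* 1 */
		printf("misaligned: %d\n", ATOMIC_LOAD_PTR_OK(*off));     /* 0 */
		return (0);
	}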
Modified:
head/sys/kern/kern_lock.c
head/sys/kern/kern_mutex.c
head/sys/kern/kern_rwlock.c
head/sys/kern/kern_sx.c
head/sys/sys/systm.h
Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c Mon Aug 17 16:16:46 2009 (r196333)
+++ head/sys/kern/kern_lock.c Mon Aug 17 16:17:21 2009 (r196334)
@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const
int iflags;
MPASS((flags & ~LK_INIT_MASK) == 0);
+ ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+ ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+ &lk->lk_lock));
iflags = LO_SLEEPABLE | LO_UPGRADABLE;
if (flags & LK_CANRECURSE)
Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c Mon Aug 17 16:16:46 2009 (r196333)
+++ head/sys/kern/kern_mutex.c Mon Aug 17 16:17:21 2009 (r196334)
@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
- ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
- __func__, name, &m->mtx_lock));
+ ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+ ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+ &m->mtx_lock));
#ifdef MUTEX_DEBUG
/* Diagnostic and error correction */
Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c Mon Aug 17 16:16:46 2009 (r196333)
+++ head/sys/kern/kern_rwlock.c Mon Aug 17 16:17:21 2009 (r196334)
@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const c
MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
RW_RECURSE)) == 0);
- ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
- __func__, name, &rw->rw_lock));
+ ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+ ("%s: rw_lock not aligned for %s: %p", __func__, name,
+ &rw->rw_lock));
flags = LO_UPGRADABLE;
if (opts & RW_DUPOK)
Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c Mon Aug 17 16:16:46 2009 (r196333)
+++ head/sys/kern/kern_sx.c Mon Aug 17 16:17:21 2009 (r196334)
@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
- ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
- __func__, description, &sx->sx_lock));
+ ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+ ("%s: sx_lock not aligned for %s: %p", __func__, description,
+ &sx->sx_lock));
flags = LO_SLEEPABLE | LO_UPGRADABLE;
if (opts & SX_DUPOK)
Modified: head/sys/sys/systm.h
==============================================================================
--- head/sys/sys/systm.h Mon Aug 17 16:16:46 2009 (r196333)
+++ head/sys/sys/systm.h Mon Aug 17 16:17:21 2009 (r196334)
@@ -89,9 +89,16 @@ extern int maxusers; /* system tune hin
#define __CTASSERT(x, y) typedef char __assert ## y[(x) ? 1 : -1]
#endif
-#define ASSERT_ATOMIC_LOAD(var,msg) \
- KASSERT(sizeof(var) <= sizeof(uintptr_t) && \
- ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary. For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define ASSERT_ATOMIC_LOAD_PTR(var, msg) \
+ KASSERT(sizeof(var) == sizeof(void *) && \
+ ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
/*
* XXX the hints declarations are even more misplaced than most declarations
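The new comment in systm.h hints at the weaker condition that would
suffice on some architectures: an unaligned pointer-sized load can
still be atomic as long as it does not span a cache-line boundary. A
hedged sketch of that relaxed check follows; CACHE_LINE is an assumed
illustrative constant, and the commit deliberately does not rely on
this, preferring the stricter word-alignment test:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define CACHE_LINE	64	/* assumed line size, illustration only */

	/* Return true iff a size-byte load at p touches a single line. */
	static bool
	load_within_one_line(const void *p, size_t size)
	{
		uintptr_t first = (uintptr_t)p / CACHE_LINE;
		uintptr_t last = ((uintptr_t)p + size - 1) / CACHE_LINE;

		return (first == last);
	}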