svn commit: r197984 - in stable/7/sys: . contrib/pf kern sys
Attilio Rao
attilio at FreeBSD.org
Mon Oct 12 15:56:29 UTC 2009
Author: attilio
Date: Mon Oct 12 15:56:27 2009
New Revision: 197984
URL: http://svn.freebsd.org/changeset/base/197984
Log:
MFC r1979643:
  When releasing a read/shared lock we need to use a write memory barrier
  in order to avoid CPU instruction reordering on architectures that do
  not have strongly ordered writes.
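  As an illustrative aside, the idea can be sketched in userland with C11
  atomics instead of the kernel's atomic(9) primitives: the last sharer's
  unlock must publish the reads done in the critical section before the
  lock word is seen as released, which is what switching from
  atomic_cmpset_ptr() to atomic_cmpset_rel_ptr() achieves in the diff
  below.  The names rw_toy, rw_rlock_toy and rw_runlock_toy here are
  hypothetical and not part of the commit.

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  struct rw_toy {
  	_Atomic uintptr_t state;	/* reader count, 0 == unlocked */
  };

  static void
  rw_rlock_toy(struct rw_toy *rw)
  {
  	uintptr_t x;

  	for (;;) {
  		x = atomic_load_explicit(&rw->state, memory_order_relaxed);
  		/* Acquire: critical-section reads may not move above this. */
  		if (atomic_compare_exchange_weak_explicit(&rw->state, &x,
  		    x + 1, memory_order_acquire, memory_order_relaxed))
  			return;
  	}
  }

  static void
  rw_runlock_toy(struct rw_toy *rw)	/* caller must hold a shared ref */
  {
  	uintptr_t x;

  	for (;;) {
  		x = atomic_load_explicit(&rw->state, memory_order_relaxed);
  		/*
  		 * Release: with a plain (relaxed) CAS here, a weakly
  		 * ordered CPU could reorder critical-section accesses
  		 * past the unlock; the release ordering forbids that.
  		 */
  		if (atomic_compare_exchange_weak_explicit(&rw->state, &x,
  		    x - 1, memory_order_release, memory_order_relaxed))
  			return;
  	}
  }

  int
  main(void)
  {
  	struct rw_toy rw = { .state = 0 };

  	rw_rlock_toy(&rw);
  	/* ... read data protected by the lock ... */
  	rw_runlock_toy(&rw);
  	printf("state=%ju\n", (uintmax_t)atomic_load(&rw.state));
  	return (0);
  }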
Modified:
stable/7/sys/ (props changed)
stable/7/sys/contrib/pf/ (props changed)
stable/7/sys/kern/kern_rwlock.c
stable/7/sys/kern/kern_sx.c
stable/7/sys/sys/rwlock.h
stable/7/sys/sys/sx.h
Modified: stable/7/sys/kern/kern_rwlock.c
==============================================================================
--- stable/7/sys/kern/kern_rwlock.c Mon Oct 12 15:49:48 2009 (r197983)
+++ stable/7/sys/kern/kern_rwlock.c Mon Oct 12 15:56:27 2009 (r197984)
@@ -455,7 +455,7 @@ _rw_runlock(struct rwlock *rw, const cha
*/
x = rw->rw_lock;
if (RW_READERS(x) > 1) {
- if (atomic_cmpset_ptr(&rw->rw_lock, x,
+ if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
x - RW_ONE_READER)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR4(KTR_LOCK,
@@ -492,8 +492,8 @@ _rw_runlock(struct rwlock *rw, const cha
* to the multiple read locks case.
*/
MPASS(x == RW_READERS_LOCK(1));
- if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
- RW_UNLOCKED)) {
+ if (atomic_cmpset_rel_ptr(&rw->rw_lock,
+ RW_READERS_LOCK(1), RW_UNLOCKED)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, rw);
@@ -530,7 +530,7 @@ _rw_runlock(struct rwlock *rw, const cha
* acquired a read lock, so drop the turnstile lock and
* restart.
*/
- if (!atomic_cmpset_ptr(&rw->rw_lock,
+ if (!atomic_cmpset_rel_ptr(&rw->rw_lock,
RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
turnstile_chain_unlock(&rw->lock_object);
continue;
Modified: stable/7/sys/kern/kern_sx.c
==============================================================================
--- stable/7/sys/kern/kern_sx.c Mon Oct 12 15:49:48 2009 (r197983)
+++ stable/7/sys/kern/kern_sx.c Mon Oct 12 15:56:27 2009 (r197984)
@@ -829,7 +829,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
* so, just drop one and return.
*/
if (SX_SHARERS(x) > 1) {
- if (atomic_cmpset_ptr(&sx->sx_lock, x,
+ if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
x - SX_ONE_SHARER)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR4(KTR_LOCK,
@@ -847,8 +847,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
*/
if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
MPASS(x == SX_SHARERS_LOCK(1));
- if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
- SX_LOCK_UNLOCKED)) {
+ if (atomic_cmpset_rel_ptr(&sx->sx_lock,
+ SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, sx);
@@ -871,7 +871,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
* Note that the state of the lock could have changed,
* so if it fails loop back and retry.
*/
- if (!atomic_cmpset_ptr(&sx->sx_lock,
+ if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
SX_LOCK_UNLOCKED)) {
sleepq_release(&sx->lock_object);
Modified: stable/7/sys/sys/rwlock.h
==============================================================================
--- stable/7/sys/sys/rwlock.h Mon Oct 12 15:49:48 2009 (r197983)
+++ stable/7/sys/sys/rwlock.h Mon Oct 12 15:56:27 2009 (r197984)
@@ -54,13 +54,6 @@
*
* When the lock is not locked by any thread, it is encoded as a read lock
* with zero waiters.
- *
- * A note about memory barriers. Write locks need to use the same memory
- * barriers as mutexes: _acq when acquiring a write lock and _rel when
- * releasing a write lock. Read locks also need to use an _acq barrier when
- * acquiring a read lock. However, since read locks do not update any
- * locked data (modulo bugs of course), no memory barrier is needed when
- * releasing a read lock.
*/
#define RW_LOCK_READ 0x01
Modified: stable/7/sys/sys/sx.h
==============================================================================
--- stable/7/sys/sys/sx.h Mon Oct 12 15:49:48 2009 (r197983)
+++ stable/7/sys/sys/sx.h Mon Oct 12 15:56:27 2009 (r197984)
@@ -62,13 +62,6 @@
*
* When the lock is not locked by any thread, it is encoded as a
* shared lock with zero waiters.
- *
- * A note about memory barriers. Exclusive locks need to use the same
- * memory barriers as mutexes: _acq when acquiring an exclusive lock
- * and _rel when releasing an exclusive lock. On the other side,
- * shared lock needs to use an _acq barrier when acquiring the lock
- * but, since they don't update any locked data, no memory barrier is
- * needed when releasing a shared lock.
*/
#define SX_LOCK_SHARED 0x01
@@ -201,7 +194,7 @@ __sx_sunlock(struct sx *sx, const char *
uintptr_t x = sx->sx_lock;
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
- !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
+ !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
_sx_sunlock_hard(sx, file, line);
}