svn commit: r244416 - stable/9/sys/kern
Konstantin Belousov
kib at FreeBSD.org
Wed Dec 19 04:24:12 UTC 2012
Author: kib
Date: Wed Dec 19 04:24:11 2012
New Revision: 244416
URL: http://svnweb.freebsd.org/changeset/base/244416
Log:
MFC r243901:
Fixes to ensure the integrity of the callwheel tailqs.
MFC r243912 (by attilio):
Rearrange comments, use cached callout flags when callout could have
been already destroyed.
Modified:
stable/9/sys/kern/kern_timeout.c
Directory Properties:
stable/9/sys/ (props changed)
Modified: stable/9/sys/kern/kern_timeout.c
==============================================================================
--- stable/9/sys/kern/kern_timeout.c Wed Dec 19 04:18:21 2012 (r244415)
+++ stable/9/sys/kern/kern_timeout.c Wed Dec 19 04:24:11 2012 (r244416)
@@ -441,15 +441,13 @@ static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{
- if (cc->cc_next == c)
- cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
- if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
- c->c_func = NULL;
- SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
- }
+ if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
+ return;
+ c->c_func = NULL;
+ SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}
-static struct callout *
+static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
int *lockcalls, int *gcalls)
{
@@ -471,7 +469,9 @@ softclock_call_cc(struct callout *c, str
static timeout_t *lastfunc;
#endif
- cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+ KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
+ (CALLOUT_PENDING | CALLOUT_ACTIVE),
+ ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
c_lock = c->c_lock;
@@ -539,20 +539,7 @@ softclock_call_cc(struct callout *c, str
class->lc_unlock(c_lock);
skip:
CC_LOCK(cc);
- /*
- * If the current callout is locally allocated (from
- * timeout(9)) then put it on the freelist.
- *
- * Note: we need to check the cached copy of c_flags because
- * if it was not local, then it's not safe to deref the
- * callout pointer.
- */
- if (c_flags & CALLOUT_LOCAL_ALLOC) {
- KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
- ("corrupted callout"));
- c->c_func = NULL;
- SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
- }
+ KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
cc->cc_curr = NULL;
if (cc->cc_waiting) {
/*
@@ -561,13 +548,22 @@ skip:
* If the callout was scheduled for
* migration just cancel it.
*/
- if (cc_cme_migrating(cc))
+ if (cc_cme_migrating(cc)) {
cc_cme_cleanup(cc);
+
+ /*
+ * It should be asserted here that the callout is not
+ * destroyed, but that is not easy.
+ */
+ c->c_flags &= ~CALLOUT_DFRMIGRATION;
+ }
cc->cc_waiting = 0;
CC_UNLOCK(cc);
wakeup(&cc->cc_waiting);
CC_LOCK(cc);
} else if (cc_cme_migrating(cc)) {
+ KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
+ ("Migrating legacy callout %p", c));
#ifdef SMP
/*
* If the callout was scheduled for
@@ -580,23 +576,20 @@ skip:
cc_cme_cleanup(cc);
/*
- * Handle deferred callout stops
+ * It should be asserted here that the callout is not destroyed,
+ * but that is not easy.
+ *
+ * As a first step, handle deferred callout stops.
*/
if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
CTR3(KTR_CALLOUT,
"deferred cancelled %p func %p arg %p",
c, new_func, new_arg);
callout_cc_del(c, cc);
- goto nextc;
+ return;
}
-
c->c_flags &= ~CALLOUT_DFRMIGRATION;
- /*
- * It should be assert here that the
- * callout is not destroyed but that
- * is not easy.
- */
new_cc = callout_cpu_switch(c, cc, new_cpu);
callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
new_cpu);
@@ -606,10 +599,19 @@ skip:
panic("migration should not happen");
#endif
}
-#ifdef SMP
-nextc:
-#endif
- return (cc->cc_next);
+ /*
+ * If the current callout is locally allocated (from
+ * timeout(9)) then put it on the freelist.
+ *
+ * Note: we need to check the cached copy of c_flags because
+ * if it was not local, then it's not safe to deref the
+ * callout pointer.
+ */
+ KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
+ c->c_flags == CALLOUT_LOCAL_ALLOC,
+ ("corrupted callout"));
+ if (c_flags & CALLOUT_LOCAL_ALLOC)
+ callout_cc_del(c, cc);
}
/*
@@ -676,10 +678,12 @@ softclock(void *arg)
steps = 0;
}
} else {
+ cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(bucket, c, c_links.tqe);
- c = softclock_call_cc(c, cc, &mpcalls,
+ softclock_call_cc(c, cc, &mpcalls,
&lockcalls, &gcalls);
steps = 0;
+ c = cc->cc_next;
}
}
}
@@ -1024,6 +1028,8 @@ again:
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
+ if (cc->cc_next == c)
+ cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
c_links.tqe);
callout_cc_del(c, cc);
More information about the svn-src-stable-9
mailing list