svn commit: r236530 - in projects/calloutng/sys: kern sys
Davide Italiano
davide at FreeBSD.org
Sun Jun 3 22:27:17 UTC 2012
Author: davide
Date: Sun Jun 3 22:27:17 2012
New Revision: 236530
URL: http://svn.freebsd.org/changeset/base/236530
Log:
- Every time callout_tick() is called it looks for the nearest future
event and communicates it to the eventtimers subsystem via
callout_new_inserted(). So, we no longer need to call
callout_tickstofirst() from getnextcpuevent(), and we can get
rid of it.
- Completely refactor the cpu_new_callout() function (many thanks to
Alexander Motin, who helped me with this code).
- Switch back from getbinuptime() to binuptime() to measure the present
time in callout_tick().
Pointed out by: mav, bde
Modified:
projects/calloutng/sys/kern/kern_clocksource.c
projects/calloutng/sys/kern/kern_timeout.c
projects/calloutng/sys/sys/callout.h
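[The diffs below lean heavily on bintime(9) arithmetic. As a point of
reference, a minimal user-space sketch of the operations involved -- the
struct and FREQ2BT follow sys/time.h, bintime_mul follows what the
calloutng branch uses as far as I can tell, and the hz/to_ticks/now
values are invented -- mirroring the callout_reset_on() computation in
the kern_timeout.c hunk further down:]

#include <stdint.h>
#include <stdio.h>

struct bintime {
	int64_t  sec;	/* whole seconds */
	uint64_t frac;	/* fraction of a second, units of 2^-64 s */
};

/* Period of a "freq" Hz clock: frac ~= 2^64 / freq (per sys/time.h). */
#define FREQ2BT(freq, bt) do {						\
	(bt)->sec = 0;							\
	(bt)->frac = (0x8000000000000000ULL / (freq)) << 1;		\
} while (0)

static void
bintime_add(struct bintime *bt, const struct bintime *bt2)
{
	uint64_t u = bt->frac;

	bt->frac += bt2->frac;
	if (u > bt->frac)	/* carry out of the fraction */
		bt->sec++;
	bt->sec += bt2->sec;
}

/* 64x32 multiply of the fraction, carrying overflow into seconds. */
static void
bintime_mul(struct bintime *bt, unsigned int x)
{
	uint64_t p1, p2;

	p1 = (bt->frac & 0xffffffffULL) * x;
	p2 = (bt->frac >> 32) * x + (p1 >> 32);
	bt->sec *= x;
	bt->sec += (p2 >> 32);
	bt->frac = (p2 << 32) | (p1 & 0xffffffffULL);
}

int
main(void)
{
	struct bintime now = { 100, 0 };	/* pretend uptime */
	struct bintime bt;

	/* The callout_reset_on() computation: now + to_ticks / hz. */
	FREQ2BT(1000, &bt);	/* one tick at hz = 1000 */
	bintime_mul(&bt, 5);	/* to_ticks = 5 */
	bintime_add(&bt, &now);
	printf("fires at %jd.%016jx\n", (intmax_t)bt.sec, (uintmax_t)bt.frac);
	return (0);
}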
Modified: projects/calloutng/sys/kern/kern_clocksource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clocksource.c Sun Jun 3 21:03:16 2012 (r236529)
+++ projects/calloutng/sys/kern/kern_clocksource.c Sun Jun 3 22:27:17 2012 (r236530)
@@ -72,9 +72,7 @@ static int round_freq(struct eventtimer
static void getnextcpuevent(struct bintime *event, int idle);
static void getnextevent(struct bintime *event);
static int handleevents(struct bintime *now, int fake);
-#ifdef SMP
static void cpu_new_callout(int cpu, struct bintime bt);
-#endif
static struct mtx et_hw_mtx;
@@ -274,24 +272,28 @@ handleevents(struct bintime *now, int fa
static void
getnextcpuevent(struct bintime *event, int idle)
{
- struct bintime tmp;
struct pcpu_state *state;
-
+ struct bintime tmp;
+ int hardfreq;
+
state = DPCPU_PTR(timerstate);
- /* Handle hardclock() events. */
+ /* Handle hardclock() events, skipping some if CPU is idle. */
*event = state->nexthard;
+ if (idle || (!activetick && !profiling &&
+ (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
+ hardfreq = idle ? 4 : (stathz / 2);
+ if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > hardfreq)
+ hardfreq = tc_min_ticktock_freq;
+ if (hz > hardfreq) {
+ tmp = hardperiod;
+ bintime_mul(&tmp, hz / hardfreq - 1);
+ bintime_add(event, &tmp);
+ }
+ }
/* Handle callout events. */
- tmp = callout_tickstofirst();
- if (state->nextcall.sec == -1)
- state->nextcall = tmp;
- if (bintime_cmp(&tmp, &state->nextcall, <) &&
- (tmp.sec != -1)) {
- state->nextcall = tmp;
- }
- if (bintime_cmp(event, &state->nextcall, >) &&
- (state->nextcall.sec != -1)) {
+ if (state->nextcall.sec != -1 &&
+ bintime_cmp(event, &state->nextcall, >))
*event = state->nextcall;
- }
if (!idle) { /* If CPU is active - handle other types of events. */
if (bintime_cmp(event, &state->nextstat, >))
*event = state->nextstat;
@@ -634,9 +636,7 @@ cpu_initclocks_bsp(void)
#endif
state->nextcall.sec = -1;
}
-#ifdef SMP
callout_new_inserted = cpu_new_callout;
-#endif
periodic = want_periodic;
/* Grab requested timer or the best of present. */
if (timername[0])
@@ -864,74 +864,48 @@ clocksource_cyc_set(const struct bintime
}
#endif
-#ifdef SMP
static void
cpu_new_callout(int cpu, struct bintime bt)
{
struct bintime now;
struct pcpu_state *state;
- CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
- curcpu, cpu, ticks);
+ CTR5(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
+ curcpu, cpu, (unsigned int)bt.sec, (unsigned int)(bt.frac >> 32),
+ (unsigned int)(bt.frac & 0xffffffff));
state = DPCPU_ID_PTR(cpu, timerstate);
ET_HW_LOCK(state);
- if (state->idle == 0 || busy) {
+
+ /* If there is a callout time already set earlier -- do nothing. */
+ if (state->nextcall.sec != -1 &&
+ bintime_cmp(&bt, &state->nextcall, >=)) {
ET_HW_UNLOCK(state);
return;
}
- /*
- * If timer is periodic - just update next event time for target CPU.
- * If timer is global - there is chance it is already programmed.
- */
- if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
- /*
- * Update next callout time. We can do this only if
- * this one on which we're running is the target CPU.
- */
- if (!periodic) {
- if (bintime_cmp(&bt, &state->nextcall, ==)) {
- ET_HW_UNLOCK(state);
- return;
- }
- if (state->nextcall.sec == -1 ||
- bintime_cmp(&bt, &state->nextcall, <))
- state->nextcall = bt;
- if (bintime_cmp(&state->nextcall, &state->nextevent, >=)) {
- ET_HW_UNLOCK(state);
- return;
- }
- state->nextevent = state->nextcall;
- if (cpu == curcpu) {
- loadtimer(&now, 0);
- ET_HW_UNLOCK(state);
- }
- else
- goto out;
- }
- if (bintime_cmp(&state->nexthard, &state->nextevent, <))
- state->nextevent = state->nexthard;
- if (periodic ||
- bintime_cmp(&state->nextevent, &nexttick, >=)) {
- ET_HW_UNLOCK(state);
- return;
- }
+ state->nextcall = bt;
+ /* If there is some other event set earlier -- do nothing. */
+ if (bintime_cmp(&state->nextcall, &state->nextevent, >=)) {
+ ET_HW_UNLOCK(state);
+ return;
}
-out:
- /*
- * Otherwise we have to wake that CPU up, as we can't get present
- * bintime to reprogram global timer from here. If timer is per-CPU,
- * we by definition can't do it from here.
- */
- ET_HW_UNLOCK(state);
- if (timer->et_flags & ET_FLAGS_PERCPU) {
- state->handle = 1;
- ipi_cpu(cpu, IPI_HARDCLOCK);
- } else {
- if (!cpu_idle_wakeup(cpu))
- ipi_cpu(cpu, IPI_AST);
+ state->nextevent = state->nextcall;
+ /* If timer is periodic -- there is nothing to reprogram. */
+ if (periodic) {
+ ET_HW_UNLOCK(state);
+ return;
}
+ /* If timer is global or of the current CPU -- reprogram it. */
+ if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
+ binuptime(&now);
+ loadtimer(&now, 0);
+ ET_HW_UNLOCK(state);
+ return;
+ }
+ /* Otherwise make the other CPU reprogram it. */
+ state->handle = 1;
+ ET_HW_UNLOCK(state);
+ ipi_cpu(cpu, IPI_HARDCLOCK);
}
-#endif
/*
* Report or change the active event timers hardware.
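[To make the idle-skip logic in getnextcpuevent() above concrete, a
standalone sketch of the hardfreq computation; the hz, stathz and
tc_min_ticktock_freq values are invented, and the CPU_FIRST() condition
is reduced to a comment:]

#include <stdio.h>

int
main(void)
{
	int hz = 1000, stathz = 127, tc_min_ticktock_freq = 16;
	int idle = 1;
	int hardfreq;

	/* An idle CPU accepts as few as 4 hardclock() calls per second;
	 * a busy one still takes stathz / 2 of them. */
	hardfreq = idle ? 4 : (stathz / 2);
	/* On CPU_FIRST() only: keep the timecounter ticking often enough. */
	if (tc_min_ticktock_freq > hardfreq)
		hardfreq = tc_min_ticktock_freq;
	if (hz > hardfreq)	/* prints "skip 61 hardclock periods" */
		printf("skip %d hardclock periods\n", hz / hardfreq - 1);
	return (0);
}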
Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c Sun Jun 3 21:03:16 2012 (r236529)
+++ projects/calloutng/sys/kern/kern_timeout.c Sun Jun 3 22:27:17 2012 (r236530)
@@ -364,7 +364,9 @@ callout_tick(void)
struct callout_tailq *sc;
struct bintime now;
struct bintime bt;
- int need_softclock, first, last;
+ struct bintime limit;
+ struct bintime next;
+ int cpu, first, flag, future, last, need_softclock;
/*
* Process callouts at a very low cpu priority, so we don't keep the
@@ -373,7 +375,7 @@ callout_tick(void)
need_softclock = 0;
cc = CC_SELF();
mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
- getbinuptime(&now);
+ binuptime(&now);
/*
* getbinuptime() may be inaccurate and return time up to 1/HZ in the past.
* In order to avoid the possible loss of one or more events look back 1/HZ
@@ -395,20 +397,46 @@ callout_tick(void)
first &= callwheelmask;
last &= callwheelmask;
}
+ cpu = curcpu;
+ next.sec = -1;
+ next.frac = -1;
+ limit.sec = 0;
+ limit.frac = 4611686018427250000; /* ~1/4 sec */
+ bintime_add(&limit, &now);
+ future = get_bucket(&limit);
+ flag = 0;
for (;;) {
sc = &cc->cc_callwheel[first];
TAILQ_FOREACH(tmp, sc, c_links.tqe) {
- if (bintime_cmp(&tmp->c_time,&now, <=)) {
+ if ((!flag || flag == 1) &&
+ bintime_cmp(&tmp->c_time, &now, <=)) {
TAILQ_INSERT_TAIL(cc->cc_localexp,tmp,c_staiter);
TAILQ_REMOVE(sc, tmp, c_links.tqe);
tmp->c_flags |= CALLOUT_PROCESSED;
need_softclock = 1;
}
+ if ((flag == 1 || flag == 2) &&
+ bintime_cmp(&tmp->c_time, &now, >)) {
+ if (next.sec == -1 ||
+ bintime_cmp(&tmp->c_time, &next, <)) {
+ next = tmp->c_time;
+ cpu = tmp->c_cpu;
+ }
+ }
}
+ if (first == ((last - 1) & callwheelmask))
+ flag = 1;
if (first == last)
+ flag = 2;
+ if (first == future || next.sec != -1)
break;
first = (first + 1) & callwheelmask;
}
+ if (next.sec == -1)
+ next = limit;
+ if (callout_new_inserted != NULL)
+ (*callout_new_inserted)(cpu, next);
cc->cc_softticks = now;
mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
/*
@@ -420,43 +448,6 @@ callout_tick(void)
}
}
-struct bintime
-callout_tickstofirst(void)
-{
- struct callout_cpu *cc;
- struct callout *c;
- struct callout_tailq *sc;
- struct bintime tmp;
- struct bintime now;
- int bucket;
-
- tmp.sec = 0;
- tmp.frac = 0;
- cc = CC_SELF();
- mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
- binuptime(&now);
- for (bucket = 0; bucket < callwheelsize; ++bucket) {
- sc = &cc->cc_callwheel[bucket];
- TAILQ_FOREACH( c, sc, c_links.tqe ) {
- if (tmp.sec == 0 && tmp.frac == 0)
- tmp = c->c_time;
- if (bintime_cmp(&c->c_time, &now, <))
- tmp = now;
- if (bintime_cmp(&c->c_time, &tmp, <=))
- tmp = c->c_time;
-
- }
- }
- if (tmp.sec == 0 && tmp.frac == 0) {
- cc->cc_firsttick.sec = -1;
- cc->cc_firsttick.frac = -1;
- }
- else
- cc->cc_firsttick = tmp;
- mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
- return (cc->cc_firsttick);
-}
-
static struct callout_cpu *
callout_lock(struct callout *c)
{
@@ -854,7 +845,7 @@ callout_reset_on(struct callout *c, int
*/
FREQ2BT(hz,&bt);
- binuptime(&now);
+ getbinuptime(&now);
bintime_mul(&bt,to_ticks);
bintime_add(&bt,&now);
/*
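[An aside on the magic constant in the callout_tick() hunk above: a 1/4
second look-ahead expressed in bintime fraction units is 0.25 * 2^64 =
2^62 = 4611686018427387904, so the committed literal is a slightly
rounded-down decimal approximation of it. A throwaway check:]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t exact = UINT64_C(1) << 62;	/* 0.25 * 2^64 */

	printf("exact 1/4 s: %" PRIu64 "\n", exact);
	printf("committed  : %" PRIu64 "\n", UINT64_C(4611686018427250000));
	printf("delta      : %" PRIu64 " (~7.5e-15 s)\n",
	    exact - UINT64_C(4611686018427250000));
	return (0);
}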
Modified: projects/calloutng/sys/sys/callout.h
==============================================================================
--- projects/calloutng/sys/sys/callout.h Sun Jun 3 21:03:16 2012 (r236529)
+++ projects/calloutng/sys/sys/callout.h Sun Jun 3 22:27:17 2012 (r236530)
@@ -80,7 +80,6 @@ int callout_schedule_on(struct callout *
#define callout_stop(c) _callout_stop_safe(c, 0)
int _callout_stop_safe(struct callout *, int);
void callout_tick(void);
-struct bintime callout_tickstofirst(void);
extern void (*callout_new_inserted)(int cpu, struct bintime bt);
#endif
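[With callout_tickstofirst() gone, callout_new_inserted is the only
remaining hand-off from the callout code to the eventtimers code:
kern_clocksource.c registers cpu_new_callout() in it (first diff) and
callout_tick() invokes it with the nearest future event (second diff).
A user-space mock of that contract, with invented names:]

#include <stdio.h>

struct bintime {
	long long sec;
	unsigned long long frac;
};

/* Kernel: set in cpu_initclocks_bsp(), called from callout_tick(). */
static void (*callout_new_inserted)(int cpu, struct bintime bt);

static void
mock_cpu_new_callout(int cpu, struct bintime bt)
{
	printf("reprogram cpu %d for %lld.%016llx\n", cpu, bt.sec, bt.frac);
}

int
main(void)
{
	struct bintime next = { 1, 0 };

	callout_new_inserted = mock_cpu_new_callout;	/* registration */
	if (callout_new_inserted != NULL)		/* callout_tick() side */
		(*callout_new_inserted)(0, next);
	return (0);
}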