svn commit: r244211 - in projects/calloutng/sys: kern sys
Alexander Motin
mav at FreeBSD.org
Fri Dec 14 14:45:20 UTC 2012
Author: mav
Date: Fri Dec 14 14:45:18 2012
New Revision: 244211
URL: http://svnweb.freebsd.org/changeset/base/244211
Log:
Make precision representation single-sided: 0/+prec instead of -prec/+prec.
The double-sided representation turned out to be useful in only a very small
number of cases, while attempts to adapt single-sided APIs to the double-sided
backend caused large errors over long periods without giving any benefit.
Depending on CPU activity, the new implementation caps the error at 1ms..0.5s
on long intervals, while still staying within the specified precision on short ones.
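For illustration only, here is a minimal userland sketch of the new
single-sided window; the struct and helper below are simplified stand-ins
for the kernel's bintime API, not the actual kernel code:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Simplified stand-in for the kernel's struct bintime. */
struct sbt {
        time_t          sec;
        uint64_t        frac;   /* fraction of a second in 2^-64 units */
};

/* a += b, carrying fraction overflow into the seconds field. */
static void
sbt_add(struct sbt *a, const struct sbt *b)
{
        uint64_t ofrac = a->frac;

        a->frac += b->frac;
        if (a->frac < ofrac)
                a->sec++;
        a->sec += b->sec;
}

int
main(void)
{
        struct sbt t = { 10, 0 };                       /* scheduled event time */
        struct sbt prec = { 0, (uint64_t)1 << 62 };     /* 0.25 s precision */
        struct sbt last = t;

        /*
         * Old scheme: the event could fire anywhere in [t - prec, t + prec].
         * New scheme: the window is single-sided, [t, t + prec], so t is
         * the earliest allowed firing time and t + prec the latest.
         */
        sbt_add(&last, &prec);
        printf("window: [%jd.%016jx, %jd.%016jx]\n",
            (intmax_t)t.sec, (uintmax_t)t.frac,
            (intmax_t)last.sec, (uintmax_t)last.frac);
        return (0);
}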
Modified:
projects/calloutng/sys/kern/kern_clocksource.c
projects/calloutng/sys/kern/kern_tc.c
projects/calloutng/sys/kern/kern_time.c
projects/calloutng/sys/kern/kern_timeout.c
projects/calloutng/sys/kern/sys_generic.c
projects/calloutng/sys/sys/time.h
Modified: projects/calloutng/sys/kern/kern_clocksource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clocksource.c Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/kern/kern_clocksource.c Fri Dec 14 14:45:18 2012 (r244211)
@@ -867,9 +867,9 @@ cpu_new_callout(int cpu, struct bintime
struct bintime now;
struct pcpu_state *state;
- CTR5(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
- curcpu, cpu, (int)(bt.sec), (u_int)(bt.frac >> 32),
- (u_int)(bt.frac & 0xffffffff));
+ CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
+ curcpu, cpu, (int)(bt_opt.sec), (u_int)(bt_opt.frac >> 32),
+ (int)(bt.sec), (u_int)(bt.frac >> 32));
state = DPCPU_ID_PTR(cpu, timerstate);
ET_HW_LOCK(state);
Modified: projects/calloutng/sys/kern/kern_tc.c
==============================================================================
--- projects/calloutng/sys/kern/kern_tc.c Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/kern/kern_tc.c Fri Dec 14 14:45:18 2012 (r244211)
@@ -121,7 +121,6 @@ SYSCTL_INT(_kern_timecounter, OID_AUTO,
&timestepwarnings, 0, "Log time steps");
struct bintime bt_timethreshold;
-struct bintime halftick_bt;
struct bintime tick_bt;
int tc_timeexp;
int tc_timepercentage = TC_DEFAULTPERC;
@@ -1775,8 +1774,6 @@ inittimecounter(void *dummy)
tc_adjprecision();
tick_rate = hz / tc_tick;
FREQ2BT(tick_rate, &tick_bt);
- halftick_bt = tick_bt;
- bintime_divpow2(&halftick_bt, 1);
p = (tc_tick * 1000000) / hz;
printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
Modified: projects/calloutng/sys/kern/kern_time.c
==============================================================================
--- projects/calloutng/sys/kern/kern_time.c Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/kern/kern_time.c Fri Dec 14 14:45:18 2012 (r244211)
@@ -496,7 +496,6 @@ kern_nanosleep(struct thread *td, struct
if (TIMESEL(&bt, &tmp))
bintime_add(&bt, &tick_bt);
bintime_add(&bt, &tmp);
- bintime_add(&bt, &bt_prec);
error = tsleep_bt(&nanowait, PWAIT | PCATCH, "nanslp", &bt, &bt_prec);
TIMESEL(&btt, &tmp);
if (error != EWOULDBLOCK) {
Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/kern/kern_timeout.c Fri Dec 14 14:45:18 2012 (r244211)
@@ -383,7 +383,7 @@ get_bucket(struct bintime *bt)
void
callout_process(struct bintime *now)
{
- struct bintime max, min, next, next_opt, tmp_max, tmp_min;
+ struct bintime first, last, max, tmp_max;
struct callout *tmp;
struct callout_cpu *cc;
struct callout_tailq *sc;
@@ -403,11 +403,12 @@ callout_process(struct bintime *now)
nowb = callout_hash(now);
/* Compute the last bucket and minimum time of the bucket after it. */
- next = next_opt = *now;
- bintime_addx(&next, (uint64_t)3 << (64 - 2)); /* 0.75s */
- next.frac &= (0xffffffffffffffffLLU << (64 - CC_HASH_SHIFT));
- bintime_addx(&next_opt, (uint64_t)3 << (64 - 3)); /* 0.37s */
- lastb = callout_hash(&next) - 1;
+ first = last = *now;
+ bintime_addx(&first, (uint64_t)3 << (64 - 3)); /* 0.37s */
+ bintime_addx(&last, (uint64_t)3 << (64 - 2)); /* 0.75s */
+ last.frac &= (0xffffffffffffffffLLU << (64 - CC_HASH_SHIFT));
+ lastb = callout_hash(&last) - 1;
+ max = last;
/*
* Check if we wrapped around the entire wheel from the last scan.
@@ -422,9 +423,6 @@ callout_process(struct bintime *now)
firstb &= callwheelmask;
/* Iterate callwheel from firstb to nowb and then up to lastb. */
- min.sec = TIME_T_MAX;
- min.frac = UINT64_MAX;
- max = next;
exit_allowed = 0;
for (;;) {
exit_wanted = 0;
@@ -432,13 +430,10 @@ callout_process(struct bintime *now)
tmp = TAILQ_FIRST(sc);
while (tmp != NULL) {
/* Compute allowed time range for the event */
- tmp_max = tmp_min = tmp->c_time;
- if (bintime_isset(&tmp->c_precision)) {
- bintime_add(&tmp_max, &tmp->c_precision);
- bintime_sub(&tmp_min, &tmp->c_precision);
- }
+ tmp_max = tmp->c_time;
+ bintime_add(&tmp_max, &tmp->c_precision);
/* Run the callout if present time within allowed. */
- if (bintime_cmp(&tmp_min, now, <=)) {
+ if (bintime_cmp(&tmp->c_time, now, <=)) {
/*
* Consumer told us the callout may be run
* directly from hardware interrupt context.
@@ -463,32 +458,21 @@ callout_process(struct bintime *now)
continue;
}
/* Skip events from distant future. */
- if (bintime_cmp(&tmp_min, &next, >=))
+ if (bintime_cmp(&tmp->c_time, &max, >=))
goto next;
/*
- * This is the fist event we're going to process or
- * event maximal time is less than present minimal.
- * In both cases, take it.
- */
- if (bintime_cmp(&tmp_max, &min, <)) {
- max = tmp_max;
- min = tmp_min;
- goto next;
- }
- /*
* Event minimal time is bigger than present maximal
* time, so it cannot be aggregated.
*/
- if (bintime_cmp(&tmp_min, &max, >)) {
+ if (bintime_cmp(&tmp->c_time, &last, >)) {
exit_wanted = 1;
goto next;
}
- /*
- * If neither of the two previous happened, just take
- * the intersection of events.
- */
- min = (bintime_cmp(&tmp_min, &min, >)) ? tmp_min : min;
- max = (bintime_cmp(&tmp_max, &max, <)) ? tmp_max : max;
+ /* Update first and last time, respecting this event. */
+ if (bintime_cmp(&tmp->c_time, &first, <))
+ first = tmp->c_time;
+ if (bintime_cmp(&tmp_max, &last, <))
+ last = tmp_max;
next:
tmp = TAILQ_NEXT(tmp, c_links.tqe);
}
@@ -507,25 +491,9 @@ next:
firstb = (firstb + 1) & callwheelmask;
}
cc->cc_exec_next_dir = NULL;
- if (min.sec != TIME_T_MAX) {
- /*
- * Now that we found something to aggregate, schedule an
- * interrupt in the middle of the previously calculated range.
- */
- if (bintime_cmp(&max, &min, !=)) {
- bintime_add(&max, &min);
- next = max;
- next.frac >>= 1;
- if (next.sec & 1)
- next.frac |= ((uint64_t)1 << 63);
- next.sec >>= 1;
- } else
- next = max;
- next_opt = min;
- }
if (callout_new_inserted != NULL)
- (*callout_new_inserted)(cpu, next, next_opt);
- cc->cc_firstevent = next;
+ (*callout_new_inserted)(cpu, last, first);
+ cc->cc_firstevent = last;
cc->cc_lastscan = *now;
#ifdef CALLOUT_PROFILING
avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
@@ -570,7 +538,7 @@ callout_cc_add(struct callout *c, struct
struct bintime to_bintime, struct bintime precision, void (*func)(void *),
void *arg, int cpu, int flags)
{
- struct bintime bt;
+ struct bintime last;
int bucket;
CC_LOCK_ASSERT(cc);
@@ -593,15 +561,13 @@ callout_cc_add(struct callout *c, struct
* Inform the eventtimers(4) subsystem there's a new callout
* that has been inserted, but only if really required.
*/
- bt = c->c_time;
- bintime_add(&bt, &c->c_precision);
+ last = c->c_time;
+ bintime_add(&last, &c->c_precision);
if (callout_new_inserted != NULL &&
- (bintime_cmp(&bt, &cc->cc_firstevent, <) ||
+ (bintime_cmp(&last, &cc->cc_firstevent, <) ||
!bintime_isset(&cc->cc_firstevent))) {
- cc->cc_firstevent = c->c_time;
- bt = c->c_time;
- bintime_sub(&bt, &c->c_precision);
- (*callout_new_inserted)(cpu, c->c_time, bt);
+ cc->cc_firstevent = last;
+ (*callout_new_inserted)(cpu, last, c->c_time);
}
}
@@ -939,11 +905,11 @@ _callout_reset_on(struct callout *c, str
bintime_mul(&to_bt, to_ticks);
bintime_add(&to_bt, &now);
if (C_PRELGET(flags) < 0) {
- pr = halftick_bt;
+ pr = tick_bt;
} else {
to_ticks >>= C_PRELGET(flags);
if (to_ticks == 0)
- pr = halftick_bt;
+ pr = tick_bt;
else
bintime_mul(&pr, to_ticks);
}
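For reference, here is a simplified userland sketch of the aggregation rule
the new callout_process() applies; the names and types are illustrative
stand-ins, not the kernel code. The shared window's upper bound is shrunk to
the smallest event deadline so nothing fires late, while the lower bound
tracks the earliest event time that an opportunistic interrupt could already
serve:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical simplified event: may fire anywhere in [time, time + prec]. */
struct ev {
        uint64_t        time;   /* earliest allowed firing time */
        uint64_t        prec;   /* allowed lateness */
};

static void
aggregate(const struct ev *evs, size_t n, uint64_t now,
    uint64_t def_min, uint64_t def_max, uint64_t *firstp, uint64_t *lastp)
{
        uint64_t first = now + def_min;         /* e.g. ~0.37 s ahead */
        uint64_t last = now + def_max;          /* e.g. ~0.75 s ahead */

        for (size_t i = 0; i < n; i++) {
                uint64_t emax = evs[i].time + evs[i].prec;

                /*
                 * Events beyond the current upper bound cannot be
                 * aggregated (the kernel stops scanning instead).
                 */
                if (evs[i].time > last)
                        continue;
                if (evs[i].time < first)
                        first = evs[i].time;
                if (emax < last)
                        last = emax;
        }
        *firstp = first;        /* earliest useful interrupt time */
        *lastp = last;          /* program the hardware timer here */
}

int
main(void)
{
        struct ev evs[] = {
                { 1000, 500 },  /* window [1000, 1500] */
                { 1200, 50 },   /* window [1200, 1250] */
        };
        uint64_t first, last;

        aggregate(evs, 2, 900, 370, 750, &first, &last);
        printf("first %ju last %ju\n", (uintmax_t)first, (uintmax_t)last);
        return (0);
}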
Modified: projects/calloutng/sys/kern/sys_generic.c
==============================================================================
--- projects/calloutng/sys/kern/sys_generic.c Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/kern/sys_generic.c Fri Dec 14 14:45:18 2012 (r244211)
@@ -1010,7 +1010,6 @@ kern_select(struct thread *td, int nd, f
if (TIMESEL(&rbt, &abt))
bintime_add(&abt, &tick_bt);
bintime_add(&abt, &rbt);
- bintime_add(&abt, &precision);
} else {
abt.sec = 0;
abt.frac = 0;
@@ -1293,7 +1292,6 @@ sys_poll(td, uap)
if (TIMESEL(&rbt, &abt))
bintime_add(&abt, &tick_bt);
bintime_add(&abt, &rbt);
- bintime_add(&abt, &precision);
} else {
abt.sec = 0;
abt.frac = 0;
Modified: projects/calloutng/sys/sys/time.h
==============================================================================
--- projects/calloutng/sys/sys/time.h Fri Dec 14 13:01:16 2012 (r244210)
+++ projects/calloutng/sys/sys/time.h Fri Dec 14 14:45:18 2012 (r244211)
@@ -299,7 +299,6 @@ void resettodr(void);
extern time_t time_second;
extern time_t time_uptime;
extern struct bintime boottimebin;
-extern struct bintime halftick_bt;
extern struct bintime tick_bt;
extern struct timeval boottime;
extern int tc_timeexp;