svn commit: r236894 - projects/calloutng/sys/kern
Davide Italiano
davide at FreeBSD.org
Mon Jun 11 17:17:29 UTC 2012
Author: davide
Date: Mon Jun 11 17:17:28 2012
New Revision: 236894
URL: http://svn.freebsd.org/changeset/base/236894
Log:
CTR4 takes four arguments but we need five, so change it to CTR5.
Modify some printed values to obtain more meaningful kernel traces.
While here, Bruce Evans told me that "unsigned int" is spelled "u_int" in
KNF, so replace it where needed.
Discussed with: mav
Modified:
projects/calloutng/sys/kern/kern_clocksource.c
projects/calloutng/sys/kern/kern_timeout.c
Modified: projects/calloutng/sys/kern/kern_clocksource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clocksource.c Mon Jun 11 16:35:47 2012 (r236893)
+++ projects/calloutng/sys/kern/kern_clocksource.c Mon Jun 11 17:17:28 2012 (r236894)
@@ -168,8 +168,8 @@ hardclockintr(void)
state = DPCPU_PTR(timerstate);
now = state->now;
CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
- curcpu, now.sec, (unsigned int)(now.frac >> 32),
- (unsigned int)(now.frac & 0xffffffff));
+ curcpu, now.sec, (u_int)(now.frac >> 32),
+ (u_int)(now.frac & 0xffffffff));
done = handleevents(&now, 0);
return (done ? FILTER_HANDLED : FILTER_STRAY);
}
@@ -188,8 +188,8 @@ handleevents(struct bintime *now, int fa
int done, runs;
CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
- curcpu, now->sec, (unsigned int)(now->frac >> 32),
- (unsigned int)(now->frac & 0xffffffff));
+ curcpu, now->sec, (u_int)(now->frac >> 32),
+ (u_int)(now->frac & 0xffffffff));
done = 0;
if (fake) {
frame = NULL;
@@ -339,8 +339,8 @@ getnextevent(struct bintime *event)
*event = nexthard;
}
CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
- curcpu, event->sec, (unsigned int)(event->frac >> 32),
- (unsigned int)(event->frac & 0xffffffff), c);
+ curcpu, event->sec, (u_int)(event->frac >> 32),
+ (u_int)(event->frac & 0xffffffff), c);
}
/* Hardware timer callback function. */
@@ -372,8 +372,8 @@ timercb(struct eventtimer *et, void *arg
}
state->now = now;
CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
- curcpu, now.sec, (unsigned int)(now.frac >> 32),
- (unsigned int)(now.frac & 0xffffffff));
+ curcpu, (int)(now.sec), (u_int)(now.frac >> 32),
+ (u_int)(now.frac & 0xffffffff));
#ifdef SMP
/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
@@ -444,8 +444,8 @@ loadtimer(struct bintime *now, int start
if (new.frac < tmp) /* Left less then passed. */
bintime_add(&new, &timerperiod);
CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
- curcpu, now->sec, (unsigned int)(now->frac >> 32),
- new.sec, (unsigned int)(new.frac >> 32));
+ curcpu, now->sec, (u_int)(now->frac >> 32),
+ new.sec, (u_int)(new.frac >> 32));
*next = new;
bintime_add(next, now);
et_start(timer, &new, &timerperiod);
@@ -454,8 +454,8 @@ loadtimer(struct bintime *now, int start
getnextevent(&new);
eq = bintime_cmp(&new, next, ==);
CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
- curcpu, new.sec, (unsigned int)(new.frac >> 32),
- (unsigned int)(new.frac & 0xffffffff),
+ curcpu, new.sec, (u_int)(new.frac >> 32),
+ (u_int)(new.frac & 0xffffffff),
eq);
if (!eq) {
*next = new;
@@ -788,8 +788,8 @@ cpu_idleclock(void)
else
binuptime(&now);
CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
- curcpu, now.sec, (unsigned int)(now.frac >> 32),
- (unsigned int)(now.frac & 0xffffffff));
+ curcpu, now.sec, (u_int)(now.frac >> 32),
+ (u_int)(now.frac & 0xffffffff));
getnextcpuevent(&t, 1);
ET_HW_LOCK(state);
state->idle = 1;
@@ -817,8 +817,8 @@ cpu_activeclock(void)
else
binuptime(&now);
CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
- curcpu, now.sec, (unsigned int)(now.frac >> 32),
- (unsigned int)(now.frac & 0xffffffff));
+ curcpu, now.sec, (u_int)(now.frac >> 32),
+ (u_int)(now.frac & 0xffffffff));
spinlock_enter();
td = curthread;
td->td_intr_nesting_level++;
@@ -841,11 +841,11 @@ clocksource_cyc_set(const struct bintime
binuptime(&now);
CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
- curcpu, now.sec, (unsigned int)(now.frac >> 32),
- (unsigned int)(now.frac & 0xffffffff));
+ curcpu, now.sec, (u_int)(now.frac >> 32),
+ (u_int)(now.frac & 0xffffffff));
CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
- curcpu, t->sec, (unsigned int)(t->frac >> 32),
- (unsigned int)(t->frac & 0xffffffff));
+ curcpu, t->sec, (u_int)(t->frac >> 32),
+ (u_int)(t->frac & 0xffffffff));
ET_HW_LOCK(state);
if (bintime_cmp(t, &state->nextcyc, ==)) {
@@ -870,9 +870,9 @@ cpu_new_callout(int cpu, struct bintime
struct bintime now;
struct pcpu_state *state;
- CTR4(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
- curcpu, cpu, (unsigned int)(bt.frac >> 32),
- (unsigned int)(bt.frac & 0xffffffff));
+ CTR5(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
+ curcpu, cpu, (int)(bt.sec), (u_int)(bt.frac >> 32),
+ (u_int)(bt.frac & 0xffffffff));
state = DPCPU_ID_PTR(cpu, timerstate);
ET_HW_LOCK(state);
Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c Mon Jun 11 16:35:47 2012 (r236893)
+++ projects/calloutng/sys/kern/kern_timeout.c Mon Jun 11 17:17:28 2012 (r236894)
@@ -896,8 +896,9 @@ callout_reset_bt_on(struct callout *c, s
cc->cc_migration_arg = arg;
c->c_flags |= CALLOUT_DFRMIGRATION;
CTR6(KTR_CALLOUT,
- "migration of %p func %p arg %p in %ld %ld to %u deferred",
- c, c->c_func, c->c_arg, bt.sec, bt.frac, cpu);
+ "migration of %p func %p arg %p in %d.%08x to %u deferred",
+ c, c->c_func, c->c_arg, (int)(bt.sec),
+ (u_int)(bt.frac >> 32), cpu);
CC_UNLOCK(cc);
return (cancelled);
}
@@ -906,8 +907,9 @@ callout_reset_bt_on(struct callout *c, s
#endif
callout_cc_add(c, cc, bt, ftn, arg, cpu, direct);
- CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %ld %ld",
- cancelled ? "re" : "", c, c->c_func, c->c_arg, bt.sec, bt.frac);
+ CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
+ cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(bt.sec),
+ (u_int)(bt.frac >> 32));
CC_UNLOCK(cc);
return (cancelled);
More information about the svn-src-projects
mailing list