svn commit: r304843 - in stable/11/sys: compat/linprocfs fs/devfs fs/fdescfs fs/nfs fs/procfs kern net net/altq netpfil/ipfw nfs rpc/rpcsec_gss sys
Konstantin Belousov
kib at FreeBSD.org
Fri Aug 26 10:04:13 UTC 2016
Author: kib
Date: Fri Aug 26 10:04:10 2016
New Revision: 304843
URL: https://svnweb.freebsd.org/changeset/base/304843
Log:
MFC r303382:
Provide the getboottime(9) and getboottimebin(9) KPI.
MFC r303387:
Prevent parallel tc_windup() calls. Keep boottime in timehands,
and adjust it from tc_windup().
MFC notes:
The boottime and boottimebin globals are still exported from
the kernel dyn symbol table in stable/11, but their declarations are
removed from sys/time.h. This preserves the KBI but not the KPI, while all
in-tree consumers are converted to getboottime() (see the usage sketch after this log).
The variables are updated after tc_setclock_mtx is dropped, which gives
approximately the same unlocked bugs as before.
The boottime and boottimebin locals in several sys/kern/kern_tc.c functions
were renamed by adding the '_x' suffix to avoid name conflicts.
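For reference, a minimal sketch of the consumer-side conversion, assuming a
kernel (_KERNEL) compilation context; the helper name is hypothetical, only
getboottime() and timevaladd() come from the tree:

	#include <sys/param.h>
	#include <sys/time.h>

	/*
	 * Hypothetical helper: turn an uptime-relative start time into
	 * wall-clock time.  Instead of reading the now-unexported
	 * `boottime' global, take a consistent snapshot via the new KPI.
	 */
	static void
	start_to_realtime(struct timeval *start)
	{
		struct timeval boottime;

		getboottime(&boottime);
		timevaladd(start, &boottime);
	}

This mirrors the fill_kinfo_proc_only() and procdesc_stat() hunks below.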
Modified:
stable/11/sys/compat/linprocfs/linprocfs.c
stable/11/sys/fs/devfs/devfs_vnops.c
stable/11/sys/fs/fdescfs/fdesc_vnops.c
stable/11/sys/fs/nfs/nfsport.h
stable/11/sys/fs/procfs/procfs_status.c
stable/11/sys/kern/kern_acct.c
stable/11/sys/kern/kern_clock.c
stable/11/sys/kern/kern_proc.c
stable/11/sys/kern/kern_tc.c
stable/11/sys/kern/sys_procdesc.c
stable/11/sys/net/altq/altq_subr.c
stable/11/sys/net/bpf.c
stable/11/sys/netpfil/ipfw/ip_fw_sockopt.c
stable/11/sys/nfs/nfs_lock.c
stable/11/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c
stable/11/sys/sys/time.h
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/compat/linprocfs/linprocfs.c
==============================================================================
--- stable/11/sys/compat/linprocfs/linprocfs.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/compat/linprocfs/linprocfs.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -447,9 +447,11 @@ linprocfs_dostat(PFS_FILL_ARGS)
struct pcpu *pcpu;
long cp_time[CPUSTATES];
long *cp;
+ struct timeval boottime;
int i;
read_cpu_time(cp_time);
+ getboottime(&boottime);
sbuf_printf(sb, "cpu %ld %ld %ld %ld\n",
T2J(cp_time[CP_USER]),
T2J(cp_time[CP_NICE]),
@@ -624,10 +626,12 @@ static int
linprocfs_doprocstat(PFS_FILL_ARGS)
{
struct kinfo_proc kp;
+ struct timeval boottime;
char state;
static int ratelimit = 0;
vm_offset_t startcode, startdata;
+ getboottime(&boottime);
sx_slock(&proctree_lock);
PROC_LOCK(p);
fill_kinfo_proc(p, &kp);
Modified: stable/11/sys/fs/devfs/devfs_vnops.c
==============================================================================
--- stable/11/sys/fs/devfs/devfs_vnops.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/fs/devfs/devfs_vnops.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -707,10 +707,11 @@ devfs_getattr(struct vop_getattr_args *a
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
- int error;
struct devfs_dirent *de;
struct devfs_mount *dmp;
struct cdev *dev;
+ struct timeval boottime;
+ int error;
error = devfs_populate_vp(vp);
if (error != 0)
@@ -740,6 +741,7 @@ devfs_getattr(struct vop_getattr_args *a
vap->va_blocksize = DEV_BSIZE;
vap->va_type = vp->v_type;
+ getboottime(&boottime);
#define fix(aa) \
do { \
if ((aa).tv_sec <= 3600) { \
Modified: stable/11/sys/fs/fdescfs/fdesc_vnops.c
==============================================================================
--- stable/11/sys/fs/fdescfs/fdesc_vnops.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/fs/fdescfs/fdesc_vnops.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -394,7 +394,9 @@ fdesc_getattr(struct vop_getattr_args *a
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
+ struct timeval boottime;
+ getboottime(&boottime);
vap->va_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH;
vap->va_fileid = VTOFDESC(vp)->fd_ix;
vap->va_uid = 0;
Modified: stable/11/sys/fs/nfs/nfsport.h
==============================================================================
--- stable/11/sys/fs/nfs/nfsport.h Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/fs/nfs/nfsport.h Fri Aug 26 10:04:10 2016 (r304843)
@@ -872,7 +872,7 @@ int newnfs_realign(struct mbuf **, int);
/*
* Set boottime.
*/
-#define NFSSETBOOTTIME(b) ((b) = boottime)
+#define NFSSETBOOTTIME(b) (getboottime(&b))
/*
* The size of directory blocks in the buffer cache.
Modified: stable/11/sys/fs/procfs/procfs_status.c
==============================================================================
--- stable/11/sys/fs/procfs/procfs_status.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/fs/procfs/procfs_status.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -70,6 +70,7 @@ procfs_doprocstatus(PFS_FILL_ARGS)
const char *wmesg;
char *pc;
char *sep;
+ struct timeval boottime;
int pid, ppid, pgid, sid;
int i;
@@ -129,6 +130,7 @@ procfs_doprocstatus(PFS_FILL_ARGS)
calcru(p, &ut, &st);
PROC_STATUNLOCK(p);
start = p->p_stats->p_start;
+ getboottime(&boottime);
timevaladd(&start, &boottime);
sbuf_printf(sb, " %jd,%ld %jd,%ld %jd,%ld",
(intmax_t)start.tv_sec, start.tv_usec,
Modified: stable/11/sys/kern/kern_acct.c
==============================================================================
--- stable/11/sys/kern/kern_acct.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/kern/kern_acct.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -389,7 +389,7 @@ acct_process(struct thread *td)
acct.ac_stime = encode_timeval(st);
/* (4) The elapsed time the command ran (and its starting time) */
- tmp = boottime;
+ getboottime(&tmp);
timevaladd(&tmp, &p->p_stats->p_start);
acct.ac_btime = tmp.tv_sec;
microuptime(&tmp);
Modified: stable/11/sys/kern/kern_clock.c
==============================================================================
--- stable/11/sys/kern/kern_clock.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/kern/kern_clock.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -381,7 +381,9 @@ volatile int ticks;
int psratio;
static DPCPU_DEFINE(int, pcputicks); /* Per-CPU version of ticks. */
-static int global_hardclock_run = 0;
+#ifdef DEVICE_POLLING
+static int devpoll_run = 0;
+#endif
/*
* Initialize clock frequencies and start both clocks running.
@@ -584,15 +586,15 @@ hardclock_cnt(int cnt, int usermode)
#endif
/* We are in charge to handle this tick duty. */
if (newticks > 0) {
- /* Dangerous and no need to call these things concurrently. */
- if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
- tc_ticktock(newticks);
+ tc_ticktock(newticks);
#ifdef DEVICE_POLLING
+ /* Dangerous and no need to call these things concurrently. */
+ if (atomic_cmpset_acq_int(&devpoll_run, 0, 1)) {
/* This is very short and quick. */
hardclock_device_poll();
-#endif /* DEVICE_POLLING */
- atomic_store_rel_int(&global_hardclock_run, 0);
+ atomic_store_rel_int(&devpoll_run, 0);
}
+#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
if (watchdog_enabled > 0) {
i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
Modified: stable/11/sys/kern/kern_proc.c
==============================================================================
--- stable/11/sys/kern/kern_proc.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/kern/kern_proc.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -872,6 +872,7 @@ fill_kinfo_proc_only(struct proc *p, str
struct session *sp;
struct ucred *cred;
struct sigacts *ps;
+ struct timeval boottime;
/* For proc_realparent. */
sx_assert(&proctree_lock, SX_LOCKED);
@@ -953,6 +954,7 @@ fill_kinfo_proc_only(struct proc *p, str
kp->ki_nice = p->p_nice;
kp->ki_fibnum = p->p_fibnum;
kp->ki_start = p->p_stats->p_start;
+ getboottime(&boottime);
timevaladd(&kp->ki_start, &boottime);
PROC_STATLOCK(p);
rufetch(p, &kp->ki_rusage);
Modified: stable/11/sys/kern/kern_tc.c
==============================================================================
--- stable/11/sys/kern/kern_tc.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/kern/kern_tc.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -70,6 +70,7 @@ struct timehands {
struct bintime th_offset;
struct timeval th_microtime;
struct timespec th_nanotime;
+ struct bintime th_boottime;
/* Fields not to be copied in tc_windup start with th_generation. */
u_int th_generation;
struct timehands *th_next;
@@ -125,7 +126,7 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO,
static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
-static void tc_windup(void);
+static void tc_windup(struct bintime *new_boottimebin);
static void cpu_tick_calibrate(int);
void dtrace_getnanotime(struct timespec *tsp);
@@ -133,18 +134,22 @@ void dtrace_getnanotime(struct timespec
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
+ struct timeval boottime_x;
+
+ getboottime(&boottime_x);
+
#ifndef __mips__
#ifdef SCTL_MASK32
int tv[2];
if (req->flags & SCTL_MASK32) {
- tv[0] = boottime.tv_sec;
- tv[1] = boottime.tv_usec;
- return SYSCTL_OUT(req, tv, sizeof(tv));
- } else
+ tv[0] = boottime_x.tv_sec;
+ tv[1] = boottime_x.tv_usec;
+ return (SYSCTL_OUT(req, tv, sizeof(tv)));
+ }
#endif
#endif
- return SYSCTL_OUT(req, &boottime, sizeof(boottime));
+ return (SYSCTL_OUT(req, &boottime_x, sizeof(boottime_x)));
}
static int
@@ -224,9 +229,17 @@ fbclock_microuptime(struct timeval *tvp)
void
fbclock_bintime(struct bintime *bt)
{
+ struct timehands *th;
+ unsigned int gen;
- fbclock_binuptime(bt);
- bintime_add(bt, &boottimebin);
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *bt = th->th_offset;
+ bintime_addx(bt, th->th_scale * tc_delta(th));
+ bintime_add(bt, &th->th_boottime);
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
}
void
@@ -299,9 +312,9 @@ fbclock_getbintime(struct bintime *bt)
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
*bt = th->th_offset;
+ bintime_add(bt, &th->th_boottime);
atomic_thread_fence_acq();
} while (gen == 0 || gen != th->th_generation);
- bintime_add(bt, &boottimebin);
}
void
@@ -368,9 +381,17 @@ microuptime(struct timeval *tvp)
void
bintime(struct bintime *bt)
{
+ struct timehands *th;
+ u_int gen;
- binuptime(bt);
- bintime_add(bt, &boottimebin);
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *bt = th->th_offset;
+ bintime_addx(bt, th->th_scale * tc_delta(th));
+ bintime_add(bt, &th->th_boottime);
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
}
void
@@ -443,9 +464,9 @@ getbintime(struct bintime *bt)
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
*bt = th->th_offset;
+ bintime_add(bt, &th->th_boottime);
atomic_thread_fence_acq();
} while (gen == 0 || gen != th->th_generation);
- bintime_add(bt, &boottimebin);
}
void
@@ -477,6 +498,29 @@ getmicrotime(struct timeval *tvp)
}
#endif /* FFCLOCK */
+void
+getboottime(struct timeval *boottime_x)
+{
+ struct bintime boottimebin_x;
+
+ getboottimebin(&boottimebin_x);
+ bintime2timeval(&boottimebin_x, boottime_x);
+}
+
+void
+getboottimebin(struct bintime *boottimebin_x)
+{
+ struct timehands *th;
+ u_int gen;
+
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *boottimebin_x = th->th_boottime;
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
+}
+
#ifdef FFCLOCK
/*
* Support for feed-forward synchronization algorithms. This is heavily inspired
@@ -1093,6 +1137,7 @@ int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
int whichclock, uint32_t flags)
{
+ struct bintime boottimebin_x;
#ifdef FFCLOCK
struct bintime bt2;
uint64_t period;
@@ -1106,8 +1151,10 @@ sysclock_snap2bintime(struct sysclock_sn
if (cs->delta > 0)
bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
- if ((flags & FBCLOCK_UPTIME) == 0)
- bintime_add(bt, &boottimebin);
+ if ((flags & FBCLOCK_UPTIME) == 0) {
+ getboottimebin(&boottimebin_x);
+ bintime_add(bt, &boottimebin_x);
+ }
break;
#ifdef FFCLOCK
case SYSCLOCK_FFWD:
@@ -1216,10 +1263,12 @@ tc_getfrequency(void)
return (timehands->th_counter->tc_frequency);
}
+static struct mtx tc_setclock_mtx;
+MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
+
/*
* Step our concept of UTC. This is done by modifying our estimate of
* when we booted.
- * XXX: not locked.
*/
void
tc_setclock(struct timespec *ts)
@@ -1227,26 +1276,26 @@ tc_setclock(struct timespec *ts)
struct timespec tbef, taft;
struct bintime bt, bt2;
- cpu_tick_calibrate(1);
- nanotime(&tbef);
timespec2bintime(ts, &bt);
+ nanotime(&tbef);
+ mtx_lock_spin(&tc_setclock_mtx);
+ cpu_tick_calibrate(1);
binuptime(&bt2);
bintime_sub(&bt, &bt2);
- bintime_add(&bt2, &boottimebin);
- boottimebin = bt;
- bintime2timeval(&bt, &boottime);
/* XXX fiddle all the little crinkly bits around the fiords... */
- tc_windup();
- nanotime(&taft);
+ tc_windup(&bt);
+ mtx_unlock_spin(&tc_setclock_mtx);
+ getboottimebin(&boottimebin);
+ bintime2timeval(&boottimebin, &boottime);
if (timestepwarnings) {
+ nanotime(&taft);
log(LOG_INFO,
"Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
(intmax_t)tbef.tv_sec, tbef.tv_nsec,
(intmax_t)taft.tv_sec, taft.tv_nsec,
(intmax_t)ts->tv_sec, ts->tv_nsec);
}
- cpu_tick_calibrate(1);
}
/*
@@ -1255,7 +1304,7 @@ tc_setclock(struct timespec *ts)
* timecounter and/or do seconds processing in NTP. Slightly magic.
*/
static void
-tc_windup(void)
+tc_windup(struct bintime *new_boottimebin)
{
struct bintime bt;
struct timehands *th, *tho;
@@ -1279,6 +1328,8 @@ tc_windup(void)
th->th_generation = 0;
atomic_thread_fence_rel();
bcopy(tho, th, offsetof(struct timehands, th_generation));
+ if (new_boottimebin != NULL)
+ th->th_boottime = *new_boottimebin;
/*
* Capture a timecounter delta on the current timecounter and if
@@ -1328,7 +1379,7 @@ tc_windup(void)
* case we missed a leap second.
*/
bt = th->th_offset;
- bintime_add(&bt, &boottimebin);
+ bintime_add(&bt, &th->th_boottime);
i = bt.sec - tho->th_microtime.tv_sec;
if (i > LARGE_STEP)
i = 2;
@@ -1336,7 +1387,7 @@ tc_windup(void)
t = bt.sec;
ntp_update_second(&th->th_adjustment, &bt.sec);
if (bt.sec != t)
- boottimebin.sec += bt.sec - t;
+ th->th_boottime.sec += bt.sec - t;
}
/* Update the UTC timestamps used by the get*() functions. */
/* XXX shouldn't do this here. Should force non-`get' versions. */
@@ -1759,7 +1810,7 @@ pps_event(struct pps_state *pps, int eve
tcount &= pps->capth->th_counter->tc_counter_mask;
bt = pps->capth->th_offset;
bintime_addx(&bt, pps->capth->th_scale * tcount);
- bintime_add(&bt, &boottimebin);
+ bintime_add(&bt, &pps->capth->th_boottime);
bintime2timespec(&bt, &ts);
/* If the timecounter was wound up underneath us, bail out. */
@@ -1832,11 +1883,14 @@ tc_ticktock(int cnt)
{
static int count;
- count += cnt;
- if (count < tc_tick)
- return;
- count = 0;
- tc_windup();
+ if (mtx_trylock_spin(&tc_setclock_mtx)) {
+ count += cnt;
+ if (count >= tc_tick) {
+ count = 0;
+ tc_windup(NULL);
+ }
+ mtx_unlock_spin(&tc_setclock_mtx);
+ }
}
static void __inline
@@ -1911,7 +1965,9 @@ inittimecounter(void *dummy)
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
(void)timecounter->tc_get_timecount(timecounter);
- tc_windup();
+ mtx_lock_spin(&tc_setclock_mtx);
+ tc_windup(NULL);
+ mtx_unlock_spin(&tc_setclock_mtx);
}
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
@@ -2085,7 +2141,7 @@ tc_fill_vdso_timehands(struct vdso_timeh
vdso_th->th_offset_count = th->th_offset_count;
vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
vdso_th->th_offset = th->th_offset;
- vdso_th->th_boottime = boottimebin;
+ vdso_th->th_boottime = th->th_boottime;
enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
if (!vdso_th_enable)
enabled = 0;
@@ -2106,8 +2162,8 @@ tc_fill_vdso_timehands32(struct vdso_tim
vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
vdso_th32->th_offset.sec = th->th_offset.sec;
*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
- vdso_th32->th_boottime.sec = boottimebin.sec;
- *(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
+ vdso_th32->th_boottime.sec = th->th_boottime.sec;
+ *(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
if (!vdso_th_enable)
enabled = 0;
Modified: stable/11/sys/kern/sys_procdesc.c
==============================================================================
--- stable/11/sys/kern/sys_procdesc.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/kern/sys_procdesc.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -517,7 +517,7 @@ procdesc_stat(struct file *fp, struct st
struct thread *td)
{
struct procdesc *pd;
- struct timeval pstart;
+ struct timeval pstart, boottime;
/*
* XXXRW: Perhaps we should cache some more information from the
@@ -532,6 +532,7 @@ procdesc_stat(struct file *fp, struct st
/* Set birth and [acm] times to process start time. */
pstart = pd->pd_proc->p_stats->p_start;
+ getboottime(&boottime);
timevaladd(&pstart, &boottime);
TIMEVAL_TO_TIMESPEC(&pstart, &sb->st_birthtim);
sb->st_atim = sb->st_birthtim;
Modified: stable/11/sys/net/altq/altq_subr.c
==============================================================================
--- stable/11/sys/net/altq/altq_subr.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/net/altq/altq_subr.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -1027,9 +1027,10 @@ read_machclk(void)
panic("read_machclk");
#endif
} else {
- struct timeval tv;
+ struct timeval tv, boottime;
microtime(&tv);
+ getboottime(&boottime);
val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
+ tv.tv_usec) << MACHCLK_SHIFT);
}
Modified: stable/11/sys/net/bpf.c
==============================================================================
--- stable/11/sys/net/bpf.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/net/bpf.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -2328,12 +2328,13 @@ bpf_hdrlen(struct bpf_d *d)
static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
- struct bintime bt2;
+ struct bintime bt2, boottimebin;
struct timeval tsm;
struct timespec tsn;
if ((tstype & BPF_T_MONOTONIC) == 0) {
bt2 = *bt;
+ getboottimebin(&boottimebin);
bintime_add(&bt2, &boottimebin);
bt = &bt2;
}
Modified: stable/11/sys/netpfil/ipfw/ip_fw_sockopt.c
==============================================================================
--- stable/11/sys/netpfil/ipfw/ip_fw_sockopt.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/netpfil/ipfw/ip_fw_sockopt.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -395,6 +395,7 @@ swap_map(struct ip_fw_chain *chain, stru
static void
export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
{
+ struct timeval boottime;
cntr->size = sizeof(*cntr);
@@ -403,21 +404,26 @@ export_cntr1_base(struct ip_fw *krule, s
cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
cntr->timestamp = krule->timestamp;
}
- if (cntr->timestamp > 0)
+ if (cntr->timestamp > 0) {
+ getboottime(&boottime);
cntr->timestamp += boottime.tv_sec;
+ }
}
static void
export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
{
+ struct timeval boottime;
if (krule->cntr != NULL) {
cntr->pcnt = counter_u64_fetch(krule->cntr);
cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
cntr->timestamp = krule->timestamp;
}
- if (cntr->timestamp > 0)
+ if (cntr->timestamp > 0) {
+ getboottime(&boottime);
cntr->timestamp += boottime.tv_sec;
+ }
}
/*
@@ -2056,11 +2062,13 @@ ipfw_getrules(struct ip_fw_chain *chain,
char *ep = bp + space;
struct ip_fw *rule;
struct ip_fw_rule0 *dst;
+ struct timeval boottime;
int error, i, l, warnflag;
time_t boot_seconds;
warnflag = 0;
+ getboottime(&boottime);
boot_seconds = boottime.tv_sec;
for (i = 0; i < chain->n_rules; i++) {
rule = chain->map[i];
Modified: stable/11/sys/nfs/nfs_lock.c
==============================================================================
--- stable/11/sys/nfs/nfs_lock.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/nfs/nfs_lock.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -241,6 +241,7 @@ nfs_dolock(struct vop_advlock_args *ap)
struct flock *fl;
struct proc *p;
struct nfsmount *nmp;
+ struct timeval boottime;
td = curthread;
p = td->td_proc;
@@ -284,6 +285,7 @@ nfs_dolock(struct vop_advlock_args *ap)
p->p_nlminfo = malloc(sizeof(struct nlminfo),
M_NLMINFO, M_WAITOK | M_ZERO);
p->p_nlminfo->pid_start = p->p_stats->p_start;
+ getboottime(&boottime);
timevaladd(&p->p_nlminfo->pid_start, &boottime);
}
msg.lm_msg_ident.pid_start = p->p_nlminfo->pid_start;
Modified: stable/11/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c
==============================================================================
--- stable/11/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c Fri Aug 26 10:04:10 2016 (r304843)
@@ -504,11 +504,13 @@ svc_rpc_gss_find_client(struct svc_rpc_g
{
struct svc_rpc_gss_client *client;
struct svc_rpc_gss_client_list *list;
+ struct timeval boottime;
unsigned long hostid;
rpc_gss_log_debug("in svc_rpc_gss_find_client(%d)", id->ci_id);
getcredhostid(curthread->td_ucred, &hostid);
+ getboottime(&boottime);
if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec)
return (NULL);
@@ -537,6 +539,7 @@ svc_rpc_gss_create_client(void)
{
struct svc_rpc_gss_client *client;
struct svc_rpc_gss_client_list *list;
+ struct timeval boottime;
unsigned long hostid;
rpc_gss_log_debug("in svc_rpc_gss_create_client()");
@@ -547,6 +550,7 @@ svc_rpc_gss_create_client(void)
sx_init(&client->cl_lock, "GSS-client");
getcredhostid(curthread->td_ucred, &hostid);
client->cl_id.ci_hostid = hostid;
+ getboottime(&boottime);
client->cl_id.ci_boottime = boottime.tv_sec;
client->cl_id.ci_id = svc_rpc_gss_next_clientid++;
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
Modified: stable/11/sys/sys/time.h
==============================================================================
--- stable/11/sys/sys/time.h Fri Aug 26 09:42:51 2016 (r304842)
+++ stable/11/sys/sys/time.h Fri Aug 26 10:04:10 2016 (r304843)
@@ -372,8 +372,6 @@ void resettodr(void);
extern volatile time_t time_second;
extern volatile time_t time_uptime;
-extern struct bintime boottimebin;
-extern struct timeval boottime;
extern struct bintime tc_tick_bt;
extern sbintime_t tc_tick_sbt;
extern struct bintime tick_bt;
@@ -440,6 +438,9 @@ void getbintime(struct bintime *bt);
void getnanotime(struct timespec *tsp);
void getmicrotime(struct timeval *tvp);
+void getboottime(struct timeval *boottime);
+void getboottimebin(struct bintime *boottimebin);
+
/* Other functions */
int itimerdecr(struct itimerval *itp, int usec);
int itimerfix(struct timeval *tv);