svn commit: r333769 - in head/sys: kern sys
Matt Macy
mmacy at FreeBSD.org
Thu May 17 21:39:16 UTC 2018
Author: mmacy
Date: Thu May 17 21:39:15 2018
New Revision: 333769
URL: https://svnweb.freebsd.org/changeset/base/333769
Log:
epoch: skip poll function call in hardclock unless there are callbacks pending
Reported by: mjg
Approved by: sbruno
Modified:
head/sys/kern/kern_clock.c
head/sys/kern/subr_epoch.c
head/sys/sys/epoch.h
Modified: head/sys/kern/kern_clock.c
==============================================================================
--- head/sys/kern/kern_clock.c Thu May 17 21:23:14 2018 (r333768)
+++ head/sys/kern/kern_clock.c Thu May 17 21:39:15 2018 (r333769)
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/epoch.h>
+#include <sys/gtaskqueue.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
@@ -468,7 +469,8 @@ hardclock_cpu(int usermode)
PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
callout_process(sbinuptime());
- epoch_pcpu_poll();
+ if (__predict_false(DPCPU_GET(epoch_cb_count)))
+ GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
}
/*
@@ -574,7 +576,8 @@ hardclock_cnt(int cnt, int usermode)
}
if (curcpu == CPU_FIRST())
cpu_tick_calibration();
- epoch_pcpu_poll();
+ if (__predict_false(DPCPU_GET(epoch_cb_count)))
+ GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
}
void
Modified: head/sys/kern/subr_epoch.c
==============================================================================
--- head/sys/kern/subr_epoch.c Thu May 17 21:23:14 2018 (r333768)
+++ head/sys/kern/subr_epoch.c Thu May 17 21:39:15 2018 (r333769)
@@ -111,8 +111,8 @@ struct epoch {
epoch_t allepochs[MAX_EPOCHS];
-static DPCPU_DEFINE(struct grouptask, cb_task);
-static DPCPU_DEFINE(int, cb_count);
+DPCPU_DEFINE(struct grouptask, epoch_cb_task);
+DPCPU_DEFINE(int, epoch_cb_count);
static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
@@ -157,8 +157,8 @@ epoch_init(void *arg __unused)
}
done:
CPU_FOREACH(cpu) {
- GROUPTASK_INIT(DPCPU_ID_PTR(cpu, cb_task), 0, epoch_call_task, NULL);
- taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, cb_task), NULL, cpu, -1, "epoch call task");
+ GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
+ taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
}
inited = 1;
global_epoch = epoch_alloc();
@@ -533,7 +533,7 @@ epoch_call(epoch_t epoch, epoch_context_t ctx, void (*
counter_u64_add(epoch->e_frees, 1);
critical_enter();
- *DPCPU_PTR(cb_count) += 1;
+ *DPCPU_PTR(epoch_cb_count) += 1;
eps = epoch->e_pcpu[curcpu];
ck_epoch_call(&eps->eps_record.er_record, cb, (ck_epoch_cb_t*)callback);
critical_exit();
@@ -566,7 +566,7 @@ epoch_call_task(void *arg __unused)
total += npending - record->n_pending;
}
epoch_exit_private(&section);
- *DPCPU_PTR(cb_count) -= total;
+ *DPCPU_PTR(epoch_cb_count) -= total;
critical_exit();
head = ck_stack_batch_pop_npsc(&cb_stack);
@@ -576,14 +576,6 @@ epoch_call_task(void *arg __unused)
next = CK_STACK_NEXT(cursor);
entry->function(entry);
}
-}
-
-void
-epoch_pcpu_poll(void)
-{
-
- if (DPCPU_GET(cb_count))
- GROUPTASK_ENQUEUE(DPCPU_PTR(cb_task));
}
int
Modified: head/sys/sys/epoch.h
==============================================================================
--- head/sys/sys/epoch.h Thu May 17 21:23:14 2018 (r333768)
+++ head/sys/sys/epoch.h Thu May 17 21:39:15 2018 (r333769)
@@ -36,6 +36,8 @@ struct epoch;
typedef struct epoch *epoch_t;
extern epoch_t global_epoch;
+DPCPU_DECLARE(int, epoch_cb_count);
+DPCPU_DECLARE(struct grouptask, epoch_cb_task);
struct epoch_context {
void *data[2];
@@ -49,7 +51,6 @@ void epoch_enter_internal(epoch_t epoch, struct thread
void epoch_exit_internal(epoch_t epoch, struct thread *td);
void epoch_wait(epoch_t epoch);
void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t));
-void epoch_pcpu_poll(void);
int in_epoch(void);
static __inline void
More information about the svn-src-all
mailing list