svn commit: r340404 - in head/sys: kern sys
Gleb Smirnoff
glebius at FreeBSD.org
Tue Nov 13 19:02:13 UTC 2018
Author: glebius
Date: Tue Nov 13 19:02:11 2018
New Revision: 340404
URL: https://svnweb.freebsd.org/changeset/base/340404
Log:
Uninline epoch(9) entrance and exit. There is no proof that modern
processors benefit from avoiding a function call here, while the
inlining demonstrably bloats the code. In fact, clang already emitted
an uninlined real copy of these functions in many object files across
the network stack.
- Move epoch_private.h into subr_epoch.c. The code is copied verbatim,
with no changes, not even style(9) cleanups.
- Remove private copies of critical_enter/exit.
Reviewed by: kib, jtl
Differential Revision: https://reviews.freebsd.org/D17879
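
For context, a typical preemptible epoch(9) read section looks like the
minimal sketch below. Only the epoch_enter_preempt() and
epoch_exit_preempt() calls correspond to the functions uninlined by this
commit; my_epoch and the traversal comment are hypothetical placeholders.

#include <sys/param.h>
#include <sys/epoch.h>

extern epoch_t my_epoch;	/* hypothetical, for illustration only */

static void
reader(void)
{
	struct epoch_tracker et;

	epoch_enter_preempt(my_epoch, &et);	/* a real function call after this change */
	/* ... lockless read of an epoch-protected structure ... */
	epoch_exit_preempt(my_epoch, &et);
}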
Deleted:
head/sys/sys/epoch_private.h
Modified:
head/sys/kern/subr_epoch.c
head/sys/sys/epoch.h
Modified: head/sys/kern/subr_epoch.c
==============================================================================
--- head/sys/kern/subr_epoch.c Tue Nov 13 18:49:43 2018 (r340403)
+++ head/sys/kern/subr_epoch.c Tue Nov 13 19:02:11 2018 (r340404)
@@ -55,6 +55,40 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
+typedef struct epoch_thread {
+#ifdef EPOCH_TRACKER_DEBUG
+ uint64_t et_magic_pre;
+#endif
+ TAILQ_ENTRY(epoch_thread) et_link; /* Epoch queue. */
+ struct thread *et_td; /* pointer to thread in section */
+ ck_epoch_section_t et_section; /* epoch section object */
+#ifdef EPOCH_TRACKER_DEBUG
+ uint64_t et_magic_post;
+#endif
+} *epoch_thread_t;
+TAILQ_HEAD (epoch_tdlist, epoch_thread);
+
+#ifdef __amd64__
+#define EPOCH_ALIGN CACHE_LINE_SIZE*2
+#else
+#define EPOCH_ALIGN CACHE_LINE_SIZE
+#endif
+
+typedef struct epoch_record {
+ ck_epoch_record_t er_read_record;
+ ck_epoch_record_t er_write_record;
+ volatile struct epoch_tdlist er_tdlist;
+ volatile uint32_t er_gen;
+ uint32_t er_cpuid;
+} __aligned(EPOCH_ALIGN) *epoch_record_t;
+
+struct epoch {
+ struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
+ epoch_record_t e_pcpu_record;
+ int e_idx;
+ int e_flags;
+};
+
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64
@@ -157,6 +191,15 @@ epoch_ctor(epoch_t epoch)
}
}
+static void
+epoch_adjust_prio(struct thread *td, u_char prio)
+{
+
+ thread_lock(td);
+ sched_prio(td, prio);
+ thread_unlock(td);
+}
+
epoch_t
epoch_alloc(int flags)
{
@@ -192,32 +235,110 @@ epoch_free(epoch_t epoch)
free(epoch, M_EPOCH);
}
+static epoch_record_t
+epoch_currecord(epoch_t epoch)
+{
+
+ return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
+}
+
+#define INIT_CHECK(epoch) \
+ do { \
+ if (__predict_false((epoch) == NULL)) \
+ return; \
+ } while (0)
+
void
-epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
+epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
+ struct epoch_record *er;
+ struct epoch_thread *etd;
+ struct thread_lite *td;
- epoch_enter_preempt(epoch, et);
+ MPASS(cold || epoch != NULL);
+ INIT_CHECK(epoch);
+ etd = (void *)et;
+ MPASS(epoch->e_flags & EPOCH_PREEMPT);
+#ifdef EPOCH_TRACKER_DEBUG
+ etd->et_magic_pre = EPOCH_MAGIC0;
+ etd->et_magic_post = EPOCH_MAGIC1;
+#endif
+ td = (struct thread_lite *)curthread;
+ etd->et_td = (void*)td;
+ td->td_epochnest++;
+ critical_enter();
+ sched_pin_lite(td);
+
+ td->td_pre_epoch_prio = td->td_priority;
+ er = epoch_currecord(epoch);
+ TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
+ ck_epoch_begin(&er->er_read_record, (ck_epoch_section_t *)&etd->et_section);
+ critical_exit();
}
void
-epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
+epoch_enter(epoch_t epoch)
{
+ struct thread_lite *td;
+ epoch_record_t er;
- epoch_exit_preempt(epoch, et);
+ MPASS(cold || epoch != NULL);
+ INIT_CHECK(epoch);
+ td = (struct thread_lite *)curthread;
+
+ td->td_epochnest++;
+ critical_enter();
+ er = epoch_currecord(epoch);
+ ck_epoch_begin(&er->er_read_record, NULL);
}
void
-epoch_enter_KBI(epoch_t epoch)
+epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
+ struct epoch_record *er;
+ struct epoch_thread *etd;
+ struct thread_lite *td;
- epoch_enter(epoch);
+ INIT_CHECK(epoch);
+ td = (struct thread_lite *)curthread;
+ critical_enter();
+ sched_unpin_lite(td);
+ MPASS(td->td_epochnest);
+ td->td_epochnest--;
+ er = epoch_currecord(epoch);
+ MPASS(epoch->e_flags & EPOCH_PREEMPT);
+ etd = (void *)et;
+ MPASS(etd != NULL);
+ MPASS(etd->et_td == (struct thread *)td);
+#ifdef EPOCH_TRACKER_DEBUG
+ MPASS(etd->et_magic_pre == EPOCH_MAGIC0);
+ MPASS(etd->et_magic_post == EPOCH_MAGIC1);
+ etd->et_magic_pre = 0;
+ etd->et_magic_post = 0;
+#endif
+ etd->et_td = (void*)0xDEADBEEF;
+ ck_epoch_end(&er->er_read_record,
+ (ck_epoch_section_t *)&etd->et_section);
+ TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
+ er->er_gen++;
+ if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
+ epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio);
+ critical_exit();
}
void
-epoch_exit_KBI(epoch_t epoch)
+epoch_exit(epoch_t epoch)
{
+ struct thread_lite *td;
+ epoch_record_t er;
- epoch_exit(epoch);
+ INIT_CHECK(epoch);
+ td = (struct thread_lite *)curthread;
+ MPASS(td->td_epochnest);
+ td->td_epochnest--;
+ er = epoch_currecord(epoch);
+ ck_epoch_end(&er->er_read_record, NULL);
+ critical_exit();
}
/*
@@ -545,12 +666,4 @@ int
in_epoch(epoch_t epoch)
{
return (in_epoch_verbose(epoch, 0));
-}
-
-void
-epoch_adjust_prio(struct thread *td, u_char prio)
-{
- thread_lock(td);
- sched_prio(td, prio);
- thread_unlock(td);
}
Modified: head/sys/sys/epoch.h
==============================================================================
--- head/sys/sys/epoch.h Tue Nov 13 18:49:43 2018 (r340403)
+++ head/sys/sys/epoch.h Tue Nov 13 19:02:11 2018 (r340404)
@@ -29,27 +29,18 @@
#ifndef _SYS_EPOCH_H_
#define _SYS_EPOCH_H_
-#ifdef _KERNEL
-#include <sys/lock.h>
-#include <sys/pcpu.h>
-#endif
-struct epoch;
-typedef struct epoch *epoch_t;
+/*
+ * XXXGL: temporarily keep epoch_tracker exposed to userland until
+ * we remove trackers embedded into network structs.
+ */
-#define EPOCH_PREEMPT 0x1
-#define EPOCH_LOCKED 0x2
-
-extern epoch_t global_epoch;
-extern epoch_t global_epoch_preempt;
-
struct epoch_context {
void *data[2];
} __aligned(sizeof(void *));
typedef struct epoch_context *epoch_context_t;
-
struct epoch_tracker {
void *datap[3];
#ifdef EPOCH_TRACKER_DEBUG
@@ -61,6 +52,19 @@ struct epoch_tracker {
typedef struct epoch_tracker *epoch_tracker_t;
+#ifdef _KERNEL
+#include <sys/lock.h>
+#include <sys/pcpu.h>
+
+struct epoch;
+typedef struct epoch *epoch_t;
+
+#define EPOCH_PREEMPT 0x1
+#define EPOCH_LOCKED 0x2
+
+extern epoch_t global_epoch;
+extern epoch_t global_epoch_preempt;
+
epoch_t epoch_alloc(int flags);
void epoch_free(epoch_t epoch);
void epoch_wait(epoch_t epoch);
@@ -68,26 +72,15 @@ void epoch_wait_preempt(epoch_t epoch);
void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t));
int in_epoch(epoch_t epoch);
int in_epoch_verbose(epoch_t epoch, int dump_onfail);
-#ifdef _KERNEL
DPCPU_DECLARE(int, epoch_cb_count);
DPCPU_DECLARE(struct grouptask, epoch_cb_task);
#define EPOCH_MAGIC0 0xFADECAFEF00DD00D
#define EPOCH_MAGIC1 0xBADDBABEDEEDFEED
-void epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et);
-void epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et);
-void epoch_enter_KBI(epoch_t epoch);
-void epoch_exit_KBI(epoch_t epoch);
+void epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et);
+void epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et);
+void epoch_enter(epoch_t epoch);
+void epoch_exit(epoch_t epoch);
-
-#if defined(KLD_MODULE) && !defined(KLD_TIED)
-#define epoch_enter_preempt(e, t) epoch_enter_preempt_KBI((e), (t))
-#define epoch_exit_preempt(e, t) epoch_exit_preempt_KBI((e), (t))
-#define epoch_enter(e) epoch_enter_KBI((e))
-#define epoch_exit(e) epoch_exit_KBI((e))
-#else
-#include <sys/epoch_private.h>
-#endif /* KLD_MODULE */
-
-#endif /* _KERNEL */
-#endif
+#endif /* _KERNEL */
+#endif /* _SYS_EPOCH_H_ */
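
The header above continues to declare the deferred-reclamation entry
points alongside the now-uninlined enter/exit functions. A minimal
sketch of epoch_call() usage follows; struct foo, foo_free_cb() and the
M_TEMP malloc type are hypothetical stand-ins.

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/epoch.h>

struct foo {
	struct epoch_context fo_ctx;	/* embedded context handed to epoch_call() */
	int fo_data;
};

static void
foo_free_cb(epoch_context_t ctx)
{
	struct foo *fp;

	fp = __containerof(ctx, struct foo, fo_ctx);
	free(fp, M_TEMP);
}

/*
 * Writer side: unlink fp from the epoch-protected structure first, then
 * defer the actual free until all in-flight readers have left the epoch.
 */
static void
foo_destroy(epoch_t epoch, struct foo *fp)
{
	epoch_call(epoch, &fp->fo_ctx, foo_free_cb);
}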