svn commit: r328805 - in stable/11/sys: kern sys
Alexander Motin
mav at FreeBSD.org
Fri Feb 2 18:38:30 UTC 2018
Author: mav
Date: Fri Feb 2 18:38:29 2018
New Revision: 328805
URL: https://svnweb.freebsd.org/changeset/base/328805
Log:
MFC r312293,312305,312434,312698,312759,312760,312814,312815,322672:
Synchronize gtaskqueue vs SMP initialization order with head.
This should fix panics on boot, caused by recent Linux KPI merges.
Modified:
stable/11/sys/kern/subr_gtaskqueue.c
stable/11/sys/sys/gtaskqueue.h
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/kern/subr_gtaskqueue.c
==============================================================================
--- stable/11/sys/kern/subr_gtaskqueue.c Fri Feb 2 18:12:09 2018 (r328804)
+++ stable/11/sys/kern/subr_gtaskqueue.c Fri Feb 2 18:38:29 2018 (r328805)
@@ -633,6 +633,31 @@ taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
return (idx);
}
+/*
+ * smp_started is unusable since it is not set for UP kernels or even for
+ * SMP kernels when there is 1 CPU. This is usually handled by adding a
+ * (mp_ncpus == 1) test, but that would be broken here since we need to
+ synchronize with the SI_SUB_SMP ordering. Even in the pure SMP case
+ * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
+ *
+ * So maintain our own flag. It must be set after all CPUs are started
+ * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
+ * adjustment is properly delayed. SI_ORDER_FOURTH is clearly before
+ * SI_ORDER_ANY and unclearly after the CPUs are started. It would be
+ * simpler for adjustment to pass a flag indicating if it is delayed.
+ */
+
+static int tqg_smp_started;
+
+static void
+tqg_record_smp_started(void *arg)
+{
+ tqg_smp_started = 1;
+}
+
+SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
+ tqg_record_smp_started, NULL);
+
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
void *uniq, int irq, char *name)
@@ -649,7 +674,7 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct gr
qgroup->tqg_queue[qid].tgc_cnt++;
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- if (irq != -1 && (smp_started || mp_ncpus == 1)) {
+ if (irq != -1 && tqg_smp_started) {
gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
CPU_ZERO(&mask);
CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
@@ -699,7 +724,7 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
gtask->gt_irq = irq;
gtask->gt_cpu = cpu;
mtx_lock(&qgroup->tqg_lock);
- if (smp_started || mp_ncpus == 1) {
+ if (tqg_smp_started) {
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
qid = i;
@@ -719,7 +744,7 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
- if (irq != -1 && (smp_started || mp_ncpus == 1))
+ if (irq != -1 && tqg_smp_started)
intr_setaffinity(irq, &mask);
return (0);
}
@@ -733,7 +758,7 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgro
qid = -1;
irq = gtask->gt_irq;
cpu = gtask->gt_cpu;
- MPASS(smp_started || mp_ncpus == 1);
+ MPASS(tqg_smp_started);
mtx_lock(&qgroup->tqg_lock);
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
@@ -826,9 +851,10 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
mtx_assert(&qgroup->tqg_lock, MA_OWNED);
- if (cnt < 1 || cnt * stride > mp_ncpus || (!smp_started && (mp_ncpus != 1))) {
- printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
- cnt, stride, mp_ncpus, smp_started);
+ if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) {
+ printf("%s: failed cnt: %d stride: %d "
+ "mp_ncpus: %d tqg_smp_started: %d\n",
+ __func__, cnt, stride, mp_ncpus, tqg_smp_started);
return (EINVAL);
}
if (qgroup->tqg_adjusting) {
Modified: stable/11/sys/sys/gtaskqueue.h
==============================================================================
--- stable/11/sys/sys/gtaskqueue.h Fri Feb 2 18:12:09 2018 (r328804)
+++ stable/11/sys/sys/gtaskqueue.h Fri Feb 2 18:38:29 2018 (r328805)
@@ -80,8 +80,6 @@ int taskqgroup_adjust(struct taskqgroup *qgroup, int c
#define TASKQGROUP_DECLARE(name) \
extern struct taskqgroup *qgroup_##name
-
-#ifdef EARLY_AP_STARTUP
#define TASKQGROUP_DEFINE(name, cnt, stride) \
\
struct taskqgroup *qgroup_##name; \
@@ -90,23 +88,9 @@ static void \
taskqgroup_define_##name(void *arg) \
{ \
qgroup_##name = taskqgroup_create(#name); \
- taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
} \
\
-SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
- taskqgroup_define_##name, NULL)
-#else
-#define TASKQGROUP_DEFINE(name, cnt, stride) \
- \
-struct taskqgroup *qgroup_##name; \
- \
-static void \
-taskqgroup_define_##name(void *arg) \
-{ \
- qgroup_##name = taskqgroup_create(#name); \
-} \
- \
-SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
+SYSINIT(taskqgroup_##name, SI_SUB_TASKQ, SI_ORDER_FIRST, \
taskqgroup_define_##name, NULL); \
\
static void \
@@ -116,10 +100,8 @@ taskqgroup_adjust_##name(void *arg) \
} \
\
SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \
- taskqgroup_adjust_##name, NULL); \
- \
-struct __hack
-#endif
+ taskqgroup_adjust_##name, NULL)
+
TASKQGROUP_DECLARE(net);
TASKQGROUP_DECLARE(softirq);
More information about the svn-src-stable-11
mailing list