svn commit: r334152 - in stable/11/sys: amd64/amd64 amd64/include dev/cpuctl i386/include x86/acpica x86/include x86/x86
Konstantin Belousov
kib at FreeBSD.org
Thu May 24 13:17:27 UTC 2018
Author: kib
Date: Thu May 24 13:17:24 2018
New Revision: 334152
URL: https://svnweb.freebsd.org/changeset/base/334152
Log:
MFC r334004:
Add Intel Spec Store Bypass Disable control.
This also includes the i386/include/pcpu.h part of r334018.
Security: CVE-2018-3639
Approved by: re (gjb)
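
The commit wires up two new machine-dependent knobs: hw.spec_store_bypass_disable, a read-write loader tunable and sysctl selecting the SSBD policy (0 - off, 1 - on, 2 - auto), and hw.spec_store_bypass_disable_active, a read-only sysctl reporting whether SSBD is currently programmed into IA32_SPEC_CTRL. A minimal userland sketch of driving these through sysctl(3) follows; it is an illustration only, not part of the commit:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
            int active, mode;
            size_t len;

            /* Read whether SSBD is currently programmed on the CPUs. */
            len = sizeof(active);
            if (sysctlbyname("hw.spec_store_bypass_disable_active",
                &active, &len, NULL, 0) == -1)
                    return (1);
            printf("SSBD active: %d\n", active);

            /*
             * Select the "auto" policy (requires root); the sysctl
             * handler then rewrites IA32_SPEC_CTRL on every CPU.
             */
            mode = 2;
            if (sysctlbyname("hw.spec_store_bypass_disable", NULL, NULL,
                &mode, sizeof(mode)) == -1)
                    return (1);
            return (0);
    }
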
Modified:
stable/11/sys/amd64/amd64/initcpu.c
stable/11/sys/amd64/amd64/machdep.c
stable/11/sys/amd64/include/md_var.h
stable/11/sys/dev/cpuctl/cpuctl.c
stable/11/sys/i386/include/pcpu.h
stable/11/sys/x86/acpica/acpi_wakeup.c
stable/11/sys/x86/include/x86_var.h
stable/11/sys/x86/x86/cpu_machdep.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/amd64/amd64/initcpu.c
==============================================================================
--- stable/11/sys/amd64/amd64/initcpu.c Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/amd64/amd64/initcpu.c Thu May 24 13:17:24 2018 (r334152)
@@ -222,6 +222,7 @@ initializecpu(void)
pg_nx = PG_NX;
}
hw_ibrs_recalculate();
+ hw_ssb_recalculate(false);
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
init_amd();
Modified: stable/11/sys/amd64/amd64/machdep.c
==============================================================================
--- stable/11/sys/amd64/amd64/machdep.c Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/amd64/amd64/machdep.c Thu May 24 13:17:24 2018 (r334152)
@@ -1850,6 +1850,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
thread0.td_critnest = 0;
TUNABLE_INT_FETCH("hw.ibrs_disable", &hw_ibrs_disable);
+ TUNABLE_INT_FETCH("hw.spec_store_bypass_disable", &hw_ssb_disable);
/* Location of kernel stack for locore */
return ((u_int64_t)thread0.td_pcb);
Modified: stable/11/sys/amd64/include/md_var.h
==============================================================================
--- stable/11/sys/amd64/include/md_var.h Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/amd64/include/md_var.h Thu May 24 13:17:24 2018 (r334152)
@@ -37,6 +37,7 @@
extern uint64_t *vm_page_dump;
extern int hw_lower_amd64_sharedpage;
extern int hw_ibrs_disable;
+extern int hw_ssb_disable;
/*
* The file "conf/ldscript.amd64" defines the symbol "kernphys". Its
Modified: stable/11/sys/dev/cpuctl/cpuctl.c
==============================================================================
--- stable/11/sys/dev/cpuctl/cpuctl.c Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/dev/cpuctl/cpuctl.c Thu May 24 13:17:24 2018 (r334152)
@@ -527,6 +527,7 @@ cpuctl_do_eval_cpu_features(int cpu, struct thread *td
identify_cpu2();
hw_ibrs_recalculate();
restore_cpu(oldcpu, is_bound, td);
+ hw_ssb_recalculate(true);
printcpuinfo();
return (0);
}
Modified: stable/11/sys/i386/include/pcpu.h
==============================================================================
--- stable/11/sys/i386/include/pcpu.h Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/i386/include/pcpu.h Thu May 24 13:17:24 2018 (r334152)
@@ -68,7 +68,8 @@
caddr_t pc_cmap_addr2; \
vm_offset_t pc_qmap_addr; /* KVA for temporary mappings */\
uint32_t pc_smp_tlb_done; /* TLB op acknowledgement */ \
- char __pad[189]
+ uint32_t pc_ibpb_set; \
+ char __pad[185]
#ifdef _KERNEL
Modified: stable/11/sys/x86/acpica/acpi_wakeup.c
==============================================================================
--- stable/11/sys/x86/acpica/acpi_wakeup.c Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/x86/acpica/acpi_wakeup.c Thu May 24 13:17:24 2018 (r334152)
@@ -225,6 +225,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
#endif
#ifdef __amd64__
hw_ibrs_active = 0;
+ hw_ssb_active = 0;
cpu_stdext_feature3 = 0;
CPU_FOREACH(i) {
pc = pcpu_find(i);
Modified: stable/11/sys/x86/include/x86_var.h
==============================================================================
--- stable/11/sys/x86/include/x86_var.h Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/x86/include/x86_var.h Thu May 24 13:17:24 2018 (r334152)
@@ -83,6 +83,7 @@ extern int use_xsave;
extern uint64_t xsave_mask;
extern int pti;
extern int hw_ibrs_active;
+extern int hw_ssb_active;
struct pcb;
struct thread;
@@ -133,6 +134,7 @@ int isa_nmi(int cd);
void handle_ibrs_entry(void);
void handle_ibrs_exit(void);
void hw_ibrs_recalculate(void);
+void hw_ssb_recalculate(bool all_cpus);
void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
void nmi_call_kdb_smp(u_int type, struct trapframe *frame);
void nmi_handle_intr(u_int type, struct trapframe *frame);
Modified: stable/11/sys/x86/x86/cpu_machdep.c
==============================================================================
--- stable/11/sys/x86/x86/cpu_machdep.c Thu May 24 12:14:14 2018 (r334151)
+++ stable/11/sys/x86/x86/cpu_machdep.c Thu May 24 13:17:24 2018 (r334152)
@@ -158,6 +158,7 @@ void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
int *state;
+ uint64_t v;
/*
* A comment in Linux patch claims that 'CPUs run faster with
@@ -174,13 +175,26 @@ acpi_cpu_idle_mwait(uint32_t mwait_hint)
KASSERT(atomic_load_int(state) == STATE_SLEEPING,
("cpu_mwait_cx: wrong monitorbuf state"));
atomic_store_int(state, STATE_MWAIT);
- handle_ibrs_exit();
+ if (PCPU_GET(ibpb_set) || hw_ssb_active) {
+ v = rdmsr(MSR_IA32_SPEC_CTRL);
+ wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
+ IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
+ } else {
+ v = 0;
+ }
cpu_monitor(state, 0, 0);
if (atomic_load_int(state) == STATE_MWAIT)
cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
- handle_ibrs_entry();
/*
+ * SSB cannot be disabled while we sleep, or rather, if it was
+ * disabled, the sysctl thread will bind to our cpu to tweak
+ * the MSR.
+ */
+ if (v != 0)
+ wrmsr(MSR_IA32_SPEC_CTRL, v);
+
+ /*
* We should exit on any event that interrupts mwait, because
* that event might be a wanted interrupt.
*/
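
For reference, the IA32_SPEC_CTRL bits manipulated above are not part of this diff; they live in sys/x86/include/specialreg.h. The architectural values, per Intel's speculation-control documentation, are:

    #define MSR_IA32_SPEC_CTRL    0x048       /* speculation control MSR */
    #define IA32_SPEC_CTRL_IBRS   0x00000001  /* bit 0: restrict indirect branches */
    #define IA32_SPEC_CTRL_STIBP  0x00000002  /* bit 1: per-thread indirect branch predictors */
    #define IA32_SPEC_CTRL_SSBD   0x00000004  /* bit 2: speculative store bypass disable */
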
@@ -836,3 +850,93 @@ hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
"Disable Indirect Branch Restricted Speculation");
+
+int hw_ssb_active;
+int hw_ssb_disable;
+
+SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
+ &hw_ssb_active, 0,
+ "Speculative Store Bypass Disable active");
+
+static void
+hw_ssb_set_one(bool enable)
+{
+ uint64_t v;
+
+ v = rdmsr(MSR_IA32_SPEC_CTRL);
+ if (enable)
+ v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
+ else
+ v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
+ wrmsr(MSR_IA32_SPEC_CTRL, v);
+}
+
+static void
+hw_ssb_set(bool enable, bool for_all_cpus)
+{
+ struct thread *td;
+ int bound_cpu, i, is_bound;
+
+ if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
+ hw_ssb_active = 0;
+ return;
+ }
+ hw_ssb_active = enable;
+ if (for_all_cpus) {
+ td = curthread;
+ thread_lock(td);
+ is_bound = sched_is_bound(td);
+ bound_cpu = td->td_oncpu;
+ CPU_FOREACH(i) {
+ sched_bind(td, i);
+ hw_ssb_set_one(enable);
+ }
+ if (is_bound)
+ sched_bind(td, bound_cpu);
+ else
+ sched_unbind(td);
+ thread_unlock(td);
+ } else {
+ hw_ssb_set_one(enable);
+ }
+}
+
+void
+hw_ssb_recalculate(bool all_cpus)
+{
+
+ switch (hw_ssb_disable) {
+ default:
+ hw_ssb_disable = 0;
+ /* FALLTHROUGH */
+ case 0: /* off */
+ hw_ssb_set(false, all_cpus);
+ break;
+ case 1: /* on */
+ hw_ssb_set(true, all_cpus);
+ break;
+ case 2: /* auto */
+ hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ?
+ false : true, all_cpus);
+ break;
+ }
+}
+
+static int
+hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
+{
+ int error, val;
+
+ val = hw_ssb_disable;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ hw_ssb_disable = val;
+ hw_ssb_recalculate(true);
+ return (0);
+}
+SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
+ CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
+ hw_ssb_disable_handler, "I",
+ "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto");
+
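
hw_ssb_set() keys off cpu_stdext_feature3 & CPUID_STDEXT3_SSBD, i.e. CPUID leaf 7, subleaf 0, EDX bit 31, and the "auto" policy additionally honors SSB_NO (bit 4 of the IA32_ARCH_CAPABILITIES MSR, 0x10a), which a CPU sets when it is not vulnerable to speculative store bypass in the first place. A standalone sketch of the same enumeration from userland, assuming GCC/Clang's <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID leaf 7, subleaf 0: structured extended feature flags. */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0) {
                    printf("CPUID leaf 7 not supported\n");
                    return (1);
            }
            /* EDX bit 31 advertises IA32_SPEC_CTRL.SSBD. */
            printf("SSBD %s\n",
                (edx & (1u << 31)) != 0 ? "supported" : "not supported");
            return (0);
    }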