git: 2b887687edc2 - main - Hyper-V: TLB flush enlightenment using hypercall
Date: Fri, 07 Jun 2024 08:01:17 UTC
The branch main has been updated by whu: URL: https://cgit.FreeBSD.org/src/commit/?id=2b887687edc25bb4553f0d8a1183f454a85d413d commit 2b887687edc25bb4553f0d8a1183f454a85d413d Author: Souradeep Chakrabarti <schakrabarti@microsoft.com> AuthorDate: 2024-06-07 07:56:07 +0000 Commit: Wei Hu <whu@FreeBSD.org> CommitDate: 2024-06-07 07:56:07 +0000 Hyper-V: TLB flush enlightment using hypercall Currently FreeBSD uses IPI based TLB flushing for remote TLB flushing. Hyper-V allows hypercalls to flush local and remote TLB. The use of Hyper-V hypercalls gives significant performance improvement in TLB operations. This patch set during test has shown near to 40 percent TLB performance improvement. Also this patch adds rep hypercall implementation as well. Reviewed by: whu, kib Tested by: whu Authored-by: Souradeep Chakrabarti <schakrabarti@microsoft.com> Co-Authored-by: Erni Sri Satya Vennela <ernis@microsoft.com> MFC after: 1 week Sponsored by: Microsoft Differential Revision: https://reviews.freebsd.org/D45521 --- sys/conf/files.amd64 | 5 +- sys/dev/hyperv/vmbus/hyperv.c | 63 ++++++++ sys/dev/hyperv/vmbus/hyperv_mmu.c | 309 ++++++++++++++++++++++++++++++++++++++ sys/dev/hyperv/vmbus/hyperv_mmu.h | 57 +++++++ sys/dev/hyperv/vmbus/hyperv_var.h | 11 ++ sys/dev/hyperv/vmbus/vmbus.c | 36 +++++ sys/dev/hyperv/vmbus/vmbus_var.h | 54 +++++++ sys/modules/hyperv/vmbus/Makefile | 4 +- 8 files changed, 535 insertions(+), 4 deletions(-) diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64 index add27418ce08..19a16d42043f 100644 --- a/sys/conf/files.amd64 +++ b/sys/conf/files.amd64 @@ -128,8 +128,9 @@ dev/enic/vnic_intr.c optional enic dev/enic/vnic_rq.c optional enic dev/enic/vnic_wq.c optional enic dev/ftgpio/ftgpio.c optional ftgpio superio -dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv -dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv +dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv +dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv +dev/hyperv/vmbus/hyperv_mmu.c optional hyperv dev/iavf/if_iavf_iflib.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_lib.c optional iavf pci \ diff --git a/sys/dev/hyperv/vmbus/hyperv.c b/sys/dev/hyperv/vmbus/hyperv.c index e0e85a022090..1f85203146d0 100644 --- a/sys/dev/hyperv/vmbus/hyperv.c +++ b/sys/dev/hyperv/vmbus/hyperv.c @@ -35,6 +35,7 @@ #include <sys/malloc.h> #include <sys/systm.h> #include <sys/timetc.h> +#include <sys/cpuset.h> #include <vm/vm.h> #include <vm/vm_extern.h> @@ -50,6 +51,7 @@ #include <dev/hyperv/vmbus/x86/hyperv_machdep.h> #include <dev/hyperv/vmbus/x86/hyperv_reg.h> #endif +#include <dev/hyperv/vmbus/vmbus_var.h> #include <dev/hyperv/vmbus/hyperv_common_reg.h> #include <dev/hyperv/vmbus/hyperv_var.h> @@ -72,10 +74,12 @@ MSR_HV_GUESTID_OSID_FREEBSD | \ MSR_HV_GUESTID_OSTYPE_FREEBSD) + static bool hyperv_identify(void); static void hypercall_memfree(void); static struct hypercall_ctx hypercall_context; + uint64_t hypercall_post_message(bus_addr_t msg_paddr) { @@ -90,6 +94,65 @@ hypercall_signal_event(bus_addr_t monprm_paddr) HYPERCALL_SIGNAL_EVENT, monprm_paddr, 0); } +static inline int hv_result(uint64_t status) +{ + return status & HV_HYPERCALL_RESULT_MASK; +} + +static inline bool hv_result_success(uint64_t status) +{ + return hv_result(status) == HV_STATUS_SUCCESS; +} + +static inline unsigned int hv_repcomp(uint64_t status) +{ + /* Bits [43:32] of status have 'Reps completed' data. 
*/ + return ((status & HV_HYPERCALL_REP_COMP_MASK) >> + HV_HYPERCALL_REP_COMP_OFFSET); +} + +/* + * Rep hypercalls. Callers of this functions are supposed to ensure that + * rep_count and varhead_size comply with Hyper-V hypercall definition. + */ +uint64_t +hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, uint16_t varhead_size, + uint64_t input, uint64_t output) +{ + uint64_t control = code; + uint64_t status; + uint16_t rep_comp; + + control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; + control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; + + do { + status = hypercall_do_md(control, input, output); + if (!hv_result_success(status)) + return status; + + rep_comp = hv_repcomp(status); + + control &= ~HV_HYPERCALL_REP_START_MASK; + control |= (uint64_t)rep_comp << HV_HYPERCALL_REP_START_OFFSET; + + } while (rep_comp < rep_count); + if (hv_result_success(status)) + return HV_STATUS_SUCCESS; + + return status; +} + +uint64_t +hypercall_do_md(uint64_t input_val, uint64_t input_addr, uint64_t out_addr) +{ + uint64_t phys_inaddr, phys_outaddr; + phys_inaddr = input_addr ? vtophys(input_addr) : 0; + phys_outaddr = out_addr ? vtophys(out_addr) : 0; + return hypercall_md(hypercall_context.hc_addr, + input_val, phys_inaddr, phys_outaddr); +} + int hyperv_guid2str(const struct hyperv_guid *guid, char *buf, size_t sz) { diff --git a/sys/dev/hyperv/vmbus/hyperv_mmu.c b/sys/dev/hyperv/vmbus/hyperv_mmu.c new file mode 100644 index 000000000000..13b1f52fa1f6 --- /dev/null +++ b/sys/dev/hyperv/vmbus/hyperv_mmu.c @@ -0,0 +1,309 @@ +/*- + * Copyright (c) 2009-2012,2016-2024 Microsoft Corp. + * Copyright (c) 2012 NetApp Inc. + * Copyright (c) 2012 Citrix Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/kernel.h> +#include <sys/linker.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/module.h> +#include <sys/mutex.h> +#include <sys/sbuf.h> +#include <sys/smp.h> +#include <sys/sysctl.h> +#include <sys/systm.h> +#include <sys/proc.h> +#include <sys/sched.h> +#include <sys/kdb.h> +#include <vm/vm.h> +#include <vm/pmap.h> + +#include <machine/bus.h> +#include <dev/hyperv/vmbus/x86/hyperv_machdep.h> +#include <dev/hyperv/vmbus/x86/hyperv_reg.h> +#include <dev/hyperv/include/hyperv.h> +#include <dev/hyperv/vmbus/hyperv_var.h> +#include <dev/hyperv/vmbus/vmbus_reg.h> +#include <dev/hyperv/vmbus/vmbus_var.h> +#include <dev/hyperv/vmbus/hyperv_common_reg.h> +#include "hyperv_mmu.h" + +static inline int fill_gva_list(uint64_t gva_list[], + unsigned long start, unsigned long end) +{ + int gva_n = 0; + unsigned long cur = start, diff; + + do { + diff = end > cur ? end - cur : 0; + + gva_list[gva_n] = cur; + /* + * Lower 12 bits encode the number of additional + * pages to flush (in addition to the 'cur' page). + */ + if (diff >= HV_TLB_FLUSH_UNIT) { + gva_list[gva_n] |= PAGE_MASK; + cur += HV_TLB_FLUSH_UNIT; + } else if (diff) { + gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; + cur = end; + } + + gva_n++; + + } while (cur < end); + + return gva_n; +} + + +inline int hv_cpumask_to_vpset(struct hv_vpset *vpset, + const cpuset_t *cpus, struct vmbus_softc * sc) +{ + int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; + int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK; + + /* + * vpset.valid_bank_mask can represent up to + * HV_MAX_SPARSE_VCPU_BANKS banks + */ + if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS) + return 0; + + /* + * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex + * structs are not cleared between calls, we risk flushing unneeded + * vCPUs otherwise. + */ + for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++) + vpset->bank_contents[vcpu_bank] = 0; + + /* + * Some banks may end up being empty but this is acceptable. + */ + CPU_FOREACH_ISSET(cpu, cpus) { + vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu); + if (vcpu == -1) + return -1; + vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK; + vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK; + set_bit(vcpu_offset, (unsigned long *) + &vpset->bank_contents[vcpu_bank]); + if (vcpu_bank >= nr_bank) + nr_bank = vcpu_bank + 1; + } + vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); + return nr_bank; +} + + + + +void +hv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2, + enum invl_op_codes op, struct vmbus_softc *sc, smp_invl_local_cb_t curcpu_cb) +{ + cpuset_t tmp_mask, mask; + struct hyperv_tlb_flush *flush; + int cpu, vcpu; + int max_gvas, gva_n; + uint64_t status = 0; + uint64_t cr3; + + /* + * Hyper-V doesn't handle the invalidating cache. Let system handle it. + */ + if (op == INVL_OP_CACHE) + return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2, + curcpu_cb, op); + + flush = *DPCPU_PTR(hv_pcpu_mem); + if (flush == NULL) + return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2, + curcpu_cb, op); + /* + * It is not necessary to signal other CPUs while booting or + * when in the debugger. + */ + if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started)) + goto local_cb; + + KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); + + /* + * Make a stable copy of the set of CPUs on which the pmap is active. + * See if we have to interrupt other CPUs. 
+ */ + CPU_COPY(pmap_invalidate_cpu_mask(pmap), &tmp_mask); + CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask); + CPU_CLR(curcpu, &tmp_mask); + if (CPU_EMPTY(&tmp_mask)) + goto local_cb; + + /* + * Initiator must have interrupts enabled, which prevents + * non-invalidation IPIs that take smp_ipi_mtx spinlock, + * from deadlocking with us. On the other hand, preemption + * must be disabled to pin initiator to the instance of the + * pcpu pc_smp_tlb data and scoreboard line. + */ + KASSERT((read_rflags() & PSL_I) != 0, + ("hv_tlb_flush: interrupts disabled")); + critical_enter(); + flush->processor_mask = 0; + cr3 = pmap->pm_cr3; + + if (op == INVL_OP_TLB || op == INVL_OP_TLB_INVPCID || + op == INVL_OP_TLB_INVPCID_PTI || op == INVL_OP_TLB_PCID) { + flush->address_space = 0; + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES; + } else { + + flush->address_space = cr3; + flush->address_space &= ~CR3_PCID_MASK; + flush->flags = 0; + } + if(CPU_CMP(&mask, &all_cpus) == 0) { + flush->flags |= HV_FLUSH_ALL_PROCESSORS; + } else { + if (CPU_FLS(&mask) < mp_ncpus && CPU_FLS(&mask) >= 64) + goto do_ex_hypercall; + + CPU_FOREACH_ISSET(cpu, &mask) { + vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu); + if (vcpu >= 64) + goto do_ex_hypercall; + + set_bit(vcpu, &flush->processor_mask); + } + if (!flush->processor_mask ) + goto native; + } + max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]); + if (addr2 == 0) { + flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; + status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, + (uint64_t)flush, (uint64_t)NULL); + } else if ((addr2 && (addr2 -addr1)/HV_TLB_FLUSH_UNIT) > max_gvas) { + status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, + (uint64_t)flush, (uint64_t)NULL); + } else { + gva_n = fill_gva_list(flush->gva_list, addr1, addr2); + + status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST, + gva_n, 0, (uint64_t)flush, (uint64_t)NULL); + + } + if(status) + goto native; + sched_unpin(); + critical_exit(); + return; + +local_cb: + critical_enter(); + curcpu_cb(pmap, addr1, addr2); + sched_unpin(); + critical_exit(); + return; +do_ex_hypercall: + status = hv_flush_tlb_others_ex(pmap, addr1, addr2, mask, op, sc); + if (status) + goto native; + sched_unpin(); + critical_exit(); + return; +native: + sched_unpin(); + critical_exit(); + return smp_targeted_tlb_shootdown_native(pmap, addr1, + addr2, curcpu_cb, op); +} + +uint64_t +hv_flush_tlb_others_ex(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2, + const cpuset_t mask, enum invl_op_codes op, struct vmbus_softc *sc) +{ + int nr_bank = 0, max_gvas, gva_n; + struct hv_tlb_flush_ex *flush; + if(*DPCPU_PTR(hv_pcpu_mem) == NULL) + return EINVAL; + flush = *DPCPU_PTR(hv_pcpu_mem); + uint64_t status = 0; + uint64_t cr3; + + if (!(hyperv_recommends & HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) + return EINVAL; + + cr3 = pmap->pm_cr3; + if (op == INVL_OP_TLB) { + flush->address_space = 0; + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES; + } else { + + flush->address_space = cr3; + flush->address_space &= ~CR3_PCID_MASK; + flush->flags = 0; + } + + flush->hv_vp_set.valid_bank_mask = 0; + + flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K; + nr_bank = hv_cpumask_to_vpset(&flush->hv_vp_set, &mask, sc); + if (nr_bank < 0) + return EINVAL; + + /* + * We can flush not more than max_gvas with one hypercall. Flush the + * whole address space if we were asked to do more. 
+ */ + max_gvas = (PAGE_SIZE - sizeof(*flush) - nr_bank * + sizeof(flush->hv_vp_set.bank_contents[0])) / + sizeof(flush->hv_vp_set.bank_contents[0]); + + if (addr2 == 0) { + flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, + 0, nr_bank, (uint64_t)flush, (uint64_t)NULL); + } else if (addr2 && + ((addr2 - addr1)/HV_TLB_FLUSH_UNIT) > max_gvas) { + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, + 0, nr_bank, (uint64_t)flush, (uint64_t)NULL); + } else { + gva_n = fill_gva_list(&flush->hv_vp_set.bank_contents[nr_bank], + addr1, addr2); + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, + gva_n, nr_bank, (uint64_t)flush, (uint64_t)NULL); + } + return status; +} diff --git a/sys/dev/hyperv/vmbus/hyperv_mmu.h b/sys/dev/hyperv/vmbus/hyperv_mmu.h new file mode 100644 index 000000000000..e62948d74181 --- /dev/null +++ b/sys/dev/hyperv/vmbus/hyperv_mmu.h @@ -0,0 +1,57 @@ +/*- + * Copyright (c) 2009-2012,2016-2024 Microsoft Corp. + * Copyright (c) 2012 NetApp Inc. + * Copyright (c) 2012 Citrix Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _HYPERV_MMU_H_ +#define _HYPERV_MMU_H_ + +#include "vmbus_var.h" + +#define HV_VCPUS_PER_SPARSE_BANK (64) +#define HV_MAX_SPARSE_VCPU_BANKS (64) + + +struct hyperv_tlb_flush { + uint64_t address_space; + uint64_t flags; + uint64_t processor_mask; + uint64_t gva_list[]; +}__packed; + +struct hv_vpset { + uint64_t format; + uint64_t valid_bank_mask; + uint64_t bank_contents[]; +} __packed; + +struct hv_tlb_flush_ex { + uint64_t address_space; + uint64_t flags; + struct hv_vpset hv_vp_set; +} __packed; + +#endif diff --git a/sys/dev/hyperv/vmbus/hyperv_var.h b/sys/dev/hyperv/vmbus/hyperv_var.h index 67f6cc4ef706..62cce9026ab0 100644 --- a/sys/dev/hyperv/vmbus/hyperv_var.h +++ b/sys/dev/hyperv/vmbus/hyperv_var.h @@ -33,7 +33,18 @@ struct hypercall_ctx { void *hc_addr; vm_paddr_t hc_paddr; }; + uint64_t hypercall_post_message(bus_addr_t msg_paddr); uint64_t hypercall_signal_event(bus_addr_t monprm_paddr); +uint64_t hypercall_do_md(uint64_t input, uint64_t in_addr, + uint64_t out_addr); +struct hv_vpset; +struct vmbus_softc; +uint64_t +hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, uint16_t varhead_size, + uint64_t input, uint64_t output); +int +hv_cpumask_to_vpset(struct hv_vpset *vpset, const cpuset_t *cpus, + struct vmbus_softc *sc); #endif /* !_HYPERV_VAR_H_ */ diff --git a/sys/dev/hyperv/vmbus/vmbus.c b/sys/dev/hyperv/vmbus/vmbus.c index 3cc210a5003c..a3daed05c21a 100644 --- a/sys/dev/hyperv/vmbus/vmbus.c +++ b/sys/dev/hyperv/vmbus/vmbus.c @@ -139,6 +139,8 @@ static void vmbus_event_proc_dummy(struct vmbus_softc *, int); static bus_dma_tag_t vmbus_get_dma_tag(device_t parent, device_t child); static struct vmbus_softc *vmbus_sc; +static void free_pcpu_ptr(void); +static void alloc_pcpu_ptr(void); SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Hyper-V vmbus"); @@ -208,6 +210,9 @@ static driver_t vmbus_driver = { sizeof(struct vmbus_softc) }; +uint32_t hv_max_vp_index; +DPCPU_DEFINE(void *, hv_pcpu_mem); + DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL); DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL); @@ -739,6 +744,7 @@ vmbus_synic_setup(void *xsc) int cpu = curcpu; uint64_t val, orig; uint32_t sint; + void **hv_cpu_mem; if (hyperv_features & CPUID_HV_MSR_VP_INDEX) { /* Save virtual processor id. */ @@ -748,6 +754,11 @@ vmbus_synic_setup(void *xsc) VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0; } + if (VMBUS_PCPU_GET(sc, vcpuid, cpu) > hv_max_vp_index) + hv_max_vp_index = VMBUS_PCPU_GET(sc, vcpuid, cpu); + hv_cpu_mem = DPCPU_ID_PTR(cpu, hv_pcpu_mem); + *hv_cpu_mem = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO, + 0ul, ~0ul, PAGE_SIZE, 0); /* * Setup the SynIC message. 
*/ @@ -786,6 +797,16 @@ vmbus_synic_setup(void *xsc) WRMSR(MSR_HV_SCONTROL, val); } +#if defined(__x86_64__) +void +hyperv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2, + smp_invl_local_cb_t curcpu_cb, enum invl_op_codes op) +{ + struct vmbus_softc *sc = vmbus_get_softc(); + return hv_vm_tlb_flush(pmap, addr1, addr2, op, sc, curcpu_cb); +} +#endif /*__x86_64__*/ + static void vmbus_synic_teardown(void *arg) { @@ -820,6 +841,7 @@ vmbus_synic_teardown(void *arg) */ orig = RDMSR(MSR_HV_SIEFP); WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); + free_pcpu_ptr(); } static int @@ -1373,6 +1395,16 @@ vmbus_probe(device_t dev) return (BUS_PROBE_DEFAULT); } + +static void free_pcpu_ptr(void) +{ + int cpu = curcpu; + void **hv_cpu_mem; + hv_cpu_mem = DPCPU_ID_PTR(cpu, hv_pcpu_mem); + if(*hv_cpu_mem) + contigfree(*hv_cpu_mem, PAGE_SIZE, M_DEVBUF); +} + /** * @brief Main vmbus driver initialization routine. * @@ -1470,6 +1502,10 @@ vmbus_doattach(struct vmbus_softc *sc) smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc); sc->vmbus_flags |= VMBUS_FLAG_SYNIC; +#if defined(__x86_64__) + smp_targeted_tlb_shootdown = &hyperv_vm_tlb_flush; +#endif + /* * Initialize vmbus, e.g. connect to Hypervisor. */ diff --git a/sys/dev/hyperv/vmbus/vmbus_var.h b/sys/dev/hyperv/vmbus/vmbus_var.h index 023d27c52cea..4f0668476716 100644 --- a/sys/dev/hyperv/vmbus/vmbus_var.h +++ b/sys/dev/hyperv/vmbus/vmbus_var.h @@ -32,6 +32,11 @@ #include <sys/taskqueue.h> #include <sys/rman.h> +#include <vm/vm.h> +#include <vm/vm_extern.h> +#include <vm/vm_param.h> +#include <vm/pmap.h> + #include <dev/pci/pcivar.h> #include <dev/pci/pcib_private.h> @@ -137,6 +142,40 @@ struct vmbus_softc { #define VMBUS_PCPU_GET(sc, field, cpu) (sc)->vmbus_pcpu[(cpu)].field #define VMBUS_PCPU_PTR(sc, field, cpu) &(sc)->vmbus_pcpu[(cpu)].field +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 +#define HV_FLUSH_ALL_PROCESSORS BIT(0) +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) +#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) + + +#define BIT(n) (1ULL << (n)) +#define BITS_PER_LONG (sizeof(long) * NBBY) +#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1))) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define set_bit(i, a) \ + atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i)) + +#define GENMASK_ULL(h, l) (((~0ULL) >> (64 - (h) - 1)) & ((~0ULL) << (l))) + +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 +#define HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) +#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) +#define HV_STATUS_SUCCESS 0 +#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_REP_COMP_OFFSET 32 + +#define HV_HYPERCALL_VARHEAD_OFFSET 17 + +#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) +#define HV_HYPERCALL_REP_START_OFFSET 48 + +enum HV_GENERIC_SET_FORMAT { + HV_GENERIC_SET_SPARSE_4K, + HV_GENERIC_SET_ALL, +}; struct vmbus_channel; struct trapframe; @@ -176,4 +215,19 @@ void vmbus_synic_setup1(void *xsc); void vmbus_synic_teardown1(void); int vmbus_setup_intr1(struct vmbus_softc *sc); void vmbus_intr_teardown1(struct vmbus_softc *sc); + +DPCPU_DECLARE(void *, hv_pcpu_mem); + +extern uint32_t hv_max_vp_index; + + +#if defined(__x86_64__) +void hyperv_vm_tlb_flush(pmap_t, vm_offset_t, + vm_offset_t, smp_invl_local_cb_t, enum invl_op_codes); +uint64_t hv_flush_tlb_others_ex(pmap_t, vm_offset_t, 
vm_offset_t, + cpuset_t, enum invl_op_codes, struct vmbus_softc *); +void hv_vm_tlb_flush(pmap_t, vm_offset_t, vm_offset_t, + enum invl_op_codes, struct vmbus_softc *, + smp_invl_local_cb_t); +#endif /* __x86_64__ */ #endif /* !_VMBUS_VAR_H_ */ diff --git a/sys/modules/hyperv/vmbus/Makefile b/sys/modules/hyperv/vmbus/Makefile index 1659d5186493..9863f4db46ca 100644 --- a/sys/modules/hyperv/vmbus/Makefile +++ b/sys/modules/hyperv/vmbus/Makefile @@ -14,8 +14,8 @@ SRCS= hyperv.c \ vmbus_res.c \ vmbus_xact.c -.if ${MACHINE_CPUARCH} != "i386" && ${MACHINE_CPUARCH} != "aarch64" -SRCS+= vmbus_vector.S +.if ${MACHINE_CPUARCH} == "amd64" +SRCS+= vmbus_vector.S hyperv_mmu.c .endif .if ${MACHINE_CPUARCH} != "aarch64" SRCS+= vmbus_et.c hyperv_x86.c vmbus_x86.c
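
The diff above carries the authoritative implementation. As a quick orientation to the two encodings it relies on -- the rep-hypercall control word built in hv_do_rep_hypercall() and the gva_list[] entries built in fill_gva_list() -- here is a minimal standalone userspace sketch. The constants are copied from the patch's additions to vmbus_var.h; the helper names and sample addresses are illustrative only, and no hypercall is actually issued.

/*
 * Userspace illustration only -- not the kernel code from the diff.
 * It mirrors two encodings the patch uses:
 *   1. the rep-hypercall control word: call code in bits 15:0, variable
 *      header size at bit 17, rep count in bits 43:32, rep start index
 *      in bits 59:48 (see hv_do_rep_hypercall());
 *   2. a gva_list[] entry: a page-aligned GVA whose low 12 bits carry
 *      the number of additional pages to flush (see fill_gva_list()).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT			12
#define PAGE_SIZE			(1UL << PAGE_SHIFT)
#define PAGE_MASK			(PAGE_SIZE - 1)
#define HV_TLB_FLUSH_UNIT		(4096 * PAGE_SIZE)

#define HV_HYPERCALL_VARHEAD_OFFSET	17
#define HV_HYPERCALL_REP_COMP_OFFSET	32
#define HV_HYPERCALL_REP_START_OFFSET	48
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003

/* Pack a rep-hypercall control word from its fields. */
static uint64_t
rep_control(uint16_t code, uint16_t rep_count, uint16_t varhead_size,
    uint16_t rep_start)
{
	uint64_t control = code;

	control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
	control |= (uint64_t)rep_start << HV_HYPERCALL_REP_START_OFFSET;
	return (control);
}

/* Encode one gva_list[] entry for the range [cur, end). */
static uint64_t
gva_entry(uint64_t cur, uint64_t end)
{
	uint64_t entry = cur, diff = end - cur;

	if (diff >= HV_TLB_FLUSH_UNIT)
		entry |= PAGE_MASK;		/* entry covers 4096 pages */
	else if (diff != 0)
		entry |= (diff - 1) >> PAGE_SHIFT; /* additional pages */
	return (entry);
}

int
main(void)
{
	/* Flush a 3-entry GVA list, no variable header, starting at rep 0. */
	printf("control:   0x%016jx\n", (uintmax_t)rep_control(
	    HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST, 3, 0, 0));
	/* 16-page range: low 12 bits encode 15 additional pages. */
	printf("gva entry: 0x%016jx\n",
	    (uintmax_t)gva_entry(0x7f0000000000UL, 0x7f0000010000UL));
	return (0);
}

Since each gva_list[] entry can cover at most HV_TLB_FLUSH_UNIT (4096 pages), a range needing more entries than fit in the per-CPU page (max_gvas) is handled by the patch with a full address-space flush instead of a per-address list.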