git: 36ca4b79b848 - stable/13 - amd64: Define KVA regions for KMSAN shadow maps
Date: Wed, 03 Nov 2021 13:15:33 UTC
The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=36ca4b79b84870cd7da2fe59a359d2c299b54032

commit 36ca4b79b84870cd7da2fe59a359d2c299b54032
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2021-08-10 20:25:39 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2021-11-02 22:17:58 +0000

    amd64: Define KVA regions for KMSAN shadow maps

    KMSAN requires two shadow maps, each one-to-one with the kernel map.
    Allocate regions of the kernel's PML4 page for them.  Add functions to
    create mappings in the shadow map regions; these will be used by the
    KMSAN runtime.

    Reviewed by:    alc, kib
    Sponsored by:   The FreeBSD Foundation

    (cherry picked from commit f95f780ea4e163ce9a0295a699f41f0a7e1591d4)
---
 sys/amd64/amd64/pmap.c      | 78 +++++++++++++++++++++++++++++++++++++++++++++
 sys/amd64/include/pmap.h    | 13 ++++++++
 sys/amd64/include/vmparam.h | 13 ++++++--
 3 files changed, 102 insertions(+), 2 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 353ee18a862a..d2a168ca7f70 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -11358,6 +11358,76 @@ pmap_kasan_enter(vm_offset_t va)
 }
 #endif
 
+#ifdef KMSAN
+static vm_page_t
+pmap_kmsan_enter_alloc_4k(void)
+{
+        vm_page_t m;
+
+        m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+            VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+        if (m == NULL)
+                panic("%s: no memory to grow shadow map", __func__);
+        if ((m->flags & PG_ZERO) == 0)
+                pmap_zero_page(m);
+        return (m);
+}
+
+static vm_page_t
+pmap_kmsan_enter_alloc_2m(void)
+{
+        vm_page_t m;
+
+        m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+            VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT);
+        if (m != NULL)
+                memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 0, NBPDR);
+        return (m);
+}
+
+/*
+ * Grow the shadow or origin maps by at least one 4KB page at the specified
+ * address.  Use 2MB pages when possible.
+ */
+void
+pmap_kmsan_enter(vm_offset_t va)
+{
+        pdp_entry_t *pdpe;
+        pd_entry_t *pde;
+        pt_entry_t *pte;
+        vm_page_t m;
+
+        mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+
+        pdpe = pmap_pdpe(kernel_pmap, va);
+        if ((*pdpe & X86_PG_V) == 0) {
+                m = pmap_kmsan_enter_alloc_4k();
+                *pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+                    X86_PG_V | pg_nx);
+        }
+        pde = pmap_pdpe_to_pde(pdpe, va);
+        if ((*pde & X86_PG_V) == 0) {
+                m = pmap_kmsan_enter_alloc_2m();
+                if (m != NULL) {
+                        *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+                            X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
+                } else {
+                        m = pmap_kmsan_enter_alloc_4k();
+                        *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+                            X86_PG_V | pg_nx);
+                }
+        }
+        if ((*pde & X86_PG_PS) != 0)
+                return;
+        pte = pmap_pde_to_pte(pde, va);
+        if ((*pte & X86_PG_V) != 0)
+                return;
+        m = pmap_kmsan_enter_alloc_4k();
+        *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
+            X86_PG_M | X86_PG_A | pg_nx);
+}
+#endif
+
 /*
  * Track a range of the kernel's virtual address space that is contiguous
  * in various mapping attributes.
@@ -11539,6 +11609,14 @@ sysctl_kmaps(SYSCTL_HANDLER_ARGS)
                case KASANPML4I:
                        sbuf_printf(sb, "\nKASAN shadow map:\n");
                        break;
+#endif
+#ifdef KMSAN
+                case KMSANSHADPML4I:
+                        sbuf_printf(sb, "\nKMSAN shadow map:\n");
+                        break;
+                case KMSANORIGPML4I:
+                        sbuf_printf(sb, "\nKMSAN origin map:\n");
+                        break;
 #endif
                case KPML4BASE:
                        sbuf_printf(sb, "\nKernel map:\n");
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index c5c1714f2f94..bd6a8c006813 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -201,6 +201,13 @@
  */
 #define NKASANPML4E     ((NKPML4E + 7) / 8)
 
+/*
+ * Number of PML4 slots for the KMSAN shadow and origin maps.  These are
+ * one-to-one with the kernel map.
+ */
+#define NKMSANSHADPML4E NKPML4E
+#define NKMSANORIGPML4E NKPML4E
+
 /*
  * We use the same numbering of the page table pages for 5-level and
  * 4-level paging structures.
@@ -251,6 +258,9 @@
 
 #define KASANPML4I      (DMPML4I - NKASANPML4E) /* Below the direct map */
 
+#define KMSANSHADPML4I  (KPML4BASE - NKMSANSHADPML4E)
+#define KMSANORIGPML4I  (DMPML4I - NKMSANORIGPML4E)
+
 /* Large map: index of the first and max last pml4 entry */
 #define LMSPML4I        (PML4PML4I + 1)
 #define LMEPML4I        (KASANPML4I - 1)
@@ -521,6 +531,9 @@ vm_page_t pmap_page_alloc_below_4g(bool zeroed);
 #ifdef KASAN
 void    pmap_kasan_enter(vm_offset_t);
 #endif
+#ifdef KMSAN
+void    pmap_kmsan_enter(vm_offset_t);
+#endif
 
 #endif /* _KERNEL */
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index 88fd29b80be3..61d0dea54210 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -170,9 +170,10 @@
  * 0xffff804020100fff - 0xffff807fffffffff   unused
  * 0xffff808000000000 - 0xffff847fffffffff   large map (can be tuned up)
  * 0xffff848000000000 - 0xfffff77fffffffff   unused (large map extends there)
- * 0xfffff78000000000 - 0xfffff7ffffffffff   512GB KASAN shadow map
+ * 0xfffff60000000000 - 0xfffff7ffffffffff   2TB KMSAN origin map, optional
+ * 0xfffff78000000000 - 0xfffff7bfffffffff   512GB KASAN shadow map, optional
  * 0xfffff80000000000 - 0xfffffbffffffffff   4TB direct map
- * 0xfffffc0000000000 - 0xfffffdffffffffff   unused
+ * 0xfffffc0000000000 - 0xfffffdffffffffff   2TB KMSAN shadow map, optional
  * 0xfffffe0000000000 - 0xffffffffffffffff   2TB kernel map
  *
  * Within the kernel map:
@@ -191,6 +192,14 @@
 #define KASAN_MIN_ADDRESS       KV4ADDR(KASANPML4I, 0, 0, 0)
 #define KASAN_MAX_ADDRESS       KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
 
+#define KMSAN_SHAD_MIN_ADDRESS  KV4ADDR(KMSANSHADPML4I, 0, 0, 0)
+#define KMSAN_SHAD_MAX_ADDRESS  KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E, \
+                                    0, 0, 0)
+
+#define KMSAN_ORIG_MIN_ADDRESS  KV4ADDR(KMSANORIGPML4I, 0, 0, 0)
+#define KMSAN_ORIG_MAX_ADDRESS  KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
+                                    0, 0, 0)
+
 #define LARGEMAP_MIN_ADDRESS    KV4ADDR(LMSPML4I, 0, 0, 0)
 #define LARGEMAP_MAX_ADDRESS    KV4ADDR(LMEPML4I + 1, 0, 0, 0)