git: 59f192c507c4 - main - riscv: Add various pmap definitions needed to support SV48 mode
Date: Tue, 01 Mar 2022 14:40:00 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=59f192c507c49990b603f774bbb824fab71ee4c3

commit 59f192c507c49990b603f774bbb824fab71ee4c3
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2022-03-01 14:04:01 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2022-03-01 14:39:43 +0000

    riscv: Add various pmap definitions needed to support SV48 mode

    No functional change intended.

    Reviewed by:            jhb
    MFC after:              1 week
    Sponsored by:           The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D34272
---
 sys/riscv/include/pmap.h    |  7 +++++++
 sys/riscv/include/pte.h     | 10 ++++++----
 sys/riscv/include/vmparam.h | 12 ++++++++----
 sys/riscv/riscv/pmap.c      | 18 +++++++++++++++++-
 4 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
index 40f99befade1..8ba46f0d61ae 100644
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -137,6 +137,13 @@ extern vm_offset_t virtual_end;
 #define	L1_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
 
+enum pmap_mode {
+	PMAP_MODE_SV39,
+	PMAP_MODE_SV48,
+};
+
+extern enum pmap_mode pmap_mode;
+
 struct thread;
 
 #define	pmap_vm_page_alloc_check(m)
diff --git a/sys/riscv/include/pte.h b/sys/riscv/include/pte.h
index 965faa7eea9e..f066f821cf2a 100644
--- a/sys/riscv/include/pte.h
+++ b/sys/riscv/include/pte.h
@@ -44,22 +44,24 @@ typedef uint64_t pt_entry_t;	/* page table entry */
 typedef	uint64_t pn_t;			/* page number */
 #endif
 
-/* Level 0 table, 512GiB per entry */
+/* Level 0 table, 512GiB per entry, SV48 only */
 #define	L0_SHIFT	39
+#define	L0_SIZE		(1UL << L0_SHIFT)
+#define	L0_OFFSET	(L0_SIZE - 1)
 
 /* Level 1 table, 1GiB per entry */
 #define	L1_SHIFT	30
-#define	L1_SIZE		(1 << L1_SHIFT)
+#define	L1_SIZE		(1UL << L1_SHIFT)
 #define	L1_OFFSET	(L1_SIZE - 1)
 
 /* Level 2 table, 2MiB per entry */
 #define	L2_SHIFT	21
-#define	L2_SIZE		(1 << L2_SHIFT)
+#define	L2_SIZE		(1UL << L2_SHIFT)
 #define	L2_OFFSET	(L2_SIZE - 1)
 
 /* Level 3 table, 4KiB per entry */
 #define	L3_SHIFT	12
-#define	L3_SIZE		(1 << L3_SHIFT)
+#define	L3_SIZE		(1UL << L3_SHIFT)
 #define	L3_OFFSET	(L3_SIZE - 1)
 
 #define	Ln_ENTRIES_SHIFT 9
diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h
index 4ed95def2caa..d65664eff94e 100644
--- a/sys/riscv/include/vmparam.h
+++ b/sys/riscv/include/vmparam.h
@@ -180,8 +180,10 @@
 	    ((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
     })
 
-#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
-#define	VM_MAX_USER_ADDRESS	(0x0000004000000000UL)
+#define	VM_MIN_USER_ADDRESS_SV39 (0x0000000000000000UL)
+#define	VM_MAX_USER_ADDRESS_SV39 (0x0000004000000000UL)
+#define	VM_MIN_USER_ADDRESS	VM_MIN_USER_ADDRESS_SV39
+#define	VM_MAX_USER_ADDRESS	VM_MAX_USER_ADDRESS_SV39
 
 #define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
 #define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)
@@ -191,8 +193,10 @@
 	(((va) < VM_MAX_USER_ADDRESS) || ((va) >= VM_MIN_KERNEL_ADDRESS))
 
 #define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
-#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
-#define	USRSTACK		SHAREDPAGE
+#define	SHAREDPAGE_SV39		(VM_MAX_USER_ADDRESS_SV39 - PAGE_SIZE)
+#define	SHAREDPAGE		SHAREDPAGE_SV39
+#define	USRSTACK		SHAREDPAGE_SV39
+#define	PS_STRINGS_SV39		(USRSTACK_SV39 - sizeof(struct ps_strings))
 
 #define	VM_EARLY_DTB_ADDRESS	(VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index d26d6113ee36..1d46e9e817c1 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -160,7 +160,19 @@ __FBSDID("$FreeBSD$");
 #include <machine/pcb.h>
 #include <machine/sbi.h>
 
-#define	NUL1E		(Ln_ENTRIES * Ln_ENTRIES)
+/*
+ * Boundary values for the page table page index space:
+ *
+ * L3 pages: [0, NUL2E)
+ * L2 pages: [NUL2E, NUL2E + NUL1E)
+ * L1 pages: [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E)
+ *
+ * Note that these ranges are used in both SV39 and SV48 mode.  In SV39 mode the
+ * ranges are not fully populated since there are at most Ln_ENTRIES^2 L3 pages
+ * in a set of page tables.
+ */
+#define	NUL0E		Ln_ENTRIES
+#define	NUL1E		(Ln_ENTRIES * NUL0E)
 #define	NUL2E		(Ln_ENTRIES * NUL1E)
 
 #if !defined(DIAGNOSTIC)
@@ -179,6 +191,7 @@ __FBSDID("$FreeBSD$");
 #define	PV_STAT(x)	do { } while (0)
 #endif
 
+#define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
 #define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
 
@@ -219,6 +232,8 @@ __FBSDID("$FreeBSD$");
 LIST_HEAD(pmaplist, pmap);
 static struct pmaplist allpmaps = LIST_HEAD_INITIALIZER();
 
+enum pmap_mode __read_frequently pmap_mode = PMAP_MODE_SV39;
+
 struct pmap kernel_pmap_store;
 
 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
@@ -338,6 +353,7 @@ pagezero(void *p)
 	bzero(p, PAGE_SIZE);
 }
 
+#define	pmap_l0_index(va)	(((va) >> L0_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)
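
For readers unfamiliar with the RISC-V paging modes: the L0_SHIFT/L1_SHIFT/L2_SHIFT/L3_SHIFT values in the pte.h hunk above describe how a virtual address is split into per-level page-table indices. SV39 translates through three levels (L1-L3), while SV48 adds the L0 level above them, which is why L0_SIZE and L0_OFFSET are introduced here. The following standalone sketch is not part of the commit; it assumes Ln_ADDR_MASK equals Ln_ENTRIES - 1 (0x1ff, per the Ln_ENTRIES_SHIFT of 9 shown above) and uses a made-up example address.

/*
 * Standalone illustration (not from the commit): split a virtual address
 * into RISC-V page-table indices using the shift constants shown above.
 * Assumes Ln_ADDR_MASK == Ln_ENTRIES - 1 == 0x1ff (Ln_ENTRIES_SHIFT is 9).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	L0_SHIFT	39		/* used by SV48 only */
#define	L1_SHIFT	30
#define	L2_SHIFT	21
#define	L3_SHIFT	12
#define	Ln_ADDR_MASK	0x1ffUL		/* assumption: Ln_ENTRIES - 1 */

int
main(void)
{
	uint64_t va = 0x0000003f40123456ULL;	/* arbitrary example address */

	printf("l0 %" PRIu64 " (SV48 only) l1 %" PRIu64 " l2 %" PRIu64
	    " l3 %" PRIu64 " page offset 0x%" PRIx64 "\n",
	    (va >> L0_SHIFT) & Ln_ADDR_MASK,
	    (va >> L1_SHIFT) & Ln_ADDR_MASK,
	    (va >> L2_SHIFT) & Ln_ADDR_MASK,
	    (va >> L3_SHIFT) & Ln_ADDR_MASK,
	    va & 0xfffULL);			/* L3_OFFSET, i.e. L3_SIZE - 1 */
	return (0);
}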
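
Similarly, the NUL0E/NUL1E/NUL2E constants and the new pmap_l1_pindex() macro partition the index space of page-table pages into the disjoint ranges listed in the comment: L3 pages in [0, NUL2E), L2 pages in [NUL2E, NUL2E + NUL1E), and L1 pages above that. A minimal sketch of that arithmetic follows; it is again standalone, not part of the commit, and assumes Ln_ENTRIES is 512.

/*
 * Standalone illustration (not from the commit): the page-table-page
 * pindex ranges described in the comment above, assuming Ln_ENTRIES == 512.
 *
 *   L3 pages: [0, NUL2E)
 *   L2 pages: [NUL2E, NUL2E + NUL1E)
 *   L1 pages: [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E)
 */
#include <assert.h>
#include <stdint.h>

#define	Ln_ENTRIES	512			/* assumption: 1 << 9 */
#define	L1_SHIFT	30
#define	L2_SHIFT	21

#define	NUL0E		Ln_ENTRIES
#define	NUL1E		(Ln_ENTRIES * NUL0E)
#define	NUL2E		(Ln_ENTRIES * NUL1E)

#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
#define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))

int
main(void)
{
	uint64_t va = 0x0000003f40123456ULL;	/* arbitrary user address */

	/* The L3 (leaf) page-table page for va indexes below NUL2E. */
	assert(pmap_l2_pindex(va) < (uint64_t)NUL2E);

	/* The L2 page-table page for va lands in [NUL2E, NUL2E + NUL1E). */
	assert(pmap_l1_pindex(va) >= (uint64_t)NUL2E);
	assert(pmap_l1_pindex(va) < (uint64_t)NUL2E + NUL1E);
	return (0);
}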