git: fb32ba6aa44d - main - amd64/arm64: Eliminate unnecessary demotions in pmap_protect()

From: Alan Cox <alc@FreeBSD.org>
Date: Sat, 06 Jul 2024 20:49:34 UTC
The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=fb32ba6aa44dc86e70ad06b44f93a9709e78f3d1

commit fb32ba6aa44dc86e70ad06b44f93a9709e78f3d1
Author:     Alan Cox <alc@FreeBSD.org>
AuthorDate: 2024-07-05 18:20:01 +0000
Commit:     Alan Cox <alc@FreeBSD.org>
CommitDate: 2024-07-06 20:48:10 +0000

    amd64/arm64: Eliminate unnecessary demotions in pmap_protect()
    
    In pmap_protect(), when the mapping's protection bits aren't actually
    changing, we don't need to perform a superpage demotion, even when the
    requested range doesn't cover the entire superpage.
    
    Reviewed by:    kib
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D45886
---
 sys/amd64/amd64/pmap.c | 21 +++++++++++++++++----
 sys/arm64/arm64/pmap.c |  3 ++-
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 57943e815b5b..2bcf671be243 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -6796,8 +6796,7 @@ retry_pdpe:
 		 */
 		if ((ptpaddr & PG_PS) != 0) {
 			/*
-			 * Are we protecting the entire large page?  If not,
-			 * demote the mapping and fall through.
+			 * Are we protecting the entire large page?
 			 */
 			if (sva + NBPDR == va_next && eva >= va_next) {
 				/*
@@ -6807,9 +6806,23 @@ retry_pdpe:
 				if (pmap_protect_pde(pmap, pde, sva, prot))
 					anychanged = true;
 				continue;
-			} else if (!pmap_demote_pde(pmap, pde, sva)) {
+			}
+
+			/*
+			 * Does the large page mapping need to change?  If so,
+			 * demote it and fall through.
+			 */
+			pbits = ptpaddr;
+			if ((prot & VM_PROT_WRITE) == 0)
+				pbits &= ~(PG_RW | PG_M);
+			if ((prot & VM_PROT_EXECUTE) == 0)
+				pbits |= pg_nx;
+			if (ptpaddr == pbits || !pmap_demote_pde(pmap, pde,
+			    sva)) {
 				/*
-				 * The large page mapping was destroyed.
+				 * Either the large page mapping doesn't need
+				 * to change, or it was destroyed during
+				 * demotion.
 				 */
 				continue;
 			}
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index a9cb8c7fe468..29552f722aa4 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4373,7 +4373,8 @@ pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t m
 			if (sva + L2_SIZE == va_next && eva >= va_next) {
 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
 				continue;
-			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
+			} else if ((pmap_load(l2) & mask) == nbits ||
+			    pmap_demote_l2(pmap, l2, sva) == NULL)
 				continue;
 		}
 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
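
For readers outside the pmap code, here is a standalone, user-space sketch of
the pattern both hunks apply: compute the protection bits that the request
would leave in the superpage entry, and skip the demotion when they equal the
bits already there (the arm64 hunk expresses the same test directly as
"(pmap_load(l2) & mask) == nbits").  The type pte_t, the bit values, and the
helper superpage_needs_demotion() below are simplified stand-ins, not the
kernel's definitions; the real code also copes with pg_nx being optional and
with concurrent page-table updates.

/*
 * Hypothetical user-space model: would restricting a superpage mapping
 * to "prot" actually change its page-table entry?  If not, a
 * partial-range pmap_protect() can skip the demotion entirely.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;			/* stand-in for the kernel's pt_entry_t */

#define PG_RW		(1ULL << 1)	/* writable */
#define PG_M		(1ULL << 6)	/* modified (dirty) */
#define PG_NX		(1ULL << 63)	/* no-execute */

#define VM_PROT_READ	0x01
#define VM_PROT_WRITE	0x02
#define VM_PROT_EXECUTE	0x04

static bool
superpage_needs_demotion(pte_t pde, int prot)
{
	pte_t pbits = pde;

	/* pmap_protect() only removes permissions, never grants them. */
	if ((prot & VM_PROT_WRITE) == 0)
		pbits &= ~(PG_RW | PG_M);
	if ((prot & VM_PROT_EXECUTE) == 0)
		pbits |= PG_NX;
	return (pbits != pde);
}

int
main(void)
{
	pte_t ro_nx = PG_NX;		/* already read-only, no-execute */
	pte_t rw = PG_RW | PG_M;	/* writable and dirty */

	/* Revoking write access from a read-only mapping: nothing to do. */
	printf("read-only pde, VM_PROT_READ: demote = %d\n",
	    superpage_needs_demotion(ro_nx, VM_PROT_READ));

	/* Revoking write access from a writable mapping: must demote. */
	printf("writable pde,  VM_PROT_READ: demote = %d\n",
	    superpage_needs_demotion(rw, VM_PROT_READ));
	return (0);
}

The comparison costs only a few bit operations on a value pmap_protect() has
already loaded, whereas a demotion has to populate a page-table page and
invalidate TLB entries, so skipping it when nothing would change is a cheap
and worthwhile test.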