svn commit: r315285 - in stable/11/sys: kern sys vm
Mateusz Guzik
mjg at FreeBSD.org
Tue Mar 14 20:43:06 UTC 2017
Author: mjg
Date: Tue Mar 14 20:43:04 2017
New Revision: 315285
URL: https://svnweb.freebsd.org/changeset/base/315285
Log:
MFC r312724,r312901,r312902:
hwpmc: partially depessimize munmap handling if the module is not loaded
HWPMC_HOOKS is enabled in GENERIC and triggers some work avoidable in the
common (module not loaded) case.
In particular this avoids permission checks + lock downgrade
when singlethreaded, and in cases where an executable mapping is found the pmc
sx lock is no longer bounced.
Note this is a band aid.
==
hwpmc: partially depessimize mmap handling if the module is not loaded
In particular this means the pmc sx lock is no longer taken when an
executable mapping succeeds.
==
hwpmc: annotate pmc_hook and pmc_intr as __read_mostly
Modified:
stable/11/sys/kern/kern_pmc.c
stable/11/sys/kern/vfs_vnops.c
stable/11/sys/sys/pmckern.h
stable/11/sys/vm/vm_mmap.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/kern/kern_pmc.c
==============================================================================
--- stable/11/sys/kern/kern_pmc.c Tue Mar 14 20:39:06 2017 (r315284)
+++ stable/11/sys/kern/kern_pmc.c Tue Mar 14 20:43:04 2017 (r315285)
@@ -59,10 +59,10 @@ MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "M
const int pmc_kernel_version = PMC_KERNEL_VERSION;
/* Hook variable. */
-int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
+int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
/* Interrupt handler */
-int (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
+int __read_mostly (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
/* Bitmask of CPUs requiring servicing at hardclock time */
volatile cpuset_t pmc_cpumask;
Modified: stable/11/sys/kern/vfs_vnops.c
==============================================================================
--- stable/11/sys/kern/vfs_vnops.c Tue Mar 14 20:39:06 2017 (r315284)
+++ stable/11/sys/kern/vfs_vnops.c Tue Mar 14 20:43:04 2017 (r315285)
@@ -2479,10 +2479,12 @@ vn_mmap(struct file *fp, vm_map_t map, v
}
#ifdef HWPMC_HOOKS
/* Inform hwpmc(4) if an executable is being mapped. */
- if (error == 0 && (prot & VM_PROT_EXECUTE) != 0) {
- pkm.pm_file = vp;
- pkm.pm_address = (uintptr_t) *addr;
- PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
+ if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) {
+ if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) {
+ pkm.pm_file = vp;
+ pkm.pm_address = (uintptr_t) *addr;
+ PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
+ }
}
#endif
return (error);
Modified: stable/11/sys/sys/pmckern.h
==============================================================================
--- stable/11/sys/sys/pmckern.h Tue Mar 14 20:39:06 2017 (r315284)
+++ stable/11/sys/sys/pmckern.h Tue Mar 14 20:43:04 2017 (r315285)
@@ -174,6 +174,9 @@ extern const int pmc_kernel_version;
/* PMC soft per cpu trapframe */
extern struct trapframe pmc_tf[MAXCPU];
+/* Quick check if preparatory work is necessary */
+#define PMC_HOOK_INSTALLED(cmd) __predict_false(pmc_hook != NULL)
+
/* Hook invocation; for use within the kernel */
#define PMC_CALL_HOOK(t, cmd, arg) \
do { \
Modified: stable/11/sys/vm/vm_mmap.c
==============================================================================
--- stable/11/sys/vm/vm_mmap.c Tue Mar 14 20:39:06 2017 (r315284)
+++ stable/11/sys/vm/vm_mmap.c Tue Mar 14 20:43:04 2017 (r315285)
@@ -500,6 +500,7 @@ kern_munmap(struct thread *td, uintptr_t
#ifdef HWPMC_HOOKS
struct pmckern_map_out pkm;
vm_map_entry_t entry;
+ bool pmc_handled;
#endif
vm_offset_t addr;
vm_size_t pageoff;
@@ -524,20 +525,24 @@ kern_munmap(struct thread *td, uintptr_t
return (EINVAL);
vm_map_lock(map);
#ifdef HWPMC_HOOKS
- /*
- * Inform hwpmc if the address range being unmapped contains
- * an executable region.
- */
- pkm.pm_address = (uintptr_t) NULL;
- if (vm_map_lookup_entry(map, addr, &entry)) {
- for (;
- entry != &map->header && entry->start < addr + size;
- entry = entry->next) {
- if (vm_map_check_protection(map, entry->start,
- entry->end, VM_PROT_EXECUTE) == TRUE) {
- pkm.pm_address = (uintptr_t) addr;
- pkm.pm_size = (size_t) size;
- break;
+ pmc_handled = false;
+ if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
+ pmc_handled = true;
+ /*
+ * Inform hwpmc if the address range being unmapped contains
+ * an executable region.
+ */
+ pkm.pm_address = (uintptr_t) NULL;
+ if (vm_map_lookup_entry(map, addr, &entry)) {
+ for (;
+ entry != &map->header && entry->start < addr + size;
+ entry = entry->next) {
+ if (vm_map_check_protection(map, entry->start,
+ entry->end, VM_PROT_EXECUTE) == TRUE) {
+ pkm.pm_address = (uintptr_t) addr;
+ pkm.pm_size = (size_t) size;
+ break;
+ }
}
}
}
@@ -545,14 +550,16 @@ kern_munmap(struct thread *td, uintptr_t
vm_map_delete(map, addr, addr + size);
#ifdef HWPMC_HOOKS
- /* downgrade the lock to prevent a LOR with the pmc-sx lock */
- vm_map_lock_downgrade(map);
- if (pkm.pm_address != (uintptr_t) NULL)
- PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
- vm_map_unlock_read(map);
-#else
- vm_map_unlock(map);
+ if (__predict_false(pmc_handled)) {
+ /* downgrade the lock to prevent a LOR with the pmc-sx lock */
+ vm_map_lock_downgrade(map);
+ if (pkm.pm_address != (uintptr_t) NULL)
+ PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
+ vm_map_unlock_read(map);
+ } else
#endif
+ vm_map_unlock(map);
+
/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
return (0);
}
More information about the svn-src-stable
mailing list