svn commit: r308733 - head/sys/vm
Konstantin Belousov
kib at FreeBSD.org
Wed Nov 16 16:34:18 UTC 2016
Author: kib
Date: Wed Nov 16 16:34:17 2016
New Revision: 308733
URL: https://svnweb.freebsd.org/changeset/base/308733
Log:
Move the fast fault path into a separate function.
Reviewed by: alc
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Modified:
head/sys/vm/vm_fault.c
Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c Wed Nov 16 16:14:01 2016 (r308732)
+++ head/sys/vm/vm_fault.c Wed Nov 16 16:34:17 2016 (r308733)
@@ -246,6 +246,48 @@ vm_fault_dirty(vm_map_entry_t entry, vm_
		vm_pager_page_unswapped(m);
}

+static void
+vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m)
+{
+
+	if (m_hold != NULL) {
+		*m_hold = m;
+		vm_page_lock(m);
+		vm_page_hold(m);
+		vm_page_unlock(m);
+	}
+}
+
+/*
+ * Unlocks fs.first_object and fs.map on success.
+ */
+static int
+vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
+    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
+{
+	vm_page_t m;
+	int rv;
+
+	MPASS(fs->vp == NULL);
+	m = vm_page_lookup(fs->first_object, fs->first_pindex);
+	/* A busy page can be mapped for read|execute access. */
+	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
+	    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
+		return (KERN_FAILURE);
+	rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
+	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), 0);
+	if (rv != KERN_SUCCESS)
+		return (rv);
+	vm_fault_fill_hold(m_hold, m);
+	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
+	VM_OBJECT_RUNLOCK(fs->first_object);
+	if (!wired)
+		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
+	vm_map_lookup_done(fs->map, fs->entry);
+	curthread->td_ru.ru_minflt++;
+	return (KERN_SUCCESS);
+}
+
/*
* vm_fault:
*
@@ -294,7 +336,6 @@ vm_fault_hold(vm_map_t map, vm_offset_t
	struct vnode *vp;
	vm_object_t next_object, retry_object;
	vm_offset_t e_end, e_start;
-	vm_page_t m;
	vm_pindex_t retry_pindex;
	vm_prot_t prot, retry_prot;
	int ahead, alloc_req, behind, cluster_offset, error, era, faultcount;
@@ -376,36 +417,15 @@ RetryFault:;
	    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
		VM_OBJECT_RLOCK(fs.first_object);
-		if ((prot & VM_PROT_WRITE) != 0 &&
-		    (fs.first_object->type == OBJT_VNODE ||
-		    (fs.first_object->flags & OBJ_TMPFS_NODE) != 0) &&
-		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) == 0)
-			goto fast_failed;
-		m = vm_page_lookup(fs.first_object, fs.first_pindex);
-		/* A busy page can be mapped for read|execute access. */
-		if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
-		    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
-			goto fast_failed;
-		result = pmap_enter(fs.map->pmap, vaddr, m, prot,
-		    fault_type | PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED :
-		    0), 0);
-		if (result != KERN_SUCCESS)
-			goto fast_failed;
-		if (m_hold != NULL) {
-			*m_hold = m;
-			vm_page_lock(m);
-			vm_page_hold(m);
-			vm_page_unlock(m);
-		}
-		vm_fault_dirty(fs.entry, m, prot, fault_type, fault_flags,
-		    false);
-		VM_OBJECT_RUNLOCK(fs.first_object);
-		if (!wired)
-			vm_fault_prefault(&fs, vaddr, PFBAK, PFFOR);
-		vm_map_lookup_done(fs.map, fs.entry);
-		curthread->td_ru.ru_minflt++;
-		return (KERN_SUCCESS);
-fast_failed:
+		if ((prot & VM_PROT_WRITE) == 0 ||
+		    (fs.first_object->type != OBJT_VNODE &&
+		    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
+		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
+			rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
+			    fault_flags, wired, m_hold);
+			if (rv == KERN_SUCCESS)
+				return (rv);
+		}
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
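
The refactoring itself is mechanical.  The read-locked fast path that was
open-coded in vm_fault_hold() now lives in vm_fault_soft_fast(), which
returns KERN_SUCCESS after entering an already-resident, fully valid page
into the pmap, and a failure status otherwise, in which case the caller
upgrades the object lock and continues with the ordinary fault handling.
Below is a minimal user-space sketch of the same try-the-fast-path-then-
fall-back shape; the names (struct fault_state, fault_soft_fast,
fault_slow, fault) are hypothetical stand-ins for illustration and are not
the kernel interfaces.

/*
 * Illustration only: a user-space sketch of the "soft fast path" split,
 * not kernel code.  All identifiers are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define	KERN_SUCCESS	0
#define	KERN_FAILURE	5

struct fault_state {
	int	pindex;		/* page index being faulted on */
	bool	resident;	/* page already resident and fully valid? */
};

/*
 * Fast path: succeed only when the page is already resident, so no
 * paging or object manipulation is needed (stands in for
 * vm_fault_soft_fast()).
 */
static int
fault_soft_fast(struct fault_state *fs)
{

	if (!fs->resident)
		return (KERN_FAILURE);
	printf("fast path: mapped resident page %d\n", fs->pindex);
	return (KERN_SUCCESS);
}

/* Slow path: stands in for the rest of vm_fault_hold(). */
static int
fault_slow(struct fault_state *fs)
{

	printf("slow path: paged in and mapped page %d\n", fs->pindex);
	return (KERN_SUCCESS);
}

/* Caller mirrors the new structure: try the helper, fall back on failure. */
static int
fault(struct fault_state *fs)
{
	int rv;

	rv = fault_soft_fast(fs);
	if (rv == KERN_SUCCESS)
		return (rv);
	return (fault_slow(fs));
}

int
main(void)
{
	struct fault_state resident = { .pindex = 1, .resident = true };
	struct fault_state missing = { .pindex = 2, .resident = false };

	fault(&resident);
	fault(&missing);
	return (0);
}

In the kernel code, as in this sketch, the helper performs its side effects
(hold, dirty, unlock, prefault, lookup_done) only on the success path, so a
failing fast path leaves the caller free to fall back without any cleanup.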