svn commit: r268038 - user/attilio/rm_vmobj_cache/sys/vm
Attilio Rao
attilio@FreeBSD.org
Mon Jun 30 11:15:11 UTC 2014
Author: attilio
Date: Mon Jun 30 11:15:10 2014
New Revision: 268038
URL: http://svnweb.freebsd.org/changeset/base/268038
Log:
- Implement a "disposed" pagequeue, where pages are firstly freed to
satisfy the pageout free requirements.
This frees happen just before the inactive queue scan, in pass == 1
- Implement vm_page_dispose() that inserts pages into the disposed queue.
Pages must be unbusy, unwired, unheld, managed and clean.
Modified:
user/attilio/rm_vmobj_cache/sys/vm/vm_page.c
user/attilio/rm_vmobj_cache/sys/vm/vm_page.h
user/attilio/rm_vmobj_cache/sys/vm/vm_pageout.c
Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_page.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_page.c Mon Jun 30 09:59:23 2014 (r268037)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_page.c Mon Jun 30 11:15:10 2014 (r268038)
@@ -2437,6 +2437,47 @@ vm_page_deactivate(vm_page_t m)
}
/*
+ * Move the specified page to the disposed queue.
+ *
+ * XXXWIP
+ *
+ * The page must be locked.
+ * The page must also already be unqueued, and not wired or busied.
+ * Finally, the page must also belong to an object, so it must not be
+ * unmanaged.
+ */
+void
+vm_page_dispose(vm_page_t m)
+{
+ struct vm_pagequeue *pq;
+ int queue;
+
+ vm_page_lock_assert(m, MA_OWNED);
+ KASSERT(m->queue == PQ_NONE,
+ ("vm_page_dispose: page %p already queued on %u queue", m,
+ m->queue));
+
+ if (m->hold_count != 0)
+ panic("vm_page_dispose: page %p hold count %d",
+ m, m->hold_count);
+ if (m->wire_count != 0)
+ panic("vm_page_dispose: page %p wire count %d",
+ m, m->wire_count);
+ if (vm_page_busied(m))
+ panic("vm_page_dispose: page %p is busied", m);
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ panic("vm_page_dispose: page %p is unmanaged", m);
+
+ m->flags &= ~PG_WINATCFLS;
+ pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_DISPOSED];
+ vm_pagequeue_lock(pq);
+ m->queue = PQ_DISPOSED;
+ TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
+ vm_pagequeue_cnt_inc(pq);
+ vm_pagequeue_unlock(pq);
+}
+
+/*
* vm_page_try_to_cache:
*
* Returns 0 on failure, 1 on success
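For context, a rough sketch of how a caller might hand a page to the new
queue (hypothetical; not part of this diff). The checks mirror the panics
in vm_page_dispose(), and the page is assumed to be already clean and
unmapped, since the pass 1 drain below panics otherwise; object locking is
elided for brevity:

    /*
     * Hypothetical caller of vm_page_dispose() (illustration only).
     * The page lock is held; the page is assumed clean and unmapped.
     */
    vm_page_lock(m);
    if (!vm_page_busied(m) && m->hold_count == 0 && m->wire_count == 0 &&
        m->dirty == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
            vm_page_remque(m);      /* vm_page_dispose() requires PQ_NONE */
            vm_page_dispose(m);     /* freed by the pageout daemon in pass 1 */
    }
    vm_page_unlock(m);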
Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_page.h
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_page.h Mon Jun 30 09:59:23 2014 (r268037)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_page.h Mon Jun 30 11:15:10 2014 (r268038)
@@ -439,6 +439,7 @@ int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
+void vm_page_dispose(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
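Note: the posted hunks use PQ_DISPOSED but do not show its definition,
which lives with the other queue indices in vm_page.h. Assuming this
branch (which removes the cache queue) numbers the queues contiguously,
its shape is presumably along these lines:

    /*
     * Assumed queue definition (not shown in this diff): PQ_DISPOSED
     * extends the per-domain pagequeue array, so PQ_COUNT grows with it.
     */
    #define PQ_NONE         255
    #define PQ_INACTIVE     0
    #define PQ_ACTIVE       1
    #define PQ_DISPOSED     2
    #define PQ_COUNT        3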
Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_pageout.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_pageout.c Mon Jun 30 09:59:23 2014 (r268037)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_pageout.c Mon Jun 30 11:15:10 2014 (r268038)
@@ -244,10 +244,10 @@ vm_pageout_init_marker(vm_page_t marker,
* vm_pageout_fallback_object_lock:
*
* Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
- * known to have failed and page queue must be either PQ_ACTIVE or
- * PQ_INACTIVE. To avoid lock order violation, unlock the page queues
- * while locking the vm object. Use marker page to detect page queue
- * changes and maintain notion of next page on page queue. Return
+ * known to have failed and page queue must be either PQ_ACTIVE,
+ * PQ_INACTIVE or PQ_DISPOSED. To avoid lock order violation, unlock the
+ * page queues while locking the vm object. Use marker page to detect page
+ * queue changes and maintain notion of next page on page queue. Return
* TRUE if no changes were detected, FALSE otherwise. vm object is
* locked on return.
*
@@ -901,7 +901,7 @@ vm_pageout_map_deactivate_pages(map, des
* vm_pageout_scan does the dirty work for the pageout daemon.
*
* pass 0 - Update active LRU/deactivate pages
- * pass 1 - Move inactive to cache or free
+ * pass 1 - Free disposed pages and move inactive to cache or free
* pass 2 - Launder dirty pages
*/
static void
@@ -952,6 +952,75 @@ vm_pageout_scan(struct vm_domain *vmd, i
} else
page_shortage = deficit = 0;
+ pq = &vmd->vmd_pagequeues[PQ_DISPOSED];
+ maxscan = pq->pq_cnt;
+ vm_pagequeue_lock(pq);
+ for (m = TAILQ_FIRST(&pq->pq_pl);
+ m != NULL && maxscan-- > 0 && page_shortage > 0;
+ m = next) {
+ vm_pagequeue_assert_locked(pq);
+ KASSERT(m->queue == PQ_DISPOSED, ("Disposed queue %p", m));
+
+ PCPU_INC(cnt.v_pdpages);
+ next = TAILQ_NEXT(m, plinks.q);
+
+ /*
+ * skip marker pages
+ */
+ if (m->flags & PG_MARKER)
+ continue;
+
+ KASSERT((m->flags & PG_FICTITIOUS) == 0,
+ ("Fictitious page %p cannot be in disposed queue", m));
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("Unmanaged page %p cannot be in disposed queue", m));
+
+ /*
+ * The page or object lock acquisitions fail if the
+ * page was removed from the queue or moved to a
+ * different position within the queue. In either
+ * case, addl_page_shortage should not be incremented.
+ */
+ if (!vm_pageout_page_lock(m, &next)) {
+ vm_page_unlock(m);
+ continue;
+ }
+ object = m->object;
+ if (!VM_OBJECT_TRYWLOCK(object) &&
+ !vm_pageout_fallback_object_lock(m, &next)) {
+ vm_page_unlock(m);
+ VM_OBJECT_WUNLOCK(object);
+ continue;
+ }
+ vm_page_test_dirty(m);
+
+ if (m->dirty != 0)
+ panic("Disposed page %p is dirty", m);
+ if (pmap_page_is_mapped(m))
+ panic("Disposed page %p has active mappings", m);
+ if ((m->aflags & PGA_REFERENCED) != 0)
+ panic("Disposed page %p is referenced", m);
+
+ /*
+ * These conditions are already checked when pages are inserted
+ * into the disposed queue, so only assert them here.
+ */
+ KASSERT(!vm_page_busied(m) && m->hold_count == 0 &&
+ m->wire_count == 0, ("Disposed page %p busied, held or wired", m));
+
+ /*
+ * Dequeue the page first in order to avoid pagequeue
+ * lock recursion.
+ */
+ vm_page_dequeue_locked(m);
+ vm_page_free(m);
+ vm_page_unlock(m);
+ VM_OBJECT_WUNLOCK(object);
+ PCPU_INC(cnt.v_dfree);
+ --page_shortage;
+ }
+ vm_pagequeue_unlock(pq);
+
/*
* maxlaunder limits the number of dirty pages we flush per scan.
* For most systems a smaller value (16 or 32) is more robust under