PERFORCE change 89397 for review

Kip Macy kmacy at FreeBSD.org
Sun Jan 8 20:01:37 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=89397

Change 89397 by kmacy at kmacy:freebsd7_xen3 on 2006/01/09 04:01:15

	BSDify balloon driver

Affected files ...

.. //depot/projects/xen3/src/sys/dev/xen/balloon/balloon.c#2 edit
.. //depot/projects/xen3/src/sys/i386-xen/include/xenvar.h#2 edit

Differences ...

==== //depot/projects/xen3/src/sys/dev/xen/balloon/balloon.c#2 (text+ko) ====

@@ -33,9 +33,9 @@
 #include <sys/lock.h>
 #include <sys/mutex.h>
 
-#include<machine/xenbus.h>
-
-#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+#include <machine/hypervisor-ifs.h>
+#include <machine/xen-os.h>
+#include <machine/xenbus.h>
 
 /*
  * Protects atomic reservation decrease/increase against concurrent increases.
@@ -60,63 +60,54 @@
  */
 static unsigned long driver_pages;
 
+/* Bookkeeping record for one ballooned-out page. */
+struct balloon_entry {
+	vm_page_t page;
+	STAILQ_ENTRY(balloon_entry) list;
+};
+
-/* List of ballooned pages, threaded through the mem_map array. */
-static LIST_HEAD(ballooned_pages);
+/* Queue of ballooned pages, one balloon_entry per page.
+ * (The old "threaded through mem_map" comment no longer applies.) */
+static STAILQ_HEAD(,balloon_entry) ballooned_pages =
+    STAILQ_HEAD_INITIALIZER(ballooned_pages);
+
 static unsigned long balloon_low, balloon_high;
 
-#if 0
+
 /* Main work function, always executed in process context. */
 static void balloon_process(void *unused);
-static DECLARE_WORK(balloon_worker, balloon_process, NULL);
-static struct timer_list balloon_timer;
 
-/* Use the private and mapping fields of struct page as a list. */
-#define PAGE_TO_LIST(p) ((struct list_head *)&p->private)
-#define LIST_TO_PAGE(l)				\
-	(list_entry(((unsigned long *)l), struct page, private))
-#define UNLIST_PAGE(p)				\
-	do {					\
-		list_del(PAGE_TO_LIST(p));	\
-		p->mapping = NULL;		\
-		p->private = 0;			\
-	} while(0)
-
 #define IPRINTK(fmt, args...) \
 	printk(KERN_INFO "xen_mem: " fmt, ##args)
 #define WPRINTK(fmt, args...) \
 	printk(KERN_WARNING "xen_mem: " fmt, ##args)
-#endif
 
 /* balloon_append: add the given page to the balloon. */
 static void 
-balloon_append(struct page *page)
+balloon_append(vm_page_t page)
 {
-	/* Lowmem is re-populated first, so highmem pages go at list tail. */
-	if (PageHighMem(page)) {
-		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-		balloon_high++;
-	} else {
-		list_add(PAGE_TO_LIST(page), &ballooned_pages);
-		balloon_low++;
-	}
+	struct balloon_entry *entry;
+
+	/* malloc(9) takes (size, type, flags); M_DEVBUF matches the
+	 * free(entry, M_DEVBUF) in balloon_retrieve(). */
+	entry = malloc(sizeof(struct balloon_entry), M_DEVBUF, M_WAITOK);
+	/* Record the page; without this, balloon_retrieve() would return
+	 * an uninitialized entry->page. */
+	entry->page = page;
+
+	STAILQ_INSERT_HEAD(&ballooned_pages, entry, list);
+	balloon_low++;
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *
+static vm_page_t
 balloon_retrieve(void)
 {
-	struct page *page;
+	vm_page_t page;
+	struct balloon_entry *entry;
 
-	if (list_empty(&ballooned_pages))
+	if (STAILQ_EMPTY(&ballooned_pages))
 		return NULL;
 
-	page = LIST_TO_PAGE(ballooned_pages.next);
-	UNLIST_PAGE(page);
+	/* Pop the first bookkeeping record and hand its page back to the
+	 * caller; the record itself is freed here. */
+	entry = STAILQ_FIRST(&ballooned_pages);
+	STAILQ_REMOVE_HEAD(&ballooned_pages, list);
 
-	if (PageHighMem(page))
-		balloon_high--;
-	else
-		balloon_low--;
+	page = entry->page;
+	free(entry, M_DEVBUF);
+	
+	balloon_low--;
 
 	return page;
 }
@@ -151,9 +142,9 @@
 	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
 		nr_pages = PAGE_SIZE / sizeof(unsigned long);
 
-	mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
+	mfn_list = (unsigned long *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
 	if (mfn_list == NULL)
-		return -ENOMEM;
+		return ENOMEM;
 
 
 	reservation.extent_start = mfn_list;
@@ -167,36 +158,26 @@
 		reservation.nr_extents   = rc;
 		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 				&reservation);
-		BUG_ON(ret != rc);
+		PANIC_IF(ret != rc);
 		hard_limit = current_pages + rc - driver_pages;
 		goto out;
 	}
 
 	for (i = 0; i < nr_pages; i++) {
 		page = balloon_retrieve();
-		BUG_ON(page == NULL);
+		PANIC_IF(page == NULL);
 
-		pfn = page_to_pfn(page);
-		BUG_ON(phys_to_machine_mapping_valid(pfn));
+		pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
+		PANIC_IF(phys_to_machine_mapping_valid(pfn));
 
 		/* Update P->M and M->P tables. */
-		set_phys_to_machine(pfn, mfn_list[i]);
+		PFNTOMFN(pfn) = mfn_list[i];
 		xen_machphys_update(mfn_list[i], pfn);
             
-		/* Link back into the page tables if not highmem. */
-		if (pfn < max_low_pfn) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
-		}
-
 		/* Relinquish the page back to the allocator. */
 		ClearPageReserved(page);
 		set_page_count(page, 1);
-		__free_page(page);
+		vm_page_free(page);
 	}
 
 	current_pages += nr_pages;
@@ -205,7 +186,7 @@
  out:
 	balloon_unlock(flags);
 
-	free_page((unsigned long)mfn_list);
+	/* Fixed unbalanced paren; pass the M_DEVBUF type used when
+	 * mfn_list was allocated above. */
+	free(mfn_list, M_DEVBUF);
 
 	return 0;
 }
@@ -227,60 +208,43 @@
 	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
 		nr_pages = PAGE_SIZE / sizeof(unsigned long);
 
-	mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
+	mfn_list = (unsigned long *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
 	if (mfn_list == NULL)
-		return -ENOMEM;
+		return ENOMEM;
 
 	for (i = 0; i < nr_pages; i++) {
-		if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
+		int color = 0;
+		if ((page = vm_page_alloc(NULL, color++, 
+					  VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 
+					  VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 			nr_pages = i;
 			need_sleep = 1;
 			break;
 		}
-
-		pfn = page_to_pfn(page);
-		mfn_list[i] = pfn_to_mfn(pfn);
-
-		if (!PageHighMem(page)) {
-			v = phys_to_virt(pfn << PAGE_SHIFT);
-			scrub_pages(v, 1);
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)v, __pte_ma(0), 0);
-			BUG_ON(ret);
-		}
-#ifdef CONFIG_XEN_SCRUB_PAGES
-		else {
-			v = kmap(page);
-			scrub_pages(v, 1);
-			kunmap(page);
-		}
-#endif
+		pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
+		mfn_list[i] = PFNTOMFN(pfn);
 	}
 
-	/* Ensure that ballooned highmem pages don't have kmaps. */
-	kmap_flush_unused();
-	flush_tlb_all();
-
 	balloon_lock(flags);
 
 	/* No more mappings: invalidate P2M and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
-		pfn = mfn_to_pfn(mfn_list[i]);
-		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-		balloon_append(pfn_to_page(pfn));
+		pfn = MFNTOPFN(mfn_list[i]);
+		PFNTOMFN(pfn) = INVALID_P2M_ENTRY;
+		balloon_append(PHYS_TO_VM_PAGE(pfn << PAGE_SHIFT));
 	}
 
 	reservation.extent_start = mfn_list;
 	reservation.nr_extents   = nr_pages;
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-	BUG_ON(ret != nr_pages);
+	PANIC_IF(ret != nr_pages);
 
 	current_pages -= nr_pages;
 	totalram_pages = current_pages;
 
 	balloon_unlock(flags);
 
-	free_page((unsigned long)mfn_list);
+	free(mfn_list, M_DEVBUF);
 
 	return need_sleep;
 }
@@ -313,10 +277,11 @@
 		
 		/* Schedule more work if there is some still to be done. */
 		if (current_target() != current_pages)
-			mod_timer(&balloon_timer, jiffies + HZ);
+			timeout(balloon_alarm, NULL, ticks + HZ);
 
-		
+			msleep(balloon_process, balloon_lock, 0, "balloon", -1);
 	}
+
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -326,7 +291,7 @@
 	/* No need for lock. Not read-modify-write updates. */
 	hard_limit   = ~0UL;
 	target_pages = target;
-	wakeup(&balloon_process);
+	wakeup(balloon_process);
 }
 
 static struct xenbus_watch target_watch =
@@ -390,9 +355,8 @@
     
 	/* Initialise the balloon with excess memory space. */
 	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		if (!PageReserved(page))
-			balloon_append(page);
+		page = PHYS_TO_VM_PAGE(pfn << PAGE_SHIFT);
+		balloon_append(page);
 	}
 
 	target_watch.callback = watch_target;
@@ -425,7 +389,7 @@
 	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
 	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-	BUG_ON(ret != 1);
+	PANIC_IF(ret != 1);
 	return 0;
 }
 
@@ -433,37 +397,43 @@
 vm_page_t
 balloon_alloc_empty_page_range(unsigned long nr_pages)
 {
-	unsigned long vstart, flags;
+	unsigned long flags;
 	vm_page_t pages;
-	int ret;
-	
+	int i;
+	unsigned long pfn;
+	unsigned long *mfn_list;
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = 0,
+		.domid        = DOMID_SELF
+	};
 
-	pages = vm_page_alloc_contig(nr_pages, 0, -1, 4, 4)
+	/* Added the missing ';'. */
+	pages = vm_page_alloc_contig(nr_pages, 0, -1, 4, 4);
 	if (pages == NULL)
 		return NULL;
-#if 0
-	scrub_pages(vstart, 1 << order);
-/* I don't think we need to worry about unmapping these pages */
-	ret = generic_page_range(
-		&init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
-	BUG_ON(ret);
-#endif
+	
+	mfn_list = malloc(nr_pages*sizeof(unsigned long), M_DEVBUF, M_WAITOK);
+	
+	for (i = 0; i < nr_pages; i++) {
+		/* VM_PAGE_TO_PHYS takes a vm_page_t (pointer): &pages[i].
+		 * Invalidate the page's own pfn, not the loop index i. */
+		pfn = VM_PAGE_TO_PHYS(&pages[i]) >> PAGE_SHIFT;
+		mfn_list[i] = PFNTOMFN(pfn);
+		PFNTOMFN(pfn) = INVALID_P2M_ENTRY;
+	}
+
+	/* One hypercall for the whole batch, after the loop; the previous
+	 * version issued it once per iteration, each time covering all
+	 * nr_pages extents. */
+	reservation.extent_start = mfn_list;
+	reservation.nr_extents = nr_pages;
+	PANIC_IF(HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != nr_pages);
+	free(mfn_list, M_DEVBUF);	/* was leaked */
+
 	current_pages -= nr_pages;
 
 	wakeup(balloon_process);
 
-	flush_tlb_all();
-
 	return pages;
 }
 
-void balloon_dealloc_empty_page_range(
-	struct page *page, unsigned long nr_pages)
+void 
+balloon_dealloc_empty_page_range(vm_page_t page, unsigned long nr_pages)
 {
 	unsigned long i, flags;
-	unsigned int  order = get_order(nr_pages * PAGE_SIZE);
 
-	for (i = 0; i < (1UL << order); i++)
+	for (i = 0; i < nr_pages; i++)
 		balloon_append(page + i);
 
 	wakeup(balloon_process);

==== //depot/projects/xen3/src/sys/i386-xen/include/xenvar.h#2 (text+ko) ====

@@ -16,7 +16,8 @@
 if (xendebug_flags & argflags) XENPRINTF("(file=%s, line=%d) " _f "\n", __FILE__, __LINE__, ## _a);
 
 extern unsigned long *xen_machine_phys;
-#define PTOM(i) (((unsigned long *)xen_phys_machine)[i])
+#define PFNTOMFN(i) (((unsigned long *)xen_phys_machine)[i])
+#define MFNTOPFN(i) (xen_machine_phys[i])
 #define phystomach(pa) ((((unsigned long *)xen_phys_machine)[(pa >> PAGE_SHIFT)]) << PAGE_SHIFT)
 void xpq_init(void);
 


More information about the p4-projects mailing list