PERFORCE change 89355 for review

Kip Macy kmacy@FreeBSD.org
Sat Jan 7 22:38:20 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=89355

Change 89355 by kmacy at kmacy:freebsd7_xen3 on 2006/01/08 06:37:53

	Simplify the initvalues() routine in preparation for adding back kernload.
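
	For reference, the change below replaces the Linux-derived __pa()/PAGE_OFFSET
	helpers with VTOP()/VTOPFN()/PFNTOV() address-translation macros and folds the
	common PTE flag combination into PG_KERNEL. A minimal stand-alone sketch of how
	those translation macros relate, assuming the conventional i386 KERNBASE of
	0xc0000000 and 4 KB pages (illustrative values, not taken from the Xen port's
	headers):

	/*
	 * Sketch only, not part of the change; KERNBASE/PAGE_SHIFT below are
	 * illustrative assumptions matching typical i386 defaults.
	 */
	#include <stdio.h>

	#define KERNBASE    0xc0000000UL
	#define PAGE_SHIFT  12
	#define PAGE_SIZE   (1UL << PAGE_SHIFT)

	/* Same shape as the macros added in this change. */
	#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define VTOP(x)     ((unsigned long)(x) - KERNBASE)                  /* kernel VA -> physical address */
	#define VTOPFN(x)   (((unsigned long)(x) - KERNBASE) >> PAGE_SHIFT)  /* kernel VA -> page frame number */
	#define PFNTOV(x)   (((unsigned long)(x) << PAGE_SHIFT) + KERNBASE)  /* page frame number -> kernel VA */

	int
	main(void)
	{
		unsigned long va = KERNBASE + 5 * PAGE_SIZE + 0x123;

		printf("VTOP   = %#lx\n", VTOP(va));            /* 0x5123 */
		printf("VTOPFN = %lu\n",  VTOPFN(va));          /* 5 */
		printf("PFNTOV = %#lx\n", PFNTOV(VTOPFN(va)));  /* va rounded down to its page */
		printf("PFN_UP = %lu\n",  PFN_UP(VTOP(va)));    /* 6: rounds up to the next frame */
		return (0);
	}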

Affected files ...

.. //depot/projects/xen3/src/sys/i386-xen/i386-xen/xen_machdep.c#2 edit

Differences ...

==== //depot/projects/xen3/src/sys/i386-xen/i386-xen/xen_machdep.c#2 (text+ko) ====

@@ -581,10 +581,13 @@
 struct ringbuf_head *xen_store; /* XXX move me */
 char *console_page;
 
-/* Linux infection */
-#define PAGE_OFFSET  KERNBASE
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
 #define PFN_UP(x)    (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define VTOP(x) ((unsigned long)(x) - KERNBASE)
+#define VTOPFN(x) (((unsigned long)(x) - KERNBASE) >> PAGE_SHIFT)
+#define PFNTOV(x) (((unsigned long)(x)  << PAGE_SHIFT) + KERNBASE)
+#define PG_KERNEL  (PG_V | PG_A | PG_RW | PG_M)
+
+
 void
 initvalues(start_info_t *startinfo)
 { 
@@ -602,7 +605,9 @@
 
     xen_start_info = startinfo;
     xen_phys_machine = (unsigned long *)startinfo->mfn_list;
-    unsigned long tmpindex = ((__pa(xen_start_info->pt_base) >> PAGE_SHIFT) + xen_start_info->nr_pt_frames) + 3 /* number of pages allocated after the pts + 1*/;
+
+    /* number of pages allocated after the pts + 1*/;
+    unsigned long tmpindex = (VTOPFN(xen_start_info->pt_base) + xen_start_info->nr_pt_frames) + 3;
     xendebug_flags = 0xffffffff;
     /* 
      * pre-zero unused mapped pages - mapped on 4MB boundary
@@ -610,10 +615,10 @@
 #ifndef PAE
     round_tmpindex = (((tmpindex - 1) / 1024) + 1)*1024;
 #endif
-    bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (round_tmpindex - tmpindex)*PAGE_SIZE); 
+    bzero((char *)PFNTOV(tmpindex), (round_tmpindex - tmpindex)*PAGE_SIZE); 
 
-    IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
-    KPTphys = xpmap_ptom(__pa(startinfo->pt_base + PAGE_SIZE));
+    IdlePTD = (pd_entry_t *)xpmap_ptom(VTOP(startinfo->pt_base));
+    KPTphys = xpmap_ptom(VTOP(startinfo->pt_base + PAGE_SIZE));
     XENPRINTF("IdlePTD %p\n", IdlePTD);
     XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
 	      "mod_start: 0x%lx mod_len: 0x%lx\n",
@@ -622,26 +627,20 @@
 	      xen_start_info->mod_start, xen_start_info->mod_len);
 
 
-
-#if 0
-    /* Map proc0's UPAGES */
-    proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
-    tmpindex += UAREA_PAGES;
-#endif
     /* Map proc0's KSTACK */
-    proc0kstack = KERNBASE + (tmpindex << PAGE_SHIFT);
+    proc0kstack = PFNTOV(tmpindex);
     tmpindex += KSTACK_PAGES;    
     
     /* allocate page for gdt */
-    gdt = (union descriptor *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    gdt = (union descriptor *)PFNTOV(tmpindex);
     tmpindex++; 
 
     /* allocate page for ldt */
-    ldt = (union descriptor *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    ldt = (union descriptor *)PFNTOV(tmpindex);
     tmpindex++; 
 
     /* initialize page directory shadow page */
-    pdir_shadow = (vm_offset_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    pdir_shadow = (vm_offset_t *)PFNTOV(tmpindex);
     i686_pagezero(pdir_shadow);
     pdir_shadow_ma = xpmap_ptom(tmpindex << PAGE_SHIFT);
     PT_SET_MA(pdir_shadow, pdir_shadow_ma | PG_V | PG_A);
@@ -650,7 +649,7 @@
     /* setup shadow mapping first so vtomach will work */
     xen_pt_pin((vm_paddr_t)pdir_shadow_ma);
     xen_queue_pt_update((vm_paddr_t)(IdlePTD + PTDPTDI), 
-			pdir_shadow_ma | PG_V | PG_A | PG_RW | PG_M);
+			pdir_shadow_ma | PG_KERNEL);
     xen_queue_pt_update(pdir_shadow_ma + PTDPTDI*sizeof(vm_paddr_t), 
 			((vm_paddr_t)IdlePTD) | PG_V | PG_A);
     xen_queue_pt_update(pdir_shadow_ma + KPTDI*sizeof(vm_paddr_t), 
@@ -661,30 +660,29 @@
 
 
 #ifdef SMP
-#if 0
     /* allocate cpu0 private page */
-    cpu0prvpage = (KERNBASE + (tmpindex << PAGE_SHIFT));
+    cpu0prvpage = PFNTOV(tmpindex);
     tmpindex++; 
-#endif
+
     /* allocate SMP page table */
-    SMPpt = (unsigned long *)(KERNBASE + (tmpindex << PAGE_SHIFT));
-#if 0
+    SMPpt = (unsigned long *)PFNTOV(tmpindex);
+
     /* Map the private page into the SMP page table */
     SMPpt[0] = vtomach(cpu0prvpage) | PG_RW | PG_M | PG_V | PG_A;
-#endif
+
     /* map SMP page table RO */
     PT_SET_MA(SMPpt, *vtopte((vm_offset_t)SMPpt) & ~PG_RW);
 
     /* put the page table into the page directory */
     xen_queue_pt_update((vm_paddr_t)(IdlePTD + MPPTDI), 
-			xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
+			xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_KERNEL);
     xen_queue_pt_update(pdir_shadow_ma + MPPTDI*sizeof(vm_paddr_t), 
 			xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_V | PG_A);
     tmpindex++;
 #endif
 
 #ifdef PMAP_DEBUG    
-    pteinfo_list = (pteinfo_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    pteinfo_list = (pteinfo_t *)PFNTOV(tmpindex);
     tmpindex +=  ((xen_start_info->nr_pages >> 10) + 1)*(1 + XPQ_CALL_DEPTH*XPQ_CALL_COUNT);
     
     if (tmpindex > 980)
@@ -698,25 +696,25 @@
     /* allocate remainder of NKPT pages */
     for (i = 0; i < NKPT-1; i++, tmpindex++) {
 	xen_queue_pt_update((vm_paddr_t)(IdlePTD + KPTDI + i + 1), 
-			    xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A));
+			    xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_KERNEL));
 	xen_queue_pt_update(pdir_shadow_ma + (KPTDI + i + 1)*sizeof(vm_paddr_t), 
 			    xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_V | PG_A));
     }
     tmpindex += NKPT-1;
     PT_UPDATES_FLUSH();
 
-    HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    HYPERVISOR_shared_info = (shared_info_t *)PFNTOV(tmpindex);
     PT_SET_MA(HYPERVISOR_shared_info, 
-	      xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
+	      xen_start_info->shared_info | PG_KERNEL);
     tmpindex++;
 
-    xen_store = (struct ringbuf_head *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    xen_store = (struct ringbuf_head *)PFNTOV(tmpindex);
     PT_SET_MA(xen_store,
-	      (xen_start_info->store_mfn << PAGE_SHIFT)| PG_A | PG_V | PG_RW | PG_M);
+	      (xen_start_info->store_mfn << PAGE_SHIFT)| PG_KERNEL);
     tmpindex++;
-    console_page = (char *)(KERNBASE + (tmpindex << PAGE_SHIFT));
+    console_page = (char *)PFNTOV(tmpindex);
     PT_SET_MA(console_page,
-	      (xen_start_info->console_mfn << PAGE_SHIFT)| PG_A | PG_V | PG_RW | PG_M);
+	      (xen_start_info->console_mfn << PAGE_SHIFT)| PG_KERNEL);
     tmpindex++;
     
 
@@ -727,7 +725,7 @@
 	    for (j = 0; j < npages; j++) {
 		    vm_paddr_t ma = xpmap_ptom(tmpindex << PAGE_SHIFT);
 		    tmpindex++;
-		    PT_SET_VA_MA(SMPpt + i*npages + j, ma | PG_A | PG_V | PG_RW | PG_M, FALSE);
+		    PT_SET_VA_MA(SMPpt + i*npages + j, ma | PG_KERNEL, FALSE);
 	    }
     }
     xen_flush_queue();

