PERFORCE change 28188 for review
Peter Wemm
peter at FreeBSD.org
Fri Apr 4 22:53:01 PST 2003
http://perforce.freebsd.org/chv.cgi?CH=28188
Change 28188 by peter at peter_overcee on 2003/04/04 22:52:14
Apply blunt force to make pmap.c compile. There's still a bit of linkage
to do and some fixes to make while aiming for the lower-4G VM target
(e.g., PML4pml4e -> PTDpde needs to be dealt with in pmap_is_current()).
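
For context: on x86-64 the MMU walks four table levels (PML4 -> PDP ->
PD -> PT), which is why the single i386 page directory grows pm_pdp and
pm_pml4 companions below, and why pmap_activate() now loads %cr3 from
vtophys(pmap->pm_pml4) rather than the page directory. As a rough sketch
(not part of the patch; the shift values are the architectural ones, but
the helper names are illustrative stand-ins, not the real pmap macros),
a virtual address indexes the four levels like this:

#include <stdint.h>

/* x86-64: 4K pages, 512 eight-byte entries per table page. */
#define PTSHIFT		12		/* bits 12..20 index the page table */
#define PDSHIFT		21		/* bits 21..29 index the page directory */
#define PDPSHIFT	30		/* bits 30..38 index the PDP */
#define PML4SHIFT	39		/* bits 39..47 index the PML4 */
#define NIDXMASK	0x1ffULL	/* 9 index bits per level */

/* Hypothetical helpers, not FreeBSD's own macros. */
static inline unsigned va_pml4_index(uint64_t va) { return (va >> PML4SHIFT) & NIDXMASK; }
static inline unsigned va_pdp_index(uint64_t va)  { return (va >> PDPSHIFT) & NIDXMASK; }
static inline unsigned va_pd_index(uint64_t va)   { return (va >> PDSHIFT) & NIDXMASK; }
static inline unsigned va_pt_index(uint64_t va)   { return (va >> PTSHIFT) & NIDXMASK; }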
Affected files ...
.. //depot/projects/hammer/sys/x86_64/include/pmap.h#10 edit
.. //depot/projects/hammer/sys/x86_64/x86_64/pmap.c#13 edit
Differences ...
==== //depot/projects/hammer/sys/x86_64/include/pmap.h#10 (text+ko) ====
@@ -151,9 +151,13 @@
extern pd_entry_t PDmap[];
extern pdp_entry_t PDPmap[];
extern pml4_entry_t PML4[];
+extern pdp_entry_t PDP[];
+extern pd_entry_t PTD[];
extern pd_entry_t PML4pml4e[];
-extern pd_entry_t *IdlePML4; /* physical address of "Idle" state directory */
+extern pml4_entry_t *IdlePML4;	/* physical address of "Idle" state level 4 table */
+extern pdp_entry_t *IdlePDP;	/* physical address of "Idle" state level 3 table */
+extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state page directory */
#endif
#ifdef _KERNEL
@@ -209,10 +213,8 @@
u_long pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
-#ifdef PAE
- pdpt_entry_t *pm_pdpt; /* KVA of page director pointer
- table */
-#endif
+ pdp_entry_t *pm_pdp; /* KVA of level 3 page table */
+ pml4_entry_t *pm_pml4; /* KVA of level 4 page table */
};
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
==== //depot/projects/hammer/sys/x86_64/x86_64/pmap.c#13 (text+ko) ====
@@ -187,9 +187,8 @@
vm_offset_t kernel_vm_end;
u_int32_t KERNend; /* Written in 32 bit mode */
-#ifdef PAE
-static uma_zone_t pdptzone;
-#endif
+static uma_zone_t pdpzone;
+static uma_zone_t pml4zone;
/*
* Data for the pv entry allocation mechanism
@@ -240,11 +239,8 @@
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
static void *pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
-#ifdef PAE
-static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
-#endif
-
-static pd_entry_t pdir4mb;
+static void *pmap_pdp_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+static void *pmap_pml4_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
@@ -260,10 +256,6 @@
{
vm_offset_t newaddr = addr;
-#ifndef DISABLE_PSE
- if (cpu_feature & CPUID_PSE)
- newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
-#endif
return newaddr;
}
@@ -309,10 +301,9 @@
/*
* Initialize the kernel pmap (which is statically allocated).
*/
- kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
-#ifdef PAE
- kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
-#endif
+ kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_long)IdlePTD);
+ kernel_pmap->pm_pdp = (pdp_entry_t *) (KERNBASE + (u_long)IdlePDP);
+	kernel_pmap->pm_pml4 = (pml4_entry_t *) (KERNBASE + (u_long)IdlePML4);
kernel_pmap->pm_active = -1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvlist);
LIST_INIT(&allpmaps);
@@ -372,37 +363,12 @@
PTD[i] = 0;
pgeflag = 0;
-#ifndef DISABLE_PG_G
- if (cpu_feature & CPUID_PGE)
- pgeflag = PG_G;
-#endif
/*
* Initialize the 4MB page size flag
*/
pseflag = 0;
-/*
- * The 4MB page version of the initial
- * kernel page mapping.
- */
- pdir4mb = 0;
-#ifndef DISABLE_PSE
- if (cpu_feature & CPUID_PSE)
- pseflag = PG_PS;
-#endif
-#ifndef DISABLE_PSE
- if (pseflag) {
- pd_entry_t ptditmp;
- /*
- * Note that we have enabled PSE mode
- */
- ptditmp = *(PTmap + i386_btop(KERNBASE));
- ptditmp &= ~(NBPDR - 1);
- ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
- pdir4mb = ptditmp;
- }
-#endif
/*
* Turn on PGE/PSE.
*/
@@ -424,28 +390,10 @@
load_cr4(rcr4() | CR4_PGE);
invltlb(); /* Insurance */
}
-#ifndef DISABLE_PSE
- if (pseflag && (cpu_feature & CPUID_PSE)) {
- load_cr4(rcr4() | CR4_PSE);
- invltlb(); /* Insurance */
- }
-#endif
if (PCPU_GET(cpuid) == 0) {
-#ifndef DISABLE_PSE
- if (pdir4mb) {
- kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
- invltlb(); /* Insurance */
- }
-#endif
if (pgeflag) {
/* Turn on PG_G for text, data, bss pages. */
va = (vm_offset_t)btext;
-#ifndef DISABLE_PSE
- if (pseflag && (cpu_feature & CPUID_PSE)) {
- if (va < KERNBASE + (1 << PDRSHIFT))
- va = KERNBASE + (1 << PDRSHIFT);
- }
-#endif
endva = KERNBASE + KERNend;
while (va < endva) {
pte = vtopte(va);
@@ -470,14 +418,19 @@
return (void *)kmem_alloc(kernel_map, bytes);
}
-#ifdef PAE
+static void *
+pmap_pdp_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+{
+ *flags = UMA_SLAB_PRIV;
+ return (void *)kmem_alloc(kernel_map, bytes);
+}
+
static void *
-pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+pmap_pml4_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
*flags = UMA_SLAB_PRIV;
- return (contigmalloc(PAGE_SIZE, NULL, 0, 0x0ULL, 0xffffffffULL, 1, 0));
+ return (void *)kmem_alloc(kernel_map, bytes);
}
-#endif
/*
* Initialize the pmap module.
@@ -517,11 +470,13 @@
uma_zone_set_allocf(pvzone, pmap_pv_allocf);
uma_prealloc(pvzone, initial_pvs);
-#ifdef PAE
- pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
- NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 0);
- uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
-#endif
+ pdpzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdp_entry_t), NULL,
+ NULL, NULL, NULL, (NPGPTD * sizeof(pdp_entry_t)) - 1, 0);
+ uma_zone_set_allocf(pdpzone, pmap_pdp_allocf);
+
+ pml4zone = uma_zcreate("PML4", sizeof(pml4_entry_t), NULL,
+ NULL, NULL, NULL, sizeof(pml4_entry_t) - 1, 0);
+ uma_zone_set_allocf(pml4zone, pmap_pml4_allocf);
/*
* Now it is safe to enable pv_table recording.
@@ -622,7 +577,7 @@
pmap_is_current(pmap_t pmap)
{
return (pmap == kernel_pmap ||
- (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME));
+ (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
}
/*
@@ -652,7 +607,7 @@
*PMAP1 = newpf | PG_RW | PG_V;
pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR1);
}
- return PADDR1 + (i386_btop(va) & (NPTEPG - 1));
+ return PADDR1 + (x86_64_btop(va) & (NPTEPG - 1));
}
return (0);
}
@@ -1037,7 +992,7 @@
* Do an invltlb to make the invalidated mapping
* take effect immediately.
*/
- pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
+ pteva = VM_MAXUSER_ADDRESS + x86_64_ptob(m->pindex);
pmap_invalidate_page(pmap, pteva);
}
@@ -1097,9 +1052,8 @@
{
pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
-#ifdef PAE
- pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
-#endif
+ pmap->pm_pdp = (pdp_entry_t *)(KERNBASE + (vm_offset_t)IdlePDP);
+ pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + (vm_offset_t)IdlePML4);
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1127,14 +1081,10 @@
if (pmap->pm_pdir == NULL) {
pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
NBPTD);
-#ifdef PAE
- pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
- KASSERT(((vm_offset_t)pmap->pm_pdpt &
- ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
- ("pmap_pinit: pdpt misaligned"));
- KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
- ("pmap_pinit: pdpt above 4g"));
-#endif
+ pmap->pm_pdp = (pdp_entry_t *)kmem_alloc_pageable(kernel_map,
+ PAGE_SIZE);
+ pmap->pm_pml4 = (pml4_entry_t *)kmem_alloc_pageable(kernel_map,
+ PAGE_SIZE);
}
/*
@@ -1175,7 +1125,7 @@
pa = VM_PAGE_TO_PHYS(ptdpg[i]);
pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
#ifdef PAE
- pmap->pm_pdpt[i] = pa | PG_V;
+ pmap->pm_pdp[i] = pa | PG_V;
#endif
}
@@ -1242,7 +1192,7 @@
*/
if ((m->flags & PG_ZERO) == 0) {
if (pmap_is_current(pmap)) {
- pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex);
+ pteva = VM_MAXUSER_ADDRESS + x86_64_ptob(ptepindex);
bzero((caddr_t) pteva, PAGE_SIZE);
} else {
pmap_zero_page(m);
@@ -1347,7 +1297,7 @@
for (i = 0; i < NPGPTD; i++) {
m = TAILQ_FIRST(&object->memq);
#ifdef PAE
- KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
+ KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdp[i] & PG_FRAME),
("pmap_release: got wrong ptd page"));
#endif
m->wire_count--;
@@ -2203,7 +2153,7 @@
return;
}
- psize = i386_btop(size);
+ psize = x86_64_btop(size);
if ((object->type != OBJT_VNODE) ||
((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
@@ -2251,7 +2201,7 @@
vm_page_busy(p);
vm_page_unlock_queues();
mpte = pmap_enter_quick(pmap,
- addr + i386_ptob(tmpidx), p, mpte);
+ addr + x86_64_ptob(tmpidx), p, mpte);
vm_page_lock_queues();
vm_page_wakeup(p);
}
@@ -2506,7 +2456,7 @@
if (*CMAP2)
panic("pmap_zero_page: CMAP2 busy");
*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
- invlpg((u_int)CADDR2);
+ invlpg((u_long)CADDR2);
pagezero(CADDR2);
*CMAP2 = 0;
mtx_unlock(&CMAPCADDR12_lock);
@@ -2526,7 +2476,7 @@
if (*CMAP2)
panic("pmap_zero_page: CMAP2 busy");
*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
- invlpg((u_int)CADDR2);
+ invlpg((u_long)CADDR2);
if (off == 0 && size == PAGE_SIZE)
pagezero(CADDR2);
else
@@ -2548,7 +2498,7 @@
if (*CMAP3)
panic("pmap_zero_page: CMAP3 busy");
*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
- invlpg((u_int)CADDR3);
+ invlpg((u_long)CADDR3);
pagezero(CADDR3);
*CMAP3 = 0;
}
@@ -2570,8 +2520,8 @@
panic("pmap_copy_page: CMAP2 busy");
*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
- invlpg((u_int)CADDR1);
- invlpg((u_int)CADDR2);
+ invlpg((u_long)CADDR1);
+ invlpg((u_long)CADDR2);
bcopy(CADDR1, CADDR2, PAGE_SIZE);
*CMAP1 = 0;
*CMAP2 = 0;
@@ -3052,16 +3002,12 @@
{
struct proc *p = td->td_proc;
pmap_t pmap;
- u_int32_t cr3;
+ u_int64_t cr3;
critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
pmap->pm_active |= PCPU_GET(cpumask);
-#ifdef PAE
- cr3 = vtophys(pmap->pm_pdpt);
-#else
- cr3 = vtophys(pmap->pm_pdir);
-#endif
+ cr3 = vtophys(pmap->pm_pml4);
/* XXXKSE this is wrong.
* pmap_activate is for the current thread on the current cpu
*/