PERFORCE change 130076 for review
Kip Macy
kmacy at FreeBSD.org
Sun Dec 2 19:41:01 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=130076
Change 130076 by kmacy at entropy_kmacy_xen31 on 2007/12/03 03:40:07
integrate xen support directly into the native version of machdep.c
to minimize future divergence from native
Affected files ...
.. //depot/projects/xen31/sys/conf/files.i386#5 edit
.. //depot/projects/xen31/sys/dev/cxgb/cxgb_osdep.h#2 edit
.. //depot/projects/xen31/sys/i386/i386/machdep.c#5 edit
.. //depot/projects/xen31/sys/i386/include/pcpu.h#2 edit
.. //depot/projects/xen31/sys/i386/include/pmap.h#3 edit
Differences ...
==== //depot/projects/xen31/sys/conf/files.i386#5 (text+ko) ====
@@ -296,8 +296,7 @@
i386/i386/locore.s optional native no-obj
i386/xen/locore.s optional xen no-obj
i386/i386/longrun.c optional cpu_enable_longrun
-i386/i386/machdep.c optional native
-i386/xen/machdep.c optional xen
+i386/i386/machdep.c standard
i386/xen/xen_machdep.c optional xen
i386/i386/mem.c optional mem
i386/i386/minidump_machdep.c standard
==== //depot/projects/xen31/sys/dev/cxgb/cxgb_osdep.h#2 (text+ko) ====
@@ -58,11 +58,12 @@
};
+#ifndef PANIC_IF
#define PANIC_IF(exp) do { \
if (exp) \
panic("BUG: %s", exp); \
} while (0)
-
+#endif
#define m_get_priority(m) ((uintptr_t)(m)->m_pkthdr.rcvif)
#define m_set_priority(m, pri) ((m)->m_pkthdr.rcvif = (struct ifnet *)((uintptr_t)pri))
@@ -124,7 +125,9 @@
#if defined(__i386__) || defined(__amd64__)
+#ifndef mb
#define mb() __asm volatile("mfence":::"memory")
+#endif
#define rmb() __asm volatile("lfence":::"memory")
#define wmb() __asm volatile("sfence" ::: "memory")
#define smp_mb() mb()
==== //depot/projects/xen31/sys/i386/i386/machdep.c#5 (text+ko) ====
@@ -54,6 +54,7 @@
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
+#include "opt_global.h"
#include <sys/param.h>
#include <sys/proc.h>
@@ -141,6 +142,25 @@
uint32_t arch_i386_xbox_memsize = 0;
#endif
+#ifdef XEN
+/* XEN includes */
+#include <machine/xen/hypervisor-ifs.h>
+#include <machine/xen/xen-os.h>
+#include <machine/xen/hypervisor.h>
+#include <machine/xen/xenvar.h>
+#include <machine/xen/xenfunc.h>
+#include <machine/xen/xen_intr.h>
+
+void Xhypervisor_callback(void);
+void failsafe_callback(void);
+
+int gdt_set;
+extern trap_info_t trap_table[];
+struct proc_ldt default_proc_ldt;
+extern int init_first;
+int running_xen = 1;
+#endif
+
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
@@ -258,8 +278,9 @@
*/
bufinit();
vm_pager_bufferinit();
-
+#ifndef XEN
cpu_setregs();
+#endif
}
/*
@@ -1089,6 +1110,25 @@
return (0);
}
+static int cpu_idle_hlt = 1;
+TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
+ &cpu_idle_hlt, 0, "Idle loop HLT enable");
+
+#ifdef XEN
+void
+cpu_halt(void)
+{
+ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
+}
+
+static void
+cpu_idle_default(void)
+{
+ idle_block();
+}
+
+#else
/*
* Shutdown the CPU as much as possible
*/
@@ -1114,10 +1154,6 @@
* XXX I'm turning it on for SMP as well by default for now. It seems to
* help lock contention somewhat, and this is critical for HTT. -Peter
*/
-static int cpu_idle_hlt = 1;
-TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
-SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
- &cpu_idle_hlt, 0, "Idle loop HLT enable");
static void
cpu_idle_default(void)
@@ -1129,7 +1165,7 @@
*/
__asm __volatile("sti; hlt");
}
-
+#endif
/*
* Note that we have to be careful here to avoid a race between checking
* sched_runnable() and actually halting. If we don't do this, we may waste
@@ -1279,10 +1315,16 @@
*/
int _default_ldt;
+#ifdef XEN
+union descriptor *gdt;
+union descriptor *ldt;
+#else
union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
+union descriptor ldt[NLDT]; /* local descriptor table */
+#endif
+
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
-union descriptor ldt[NLDT]; /* local descriptor table */
struct region_descriptor r_gdt, r_idt; /* table descriptors */
struct mtx dt_lock; /* lock for GDT and LDT */
@@ -1316,7 +1358,7 @@
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
1, /* default 32 vs 16 bit size */
@@ -1343,7 +1385,7 @@
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMERA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
1, /* default 32 vs 16 bit size */
@@ -1352,7 +1394,7 @@
{ 0x0, /* segment base address */
0xfffff, /* length - all address space */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
1, /* default 32 vs 16 bit size */
@@ -1379,7 +1421,7 @@
{ 0x400, /* segment base address */
0xfffff, /* length */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
1, /* default 32 vs 16 bit size */
@@ -1389,11 +1431,12 @@
0x0, /* segment base address */
sizeof(struct i386tss)-1,/* length */
SDT_SYS386TSS, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* unused - default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
+#ifndef XEN
/* GLDT_SEL 10 LDT Descriptor */
{ (int) ldt, /* segment base address */
sizeof(ldt)-1, /* length - all address space */
@@ -1402,12 +1445,12 @@
1, /* segment descriptor present */
0, 0,
0, /* unused - default 32 vs 16 bit size */
- 0 /* limit granularity (byte/page units)*/ },
+ 0 /* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL 11 User LDT Descriptor per process */
{ (int) ldt, /* segment base address */
(512 * sizeof(union descriptor)-1), /* length */
SDT_SYSLDT, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* unused - default 32 vs 16 bit size */
@@ -1416,7 +1459,7 @@
{ (int) &dblfault_tss, /* segment base address */
sizeof(struct i386tss)-1,/* length - all address space */
SDT_SYS386TSS, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* unused - default 32 vs 16 bit size */
@@ -1425,7 +1468,7 @@
{ 0, /* segment base address (overwritten) */
0xfffff, /* length */
SDT_MEMERA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* default 32 vs 16 bit size */
@@ -1434,7 +1477,7 @@
{ 0, /* segment base address (overwritten) */
0xfffff, /* length */
SDT_MEMERA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* default 32 vs 16 bit size */
@@ -1443,7 +1486,7 @@
{ 0, /* segment base address (overwritten) */
0xfffff, /* length */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
1, /* default 32 vs 16 bit size */
@@ -1452,7 +1495,7 @@
{ 0, /* segment base address (overwritten) */
0xfffff, /* length */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* default 32 vs 16 bit size */
@@ -1461,7 +1504,7 @@
{ 0, /* segment base address (overwritten) */
0xfffff, /* length */
SDT_MEMRWA, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
1, /* segment descriptor present */
0, 0,
0, /* default 32 vs 16 bit size */
@@ -1470,11 +1513,12 @@
{ 0x0, /* segment base address */
0x0, /* length */
0, /* segment type */
- 0, /* segment descriptor priority level */
+ SEL_KPL, /* segment descriptor priority level */
0, /* segment descriptor present */
0, 0,
0, /* default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
+#endif /* !XEN */
};
static struct soft_segment_descriptor ldt_segs[] = {
@@ -1659,7 +1703,16 @@
goto physmap_done;
}
#endif
-
+#if defined(XEN)
+ Maxmem = xen_start_info->nr_pages - init_first;
+ physmem = Maxmem;
+ basemem = 0;
+ physmap[0] = init_first << PAGE_SHIFT;
+ physmap[1] = ptoa(Maxmem) - round_page(MSGBUF_SIZE);
+ physmap_idx = 0;
+ goto physmap_done;
+#endif
+
hasbrokenint12 = 0;
TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
bzero(&vmf, sizeof(vmf));
@@ -1836,7 +1889,7 @@
vmf.vmf_ah = 0x88;
vm86_intcall(0x15, &vmf);
extmem = vmf.vmf_ax;
-#else
+#elif !defined(XEN)
/*
* Prefer the RTC value for extended memory.
*/
@@ -2067,8 +2120,250 @@
for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
off);
+
+ PT_UPDATES_FLUSH();
+}
+
+#ifdef XEN
+void
+init386(int first)
+{
+
+ int error, gsel_tss, metadata_missing, x;
+ unsigned long gdtmachpfn;
+ struct pcpu *pc;
+
+ thread0.td_kstack = proc0kstack;
+ thread0.td_pcb = (struct pcb *)
+ (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+
+ /*
+ * This may be done better later if it gets more high level
+ * components in it. If so just link td->td_proc here.
+ */
+ proc_linkup0(&proc0, &thread0);
+
+ metadata_missing = 0;
+ if (xen_start_info->mod_start) {
+ preload_metadata = (caddr_t)xen_start_info->mod_start;
+ preload_bootstrap_relocate(KERNBASE);
+ } else {
+ metadata_missing = 1;
+ }
+ if (envmode == 1)
+ kern_envp = static_env;
+ else if ((caddr_t)xen_start_info->cmd_line)
+ kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
+
+ boothowto |= xen_boothowto(kern_envp);
+
+ /* Init basic tunables, hz etc */
+ init_param1();
+
+ /*
+ * Make gdt memory segments. All segments cover the full 4GB
+ * of address space and permissions are enforced at page level.
+ */
+
+ /*
+ * XEN occupies the upper 64MB of virtual address space
+ * At its base it manages an array mapping machine page frames
+ * to physical page frames - hence we need to be able to
+ * access 4GB - (64MB - 4MB + 64k)
+ */
+ gdt_segs[GCODE_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+ gdt_segs[GDATA_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+ gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+ gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+ gdt_segs[GUFS_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+ gdt_segs[GUGS_SEL].ssd_limit = atop(0 - ((1 << 26) -
+ (1 << 22) + (1 << 16)));
+
+ pc = &__pcpu[0];
+ gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
+ gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
+ gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
+
+ for (x = 0; x < NGDT; x++)
+ ssdtosd(&gdt_segs[x], &gdt[x].sd);
+
+
+ mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
+
+ printk("gdt=%p\n", gdt);
+ printk("PTmap=%p\n", PTmap);
+ printk("addr=%p\n", *vtopte((unsigned long)gdt) & ~PG_RW);
+
+ PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
+ gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
+ PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
+ lgdt(&r_gdt /* unused */);
+ gdt_set = 1;
+
+ if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
+ panic("set_trap_table failed - error %d\n", error);
+ }
+ HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
+ (unsigned long)Xhypervisor_callback,
+ GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
+
+ pcpu_init(pc, 0, sizeof(struct pcpu));
+ PCPU_SET(prvspace, pc);
+ PCPU_SET(curthread, &thread0);
+ PCPU_SET(curpcb, thread0.td_pcb);
+ PCPU_SET(pdir, (unsigned long)IdlePTD);
+
+ /*
+ * Initialize mutexes.
+ *
+ * icu_lock: in order to allow an interrupt to occur in a critical
+ * section, to set pcpu->ipending (etc...) properly, we
+ * must be able to get the icu lock, so it can't be
+ * under witness.
+ */
+ mutex_init();
+ mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
+
+ /* make ldt memory segments */
+ ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
+ ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
+ for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
+ ssdtosd(&ldt_segs[x], &ldt[x].sd);
+
+ default_proc_ldt.ldt_base = (caddr_t)ldt;
+ default_proc_ldt.ldt_len = 6;
+ _default_ldt = (int)&default_proc_ldt;
+ PCPU_SET(currentldt, _default_ldt)
+ PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
+ xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
+
+#ifdef XBOX
+ /*
+ * The following code queries the PCI ID of 0:0:0. For the XBOX,
+ * This should be 0x10de / 0x02a5.
+ *
+ * This is exactly what Linux does.
+ */
+ outl(0xcf8, 0x80000000);
+ if (inl(0xcfc) == 0x02a510de) {
+ arch_i386_is_xbox = 1;
+ pic16l_setled(XBOX_LED_GREEN);
+
+ /*
+ * We are an XBOX, but we may have either 64MB or 128MB of
+ * memory. The PCI host bridge should be programmed for this,
+ * so we just query it.
+ */
+ outl(0xcf8, 0x80000084);
+ arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
+ }
+#endif /* XBOX */
+#if defined (XEN_PRIVILEGED)
+ /*
+ * Initialize the i8254 before the console so that console
+ * initialization can use DELAY().
+ */
+ i8254_init();
+#endif
+ /*
+ * Initialize the console before we print anything out.
+ */
+ cninit();
+
+ if (metadata_missing)
+ printf("WARNING: loader(8) metadata is missing!\n");
+
+#ifdef DEV_ISA
+ if (xen_start_info->flags & SIF_PRIVILEGED) {
+ elcr_probe();
+ atpic_startup();
+ }
+#endif
+
+#ifdef DDB
+ ksym_start = bootinfo.bi_symtab;
+ ksym_end = bootinfo.bi_esymtab;
+#endif
+
+ kdb_init();
+
+#ifdef KDB
+ if (boothowto & RB_KDB)
+ kdb_enter("Boot flags requested debugger");
+#endif
+
+ finishidentcpu(); /* Final stage of CPU initialization */
+ setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
+ GSEL(GCODE_SEL, SEL_KPL));
+ setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
+ GSEL(GCODE_SEL, SEL_KPL));
+ initializecpu(); /* Initialize CPU registers */
+
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+ /* Note: -16 is so we can grow the trapframe if we came from vm86 */
+ PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
+ KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
+ PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
+ PCPU_GET(common_tss.tss_esp0));
+
+
+ /* pointer to selector slot for %fs/%gs */
+ PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
+
+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+ dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+#ifdef PAE
+ dblfault_tss.tss_cr3 = (int)IdlePDPT;
+#else
+ dblfault_tss.tss_cr3 = (int)IdlePTD;
+#endif
+ dblfault_tss.tss_eip = (int)dblfault_handler;
+ dblfault_tss.tss_eflags = PSL_KERNEL;
+ dblfault_tss.tss_ds = dblfault_tss.tss_es =
+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+ vm86_initialize();
+ getmemsize(first);
+ init_param2(physmem);
+
+ /* now running on new page tables, configured,and u/iom is accessible */
+
+ msgbufinit(msgbufp, MSGBUF_SIZE);
+
+ /* transfer to user mode */
+
+ _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
+ _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
+
+ /* setup proc 0's pcb */
+ thread0.td_pcb->pcb_flags = 0;
+#ifdef PAE
+ thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
+#else
+ thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
+#endif
+ thread0.td_pcb->pcb_ext = 0;
+ thread0.td_frame = &proc0_tf;
+ thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
+ thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
}
+
+
+#else
+
void
init386(first)
int first;
@@ -2331,6 +2626,7 @@
thread0.td_pcb->pcb_ext = 0;
thread0.td_frame = &proc0_tf;
}
+#endif /* !XEN */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
==== //depot/projects/xen31/sys/i386/include/pcpu.h#2 (text+ko) ====
@@ -45,6 +45,9 @@
* to each CPU's data can be set up for things like "check curproc on all
* other processors"
*/
+
+
+#ifdef XEN
#define PCPU_MD_FIELDS \
struct pcpu *pc_prvspace; /* Self-reference */ \
struct pmap *pc_curpmap; \
@@ -55,8 +58,30 @@
int pc_currentldt; \
u_int pc_acpi_id; /* ACPI CPU id */ \
u_int pc_apic_id; \
+ int pc_private_tss; /* Flag indicating private tss*/\
+ u_int pc_cr3; /* track cr3 for R1/R3*/ \
+ u_int pc_pdir; \
+ u_int pc_lazypmap; \
+ u_int pc_rendezvous; \
+ u_int pc_cpuast
+
+
+#else
+#define PCPU_MD_FIELDS \
+ struct pcpu *pc_prvspace; /* Self-reference */ \
+ struct pmap *pc_curpmap; \
+ struct i386tss pc_common_tss; \
+ struct segment_descriptor pc_common_tssd; \
+ struct segment_descriptor *pc_tss_gdt; \
+ struct segment_descriptor *pc_fsgs_gdt; \
+ int pc_currentldt; \
+ u_int pc_acpi_id; /* ACPI CPU id */ \
+ u_int pc_apic_id; \
int pc_private_tss /* Flag indicating private tss */
+
+#endif /* !XEN */
+
#ifdef lint
extern struct pcpu *pcpup;
==== //depot/projects/xen31/sys/i386/include/pmap.h#3 (text+ko) ====
@@ -189,6 +189,45 @@
#define vtopte(va) (PTmap + i386_btop(va))
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
+
+
+#ifdef XEN
+#include <machine/xen/xen-os.h>
+#include <machine/xen/xenvar.h>
+#include <machine/xen/xenpmap.h>
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+static __inline vm_paddr_t
+pmap_kextract_ma(vm_offset_t va)
+{
+ vm_paddr_t ma;
+ if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) {
+ ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1));
+ } else {
+ ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK);
+ }
+ return ma;
+}
+
+static __inline vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+ return xpmap_mtop(pmap_kextract_ma(va));
+}
+#define vtomach(va) pmap_kextract_ma(((vm_offset_t) (va)))
+
+vm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va);
+
+void pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa);
+void pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
+void pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);
+
+#else
/*
* Routine: pmap_kextract
* Function:
@@ -304,6 +343,7 @@
#define pte_clear(ptep) pte_store((ptep), (pt_entry_t)0ULL)
#define pde_store(pdep, pde) pte_store((pdep), (pde))
+#endif /* !XEN */
#endif /* _KERNEL */
More information about the p4-projects
mailing list