svn commit: r262748 - head/sys/amd64/amd64
Jung-uk Kim
jkim at FreeBSD.org
Tue Mar 4 20:07:38 UTC 2014
Author: jkim
Date: Tue Mar 4 20:07:36 2014
New Revision: 262748
URL: http://svnweb.freebsd.org/changeset/base/262748
Log:
Properly save and restore CR0.
MFC after: 3 days
Modified:
head/sys/amd64/amd64/cpu_switch.S
head/sys/amd64/amd64/mpboot.S
head/sys/amd64/amd64/pmap.c
Modified: head/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- head/sys/amd64/amd64/cpu_switch.S Tue Mar 4 19:49:41 2014 (r262747)
+++ head/sys/amd64/amd64/cpu_switch.S Tue Mar 4 20:07:36 2014 (r262748)
@@ -552,8 +552,10 @@ END(resumectx)
*/
ENTRY(ctx_fpusave)
movq %cr0,%rax
+ pushq %rax
clts
call fpusave
+ popq %rax
movq %rax,%cr0
ret
END(ctx_fpusave)
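
The cpu_switch.S hunk fixes a register-clobber bug: fpusave is a call that follows
the C calling convention, under which %rax is caller-saved (it doubles as the
return-value register), so the CR0 value parked in %rax before the call could come
back trashed and a stale value would then be written to %cr0. Spilling the saved
value to the stack across the call is the minimal fix. A hedged C sketch of the
intended pattern, using the rcr0()/load_cr0()/clts() accessors from
<machine/cpufunc.h> (ctx_fpusave_sketch is an illustrative name, not the kernel
symbol):

    #include <sys/types.h>
    #include <machine/cpufunc.h>    /* rcr0(), load_cr0(), clts() */

    void    fpusave(void *addr);    /* C-ABI call: free to clobber %rax */

    static void
    ctx_fpusave_sketch(void *fpu_ctx)
    {
            u_long cr0;

            cr0 = rcr0();           /* save CR0, notably CR0.TS */
            clts();                 /* clear TS so FPU access cannot trap */
            fpusave(fpu_ctx);       /* must not lose the saved CR0 */
            load_cr0(cr0);          /* restore the value saved above */
    }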
Modified: head/sys/amd64/amd64/mpboot.S
==============================================================================
--- head/sys/amd64/amd64/mpboot.S Tue Mar 4 19:49:41 2014 (r262747)
+++ head/sys/amd64/amd64/mpboot.S Tue Mar 4 20:07:36 2014 (r262748)
@@ -36,6 +36,7 @@
.p2align 4,0
.globl mptramp_start
mptramp_start:
+#ifndef __clang__
.code16
/*
* The AP enters here in response to the startup IPI.
@@ -65,6 +66,43 @@ mptramp_start:
/* Enable protected mode */
movl $CR0_PE, %eax
mov %eax, %cr0
+#else
+ /*
+ * The AP enters here in response to the startup IPI.
+ * We are in real mode. %cs is the only segment register set.
+ */
+ cli /* make sure no interrupts */
+ mov %cs, %eax /* copy %cs to %ds. Remember these */
+ mov %eax, %ds /* are offsets rather than selectors */
+ mov %eax, %ss
+
+ /*
+ * Find relocation base and patch the gdt descriptor and ljmp targets
+ */
+ .byte 0x66
+ xorl %ebx, %ebx
+ mov %cs, %ebx
+ .byte 0x66
+ sall $4, %ebx /* %ebx is now our relocation base */
+ .byte 0x66, 0x09, 0x1e
+ .word lgdt_desc-mptramp_start+2
+ .byte 0x66, 0x09, 0x1e
+ .word jmp_32-mptramp_start+2
+ .byte 0x66, 0x09, 0x1e
+ .word jmp_64-mptramp_start+1
+
+ /*
+ * Load the descriptor table pointer. We'll need it when running
+ * in 16 bit protected mode.
+ */
+ .byte 0x0f, 0x01, 0x16
+ .word lgdt_desc-mptramp_start
+
+ /* Enable protected mode */
+ .byte 0x66
+ movl $CR0_PE, %eax
+ mov %eax, %cr0
+#endif
/*
* Now execute a far jump to turn on protected mode. This
@@ -88,7 +126,7 @@ jmp_32:
.code32
protmode:
mov $bootdata-gdt, %eax
- mov %ax, %ds
+ mov %eax, %ds
/* Turn on the PAE, PSE and PGE bits for when paging is enabled */
mov %cr4, %eax
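
The mpboot.S changes appear to work around the clang integrated assembler of the
time, which could not assemble this .code16 real-mode startup path: the #else
branch re-expresses the same instructions as hand-encoded .byte/.word sequences.
Decoding the two recurring ones: .byte 0x66, 0x09, 0x1e followed by a .word is an
operand-size prefix plus an OR of %ebx into a memory operand addressed by a 16-bit
displacement, i.e. orl %ebx, <target>; and .byte 0x0f, 0x01, 0x16 plus a .word is
lgdt with a 16-bit displacement. The last hunk changes mov %ax, %ds to
mov %eax, %ds in the .code32 path; a move to a segment register uses only the low
16 bits either way, and the 32-bit register form avoids a redundant 0x66
operand-size prefix.

The patching itself ORs the relocation base (%ebx = %cs << 4) into operands that
initially hold offsets from mptramp_start, which is only sound if base and offset
never have overlapping set bits. Assuming the trampoline is copied to a
page-aligned address below 1 MB (as the AP boot trampoline is), the base has its
low 12 bits clear and every offset stays below the page size, so OR and ADD
coincide. A self-contained sketch of that invariant (patch_target is a
hypothetical helper, not code from the tree):

    #include <assert.h>
    #include <stdint.h>

    /* OR-as-ADD patching: valid only while the relocation base is
     * page-aligned and the offset stays within that page. */
    static uint32_t
    patch_target(uint32_t base, uint32_t offset)
    {
            assert((base & 0xfffu) == 0 && offset < 0x1000u);
            return (base | offset); /* identical to base + offset here */
    }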
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Tue Mar 4 19:49:41 2014 (r262747)
+++ head/sys/amd64/amd64/pmap.c Tue Mar 4 20:07:36 2014 (r262748)
@@ -146,6 +146,13 @@ __FBSDID("$FreeBSD$");
#endif
static __inline boolean_t
+pmap_type_guest(pmap_t pmap)
+{
+
+ return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
+}
+
+static __inline boolean_t
pmap_emulate_ad_bits(pmap_t pmap)
{
@@ -159,6 +166,7 @@ pmap_valid_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_V;
break;
case PT_EPT:
@@ -181,6 +189,7 @@ pmap_rw_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_RW;
break;
case PT_EPT:
@@ -205,6 +214,7 @@ pmap_global_bit(pmap_t pmap)
case PT_X86:
mask = X86_PG_G;
break;
+ case PT_RVI:
case PT_EPT:
mask = 0;
break;
@@ -222,6 +232,7 @@ pmap_accessed_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_A;
break;
case PT_EPT:
@@ -244,6 +255,7 @@ pmap_modified_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_M;
break;
case PT_EPT:
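
Taken together, the five mask hunks above say that AMD RVI reuses the native x86
PTE layout: PT_RVI shares every PT_X86 mask except the global bit, which the
nested tables leave unused.

    bit             PT_X86          PT_RVI (this change)
    valid           X86_PG_V        X86_PG_V
    read/write      X86_PG_RW       X86_PG_RW
    global          X86_PG_G        0 (unused)
    accessed        X86_PG_A        X86_PG_A
    modified        X86_PG_M        X86_PG_M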
@@ -1102,6 +1114,9 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t en
if ((entry & x86_pat_bits) != 0)
entry ^= x86_pat_bits;
break;
+ case PT_RVI:
+ /* XXX: PAT support. */
+ break;
case PT_EPT:
/*
* Nothing to do - the memory attributes are represented
@@ -1145,6 +1160,11 @@ pmap_cache_bits(pmap_t pmap, int mode, b
cache_bits |= PG_NC_PWT;
break;
+ case PT_RVI:
+ /* XXX: PAT support. */
+ cache_bits = 0;
+ break;
+
case PT_EPT:
cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
break;
@@ -1165,6 +1185,10 @@ pmap_cache_mask(pmap_t pmap, boolean_t i
case PT_X86:
mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
break;
+ case PT_RVI:
+ /* XXX: PAT support. */
+ mask = 0;
+ break;
case PT_EPT:
mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
break;
@@ -1189,6 +1213,7 @@ pmap_update_pde_store(pmap_t pmap, pd_en
switch (pmap->pm_type) {
case PT_X86:
break;
+ case PT_RVI:
case PT_EPT:
/*
* XXX
@@ -1224,7 +1249,7 @@ pmap_update_pde_invalidate(pmap_t pmap,
{
pt_entry_t PG_G;
- if (pmap->pm_type == PT_EPT)
+ if (pmap_type_guest(pmap))
return;
KASSERT(pmap->pm_type == PT_X86,
@@ -1338,7 +1363,7 @@ pmap_invalidate_page(pmap_t pmap, vm_off
cpuset_t other_cpus;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1416,7 +1441,7 @@ pmap_invalidate_range(pmap_t pmap, vm_of
vm_offset_t addr;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1475,7 +1500,7 @@ pmap_invalidate_all(pmap_t pmap)
uint64_t cr3;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1595,7 +1620,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- if (pmap == kernel_pmap || pmap->pm_type == PT_EPT)
+ if (pmap == kernel_pmap || pmap_type_guest(pmap))
active = all_cpus;
else {
active = pmap->pm_active;
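
The invalidation hunks swap the open-coded pmap->pm_type == PT_EPT tests for
pmap_type_guest(), so PT_RVI pmaps now take the same early-out into
pmap_invalidate_ept() that EPT pmaps already did. The early-out exists because
guest translations are not visible to the host MMU: invlpg/invltlb on a host CPU
would not touch the nested TLB, so guest pmaps must be invalidated through the
hypervisor instead (despite its name, pmap_invalidate_ept() is now the shared
guest path). In the same spirit, pmap_update_pde() treats a guest pmap like the
kernel pmap and targets all CPUs, since any host CPU may currently be running a
vcpu of that guest.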
@@ -1633,6 +1658,7 @@ pmap_invalidate_page(pmap_t pmap, vm_off
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1652,6 +1678,7 @@ pmap_invalidate_range(pmap_t pmap, vm_of
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1669,6 +1696,7 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
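
The !SMP variants show the deferred mechanism directly: a guest pmap flushes
nothing on the spot and just bumps pm_eptgen. A hedged sketch of the consumer
side that this generation counter implies (the struct and function are
illustrative, not bhyve's actual code):

    /* Each vcpu remembers the last generation it flushed against and
     * re-flushes the nested TLB on VM entry whenever it has moved. */
    struct vcpu_sketch {
            long    eptgen_seen;    /* hypothetical per-vcpu cache */
    };

    static void
    vcpu_vmentry_sketch(struct vcpu_sketch *vc, volatile long *pm_eptgen)
    {
            long gen = *pm_eptgen;

            if (vc->eptgen_seen != gen) {
                    /* flush the nested TLB here: INVEPT on Intel,
                     * an ASID/TLB-control bump on AMD */
                    vc->eptgen_seen = gen;
            }
    }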