svn commit: r298740 - head/sys/arm/arm
Michal Meloun
mmel at FreeBSD.org
Thu Apr 28 12:05:09 UTC 2016
Author: mmel
Date: Thu Apr 28 12:05:07 2016
New Revision: 298740
URL: https://svnweb.freebsd.org/changeset/base/298740
Log:
ARM: Use kernel pmap as intermediate mapping in context switch.
On ARM, we can directly switch between translation tables only when
the size of the mapping for any given virtual address is the same in
the old and new translation tables. The load of new TTB and subsequent
TLB flush is not atomic operation. So speculative page table walk can
load TLB entry from new mapping while rest of TLB entries are still the
old ones. In worst case, this can lead to situation when TLB cache can
contain multiple matching TLB entries. One (from old mapping) L2 entry
for VA + 4k and one (from new mapping) L1 entry for VA.
Thus, we must switch to the kernel pmap translation table as an intermediate
mapping because all sizes of these (old pmap and kernel pmap) mappings
are the same (or unmapped). The same is true for the switch from the kernel
pmap translation table to the new pmap's one.
Modified:
head/sys/arm/arm/swtch-v6.S
Modified: head/sys/arm/arm/swtch-v6.S
==============================================================================
--- head/sys/arm/arm/swtch-v6.S Thu Apr 28 12:04:12 2016 (r298739)
+++ head/sys/arm/arm/swtch-v6.S Thu Apr 28 12:05:07 2016 (r298740)
@@ -114,25 +114,37 @@ __FBSDID("$FreeBSD$");
.Lblocked_lock:
.word _C_LABEL(blocked_lock)
-ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */
+ENTRY(cpu_context_switch)
DSB
- mcr CP15_TTBR0(r0) /* set the new TTB */
+ /*
+ * We can directly switch between translation tables only when the
+ * size of the mapping for any given virtual address is the same
+ * in the old and new translation tables.
+ * Thus, we must switch to kernel pmap translation table as
+ * intermediate mapping because all sizes of these mappings are same
+ * (or unmapped). The same is true for switch from kernel pmap
+ * translation table to new pmap one.
+ */
+ mov r2, #(CPU_ASID_KERNEL)
+ ldr r1, =(_C_LABEL(pmap_kern_ttb))
+ ldr r1, [r1]
+ mcr CP15_TTBR0(r1) /* switch to kernel TTB */
+ ISB
+ mcr CP15_TLBIASID(r2) /* flush not global TLBs */
+ DSB
+ mcr CP15_TTBR0(r0) /* switch to new TTB */
ISB
- mov r0, #(CPU_ASID_KERNEL)
- mcr CP15_TLBIASID(r0) /* flush not global TLBs */
+ /*
+ * We must flush not global TLBs again because PT2MAP mapping
+ * is different.
+ */
+ mcr CP15_TLBIASID(r2) /* flush not global TLBs */
/*
* Flush entire Branch Target Cache because of the branch predictor
* is not architecturally invisible. See ARM Architecture Reference
* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
* predictors and Requirements for branch predictor maintenance
* operations sections.
- *
- * QQQ: The predictor is virtually addressed and holds virtual target
- * addresses. Therefore, if mapping is changed, the predictor cache
- * must be flushed.The flush is part of entire i-cache invalidation
- * what is always called when code mapping is changed. So herein,
- * it's the only place where standalone predictor flush must be
- * executed in kernel (except self modifying code case).
*/
mcr CP15_BPIALL /* flush entire Branch Target Cache */
DSB
More information about the svn-src-head
mailing list