svn commit: r298468 - in user/ngie/bsnmp_cleanup: contrib/bsnmp/snmpd sys/arm/arm sys/arm/include sys/contrib/rdma/krping sys/ofed/drivers/infiniband/core sys/ofed/drivers/infiniband/ulp/ipoib sys/...
Author: ngie
Date: Fri Apr 22 09:46:22 2016
New Revision: 298468
URL: https://svnweb.freebsd.org/changeset/base/298468
Log:
MFhead at r298467
Modified:
user/ngie/bsnmp_cleanup/contrib/bsnmp/snmpd/main.c
user/ngie/bsnmp_cleanup/sys/arm/arm/pmap-v6.c
user/ngie/bsnmp_cleanup/sys/arm/include/cpu-v6.h
user/ngie/bsnmp_cleanup/sys/arm/include/pmap_var.h
user/ngie/bsnmp_cleanup/sys/contrib/rdma/krping/krping.c
user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/core/addr.c
user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
user/ngie/bsnmp_cleanup/sys/ufs/ufs/ufs_extattr.c
user/ngie/bsnmp_cleanup/usr.bin/xlint/lint1/decl.c
user/ngie/bsnmp_cleanup/usr.sbin/bhyve/fwctl.c
user/ngie/bsnmp_cleanup/usr.sbin/bhyve/pci_ahci.c
Directory Properties:
user/ngie/bsnmp_cleanup/ (props changed)
Modified: user/ngie/bsnmp_cleanup/contrib/bsnmp/snmpd/main.c
==============================================================================
--- user/ngie/bsnmp_cleanup/contrib/bsnmp/snmpd/main.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/contrib/bsnmp/snmpd/main.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -2813,7 +2813,7 @@ usm_new_user(uint8_t *eid, uint32_t elen
if ((uuser = (struct usm_user *)malloc(sizeof(*uuser))) == NULL)
return (NULL);
- memset(uuser, 0, sizeof(struct usm_user));
+ memset(uuser, 0, sizeof(*uuser));
strlcpy(uuser->suser.sec_name, uname, SNMP_ADM_STR32_SIZ);
memcpy(uuser->user_engine_id, eid, elen);
uuser->user_engine_len = elen;
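
The hunk above switches the memset() size from the struct tag to the pointee expression. A minimal userland sketch of why the sizeof(*ptr) idiom is preferred (the struct and names below are hypothetical):

#include <stdlib.h>
#include <string.h>

struct widget {
	int id;
	char name[32];
};

int
main(void)
{
	struct widget *wp;

	if ((wp = malloc(sizeof(*wp))) == NULL)
		return (1);
	/* sizeof(*wp) tracks wp's type even if the struct is renamed. */
	memset(wp, 0, sizeof(*wp));
	free(wp);
	return (0);
}

The allocation on the line above the change already uses sizeof(*uuser), so the memset now cannot fall out of sync with it.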
Modified: user/ngie/bsnmp_cleanup/sys/arm/arm/pmap-v6.c
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/arm/arm/pmap-v6.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/arm/arm/pmap-v6.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -1531,6 +1531,14 @@ static u_long pmap_pte1_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
&pmap_pte1_promotions, 0, "1MB page promotions");
+static u_long pmap_pte1_kern_demotions;
+SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
+ &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");
+
+static u_long pmap_pte1_kern_promotions;
+SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
+ &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");
+
static __inline ttb_entry_t
pmap_ttb_get(pmap_t pmap)
{
@@ -3198,6 +3206,166 @@ pmap_pv_insert_pte1(pmap_t pmap, vm_offs
return (FALSE);
}
+static inline void
+pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1)
+{
+
+ /* Kill all the small mappings or the big one only. */
+ if (pte1_is_section(npte1))
+ pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE);
+ else
+ pmap_tlb_flush(pmap, pte1_trunc(va));
+}
+
+/*
+ * Update kernel pte1 on all pmaps.
+ *
+ * The following function is called only on one cpu with interrupts disabled.
+ * In the SMP case, smp_rendezvous_cpus() is used to stop the other cpus, so
+ * that nobody can invoke an explicit hardware table walk while pte1 is being
+ * updated. Unsolicited hardware table walks can still happen, triggered by
+ * speculative data or instruction prefetch, or even by a speculative hardware
+ * table walk.
+ *
+ * The break-before-make approach should be implemented here. However, that is
+ * not so easy for kernel mappings, as the kernel would end up unmapping the
+ * very translations it is running on.
+ */
+static void
+pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1)
+{
+ pmap_t pmap;
+ pt1_entry_t *pte1p;
+
+ /*
+ * Get current pmap. Interrupts should be disabled here
+ * so PCPU_GET() is done atomically.
+ */
+ pmap = PCPU_GET(curpmap);
+ if (pmap == NULL)
+ pmap = kernel_pmap;
+
+ /*
+ * (1) Change pte1 on current pmap.
+ * (2) Flush all obsolete TLB entries on current CPU.
+ * (3) Change pte1 on all pmaps.
+ * (4) Flush all obsolete TLB entries on all CPUs in SMP case.
+ */
+
+ pte1p = pmap_pte1(pmap, va);
+ pte1_store(pte1p, npte1);
+
+ /* Kill all the small mappings or the big one only. */
+ if (pte1_is_section(npte1)) {
+ pmap_pte1_kern_promotions++;
+ tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE);
+ } else {
+ pmap_pte1_kern_demotions++;
+ tlb_flush_local(pte1_trunc(va));
+ }
+
+ /*
+ * In the SMP case, this function is called when all cpus are at an
+ * smp rendezvous, so there is no need to take 'allpmaps_lock' here.
+ * In the UP case, the function is called with this lock held.
+ */
+ LIST_FOREACH(pmap, &allpmaps, pm_list) {
+ pte1p = pmap_pte1(pmap, va);
+ pte1_store(pte1p, npte1);
+ }
+
+#ifdef SMP
+ /* Kill all the small mappings or the big one only. */
+ if (pte1_is_section(npte1))
+ tlb_flush_range(pte1_trunc(va), PTE1_SIZE);
+ else
+ tlb_flush(pte1_trunc(va));
+#endif
+}
+
+#ifdef SMP
+struct pte1_action {
+ vm_offset_t va;
+ pt1_entry_t npte1;
+ u_int update; /* CPU that updates the PTE1 */
+};
+
+static void
+pmap_update_pte1_action(void *arg)
+{
+ struct pte1_action *act = arg;
+
+ if (act->update == PCPU_GET(cpuid))
+ pmap_update_pte1_kernel(act->va, act->npte1);
+}
+
+/*
+ * Change pte1 on current pmap.
+ * Note that kernel pte1 must be changed on all pmaps.
+ *
+ * Per the ARM ARM, the behaviour is UNPREDICTABLE when two or more
+ * TLB entries map the same VA. This is a problem when either a
+ * promotion or a demotion is being done. In general, the pte1 update
+ * and the corresponding TLB flush must be done atomically.
+ */
+static void
+pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
+ pt1_entry_t npte1)
+{
+
+ if (pmap == kernel_pmap) {
+ struct pte1_action act;
+
+ sched_pin();
+ act.va = va;
+ act.npte1 = npte1;
+ act.update = PCPU_GET(cpuid);
+ smp_rendezvous_cpus(all_cpus, smp_no_rendevous_barrier,
+ pmap_update_pte1_action, NULL, &act);
+ sched_unpin();
+ } else {
+ register_t cspr;
+
+ /*
+ * Use the break-before-make approach for changing userland
+ * mappings. It can cause L1 translation aborts on other
+ * cores in the SMP case, so special treatment is implemented
+ * in pmap_fault(). Interrupts are disabled here so the
+ * sequence runs as quickly as possible and uninterrupted.
+ */
+ cspr = disable_interrupts(PSR_I | PSR_F);
+ pte1_clear(pte1p);
+ pmap_tlb_flush_pte1(pmap, va, npte1);
+ pte1_store(pte1p, npte1);
+ restore_interrupts(cspr);
+ }
+}
+#else
+static void
+pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
+ pt1_entry_t npte1)
+{
+
+ if (pmap == kernel_pmap) {
+ mtx_lock_spin(&allpmaps_lock);
+ pmap_update_pte1_kernel(va, npte1);
+ mtx_unlock_spin(&allpmaps_lock);
+ } else {
+ register_t cspr;
+
+ /*
+ * Use the break-before-make approach for changing userland
+ * mappings. It is absolutely safe in the UP case when
+ * interrupts are disabled.
+ */
+ cspr = disable_interrupts(PSR_I | PSR_F);
+ pte1_clear(pte1p);
+ pmap_tlb_flush_pte1(pmap, va, npte1);
+ pte1_store(pte1p, npte1);
+ restore_interrupts(cspr);
+ }
+}
+#endif
+
/*
* Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are
* within a single page table page (PT2) to a single 1MB page mapping.
@@ -3230,7 +3398,6 @@ pmap_promote_pte1(pmap_t pmap, pt1_entry
* within a 1MB page.
*/
fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va));
-setpte1:
fpte2 = pte2_load(fpte2p);
if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) !=
(PTE2_A | PTE2_V)) {
@@ -3249,16 +3416,9 @@ setpte1:
/*
* When page is not modified, PTE2_RO can be set without
* a TLB invalidation.
- *
- * Note: When modified bit is being set, then in hardware case,
- * the TLB entry is re-read (updated) from PT2, and in
- * software case (abort), the PTE2 is read from PT2 and
- * TLB flushed if changed. The following cmpset() solves
- * any race with setting this bit in both cases.
*/
- if (!pte2_cmpset(fpte2p, fpte2, fpte2 | PTE2_RO))
- goto setpte1;
fpte2 |= PTE2_RO;
+ pte2_store(fpte2p, fpte2);
}
/*
@@ -3269,7 +3429,6 @@ setpte1:
fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V));
fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */
for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) {
-setpte2:
pte2 = pte2_load(pte2p);
if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) {
pmap_pte1_p_failures++;
@@ -3282,9 +3441,8 @@ setpte2:
* When page is not modified, PTE2_RO can be set
* without a TLB invalidation. See note above.
*/
- if (!pte2_cmpset(pte2p, pte2, pte2 | PTE2_RO))
- goto setpte2;
pte2 |= PTE2_RO;
+ pte2_store(pte2p, pte2);
pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET &
PTE2_FRAME);
CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p",
@@ -3313,8 +3471,8 @@ setpte2:
("%s: PT2 page's pindex is wrong", __func__));
/*
- * Get pte1 from pte2 format.
- */
+ * Get pte1 from pte2 format.
+ */
npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V;
/*
@@ -3324,19 +3482,9 @@ setpte2:
pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1));
/*
- * Map the section.
- */
- if (pmap == kernel_pmap)
- pmap_kenter_pte1(va, npte1);
- else
- pte1_store(pte1p, npte1);
- /*
- * Flush old small mappings. We call single pmap_tlb_flush() in
- * pmap_demote_pte1() and pmap_remove_pte1(), so we must be sure that
- * no small mappings survive. We assume that given pmap is current and
- * don't play game with PTE2_NG.
+ * Promote the mappings.
*/
- pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE);
+ pmap_change_pte1(pmap, pte1p, va, npte1);
pmap_pte1_promotions++;
CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p",
@@ -3618,17 +3766,7 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_
* another processor changing the setting of PTE1_A and/or PTE1_NM
* between the read above and the store below.
*/
- if (pmap == kernel_pmap)
- pmap_kenter_pte1(va, npte1);
- else
- pte1_store(pte1p, npte1);
-
- /*
- * Flush old big mapping. The mapping should occupy one and only
- * TLB entry. So, pmap_tlb_flush() called with aligned address
- * should be sufficient.
- */
- pmap_tlb_flush(pmap, pte1_trunc(va));
+ pmap_change_pte1(pmap, pte1p, va, npte1);
/*
* Demote the pv entry. This depends on the earlier demotion
@@ -4655,7 +4793,7 @@ pmap_protect_pte1(pmap_t pmap, pt1_entry
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT((sva & PTE1_OFFSET) == 0,
("%s: sva is not 1mpage aligned", __func__));
-retry:
+
opte1 = npte1 = pte1_load(pte1p);
if (pte1_is_managed(opte1)) {
eva = sva + PTE1_SIZE;
@@ -4676,8 +4814,7 @@ retry:
*/
if (npte1 != opte1) {
- if (!pte1_cmpset(pte1p, opte1, npte1))
- goto retry;
+ pte1_store(pte1p, npte1);
pmap_tlb_flush(pmap, sva);
}
}
@@ -4779,7 +4916,7 @@ resume:
for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
sva += PAGE_SIZE) {
vm_page_t m;
-retry:
+
opte2 = npte2 = pte2_load(pte2p);
if (!pte2_is_valid(opte2))
continue;
@@ -4803,9 +4940,7 @@ retry:
*/
if (npte2 != opte2) {
-
- if (!pte2_cmpset(pte2p, opte2, npte2))
- goto retry;
+ pte2_store(pte2p, npte2);
pmap_tlb_flush(pmap, sva);
}
}
@@ -5287,12 +5422,9 @@ small_mappings:
KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
" a section in page %p's pv list", __func__, m));
pte2p = pmap_pte2_quick(pmap, pv->pv_va);
-retry:
opte2 = pte2_load(pte2p);
if (!(opte2 & PTE2_RO)) {
- if (!pte2_cmpset(pte2p, opte2,
- opte2 | (PTE2_RO | PTE2_NM)))
- goto retry;
+ pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM);
if (pte2_is_dirty(opte2))
vm_page_dirty(m);
pmap_tlb_flush(pmap, pv->pv_va);
@@ -6200,33 +6332,61 @@ pmap_fault(pmap_t pmap, vm_offset_t far,
}
/*
+ * A pmap lock is used below for handling access and R/W emulation
+ * aborts. They were handled by atomic operations before, so some
+ * analysis of the new situation is needed to answer the question:
+ * Is it safe to use the lock even for these aborts?
+ *
+ * In general, two cases can happen:
+ *
+ * (1) Aborts while the pmap lock is already held - this should not
+ * happen as the pmap lock is not recursive. However, under the pmap
+ * lock only internal kernel data should be accessed, and such data
+ * should be mapped with the A bit set and the NM bit cleared. If a
+ * double abort happens, the mapping of the data that caused it must
+ * be fixed. Further, all new mappings are always made with the A bit
+ * set, and the bit can be cleared only on managed mappings.
+ *
+ * (2) Aborts while other locks are held - this can already happen.
+ * However, it makes no difference here whether it is an access or
+ * R/W emulation abort, or some other abort.
+ */
+
+ PMAP_LOCK(pmap);
+#ifdef SMP
+ /*
+ * Special treatment is needed due to the break-before-make approach
+ * used when pte1 is updated for a userland mapping during section
+ * promotion or demotion. If not caught here, pmap_enter() can find a
+ * section mapping on the faulting address. That is not allowed.
+ */
+ if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) {
+ PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
+ }
+#endif
+ /*
* Access bits for page and section. Note that the entry
* is not in TLB yet, so TLB flush is not necessary.
*
* QQQ: This is hardware emulation, we do not call userret()
* for aborts from user mode.
- * We do not lock PMAP, so cmpset() is a need. Hopefully,
- * no one removes the mapping when we are here.
*/
if (idx == FAULT_ACCESS_L2) {
pte2p = pt2map_entry(far);
-pte2_seta:
pte2 = pte2_load(pte2p);
if (pte2_is_valid(pte2)) {
- if (!pte2_cmpset(pte2p, pte2, pte2 | PTE2_A)) {
- goto pte2_seta;
- }
+ pte2_store(pte2p, pte2 | PTE2_A);
+ PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
}
if (idx == FAULT_ACCESS_L1) {
pte1p = pmap_pte1(pmap, far);
-pte1_seta:
pte1 = pte1_load(pte1p);
if (pte1_is_section(pte1)) {
- if (!pte1_cmpset(pte1p, pte1, pte1 | PTE1_A)) {
- goto pte1_seta;
- }
+ pte1_store(pte1p, pte1 | PTE1_A);
+ PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
}
@@ -6238,32 +6398,26 @@ pte1_seta:
*
* QQQ: This is hardware emulation, we do not call userret()
* for aborts from user mode.
- * We do not lock PMAP, so cmpset() is a need. Hopefully,
- * no one removes the mapping when we are here.
*/
if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) {
pte2p = pt2map_entry(far);
-pte2_setrw:
pte2 = pte2_load(pte2p);
if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) &&
(pte2 & PTE2_NM)) {
- if (!pte2_cmpset(pte2p, pte2, pte2 & ~PTE2_NM)) {
- goto pte2_setrw;
- }
+ pte2_store(pte2p, pte2 & ~PTE2_NM);
tlb_flush(trunc_page(far));
+ PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
}
if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) {
pte1p = pmap_pte1(pmap, far);
-pte1_setrw:
pte1 = pte1_load(pte1p);
if (pte1_is_section(pte1) && !(pte1 & PTE1_RO) &&
(pte1 & PTE1_NM)) {
- if (!pte1_cmpset(pte1p, pte1, pte1 & ~PTE1_NM)) {
- goto pte1_setrw;
- }
+ pte1_store(pte1p, pte1 & ~PTE1_NM);
tlb_flush(pte1_trunc(far));
+ PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
}
@@ -6278,9 +6432,6 @@ pte1_setrw:
/*
* Read an entry in PT2TAB associated with both pmap and far.
* It's safe because PT2TAB is always mapped.
- *
- * QQQ: We do not lock PMAP, so false positives could happen if
- * the mapping is removed concurrently.
*/
pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far));
if (pte2_is_valid(pte2)) {
@@ -6303,6 +6454,7 @@ pte1_setrw:
}
}
#endif
+ PMAP_UNLOCK(pmap);
return (KERN_FAILURE);
}
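
The kernel-pmap path above hinges on smp_rendezvous_cpus(): every CPU parks in the action function while exactly one performs the update, so no other core can walk the page tables mid-change. A distilled sketch of that single-writer pattern, assuming kernel context (the shared word and all names below are placeholders standing in for the pte1 update):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>

struct update_arg {
	volatile uint32_t *wordp;	/* shared word to rewrite */
	uint32_t nval;			/* new value */
	u_int updater;			/* cpuid of the one writer */
};

static void
update_action(void *arg)
{
	struct update_arg *ua = arg;

	/* All CPUs arrive here; only the chosen one performs the write. */
	if (ua->updater == PCPU_GET(cpuid))
		*ua->wordp = ua->nval;
}

static void
update_word_everywhere(volatile uint32_t *wordp, uint32_t nval)
{
	struct update_arg ua;

	sched_pin();			/* stay on the CPU we name below */
	ua.wordp = wordp;
	ua.nval = nval;
	ua.updater = PCPU_GET(cpuid);
	smp_rendezvous_cpus(all_cpus, smp_no_rendevous_barrier,
	    update_action, NULL, &ua);
	sched_unpin();
}

As in pmap_change_pte1() above, sched_pin() guarantees the cpuid recorded in the argument is still the caller's CPU when the rendezvous fires.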
Modified: user/ngie/bsnmp_cleanup/sys/arm/include/cpu-v6.h
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/arm/include/cpu-v6.h Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/arm/include/cpu-v6.h Fri Apr 22 09:46:22 2016 (r298468)
@@ -181,6 +181,8 @@ _RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
+_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
+_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
@@ -581,6 +583,52 @@ cp15_ttbr_set(uint32_t reg)
isb();
tlb_flush_all_ng_local();
}
-#endif /* _KERNEL */
+
+/*
+ * Functions for address checking:
+ *
+ * cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
+ * cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
+ * cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
+ * cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
+ *
+ * They must be called with interrupts disabled to get a consistent result.
+ */
+static __inline int
+cp15_ats1cpr_check(vm_offset_t addr)
+{
+
+ cp15_ats1cpr_set(addr);
+ isb();
+ return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+
+static __inline int
+cp15_ats1cpw_check(vm_offset_t addr)
+{
+
+ cp15_ats1cpw_set(addr);
+ isb();
+ return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+
+static __inline int
+cp15_ats1cur_check(vm_offset_t addr)
+{
+
+ cp15_ats1cur_set(addr);
+ isb();
+ return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+
+static __inline int
+cp15_ats1cuw_check(vm_offset_t addr)
+{
+
+ cp15_ats1cuw_set(addr);
+ isb();
+ return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+#endif /* !__ARM_ARCH < 6 */
#endif /* !MACHINE_CPU_V6_H */
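
As the new comment says, the ATS1* checks are only meaningful with interrupts off, since a fault taken in between could overwrite PAR. A hypothetical caller, in the same kernel context this header assumes, reusing disable_interrupts()/restore_interrupts() as the pmap code above does:

static int
user_va_is_readable(vm_offset_t va)
{
	register_t s;
	int error;

	/* Keep the ATS1 translation and the PAR read-back atomic. */
	s = disable_interrupts(PSR_I | PSR_F);
	error = cp15_ats1cur_check(va);		/* 0 on success, EFAULT */
	restore_interrupts(s);

	return (error);
}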
Modified: user/ngie/bsnmp_cleanup/sys/arm/include/pmap_var.h
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/arm/include/pmap_var.h Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/arm/include/pmap_var.h Fri Apr 22 09:46:22 2016 (r298468)
@@ -143,7 +143,8 @@ static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{
- atomic_store_rel_int(pte1p, pte1);
+ dmb();
+ *pte1p = pte1;
pte1_sync(pte1p);
}
@@ -158,22 +159,11 @@ static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{
- atomic_clear_int(pte1p, bit);
+ *pte1p &= ~bit;
pte1_sync(pte1p);
}
static __inline boolean_t
-pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
-{
- boolean_t ret;
-
- ret = atomic_cmpset_int(pte1p, opte1, npte1);
- if (ret) pte1_sync(pte1p);
-
- return (ret);
-}
-
-static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{
@@ -231,7 +221,8 @@ pte1_load_clear(pt1_entry_t *pte1p)
{
pt1_entry_t opte1;
- opte1 = atomic_readandclear_int(pte1p);
+ opte1 = *pte1p;
+ *pte1p = 0;
pte1_sync(pte1p);
return (opte1);
}
@@ -240,7 +231,7 @@ static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{
- atomic_set_int(pte1p, bit);
+ *pte1p |= bit;
pte1_sync(pte1p);
}
@@ -292,7 +283,8 @@ static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
- atomic_store_rel_int(pte2p, pte2);
+ dmb();
+ *pte2p = pte2;
pte2_sync(pte2p);
}
@@ -307,22 +299,11 @@ static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{
- atomic_clear_int(pte2p, bit);
+ *pte2p &= ~bit;
pte2_sync(pte2p);
}
static __inline boolean_t
-pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
-{
- boolean_t ret;
-
- ret = atomic_cmpset_int(pte2p, opte2, npte2);
- if (ret) pte2_sync(pte2p);
-
- return (ret);
-}
-
-static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{
@@ -364,7 +345,8 @@ pte2_load_clear(pt2_entry_t *pte2p)
{
pt2_entry_t opte2;
- opte2 = atomic_readandclear_int(pte2p);
+ opte2 = *pte2p;
+ *pte2p = 0;
pte2_sync(pte2p);
return (opte2);
}
@@ -373,7 +355,7 @@ static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{
- atomic_set_int(pte2p, bit);
+ *pte2p |= bit;
pte2_sync(pte2p);
}
@@ -386,9 +368,9 @@ pte2_set_wired(pt2_entry_t *pte2p, boole
* so pte2_sync() is not needed.
*/
if (wired)
- atomic_set_int(pte2p, PTE2_W);
+ *pte2p |= PTE2_W;
else
- atomic_clear_int(pte2p, PTE2_W);
+ *pte2p &= ~PTE2_W;
}
static __inline vm_paddr_t
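
The pmap_var.h changes replace atomic ops with plain stores bracketed by dmb() and pte*_sync(), consistent with the pmap_fault() changes above that now take the pmap lock: mutual exclusion comes from elsewhere, and only the ordering barrier is still needed. A userland C11 analogue of that store ordering (an illustration only, not the kernel API):

#include <stdatomic.h>
#include <stdint.h>

static inline void
entry_store(volatile uint32_t *p, uint32_t v)
{
	/* Like dmb(): make earlier table writes visible first... */
	atomic_thread_fence(memory_order_release);
	*p = v;		/* ...then publish the entry itself. */
}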
Modified: user/ngie/bsnmp_cleanup/sys/contrib/rdma/krping/krping.c
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/contrib/rdma/krping/krping.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/contrib/rdma/krping/krping.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -890,7 +890,7 @@ static u32 krping_rdma_rkey(struct krpin
post_inv,
cb->fastreg_wr.wr.fast_reg.rkey,
cb->fastreg_wr.wr.fast_reg.page_shift,
- cb->fastreg_wr.wr.fast_reg.length,
+ (unsigned)cb->fastreg_wr.wr.fast_reg.length,
(uintmax_t)cb->fastreg_wr.wr.fast_reg.iova_start,
cb->fastreg_wr.wr.fast_reg.page_list_len);
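
The krping change quiets a printf format mismatch by casting the field to the type the conversion specification expects. The same idea in miniature (the field's exact width in the OFED headers is an assumption here):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t length = 4096;

	printf("len %u\n", (unsigned)length);	/* cast matches %u */
	printf("len %ju\n", (uintmax_t)length);	/* lossless alternative */
	return (0);
}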
Modified: user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/core/addr.c
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/core/addr.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/core/addr.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -404,6 +404,8 @@ mcast:
break;
#endif
default:
+ KASSERT(0, ("rdma_addr_resolve: Unreachable"));
+ error = EINVAL;
break;
}
RTFREE(rte);
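
The new default case follows a common kernel pattern: panic under INVARIANTS so the bug is caught in development, but still set an error so production kernels fail gracefully. A minimal sketch of the pattern (function and variable names are hypothetical):

#include <sys/param.h>
#include <sys/systm.h>	/* KASSERT */
#include <sys/socket.h>	/* AF_INET */

static int
resolve_family(int family)
{
	int error = 0;

	switch (family) {
	case AF_INET:
		/* ... handle the known case ... */
		break;
	default:
		/* Loud under INVARIANTS, recoverable otherwise. */
		KASSERT(0, ("resolve_family: unreachable family %d", family));
		error = EINVAL;
		break;
	}
	return (error);
}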
Modified: user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -481,6 +481,8 @@ void ipoib_cm_handle_rx_wc(struct ipoib_
int has_srq;
u_short proto;
+ CURVNET_SET_QUIET(dev->if_vnet);
+
ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -496,7 +498,7 @@ void ipoib_cm_handle_rx_wc(struct ipoib_
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
wr_id, ipoib_recvq_size);
- return;
+ goto done;
}
p = wc->qp->qp_context;
@@ -520,7 +522,7 @@ void ipoib_cm_handle_rx_wc(struct ipoib_
queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
spin_unlock(&priv->lock);
}
- return;
+ goto done;
}
}
@@ -579,6 +581,9 @@ repost:
"for buf %d\n", wr_id);
}
}
+done:
+ CURVNET_RESTORE();
+ return;
}
static inline int post_send(struct ipoib_dev_priv *priv,
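
The ipoib change brackets the whole receive-completion handler with CURVNET_SET_QUIET()/CURVNET_RESTORE() and funnels every early return through 'done' so the restore can never be skipped. The shape of the pattern, assuming a VNET-enabled kernel (the handler name is hypothetical):

#include <sys/param.h>
#include <sys/kernel.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

static void
rx_handler(struct ifnet *ifp)
{
	CURVNET_SET_QUIET(ifp->if_vnet);

	if (/* early-exit condition */ 0)
		goto done;	/* never a plain 'return' past the SET */

	/* ... work that may enter vnet-aware network code ... */
done:
	CURVNET_RESTORE();
}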
Modified: user/ngie/bsnmp_cleanup/sys/ufs/ufs/ufs_extattr.c
==============================================================================
--- user/ngie/bsnmp_cleanup/sys/ufs/ufs/ufs_extattr.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/sys/ufs/ufs/ufs_extattr.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -69,6 +69,8 @@ __FBSDID("$FreeBSD$");
#ifdef UFS_EXTATTR
+FEATURE(ufs_extattr, "ufs extended attribute support");
+
static MALLOC_DEFINE(M_UFS_EXTATTR, "ufs_extattr", "ufs extended attribute");
static int ufs_extattr_sync = 0;
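
FEATURE() publishes a kern.features.<name> sysctl node, so userland can now detect extended-attribute support at run time. A small probe using sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int v;
	size_t len = sizeof(v);

	if (sysctlbyname("kern.features.ufs_extattr", &v, &len,
	    NULL, 0) == 0)
		printf("ufs_extattr feature present (%d)\n", v);
	else
		printf("ufs_extattr feature not compiled in\n");
	return (0);
}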
Modified: user/ngie/bsnmp_cleanup/usr.bin/xlint/lint1/decl.c
==============================================================================
--- user/ngie/bsnmp_cleanup/usr.bin/xlint/lint1/decl.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/usr.bin/xlint/lint1/decl.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -1105,7 +1105,7 @@ align(int al, int len)
if (al > dcs->d_stralign)
dcs->d_stralign = al;
- no = (dcs->d_offset + (al - 1)) & ~(al - 1);
+ no = roundup2(dcs->d_offset, al);
if (len == 0 || dcs->d_offset + len > no)
dcs->d_offset = no;
}
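
roundup2() from <sys/param.h> is exactly the open-coded mask expression it replaces, and like it is only valid when the alignment is a power of two. A quick self-check:

#include <assert.h>

/* FreeBSD's definition; the alignment must be a power of two. */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	assert(roundup2(13, 8) == 16);	/* rounds up */
	assert(roundup2(16, 8) == 16);	/* already aligned */
	return (0);
}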
Modified: user/ngie/bsnmp_cleanup/usr.sbin/bhyve/fwctl.c
==============================================================================
--- user/ngie/bsnmp_cleanup/usr.sbin/bhyve/fwctl.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/usr.sbin/bhyve/fwctl.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -348,7 +348,7 @@ fwctl_request_data(uint32_t value)
/* Make sure remaining size is >= 0 */
rinfo.req_size -= sizeof(uint32_t);
- remlen = (rinfo.req_size > 0) ? rinfo.req_size: 0;
+ remlen = MAX(rinfo.req_size, 0);
(*rinfo.req_op->op_data)(value, remlen);
Modified: user/ngie/bsnmp_cleanup/usr.sbin/bhyve/pci_ahci.c
==============================================================================
--- user/ngie/bsnmp_cleanup/usr.sbin/bhyve/pci_ahci.c Fri Apr 22 09:44:49 2016 (r298467)
+++ user/ngie/bsnmp_cleanup/usr.sbin/bhyve/pci_ahci.c Fri Apr 22 09:46:22 2016 (r298468)
@@ -741,7 +741,7 @@ read_prdt(struct ahci_port *p, int slot,
dbcsz = (prdt->dbc & DBCMASK) + 1;
ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
- sublen = len < dbcsz ? len : dbcsz;
+ sublen = MIN(len, dbcsz);
memcpy(to, ptr, sublen);
len -= sublen;
to += sublen;
@@ -847,7 +847,7 @@ write_prdt(struct ahci_port *p, int slot
dbcsz = (prdt->dbc & DBCMASK) + 1;
ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
- sublen = len < dbcsz ? len : dbcsz;
+ sublen = MIN(len, dbcsz);
memcpy(ptr, from, sublen);
len -= sublen;
from += sublen;
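
MIN() and MAX() come from <sys/param.h> and behave exactly like the ternaries they replace here and in the fwctl.c hunk above; the macros just make the clamping intent explicit. For instance:

#include <sys/param.h>	/* MIN(), MAX() */
#include <assert.h>

int
main(void)
{
	int len = 100, dbcsz = 64;
	int req_size = -4;

	assert(MIN(len, dbcsz) == 64);	/* sublen clamp in read_prdt() */
	assert(MAX(req_size, 0) == 0);	/* remlen floor in fwctl.c */
	return (0);
}

Note that MIN()/MAX() evaluate their arguments twice, which is harmless for the plain variables used in these hunks.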