svn commit: r257768 - in stable/9: sys/conf sys/dev/e1000 sys/dev/ixgbe sys/dev/netmap sys/dev/re sys/net tools/tools/netmap
Luigi Rizzo
luigi at FreeBSD.org
Wed Nov 6 22:36:39 UTC 2013
Author: luigi
Date: Wed Nov 6 22:36:36 2013
New Revision: 257768
URL: http://svnweb.freebsd.org/changeset/base/257768
Log:
Merge from head: sync the netmap code with the one in HEAD
Added:
stable/9/sys/dev/netmap/netmap_mem2.h (contents, props changed)
stable/9/tools/tools/netmap/vale-ctl.c (contents, props changed)
Modified:
stable/9/sys/conf/files
stable/9/sys/dev/e1000/if_em.c
stable/9/sys/dev/e1000/if_igb.c
stable/9/sys/dev/e1000/if_lem.c
stable/9/sys/dev/ixgbe/ixgbe.c
stable/9/sys/dev/netmap/if_em_netmap.h
stable/9/sys/dev/netmap/if_igb_netmap.h
stable/9/sys/dev/netmap/if_lem_netmap.h
stable/9/sys/dev/netmap/if_re_netmap.h
stable/9/sys/dev/netmap/ixgbe_netmap.h
stable/9/sys/dev/netmap/netmap.c
stable/9/sys/dev/netmap/netmap_kern.h
stable/9/sys/dev/netmap/netmap_mem2.c
stable/9/sys/dev/re/if_re.c
stable/9/sys/net/netmap.h
stable/9/sys/net/netmap_user.h
stable/9/tools/tools/netmap/Makefile
stable/9/tools/tools/netmap/bridge.c
stable/9/tools/tools/netmap/nm_util.c
stable/9/tools/tools/netmap/pcap.c
stable/9/tools/tools/netmap/pkt-gen.c
Modified: stable/9/sys/conf/files
==============================================================================
--- stable/9/sys/conf/files Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/conf/files Wed Nov 6 22:36:36 2013 (r257768)
@@ -1601,6 +1601,7 @@ dev/my/if_my.c optional my
dev/ncv/ncr53c500.c optional ncv
dev/ncv/ncr53c500_pccard.c optional ncv pccard
dev/netmap/netmap.c optional netmap
+dev/netmap/netmap_mem2.c optional netmap
dev/nge/if_nge.c optional nge
dev/nxge/if_nxge.c optional nxge \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}"
Modified: stable/9/sys/dev/e1000/if_em.c
==============================================================================
--- stable/9/sys/dev/e1000/if_em.c Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/e1000/if_em.c Wed Nov 6 22:36:36 2013 (r257768)
@@ -3835,8 +3835,7 @@ em_txeof(struct tx_ring *txr)
EM_TX_LOCK_ASSERT(txr);
#ifdef DEV_NETMAP
- if (netmap_tx_irq(ifp, txr->me |
- (NETMAP_LOCKED_ENTER | NETMAP_LOCKED_EXIT)))
+ if (netmap_tx_irq(ifp, txr->me))
return;
#endif /* DEV_NETMAP */
@@ -4432,8 +4431,10 @@ em_rxeof(struct rx_ring *rxr, int count,
EM_RX_LOCK(rxr);
#ifdef DEV_NETMAP
- if (netmap_rx_irq(ifp, rxr->me | NETMAP_LOCKED_ENTER, &processed))
+ if (netmap_rx_irq(ifp, rxr->me, &processed)) {
+ EM_RX_UNLOCK(rxr);
return (FALSE);
+ }
#endif /* DEV_NETMAP */
for (i = rxr->next_to_check, processed = 0; count != 0;) {
Modified: stable/9/sys/dev/e1000/if_igb.c
==============================================================================
--- stable/9/sys/dev/e1000/if_igb.c Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/e1000/if_igb.c Wed Nov 6 22:36:36 2013 (r257768)
@@ -3910,8 +3910,7 @@ igb_txeof(struct tx_ring *txr)
IGB_TX_LOCK_ASSERT(txr);
#ifdef DEV_NETMAP
- if (netmap_tx_irq(ifp, txr->me |
- (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
+ if (netmap_tx_irq(ifp, txr->me ))
return (FALSE);
#endif /* DEV_NETMAP */
if (txr->tx_avail == adapter->num_tx_desc) {
@@ -4766,8 +4765,10 @@ igb_rxeof(struct igb_queue *que, int cou
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef DEV_NETMAP
- if (netmap_rx_irq(ifp, rxr->me | NETMAP_LOCKED_ENTER, &processed))
+ if (netmap_rx_irq(ifp, rxr->me, &processed)) {
+ IGB_RX_UNLOCK(rxr);
return (FALSE);
+ }
#endif /* DEV_NETMAP */
/* Main clean loop */
Modified: stable/9/sys/dev/e1000/if_lem.c
==============================================================================
--- stable/9/sys/dev/e1000/if_lem.c Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/e1000/if_lem.c Wed Nov 6 22:36:36 2013 (r257768)
@@ -2985,7 +2985,7 @@ lem_txeof(struct adapter *adapter)
EM_TX_LOCK_ASSERT(adapter);
#ifdef DEV_NETMAP
- if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
+ if (netmap_tx_irq(ifp, 0))
return;
#endif /* DEV_NETMAP */
if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
@@ -3454,8 +3454,10 @@ lem_rxeof(struct adapter *adapter, int c
BUS_DMASYNC_POSTREAD);
#ifdef DEV_NETMAP
- if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
+ if (netmap_rx_irq(ifp, 0, &rx_sent)) {
+ EM_RX_UNLOCK(adapter);
return (FALSE);
+ }
#endif /* DEV_NETMAP */
if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
Modified: stable/9/sys/dev/ixgbe/ixgbe.c
==============================================================================
--- stable/9/sys/dev/ixgbe/ixgbe.c Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/ixgbe/ixgbe.c Wed Nov 6 22:36:36 2013 (r257768)
@@ -3620,16 +3620,11 @@ ixgbe_txeof(struct tx_ring *txr)
* means the user thread should not be woken up);
* - the driver ignores tx interrupts unless netmap_mitigate=0
* or the slot has the DD bit set.
- *
- * When the driver has separate locks, we need to
- * release and re-acquire txlock to avoid deadlocks.
- * XXX see if we can find a better way.
*/
if (!netmap_mitigate ||
(kring->nr_kflags < kring->nkr_num_slots &&
txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
- netmap_tx_irq(ifp, txr->me |
- (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT));
+ netmap_tx_irq(ifp, txr->me);
}
return;
}
@@ -4421,8 +4416,10 @@ ixgbe_rxeof(struct ix_queue *que)
#ifdef DEV_NETMAP
/* Same as the txeof routine: wakeup clients on intr. */
- if (netmap_rx_irq(ifp, rxr->me | NETMAP_LOCKED_ENTER, &processed))
+ if (netmap_rx_irq(ifp, rxr->me, &processed)) {
+ IXGBE_RX_UNLOCK(rxr);
return (FALSE);
+ }
#endif /* DEV_NETMAP */
for (i = rxr->next_to_check; count != 0;) {
Modified: stable/9/sys/dev/netmap/if_em_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_em_netmap.h Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/if_em_netmap.h Wed Nov 6 22:36:36 2013 (r257768)
@@ -43,35 +43,6 @@ static void em_netmap_block_tasks(struct
static void em_netmap_unblock_tasks(struct adapter *);
-static void
-em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
-{
- struct adapter *adapter = ifp->if_softc;
-
- ASSERT(queueid < adapter->num_queues);
- switch (what) {
- case NETMAP_CORE_LOCK:
- EM_CORE_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- EM_CORE_UNLOCK(adapter);
- break;
- case NETMAP_TX_LOCK:
- EM_TX_LOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_TX_UNLOCK:
- EM_TX_UNLOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_RX_LOCK:
- EM_RX_LOCK(&adapter->rx_rings[queueid]);
- break;
- case NETMAP_RX_UNLOCK:
- EM_RX_UNLOCK(&adapter->rx_rings[queueid]);
- break;
- }
-}
-
-
// XXX do we need to block/unblock the tasks ?
static void
em_netmap_block_tasks(struct adapter *adapter)
@@ -137,7 +108,7 @@ em_netmap_reg(struct ifnet *ifp, int ono
ifp->if_capenable |= IFCAP_NETMAP;
na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
+ ifp->if_transmit = netmap_transmit;
em_init_locked(adapter);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
@@ -160,7 +131,7 @@ fail:
* Reconcile kernel and user view of the transmit ring.
*/
static int
-em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
@@ -176,8 +147,6 @@ em_netmap_txsync(struct ifnet *ifp, u_in
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- EM_TX_LOCK(txr);
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
@@ -202,8 +171,6 @@ em_netmap_txsync(struct ifnet *ifp, u_in
u_int len = slot->len;
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
- if (do_lock)
- EM_TX_UNLOCK(txr);
return netmap_ring_reinit(kring);
}
@@ -252,8 +219,6 @@ em_netmap_txsync(struct ifnet *ifp, u_in
/* update avail to what the kernel knows */
ring->avail = kring->nr_hwavail;
- if (do_lock)
- EM_TX_UNLOCK(txr);
return 0;
}
@@ -262,7 +227,7 @@ em_netmap_txsync(struct ifnet *ifp, u_in
* Reconcile kernel and user view of the receive ring.
*/
static int
-em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
@@ -270,16 +235,13 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
u_int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
u_int k = ring->cur, resvd = ring->reserved;
k = ring->cur;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- EM_RX_LOCK(rxr);
-
/* XXX check sync modes */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -334,8 +296,6 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
void *addr = PNMB(slot, &paddr);
if (addr == netmap_buffer_base) { /* bad buf */
- if (do_lock)
- EM_RX_UNLOCK(rxr);
return netmap_ring_reinit(kring);
}
@@ -364,8 +324,6 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
}
/* tell userspace that there are new packets */
ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- EM_RX_UNLOCK(rxr);
return 0;
}
@@ -378,12 +336,11 @@ em_netmap_attach(struct adapter *adapter
bzero(&na, sizeof(na));
na.ifp = adapter->ifp;
- na.separate_locks = 1;
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = adapter->num_tx_desc;
na.num_rx_desc = adapter->num_rx_desc;
na.nm_txsync = em_netmap_txsync;
na.nm_rxsync = em_netmap_rxsync;
- na.nm_lock = em_netmap_lock_wrapper;
na.nm_register = em_netmap_reg;
netmap_attach(&na, adapter->num_queues);
}
Modified: stable/9/sys/dev/netmap/if_igb_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_igb_netmap.h Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/if_igb_netmap.h Wed Nov 6 22:36:36 2013 (r257768)
@@ -39,38 +39,6 @@
/*
- * wrapper to export locks to the generic code
- */
-static void
-igb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
-{
- struct adapter *adapter = ifp->if_softc;
-
- ASSERT(queueid < adapter->num_queues);
- switch (what) {
- case NETMAP_CORE_LOCK:
- IGB_CORE_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- IGB_CORE_UNLOCK(adapter);
- break;
- case NETMAP_TX_LOCK:
- IGB_TX_LOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_TX_UNLOCK:
- IGB_TX_UNLOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_RX_LOCK:
- IGB_RX_LOCK(&adapter->rx_rings[queueid]);
- break;
- case NETMAP_RX_UNLOCK:
- IGB_RX_UNLOCK(&adapter->rx_rings[queueid]);
- break;
- }
-}
-
-
-/*
* register-unregister routine
*/
static int
@@ -92,7 +60,7 @@ igb_netmap_reg(struct ifnet *ifp, int on
ifp->if_capenable |= IFCAP_NETMAP;
na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
+ ifp->if_transmit = netmap_transmit;
igb_init_locked(adapter);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
@@ -114,7 +82,7 @@ fail:
* Reconcile kernel and user view of the transmit ring.
*/
static int
-igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
@@ -130,8 +98,6 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- IGB_TX_LOCK(txr);
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
@@ -153,7 +119,14 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
/* curr is the current slot in the nic ring */
union e1000_adv_tx_desc *curr =
(union e1000_adv_tx_desc *)&txr->tx_base[l];
- struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
+#ifndef IGB_MEDIA_RESET
+/* at the same time as IGB_MEDIA_RESET was defined, the
+ * tx buffer descriptor was renamed, so use this to revert
+ * back to the old name.
+ */
+#define igb_tx_buf igb_tx_buffer
+#endif
+ struct igb_tx_buf *txbuf = &txr->tx_buffers[l];
int flags = ((slot->flags & NS_REPORT) ||
j == 0 || j == report_frequency) ?
E1000_ADVTXD_DCMD_RS : 0;
@@ -162,8 +135,6 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
u_int len = slot->len;
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
- if (do_lock)
- IGB_TX_UNLOCK(txr);
return netmap_ring_reinit(kring);
}
@@ -223,8 +194,6 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
/* update avail to what the kernel knows */
ring->avail = kring->nr_hwavail;
- if (do_lock)
- IGB_TX_UNLOCK(txr);
return 0;
}
@@ -233,7 +202,7 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
* Reconcile kernel and user view of the receive ring.
*/
static int
-igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
@@ -241,16 +210,13 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
u_int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
u_int k = ring->cur, resvd = ring->reserved;
k = ring->cur;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- IGB_RX_LOCK(rxr);
-
/* XXX check sync modes */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -303,8 +269,6 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
void *addr = PNMB(slot, &paddr);
if (addr == netmap_buffer_base) { /* bad buf */
- if (do_lock)
- IGB_RX_UNLOCK(rxr);
return netmap_ring_reinit(kring);
}
@@ -332,8 +296,6 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
}
/* tell userspace that there are new packets */
ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- IGB_RX_UNLOCK(rxr);
return 0;
}
@@ -346,12 +308,11 @@ igb_netmap_attach(struct adapter *adapte
bzero(&na, sizeof(na));
na.ifp = adapter->ifp;
- na.separate_locks = 1;
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = adapter->num_tx_desc;
na.num_rx_desc = adapter->num_rx_desc;
na.nm_txsync = igb_netmap_txsync;
na.nm_rxsync = igb_netmap_rxsync;
- na.nm_lock = igb_netmap_lock_wrapper;
na.nm_register = igb_netmap_reg;
netmap_attach(&na, adapter->num_queues);
}
Modified: stable/9/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_lem_netmap.h Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/if_lem_netmap.h Wed Nov 6 22:36:36 2013 (r257768)
@@ -39,35 +39,6 @@
#include <dev/netmap/netmap_kern.h>
-static void
-lem_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int ringid)
-{
- struct adapter *adapter = ifp->if_softc;
-
- /* only one ring here so ignore the ringid */
- switch (what) {
- case NETMAP_CORE_LOCK:
- EM_CORE_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- EM_CORE_UNLOCK(adapter);
- break;
- case NETMAP_TX_LOCK:
- EM_TX_LOCK(adapter);
- break;
- case NETMAP_TX_UNLOCK:
- EM_TX_UNLOCK(adapter);
- break;
- case NETMAP_RX_LOCK:
- EM_RX_LOCK(adapter);
- break;
- case NETMAP_RX_UNLOCK:
- EM_RX_UNLOCK(adapter);
- break;
- }
-}
-
-
/*
* Register/unregister
*/
@@ -81,6 +52,8 @@ lem_netmap_reg(struct ifnet *ifp, int on
if (na == NULL)
return EINVAL;
+ EM_CORE_LOCK(adapter);
+
lem_disable_intr(adapter);
/* Tell the stack that the interface is no longer active */
@@ -95,7 +68,7 @@ lem_netmap_reg(struct ifnet *ifp, int on
ifp->if_capenable |= IFCAP_NETMAP;
na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
+ ifp->if_transmit = netmap_transmit;
lem_init_locked(adapter);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
@@ -114,6 +87,8 @@ fail:
taskqueue_unblock(adapter->tq); // XXX do we need this ?
#endif /* !EM_LEGCY_IRQ */
+ EM_CORE_UNLOCK(adapter);
+
return (error);
}
@@ -122,7 +97,7 @@ fail:
* Reconcile kernel and user view of the transmit ring.
*/
static int
-lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct netmap_adapter *na = NA(ifp);
@@ -133,13 +108,16 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
/* generate an interrupt approximately every half ring */
int report_frequency = kring->nkr_num_slots >> 1;
+ ND("%s: hwofs %d, hwcur %d hwavail %d lease %d cur %d avail %d",
+ ifp->if_xname,
+ kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail,
+ kring->nkr_hwlease,
+ ring->cur, ring->avail);
/* take a copy of ring->cur now, and never read it again */
k = ring->cur;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- EM_TX_LOCK(adapter);
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
/*
@@ -147,6 +125,8 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
* netmap ring, l is the corresponding index in the NIC ring.
*/
j = kring->nr_hwcur;
+ if (netmap_verbose > 255)
+ RD(5, "device %s send %d->%d", ifp->if_xname, j, k);
if (j != k) { /* we have new packets to send */
l = netmap_idx_k2n(kring, j);
for (n = 0; j != k; n++) {
@@ -163,13 +143,12 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
u_int len = slot->len;
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
- if (do_lock)
- EM_TX_UNLOCK(adapter);
return netmap_ring_reinit(kring);
}
+ ND("slot %d NIC %d %s", j, l, nm_dump_buf(addr, len, 128, NULL));
slot->flags &= ~NS_REPORT;
- if (slot->flags & NS_BUF_CHANGED) {
+ if (1 || slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(adapter->txtag, txbuf->map, addr);
curr->buffer_addr = htole64(paddr);
@@ -180,11 +159,13 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
htole32( adapter->txd_cmd | len |
(E1000_TXD_CMD_EOP | flags) );
+ ND("len %d kring %d nic %d", len, j, l);
bus_dmamap_sync(adapter->txtag, txbuf->map,
BUS_DMASYNC_PREWRITE);
j = (j == lim) ? 0 : j + 1;
l = (l == lim) ? 0 : l + 1;
}
+ ND("sent %d packets from %d, TDT now %d", n, kring->nr_hwcur, l);
kring->nr_hwcur = k; /* the saved ring->cur */
kring->nr_hwavail -= n;
@@ -199,6 +180,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
/* record completed transmissions using TDH */
l = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
+ ND("tdh is now %d", l);
if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
D("bad TDH %d", l);
l -= kring->nkr_num_slots;
@@ -208,6 +190,9 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
/* some tx completed, increment hwavail. */
if (delta < 0)
delta += kring->nkr_num_slots;
+ if (netmap_verbose > 255)
+ RD(5, "%s tx recover %d bufs",
+ ifp->if_xname, delta);
adapter->next_tx_to_clean = l;
kring->nr_hwavail += delta;
}
@@ -215,8 +200,6 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
/* update avail to what the kernel knows */
ring->avail = kring->nr_hwavail;
- if (do_lock)
- EM_TX_UNLOCK(adapter);
return 0;
}
@@ -225,21 +208,19 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
* Reconcile kernel and user view of the receive ring.
*/
static int
-lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct netmap_adapter *na = NA(ifp);
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
u_int k = ring->cur, resvd = ring->reserved;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- EM_RX_LOCK(adapter);
/* XXX check sync modes */
bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
@@ -251,6 +232,10 @@ lem_netmap_rxsync(struct ifnet *ifp, u_i
*/
l = adapter->next_rx_desc_to_check;
j = netmap_idx_n2k(kring, l);
+ ND("%s: next NIC %d kring %d (ofs %d), hwcur %d hwavail %d cur %d avail %d",
+ ifp->if_xname,
+ l, j, kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail,
+ ring->cur, ring->avail);
if (netmap_no_pendintr || force_update) {
uint16_t slot_flags = kring->nkr_slot_flags;
@@ -266,6 +251,8 @@ lem_netmap_rxsync(struct ifnet *ifp, u_i
D("bogus pkt size at %d", j);
len = 0;
}
+ ND("\n%s", nm_dump_buf(NMB(&ring->slot[j]),
+ len, 128, NULL));
ring->slot[j].len = len;
ring->slot[j].flags = slot_flags;
bus_dmamap_sync(adapter->rxtag,
@@ -300,8 +287,6 @@ lem_netmap_rxsync(struct ifnet *ifp, u_i
void *addr = PNMB(slot, &paddr);
if (addr == netmap_buffer_base) { /* bad buf */
- if (do_lock)
- EM_RX_UNLOCK(adapter);
return netmap_ring_reinit(kring);
}
@@ -332,8 +317,6 @@ lem_netmap_rxsync(struct ifnet *ifp, u_i
}
/* tell userspace that there are new packets */
ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- EM_RX_UNLOCK(adapter);
return 0;
}
@@ -346,12 +329,11 @@ lem_netmap_attach(struct adapter *adapte
bzero(&na, sizeof(na));
na.ifp = adapter->ifp;
- na.separate_locks = 1;
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = adapter->num_tx_desc;
na.num_rx_desc = adapter->num_rx_desc;
na.nm_txsync = lem_netmap_txsync;
na.nm_rxsync = lem_netmap_rxsync;
- na.nm_lock = lem_netmap_lock_wrapper;
na.nm_register = lem_netmap_reg;
netmap_attach(&na, 1);
}
Modified: stable/9/sys/dev/netmap/if_re_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_re_netmap.h Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/if_re_netmap.h Wed Nov 6 22:36:36 2013 (r257768)
@@ -39,33 +39,6 @@
/*
- * wrapper to export locks to the generic code
- * We should not use the tx/rx locks
- */
-static void
-re_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
-{
- struct rl_softc *adapter = ifp->if_softc;
-
- switch (what) {
- case NETMAP_CORE_LOCK:
- RL_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- RL_UNLOCK(adapter);
- break;
-
- case NETMAP_TX_LOCK:
- case NETMAP_RX_LOCK:
- case NETMAP_TX_UNLOCK:
- case NETMAP_RX_UNLOCK:
- D("invalid lock call %d, no tx/rx locks here", what);
- break;
- }
-}
-
-
-/*
* support for netmap register/unregister. We are already under core lock.
* only called on the first register or the last unregister.
*/
@@ -88,7 +61,7 @@ re_netmap_reg(struct ifnet *ifp, int ono
/* save if_transmit to restore it later */
na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
+ ifp->if_transmit = netmap_transmit;
re_init_locked(adapter);
@@ -111,7 +84,7 @@ fail:
* Reconcile kernel and user view of the transmit ring.
*/
static int
-re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct rl_softc *sc = ifp->if_softc;
struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
@@ -124,9 +97,6 @@ re_netmap_txsync(struct ifnet *ifp, u_in
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- RL_LOCK(sc);
-
/* Sync the TX descriptor list */
bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
sc->rl_ldata.rl_tx_list_map,
@@ -164,8 +134,6 @@ re_netmap_txsync(struct ifnet *ifp, u_in
int len = slot->len;
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
- if (do_lock)
- RL_UNLOCK(sc);
// XXX what about prodidx ?
return netmap_ring_reinit(kring);
}
@@ -200,8 +168,6 @@ re_netmap_txsync(struct ifnet *ifp, u_in
/* start ? */
CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
}
- if (do_lock)
- RL_UNLOCK(sc);
return 0;
}
@@ -210,7 +176,7 @@ re_netmap_txsync(struct ifnet *ifp, u_in
* Reconcile kernel and user view of the receive ring.
*/
static int
-re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct rl_softc *sc = ifp->if_softc;
struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
@@ -218,15 +184,13 @@ re_netmap_rxsync(struct ifnet *ifp, u_in
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
u_int k = ring->cur, resvd = ring->reserved;
k = ring->cur;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- RL_LOCK(sc);
/* XXX check sync modes */
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map,
@@ -291,8 +255,6 @@ re_netmap_rxsync(struct ifnet *ifp, u_in
void *addr = PNMB(slot, &paddr);
if (addr == netmap_buffer_base) { /* bad buf */
- if (do_lock)
- RL_UNLOCK(sc);
return netmap_ring_reinit(kring);
}
@@ -323,8 +285,6 @@ re_netmap_rxsync(struct ifnet *ifp, u_in
}
/* tell userspace that there are new packets */
ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- RL_UNLOCK(sc);
return 0;
}
@@ -411,12 +371,11 @@ re_netmap_attach(struct rl_softc *sc)
bzero(&na, sizeof(na));
na.ifp = sc->rl_ifp;
- na.separate_locks = 0;
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
na.nm_txsync = re_netmap_txsync;
na.nm_rxsync = re_netmap_rxsync;
- na.nm_lock = re_netmap_lock_wrapper;
na.nm_register = re_netmap_reg;
netmap_attach(&na, 1);
}
Modified: stable/9/sys/dev/netmap/ixgbe_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/ixgbe_netmap.h Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/ixgbe_netmap.h Wed Nov 6 22:36:36 2013 (r257768)
@@ -72,37 +72,6 @@ SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
-/*
- * wrapper to export locks to the generic netmap code.
- */
-static void
-ixgbe_netmap_lock_wrapper(struct ifnet *_a, int what, u_int queueid)
-{
- struct adapter *adapter = _a->if_softc;
-
- ASSERT(queueid < adapter->num_queues);
- switch (what) {
- case NETMAP_CORE_LOCK:
- IXGBE_CORE_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- IXGBE_CORE_UNLOCK(adapter);
- break;
- case NETMAP_TX_LOCK:
- IXGBE_TX_LOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_TX_UNLOCK:
- IXGBE_TX_UNLOCK(&adapter->tx_rings[queueid]);
- break;
- case NETMAP_RX_LOCK:
- IXGBE_RX_LOCK(&adapter->rx_rings[queueid]);
- break;
- case NETMAP_RX_UNLOCK:
- IXGBE_RX_UNLOCK(&adapter->rx_rings[queueid]);
- break;
- }
-}
-
static void
set_crcstrip(struct ixgbe_hw *hw, int onoff)
@@ -155,6 +124,7 @@ ixgbe_netmap_reg(struct ifnet *ifp, int
if (na == NULL)
return EINVAL; /* no netmap support here */
+ IXGBE_CORE_LOCK(adapter);
ixgbe_disable_intr(adapter);
/* Tell the stack that the interface is no longer active */
@@ -166,7 +136,7 @@ ixgbe_netmap_reg(struct ifnet *ifp, int
/* save if_transmit and replace with our routine */
na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
+ ifp->if_transmit = netmap_transmit;
/*
* reinitialize the adapter, now with netmap flag set,
@@ -186,6 +156,7 @@ fail:
ixgbe_init_locked(adapter); /* also enables intr */
}
set_crcstrip(&adapter->hw, onoff);
+ IXGBE_CORE_UNLOCK(adapter);
return (error);
}
@@ -213,12 +184,11 @@ fail:
*
* ring->avail is never used, only checked for bogus values.
*
- * do_lock is set iff the function is called from the ioctl handler.
- * In this case, grab a lock around the body, and also reclaim transmitted
+ * If (flags & NAF_FORCE_RECLAIM), reclaim transmitted
* buffers irrespective of interrupt mitigation.
*/
static int
-ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
@@ -237,8 +207,6 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- IXGBE_TX_LOCK(txr);
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
@@ -303,8 +271,6 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u
*/
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
ring_reset:
- if (do_lock)
- IXGBE_TX_UNLOCK(txr);
return netmap_ring_reinit(kring);
}
@@ -347,7 +313,7 @@ ring_reset:
* In all cases kring->nr_kflags indicates which slot will be
* checked upon a tx interrupt (nkr_num_slots means none).
*/
- if (do_lock) {
+ if (flags & NAF_FORCE_RECLAIM) {
j = 1; /* forced reclaim, ignore interrupts */
kring->nr_kflags = kring->nkr_num_slots;
} else if (kring->nr_hwavail > 0) {
@@ -422,8 +388,6 @@ ring_reset:
/* update avail to what the kernel knows */
ring->avail = kring->nr_hwavail;
- if (do_lock)
- IXGBE_TX_UNLOCK(txr);
return 0;
}
@@ -442,10 +406,11 @@ ring_reset:
* from nr_hwavail, make the descriptors available for the next reads,
* and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail.
*
- * do_lock has a special meaning: please refer to txsync.
+ * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
+ * of whether or not we received an interrupt.
*/
static int
-ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
@@ -453,14 +418,12 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
u_int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
u_int k = ring->cur, resvd = ring->reserved;
if (k > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- IXGBE_RX_LOCK(rxr);
/* XXX check sync modes */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -571,13 +534,9 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u
/* tell userspace that there are new packets */
ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- IXGBE_RX_UNLOCK(rxr);
return 0;
ring_reset:
- if (do_lock)
- IXGBE_RX_UNLOCK(rxr);
return netmap_ring_reinit(kring);
}
@@ -597,12 +556,11 @@ ixgbe_netmap_attach(struct adapter *adap
bzero(&na, sizeof(na));
na.ifp = adapter->ifp;
- na.separate_locks = 1; /* this card has separate rx/tx locks */
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = adapter->num_tx_desc;
na.num_rx_desc = adapter->num_rx_desc;
na.nm_txsync = ixgbe_netmap_txsync;
na.nm_rxsync = ixgbe_netmap_rxsync;
- na.nm_lock = ixgbe_netmap_lock_wrapper;
na.nm_register = ixgbe_netmap_reg;
netmap_attach(&na, adapter->num_queues);
}
Modified: stable/9/sys/dev/netmap/netmap.c
==============================================================================
--- stable/9/sys/dev/netmap/netmap.c Wed Nov 6 22:35:52 2013 (r257767)
+++ stable/9/sys/dev/netmap/netmap.c Wed Nov 6 22:36:36 2013 (r257768)
@@ -23,7 +23,6 @@
* SUCH DAMAGE.
*/
-#define NM_BRIDGE
/*
* This module supports memory mapped access to network devices,
@@ -52,18 +51,84 @@
* packets on the output interface.
* 6. select() or poll() can be used to wait for events on individual
* transmit or receive queues (or all queues for a given interface).
- */
+ *
-#ifdef linux
-#include "bsd_glue.h"
-static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev);
-#endif /* linux */
+ SYNCHRONIZATION (USER)
-#ifdef __APPLE__
-#include "osx_glue.h"
-#endif /* __APPLE__ */
+The netmap rings and data structures may be shared among multiple
+user threads or even independent processes.
+Any synchronization among those threads/processes is delegated
+to the threads themselves. Only one thread at a time can be in
+a system call on the same netmap ring. The OS does not enforce
+this and only guarantees against system crashes in case of
+invalid usage.
+
+ LOCKING (INTERNAL)
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-stable-9
mailing list