svn commit: r341477 - in stable/11/sys: conf dev/cxgbe dev/e1000 dev/ixgbe dev/ixl dev/netmap dev/re modules/netmap net
Vincenzo Maffione
vmaffione at FreeBSD.org
Tue Dec 4 17:41:02 UTC 2018
Author: vmaffione
Date: Tue Dec 4 17:40:56 2018
New Revision: 341477
URL: https://svnweb.freebsd.org/changeset/base/341477
Log:
MFC r339639
netmap: align codebase to the current upstream (sha 8374e1a7e6941)
Changelist:
- Move large parts of the VALE code to a new file and header, netmap_bdg.[ch],
so that the code can be reused by upcoming projects.
- Improvements and bug fixes to pipes and monitors.
- Introduce nm_os_onattach(), nm_os_onenter() and nm_os_onexit() to
handle differences between FreeBSD and Linux.
- Introduce new helper functions (netmap_all_rings(), netmap_real_rings(), ...)
to handle additional host rings and fake rings.
- Add a new sysctl to enable/disable hardware checksum offload in emulated
netmap mode.
- nm_inject(): add support for NS_MOREFRAG (a usage sketch follows below).
Approved by: gnn (mentor)
Differential Revision: https://reviews.freebsd.org/D17364
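
[Illustration only, not part of this commit: a minimal userspace sketch of the
NS_MOREFRAG convention referenced in the last changelist item. It assumes a
port already opened with nm_open() (net/netmap_user.h, NETMAP_WITH_LIBS); the
helper name send_multifrag() is invented for the example. Every slot of a
multi-slot frame except the last carries NS_MOREFRAG; a real sender would then
flush the ring with ioctl(fd, NIOCTXSYNC) or poll().]

#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>
#include <string.h>

/* Copy one frame into as many TX slots as needed, chaining them with
 * NS_MOREFRAG.  Returns 0 on success, -1 if the ring lacks space. */
static int
send_multifrag(struct nm_desc *d, const char *frame, size_t len)
{
	struct netmap_ring *ring = NETMAP_TXRING(d->nifp, d->first_tx_ring);
	unsigned int needed = (len + ring->nr_buf_size - 1) / ring->nr_buf_size;
	unsigned int i = ring->cur;

	if (nm_ring_space(ring) < needed)
		return -1;

	while (len > 0) {
		struct netmap_slot *slot = &ring->slot[i];
		size_t frag = len < ring->nr_buf_size ? len : ring->nr_buf_size;

		memcpy(NETMAP_BUF(ring, slot->buf_idx), frame, frag);
		slot->len = (uint16_t)frag;
		/* every fragment except the last one carries NS_MOREFRAG */
		slot->flags = (len > frag) ? NS_MOREFRAG : 0;
		frame += frag;
		len -= frag;
		i = nm_ring_next(ring, i);
	}
	ring->head = ring->cur = i;	/* expose the new slots to the kernel */
	return 0;
}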
Added:
stable/11/sys/dev/netmap/netmap_bdg.c (contents, props changed)
stable/11/sys/dev/netmap/netmap_bdg.h (contents, props changed)
stable/11/sys/dev/netmap/netmap_legacy.c (contents, props changed)
stable/11/sys/dev/netmap/netmap_pt.c (contents, props changed)
stable/11/sys/net/netmap_legacy.h (contents, props changed)
stable/11/sys/net/netmap_virt.h (contents, props changed)
Modified:
stable/11/sys/conf/files
stable/11/sys/dev/cxgbe/t4_netmap.c
stable/11/sys/dev/e1000/if_em.c
stable/11/sys/dev/e1000/if_igb.c
stable/11/sys/dev/e1000/if_lem.c
stable/11/sys/dev/ixgbe/if_ix.c
stable/11/sys/dev/ixgbe/if_ixv.c
stable/11/sys/dev/ixgbe/ix_txrx.c
stable/11/sys/dev/ixgbe/ixgbe_netmap.c
stable/11/sys/dev/ixl/ixl_pf_main.c
stable/11/sys/dev/ixl/ixl_txrx.c
stable/11/sys/dev/netmap/if_em_netmap.h
stable/11/sys/dev/netmap/if_igb_netmap.h
stable/11/sys/dev/netmap/if_ixl_netmap.h
stable/11/sys/dev/netmap/if_lem_netmap.h
stable/11/sys/dev/netmap/if_re_netmap.h
stable/11/sys/dev/netmap/if_vtnet_netmap.h
stable/11/sys/dev/netmap/ixgbe_netmap.h
stable/11/sys/dev/netmap/netmap.c
stable/11/sys/dev/netmap/netmap_freebsd.c
stable/11/sys/dev/netmap/netmap_generic.c
stable/11/sys/dev/netmap/netmap_kern.h
stable/11/sys/dev/netmap/netmap_mbq.c
stable/11/sys/dev/netmap/netmap_mbq.h
stable/11/sys/dev/netmap/netmap_mem2.c
stable/11/sys/dev/netmap/netmap_mem2.h
stable/11/sys/dev/netmap/netmap_monitor.c
stable/11/sys/dev/netmap/netmap_offloadings.c
stable/11/sys/dev/netmap/netmap_pipe.c
stable/11/sys/dev/netmap/netmap_vale.c
stable/11/sys/dev/re/if_re.c
stable/11/sys/modules/netmap/Makefile
stable/11/sys/net/iflib.c
stable/11/sys/net/netmap.h
stable/11/sys/net/netmap_user.h
Modified: stable/11/sys/conf/files
==============================================================================
--- stable/11/sys/conf/files Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/conf/files Tue Dec 4 17:40:56 2018 (r341477)
@@ -2476,7 +2476,10 @@ dev/netmap/netmap_mem2.c optional netmap
dev/netmap/netmap_monitor.c optional netmap
dev/netmap/netmap_offloadings.c optional netmap
dev/netmap/netmap_pipe.c optional netmap
+dev/netmap/netmap_pt.c optional netmap
dev/netmap/netmap_vale.c optional netmap
+dev/netmap/netmap_legacy.c optional netmap
+dev/netmap/netmap_bdg.c optional netmap
# compile-with "${NORMAL_C} -Wconversion -Wextra"
dev/nfsmb/nfsmb.c optional nfsmb pci
dev/nge/if_nge.c optional nge
Modified: stable/11/sys/dev/cxgbe/t4_netmap.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_netmap.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/cxgbe/t4_netmap.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -909,7 +909,7 @@ t4_nm_intr(void *arg)
struct adapter *sc = vi->pi->adapter;
struct ifnet *ifp = vi->ifp;
struct netmap_adapter *na = NA(ifp);
- struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
+ struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
struct netmap_ring *ring = kring->ring;
struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
const void *cpl;
@@ -950,7 +950,7 @@ t4_nm_intr(void *arg)
case CPL_RX_PKT:
ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
sc->params.sge.fl_pktshift;
- ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
+ ring->slot[fl_cidx].flags = 0;
fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
Modified: stable/11/sys/dev/e1000/if_em.c
==============================================================================
--- stable/11/sys/dev/e1000/if_em.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/e1000/if_em.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -3644,7 +3644,7 @@ em_setup_transmit_ring(struct tx_ring *txr)
}
#ifdef DEV_NETMAP
if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
+ int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
uint64_t paddr;
void *addr;
@@ -4436,7 +4436,7 @@ em_setup_receive_ring(struct rx_ring *rxr)
rxbuf = &rxr->rx_buffers[j];
#ifdef DEV_NETMAP
if (slot) {
- int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
+ int si = netmap_idx_n2k(na->rx_rings[rxr->me], j);
uint64_t paddr;
void *addr;
@@ -4741,7 +4741,7 @@ em_initialize_receive_unit(struct adapter *adapter)
*/
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
struct netmap_adapter *na = netmap_getna(adapter->ifp);
- rdt -= nm_kr_rxspace(&na->rx_rings[i]);
+ rdt -= nm_kr_rxspace(na->rx_rings[i]);
}
#endif /* DEV_NETMAP */
E1000_WRITE_REG(hw, E1000_RDT(i), rdt);
Modified: stable/11/sys/dev/e1000/if_igb.c
==============================================================================
--- stable/11/sys/dev/e1000/if_igb.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/e1000/if_igb.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -3604,7 +3604,7 @@ igb_setup_transmit_ring(struct tx_ring *txr)
}
#ifdef DEV_NETMAP
if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
+ int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
/* no need to set the address */
netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
}
@@ -4420,7 +4420,7 @@ igb_setup_receive_ring(struct rx_ring *rxr)
#ifdef DEV_NETMAP
if (slot) {
/* slot sj is mapped to the j-th NIC-ring entry */
- int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
+ int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
uint64_t paddr;
void *addr;
@@ -4806,7 +4806,7 @@ igb_initialize_receive_units(struct adapter *adapter)
*/
if (ifp->if_capenable & IFCAP_NETMAP) {
struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
+ struct netmap_kring *kring = na->rx_rings[i];
int t = rxr->next_to_refresh - nm_kr_rxspace(kring);
if (t >= adapter->num_rx_desc)
Modified: stable/11/sys/dev/e1000/if_lem.c
==============================================================================
--- stable/11/sys/dev/e1000/if_lem.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/e1000/if_lem.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -2792,7 +2792,7 @@ lem_setup_transmit_structures(struct adapter *adapter)
#ifdef DEV_NETMAP
if (slot) {
/* the i-th NIC entry goes to slot si */
- int si = netmap_idx_n2k(&na->tx_rings[0], i);
+ int si = netmap_idx_n2k(na->tx_rings[0], i);
uint64_t paddr;
void *addr;
@@ -3374,7 +3374,7 @@ lem_setup_receive_structures(struct adapter *adapter)
#ifdef DEV_NETMAP
if (slot) {
/* the i-th NIC entry goes to slot si */
- int si = netmap_idx_n2k(&na->rx_rings[0], i);
+ int si = netmap_idx_n2k(na->rx_rings[0], i);
uint64_t paddr;
void *addr;
@@ -3498,7 +3498,7 @@ lem_initialize_receive_unit(struct adapter *adapter)
/* preserve buffers already made available to clients */
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
struct netmap_adapter *na = netmap_getna(adapter->ifp);
- rctl -= nm_kr_rxspace(&na->rx_rings[0]);
+ rctl -= nm_kr_rxspace(na->rx_rings[0]);
}
#endif /* DEV_NETMAP */
E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
Modified: stable/11/sys/dev/ixgbe/if_ix.c
==============================================================================
--- stable/11/sys/dev/ixgbe/if_ix.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixgbe/if_ix.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -2986,7 +2986,7 @@ ixgbe_init_locked(struct adapter *adapter)
if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
(ifp->if_capenable & IFCAP_NETMAP)) {
struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
+ struct netmap_kring *kring = na->rx_rings[i];
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
Modified: stable/11/sys/dev/ixgbe/if_ixv.c
==============================================================================
--- stable/11/sys/dev/ixgbe/if_ixv.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixgbe/if_ixv.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -1490,7 +1490,7 @@ ixv_initialize_receive_units(struct adapter *adapter)
if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
(ifp->if_capenable & IFCAP_NETMAP)) {
struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
+ struct netmap_kring *kring = na->rx_rings[i];
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
Modified: stable/11/sys/dev/ixgbe/ix_txrx.c
==============================================================================
--- stable/11/sys/dev/ixgbe/ix_txrx.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixgbe/ix_txrx.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -589,7 +589,7 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
* netmap_idx_n2k() handles wraparounds properly.
*/
if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
- int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
+ int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
netmap_load_map(na, txr->txtag,
txbuf->map, NMB(na, slot + si));
}
@@ -991,7 +991,7 @@ ixgbe_txeof(struct tx_ring *txr)
if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
(adapter->ifp->if_capenable & IFCAP_NETMAP)) {
struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->tx_rings[txr->me];
+ struct netmap_kring *kring = na->tx_rings[txr->me];
txd = txr->tx_base;
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
@@ -1402,7 +1402,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
* an mbuf, so end the block with a continue;
*/
if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
+ int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
uint64_t paddr;
void *addr;
Modified: stable/11/sys/dev/ixgbe/ixgbe_netmap.c
==============================================================================
--- stable/11/sys/dev/ixgbe/ixgbe_netmap.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixgbe/ixgbe_netmap.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -409,7 +409,6 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
*/
if (netmap_no_pendintr || force_update) {
int crclen = (ix_crcstrip) ? 0 : 4;
- uint16_t slot_flags = kring->nkr_slot_flags;
nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -421,7 +420,7 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
if ((staterr & IXGBE_RXD_STAT_DD) == 0)
break;
ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(rxr->ptag,
rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
Modified: stable/11/sys/dev/ixl/ixl_pf_main.c
==============================================================================
--- stable/11/sys/dev/ixl/ixl_pf_main.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixl/ixl_pf_main.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -2252,7 +2252,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
/* preserve queue */
if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
struct netmap_adapter *na = NA(vsi->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
+ struct netmap_kring *kring = na->rx_rings[i];
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
} else
Modified: stable/11/sys/dev/ixl/ixl_txrx.c
==============================================================================
--- stable/11/sys/dev/ixl/ixl_txrx.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/ixl/ixl_txrx.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -613,7 +613,7 @@ ixl_init_tx_ring(struct ixl_queue *que)
* netmap slot index, si
*/
if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
+ int si = netmap_idx_n2k(na->tx_rings[que->me], i);
netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
}
#endif /* DEV_NETMAP */
@@ -1414,7 +1414,7 @@ ixl_init_rx_ring(struct ixl_queue *que)
* an mbuf, so end the block with a continue;
*/
if (slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
+ int sj = netmap_idx_n2k(na->rx_rings[que->me], j);
uint64_t paddr;
void *addr;
Modified: stable/11/sys/dev/netmap/if_em_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_em_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_em_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -233,8 +233,6 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags
* First part: import newly received packets.
*/
if (netmap_no_pendintr || force_update) {
- uint16_t slot_flags = kring->nkr_slot_flags;
-
nic_i = rxr->next_to_check;
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -245,7 +243,7 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
Modified: stable/11/sys/dev/netmap/if_igb_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_igb_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_igb_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -215,8 +215,6 @@ igb_netmap_rxsync(struct netmap_kring *kring, int flag
* First part: import newly received packets.
*/
if (netmap_no_pendintr || force_update) {
- uint16_t slot_flags = kring->nkr_slot_flags;
-
nic_i = rxr->next_to_check;
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -227,7 +225,7 @@ igb_netmap_rxsync(struct netmap_kring *kring, int flag
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(rxr->ptag,
rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
Modified: stable/11/sys/dev/netmap/if_ixl_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_ixl_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_ixl_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -59,7 +59,7 @@ extern int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip
/*
* device-specific sysctl variables:
*
- * ixl_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
+ * ixl_crcstrip: 0: NIC keeps CRC in rx frames, 1: NIC strips it (default).
* During regular operations the CRC is stripped, but on some
* hardware reception of frames not multiple of 64 is slower,
* so using crcstrip=0 helps in benchmarks.
@@ -67,14 +67,14 @@ extern int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip
* ixl_rx_miss, ixl_rx_miss_bufs:
* count packets that might be missed due to lost interrupts.
*/
+int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
SYSCTL_DECL(_dev_netmap);
/*
* The xl driver by default strips CRCs and we do not override it.
*/
-int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
#if 0
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_crcstrip,
- CTLFLAG_RW, &ixl_crcstrip, 1, "strip CRC on rx frames");
+ CTLFLAG_RW, &ixl_crcstrip, 1, "NIC strips CRC on rx frames");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss,
CTLFLAG_RW, &ixl_rx_miss, 0, "potentially missed rx intr");
@@ -130,7 +130,7 @@ ixl_netmap_attach(struct ixl_vsi *vsi)
na.ifp = vsi->ifp;
na.na_flags = NAF_BDG_MAYSLEEP;
// XXX check that queues is set.
- printf("queues is %p\n", vsi->queues);
+ nm_prinf("queues is %p\n", vsi->queues);
if (vsi->queues) {
na.num_tx_desc = vsi->queues[0].num_desc;
na.num_rx_desc = vsi->queues[0].num_desc;
@@ -332,7 +332,6 @@ ixl_netmap_rxsync(struct netmap_kring *kring, int flag
*/
if (netmap_no_pendintr || force_update) {
int crclen = ixl_crcstrip ? 0 : 4;
- uint16_t slot_flags = kring->nkr_slot_flags;
nic_i = rxr->next_check; // or also k2n(kring->nr_hwtail)
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -347,7 +346,7 @@ ixl_netmap_rxsync(struct netmap_kring *kring, int flag
break;
ring->slot[nm_i].len = ((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
>> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - crclen;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(rxr->ptag,
rxr->buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
Modified: stable/11/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_lem_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_lem_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -35,12 +35,8 @@
#include <net/netmap.h>
#include <sys/selinfo.h>
-#include <vm/vm.h>
-#include <vm/pmap.h> /* vtophys ? */
#include <dev/netmap/netmap_kern.h>
-extern int netmap_adaptive_io;
-
/*
* Register/unregister. We are already under netmap lock.
*/
@@ -81,6 +77,22 @@ lem_netmap_reg(struct netmap_adapter *na, int onoff)
}
+static void
+lem_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+
+ EM_CORE_LOCK(adapter);
+ if (onoff) {
+ lem_enable_intr(adapter);
+ } else {
+ lem_disable_intr(adapter);
+ }
+ EM_CORE_UNLOCK(adapter);
+}
+
+
/*
* Reconcile kernel and user view of the transmit ring.
*/
@@ -99,10 +111,6 @@ lem_netmap_txsync(struct netmap_kring *kring, int flag
/* device-specific */
struct adapter *adapter = ifp->if_softc;
-#ifdef NIC_PARAVIRT
- struct paravirt_csb *csb = adapter->csb;
- uint64_t *csbd = (uint64_t *)(csb + 1);
-#endif /* NIC_PARAVIRT */
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
@@ -113,19 +121,6 @@ lem_netmap_txsync(struct netmap_kring *kring, int flag
nm_i = kring->nr_hwcur;
if (nm_i != head) { /* we have new packets to send */
-#ifdef NIC_PARAVIRT
- int do_kick = 0;
- uint64_t t = 0; // timestamp
- int n = head - nm_i;
- if (n < 0)
- n += lim + 1;
- if (csb) {
- t = rdtsc(); /* last timestamp */
- csbd[16] += t - csbd[0]; /* total Wg */
- csbd[17] += n; /* Wg count */
- csbd[0] = t;
- }
-#endif /* NIC_PARAVIRT */
nic_i = netmap_idx_k2n(kring, nm_i);
while (nm_i != head) {
struct netmap_slot *slot = &ring->slot[nm_i];
@@ -166,38 +161,8 @@ lem_netmap_txsync(struct netmap_kring *kring, int flag
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-#ifdef NIC_PARAVIRT
- /* set unconditionally, then also kick if needed */
- if (csb) {
- t = rdtsc();
- if (csb->host_need_txkick == 2) {
- /* can compute an update of delta */
- int64_t delta = t - csbd[3];
- if (delta < 0)
- delta = -delta;
- if (csbd[8] == 0 || delta < csbd[8]) {
- csbd[8] = delta;
- csbd[9]++;
- }
- csbd[10]++;
- }
- csb->guest_tdt = nic_i;
- csbd[18] += t - csbd[0]; // total wp
- csbd[19] += n;
- }
- if (!csb || !csb->guest_csb_on || (csb->host_need_txkick & 1))
- do_kick = 1;
- if (do_kick)
-#endif /* NIC_PARAVIRT */
/* (re)start the tx unit up to slot nic_i (excluded) */
E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), nic_i);
-#ifdef NIC_PARAVIRT
- if (do_kick) {
- uint64_t t1 = rdtsc();
- csbd[20] += t1 - t; // total Np
- csbd[21]++;
- }
-#endif /* NIC_PARAVIRT */
}
/*
@@ -206,93 +171,6 @@ lem_netmap_txsync(struct netmap_kring *kring, int flag
if (ticks != kring->last_reclaim || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
kring->last_reclaim = ticks;
/* record completed transmissions using TDH */
-#ifdef NIC_PARAVIRT
- /* host updates tdh unconditionally, and we have
- * no side effects on reads, so we can read from there
- * instead of exiting.
- */
- if (csb) {
- static int drain = 0, nodrain=0, good = 0, bad = 0, fail = 0;
- u_int x = adapter->next_tx_to_clean;
- csbd[19]++; // XXX count reclaims
- nic_i = csb->host_tdh;
- if (csb->guest_csb_on) {
- if (nic_i == x) {
- bad++;
- csbd[24]++; // failed reclaims
- /* no progress, request kick and retry */
- csb->guest_need_txkick = 1;
- mb(); // XXX barrier
- nic_i = csb->host_tdh;
- } else {
- good++;
- }
- if (nic_i != x) {
- csb->guest_need_txkick = 2;
- if (nic_i == csb->guest_tdt)
- drain++;
- else
- nodrain++;
-#if 1
- if (netmap_adaptive_io) {
- /* new mechanism: last half ring (or so)
- * released one slot at a time.
- * This effectively makes the system spin.
- *
- * Take next_to_clean + 1 as a reference.
- * tdh must be ahead or equal
- * On entry, the logical order is
- * x < tdh = nic_i
- * We first push tdh up to avoid wraps.
- * The limit is tdh-ll (half ring).
- * if tdh-256 < x we report x;
- * else we report tdh-256
- */
- u_int tdh = nic_i;
- u_int ll = csbd[15];
- u_int delta = lim/8;
- if (netmap_adaptive_io == 2 || ll > delta)
- csbd[15] = ll = delta;
- else if (netmap_adaptive_io == 1 && ll > 1) {
- csbd[15]--;
- }
-
- if (nic_i >= kring->nkr_num_slots) {
- RD(5, "bad nic_i %d on input", nic_i);
- }
- x = nm_next(x, lim);
- if (tdh < x)
- tdh += lim + 1;
- if (tdh <= x + ll) {
- nic_i = x;
- csbd[25]++; //report n + 1;
- } else {
- tdh = nic_i;
- if (tdh < ll)
- tdh += lim + 1;
- nic_i = tdh - ll;
- csbd[26]++; // report tdh - ll
- }
- }
-#endif
- } else {
- /* we stop, count whether we are idle or not */
- int bh_active = csb->host_need_txkick & 2 ? 4 : 0;
- csbd[27+ csb->host_need_txkick]++;
- if (netmap_adaptive_io == 1) {
- if (bh_active && csbd[15] > 1)
- csbd[15]--;
- else if (!bh_active && csbd[15] < lim/2)
- csbd[15]++;
- }
- bad--;
- fail++;
- }
- }
- RD(1, "drain %d nodrain %d good %d retry %d fail %d",
- drain, nodrain, good, bad, fail);
- } else
-#endif /* !NIC_PARAVIRT */
nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
D("TDH wrap %d", nic_i);
@@ -324,21 +202,10 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
/* device-specific */
struct adapter *adapter = ifp->if_softc;
-#ifdef NIC_PARAVIRT
- struct paravirt_csb *csb = adapter->csb;
- uint32_t csb_mode = csb && csb->guest_csb_on;
- uint32_t do_host_rxkick = 0;
-#endif /* NIC_PARAVIRT */
if (head > lim)
return netmap_ring_reinit(kring);
-#ifdef NIC_PARAVIRT
- if (csb_mode) {
- force_update = 1;
- csb->guest_need_rxkick = 0;
- }
-#endif /* NIC_PARAVIRT */
/* XXX check sync modes */
bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -347,8 +214,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
* First part: import newly received packets.
*/
if (netmap_no_pendintr || force_update) {
- uint16_t slot_flags = kring->nkr_slot_flags;
-
nic_i = adapter->next_rx_desc_to_check;
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -357,23 +222,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
uint32_t staterr = le32toh(curr->status);
int len;
-#ifdef NIC_PARAVIRT
- if (csb_mode) {
- if ((staterr & E1000_RXD_STAT_DD) == 0) {
- /* don't bother to retry if more than 1 pkt */
- if (n > 1)
- break;
- csb->guest_need_rxkick = 1;
- wmb();
- staterr = le32toh(curr->status);
- if ((staterr & E1000_RXD_STAT_DD) == 0) {
- break;
- } else { /* we are good */
- csb->guest_need_rxkick = 0;
- }
- }
- } else
-#endif /* NIC_PARAVIRT */
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
len = le16toh(curr->length) - 4; // CRC
@@ -382,7 +230,7 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
len = 0;
}
ring->slot[nm_i].len = len;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(adapter->rxtag,
adapter->rx_buffer_area[nic_i].map,
BUS_DMASYNC_POSTREAD);
@@ -390,18 +238,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
nic_i = nm_next(nic_i, lim);
}
if (n) { /* update the state variables */
-#ifdef NIC_PARAVIRT
- if (csb_mode) {
- if (n > 1) {
- /* leave one spare buffer so we avoid rxkicks */
- nm_i = nm_prev(nm_i, lim);
- nic_i = nm_prev(nic_i, lim);
- n--;
- } else {
- csb->guest_need_rxkick = 1;
- }
- }
-#endif /* NIC_PARAVIRT */
ND("%d new packets at nic %d nm %d tail %d",
n,
adapter->next_rx_desc_to_check,
@@ -440,10 +276,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
curr->status = 0;
bus_dmamap_sync(adapter->rxtag, rxbuf->map,
BUS_DMASYNC_PREREAD);
-#ifdef NIC_PARAVIRT
- if (csb_mode && csb->host_rxkick_at == nic_i)
- do_host_rxkick = 1;
-#endif /* NIC_PARAVIRT */
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
@@ -455,12 +287,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
* so move nic_i back by one unit
*/
nic_i = nm_prev(nic_i, lim);
-#ifdef NIC_PARAVIRT
- /* set unconditionally, then also kick if needed */
- if (csb)
- csb->guest_rdt = nic_i;
- if (!csb_mode || do_host_rxkick)
-#endif /* NIC_PARAVIRT */
E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), nic_i);
}
@@ -486,6 +312,7 @@ lem_netmap_attach(struct adapter *adapter)
na.nm_rxsync = lem_netmap_rxsync;
na.nm_register = lem_netmap_reg;
na.num_tx_rings = na.num_rx_rings = 1;
+ na.nm_intr = lem_netmap_intr;
netmap_attach(&na);
}
Modified: stable/11/sys/dev/netmap/if_re_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_re_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_re_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -199,7 +199,6 @@ re_netmap_rxsync(struct netmap_kring *kring, int flags
* is to stop right before nm_hwcur.
*/
if (netmap_no_pendintr || force_update) {
- uint16_t slot_flags = kring->nkr_slot_flags;
uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);
nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
@@ -216,7 +215,7 @@ re_netmap_rxsync(struct netmap_kring *kring, int flags
/* XXX subtract crc */
total_len = (total_len < 4) ? 0 : total_len - 4;
ring->slot[nm_i].len = total_len;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
/* sync was in re_newbuf() */
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
@@ -303,7 +302,7 @@ re_netmap_tx_init(struct rl_softc *sc)
/* l points in the netmap ring, i points in the NIC ring */
for (i = 0; i < n; i++) {
uint64_t paddr;
- int l = netmap_idx_n2k(&na->tx_rings[0], i);
+ int l = netmap_idx_n2k(na->tx_rings[0], i);
void *addr = PNMB(na, slot + l, &paddr);
desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
@@ -329,11 +328,11 @@ re_netmap_rx_init(struct rl_softc *sc)
* Do not release the slots owned by userspace,
* and also keep one empty.
*/
- max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]);
+ max_avail = n - 1 - nm_kr_rxspace(na->rx_rings[0]);
for (nic_i = 0; nic_i < n; nic_i++) {
void *addr;
uint64_t paddr;
- uint32_t nm_i = netmap_idx_n2k(&na->rx_rings[0], nic_i);
+ uint32_t nm_i = netmap_idx_n2k(na->rx_rings[0], nic_i);
addr = PNMB(na, slot + nm_i, &paddr);
Modified: stable/11/sys/dev/netmap/if_vtnet_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/if_vtnet_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/if_vtnet_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -122,12 +122,13 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
struct SOFTC_T *sc = ifp->if_softc;
struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
struct virtqueue *vq = txq->vtntx_vq;
+ int interrupts = !(kring->nr_kflags & NKR_NOINTR);
/*
* First part: process new packets to send.
*/
rmb();
-
+
nm_i = kring->nr_hwcur;
if (nm_i != head) { /* we have new packets to send */
struct sglist *sg = txq->vtntx_sg;
@@ -179,10 +180,12 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
ring->head, ring->tail, virtqueue_nused(vq),
(virtqueue_dump(vq), 1));
virtqueue_notify(vq);
- virtqueue_enable_intr(vq); // like postpone with 0
+ if (interrupts) {
+ virtqueue_enable_intr(vq); // like postpone with 0
+ }
}
-
+
/* Free used slots. We only consider our own used buffers, recognized
* by the token we passed to virtqueue_add_outbuf.
*/
@@ -209,7 +212,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0*/) {
ND(3, "disable intr, hwcur %d", nm_i);
virtqueue_disable_intr(vq);
- } else {
+ } else if (interrupts) {
ND(3, "enable intr, hwcur %d", nm_i);
virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);
}
@@ -277,6 +280,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+ int interrupts = !(kring->nr_kflags & NKR_NOINTR);
/* device-specific */
struct SOFTC_T *sc = ifp->if_softc;
@@ -297,7 +301,6 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
* and vtnet_netmap_init_buffers().
*/
if (netmap_no_pendintr || force_update) {
- uint16_t slot_flags = kring->nkr_slot_flags;
struct netmap_adapter *token;
nm_i = kring->nr_hwtail;
@@ -309,7 +312,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
break;
if (likely(token == (void *)rxq)) {
ring->slot[nm_i].len = len;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
nm_i = nm_next(nm_i, lim);
n++;
} else {
@@ -334,7 +337,9 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
kring->nr_hwcur = err;
virtqueue_notify(vq);
/* After draining the queue may need an intr from the hypervisor */
- vtnet_rxq_enable_intr(rxq);
+ if (interrupts) {
+ vtnet_rxq_enable_intr(rxq);
+ }
}
ND("[C] h %d c %d t %d hwcur %d hwtail %d",
@@ -345,6 +350,28 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
}
+/* Enable/disable interrupts on all virtqueues. */
+static void
+vtnet_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+ struct SOFTC_T *sc = na->ifp->if_softc;
+ int i;
+
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
+ struct vtnet_txq *txq = &sc->vtnet_txqs[i];
+ struct virtqueue *txvq = txq->vtntx_vq;
+
+ if (onoff) {
+ vtnet_rxq_enable_intr(rxq);
+ virtqueue_enable_intr(txvq);
+ } else {
+ vtnet_rxq_disable_intr(rxq);
+ virtqueue_disable_intr(txvq);
+ }
+ }
+}
+
/* Make RX virtqueues buffers pointing to netmap buffers. */
static int
vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
@@ -356,7 +383,7 @@ vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
if (!nm_native_on(na))
return 0;
for (r = 0; r < na->num_rx_rings; r++) {
- struct netmap_kring *kring = &na->rx_rings[r];
+ struct netmap_kring *kring = na->rx_rings[r];
struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
struct virtqueue *vq = rxq->vtnrx_vq;
struct netmap_slot* slot;
@@ -380,29 +407,6 @@ vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
return 1;
}
-/* Update the virtio-net device configurations. Number of queues can
- * change dinamically, by 'ethtool --set-channels $IFNAME combined $N'.
- * This is actually the only way virtio-net can currently enable
- * the multiqueue mode.
- * XXX note that we seem to lose packets if the netmap ring has more
- * slots than the queue
- */
-static int
-vtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,
- u_int *rxr, u_int *rxd)
-{
- struct ifnet *ifp = na->ifp;
- struct SOFTC_T *sc = ifp->if_softc;
-
- *txr = *rxr = sc->vtnet_max_vq_pairs;
- *rxd = 512; // sc->vtnet_rx_nmbufs;
- *txd = *rxd; // XXX
- D("vtnet config txq=%d, txd=%d rxq=%d, rxd=%d",
- *txr, *txd, *rxr, *rxd);
-
- return 0;
-}
-
static void
vtnet_netmap_attach(struct SOFTC_T *sc)
{
@@ -416,7 +420,7 @@ vtnet_netmap_attach(struct SOFTC_T *sc)
na.nm_register = vtnet_netmap_reg;
na.nm_txsync = vtnet_netmap_txsync;
na.nm_rxsync = vtnet_netmap_rxsync;
- na.nm_config = vtnet_netmap_config;
+ na.nm_intr = vtnet_netmap_intr;
na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
D("max rings %d", sc->vtnet_max_vq_pairs);
netmap_attach(&na);
Modified: stable/11/sys/dev/netmap/ixgbe_netmap.h
==============================================================================
--- stable/11/sys/dev/netmap/ixgbe_netmap.h Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/ixgbe_netmap.h Tue Dec 4 17:40:56 2018 (r341477)
@@ -53,7 +53,7 @@ void ixgbe_netmap_attach(struct adapter *adapter);
/*
* device-specific sysctl variables:
*
- * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
+ * ix_crcstrip: 0: NIC keeps CRC in rx frames (default), 1: NIC strips it.
* During regular operations the CRC is stripped, but on some
* hardware reception of frames not multiple of 64 is slower,
* so using crcstrip=0 helps in benchmarks.
@@ -65,7 +65,7 @@ SYSCTL_DECL(_dev_netmap);
static int ix_rx_miss, ix_rx_miss_bufs;
int ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
- CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
+ CTLFLAG_RW, &ix_crcstrip, 0, "NIC strips CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
@@ -109,7 +109,21 @@ set_crcstrip(struct ixgbe_hw *hw, int onoff)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
}
+static void
+ixgbe_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+ IXGBE_CORE_LOCK(adapter);
+ if (onoff) {
+ ixgbe_enable_intr(adapter); // XXX maybe ixgbe_stop ?
+ } else {
+ ixgbe_disable_intr(adapter); // XXX maybe ixgbe_stop ?
+ }
+ IXGBE_CORE_UNLOCK(adapter);
+}
+
/*
* Register/unregister. We are already under netmap lock.
* Only called on the first register or the last unregister.
@@ -311,7 +325,7 @@ ixgbe_netmap_txsync(struct netmap_kring *kring, int fl
* good way.
*/
nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_IS_VF(adapter) ?
- IXGBE_VFTDH(kring->ring_id) : IXGBE_TDH(kring->ring_id));
+ IXGBE_VFTDH(kring->ring_id) : IXGBE_TDH(kring->ring_id));
if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
D("TDH wrap %d", nic_i);
nic_i -= kring->nkr_num_slots;
@@ -381,7 +395,6 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
*/
if (netmap_no_pendintr || force_update) {
int crclen = (ix_crcstrip || IXGBE_IS_VF(adapter) ) ? 0 : 4;
- uint16_t slot_flags = kring->nkr_slot_flags;
nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
nm_i = netmap_idx_n2k(kring, nic_i);
@@ -393,7 +406,7 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
if ((staterr & IXGBE_RXD_STAT_DD) == 0)
break;
ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
- ring->slot[nm_i].flags = slot_flags;
+ ring->slot[nm_i].flags = 0;
bus_dmamap_sync(rxr->ptag,
rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
@@ -486,6 +499,7 @@ ixgbe_netmap_attach(struct adapter *adapter)
na.nm_rxsync = ixgbe_netmap_rxsync;
na.nm_register = ixgbe_netmap_reg;
na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
+ na.nm_intr = ixgbe_netmap_intr;
netmap_attach(&na);
}
Modified: stable/11/sys/dev/netmap/netmap.c
==============================================================================
--- stable/11/sys/dev/netmap/netmap.c Tue Dec 4 16:53:28 2018 (r341476)
+++ stable/11/sys/dev/netmap/netmap.c Tue Dec 4 17:40:56 2018 (r341477)
@@ -1,5 +1,9 @@
/*
- * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
+ * Copyright (C) 2011-2014 Matteo Landi
+ * Copyright (C) 2011-2016 Luigi Rizzo
+ * Copyright (C) 2011-2016 Giuseppe Lettieri
+ * Copyright (C) 2011-2016 Vincenzo Maffione
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -133,13 +137,12 @@ ports attached to the switch)
* > select()able file descriptor on which events are reported.
*
* Internally, we allocate a netmap_priv_d structure, that will be
- * initialized on ioctl(NIOCREGIF).
+ * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
+ * structure for each open().
*
* os-specific:
- * FreeBSD: netmap_open (netmap_freebsd.c). The priv is
- * per-thread.
- * linux: linux_netmap_open (netmap_linux.c). The priv is
- * per-open.
+ * FreeBSD: see netmap_open() (netmap_freebsd.c)
+ * linux: see linux_netmap_open() (netmap_linux.c)
*
* > 2. on each descriptor, the process issues an ioctl() to identify
* > the interface that should report events to the file descriptor.
@@ -257,7 +260,7 @@ ports attached to the switch)
*
* Any network interface known to the system (including a persistent VALE
* port) can be attached to a VALE switch by issuing the
- * NETMAP_BDG_ATTACH subcommand. After the attachment, persistent VALE ports
+ * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
* look exactly like ephemeral VALE ports (as created in step 2 above). The
* attachment of other interfaces, instead, requires the creation of a
* netmap_bwrap_adapter. Moreover, the attached interface must be put in
@@ -299,18 +302,17 @@ ports attached to the switch)
* netmap_transmit()
* na->nm_notify == netmap_notify()
* 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
- * kring->nm_sync() == netmap_rxsync_from_host_compat
+ * kring->nm_sync() == netmap_rxsync_from_host
* netmap_rxsync_from_host(na, NULL, NULL)
* - tx to host stack
* ioctl(NIOCTXSYNC)/netmap_poll() in process context
- * kring->nm_sync() == netmap_txsync_to_host_compat
+ * kring->nm_sync() == netmap_txsync_to_host
* netmap_txsync_to_host(na)
- * NM_SEND_UP()
- * FreeBSD: na->if_input() == ?? XXX
+ * nm_os_send_up()
+ * FreeBSD: na->if_input() == ether_input()
* linux: netif_rx() with NM_MAGIC_PRIORITY_RX
*
*
- *
* -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
*
* na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
@@ -319,10 +321,11 @@ ports attached to the switch)
* concurrently:
* 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
* kring->nm_sync() == generic_netmap_txsync()
- * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
- * generic_ndo_start_xmit()
- * orig. dev. start_xmit
- * FreeBSD: na->if_transmit() == orig. dev if_transmit
+ * nm_os_generic_xmit_frame()
+ * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
+ * ifp->ndo_start_xmit == generic_ndo_start_xmit()
+ * gna->save_start_xmit == orig. dev. start_xmit
+ * FreeBSD: na->if_transmit() == orig. dev if_transmit
* 2) generic_mbuf_destructor()
* na->nm_notify() == netmap_notify()
* - rx from netmap userspace:
@@ -333,24 +336,15 @@ ports attached to the switch)
* generic_rx_handler()
* mbq_safe_enqueue()
* na->nm_notify() == netmap_notify()
- * - rx from host stack:
- * concurrently:
+ * - rx from host stack
+ * FreeBSD: same as native
+ * Linux: same as native except:
* 1) host stack
- * linux: generic_ndo_start_xmit()
- * netmap_transmit()
- * FreeBSD: ifp->if_input() == netmap_transmit
- * both:
- * na->nm_notify() == netmap_notify()
- * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
- * kring->nm_sync() == netmap_rxsync_from_host_compat
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***