svn commit: r207165 - user/jmallett/octeon/sys/mips/cavium/octe
Juli Mallett
jmallett at FreeBSD.org
Sat Apr 24 22:09:14 UTC 2010
Author: jmallett
Date: Sat Apr 24 22:09:13 2010
New Revision: 207165
URL: http://svn.freebsd.org/changeset/base/207165
Log:
o) Disable REUSE_MBUFS_WITHOUT_FREE rather than commenting out the implementation.
o) Basic transmit support. Enough to send a DHCP request, get a response, and
go on a bit before panicking.
Modified:
user/jmallett/octeon/sys/mips/cavium/octe/cavium-ethernet.h
user/jmallett/octeon/sys/mips/cavium/octe/ethernet-common.c
user/jmallett/octeon/sys/mips/cavium/octe/ethernet-defines.h
user/jmallett/octeon/sys/mips/cavium/octe/ethernet-tx.c
user/jmallett/octeon/sys/mips/cavium/octe/ethernet.c
user/jmallett/octeon/sys/mips/cavium/octe/octe.c
Modified: user/jmallett/octeon/sys/mips/cavium/octe/cavium-ethernet.h
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/cavium-ethernet.h Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/cavium-ethernet.h Sat Apr 24 22:09:13 2010 (r207165)
@@ -92,12 +92,10 @@ typedef struct {
int queue; /* PKO hardware queue for the port */
int fau; /* Hardware fetch and add to count outstanding tx buffers */
int imode; /* Type of port. This is one of the enums in cvmx_helper_interface_mode_t */
- struct mbuf *tx_free_list[16];/* List of outstanding tx buffers per queue */
/* Keeping intercept_cb close to the part of stats that is most often modified helps throughput. */
cvm_oct_callback_t intercept_cb; /* Optional intercept callback defined above */
#if 0
struct ifnet_stats stats; /* Device statistics */
- struct mii_if_info mii_info; /* Generic MII info structure */
#endif
uint64_t link_info; /* Last negotiated link state */
void (*poll)(struct ifnet *ifp); /* Called periodically to check link status */
@@ -117,6 +115,8 @@ typedef struct {
uint8_t mac[6];
int phy_id;
+ struct ifqueue tx_free_queue[16];
+
struct ifmedia media;
int if_flags;
} cvm_oct_private_t;
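The tx_free_list member removed above is replaced by FreeBSD ifqueues. A
minimal sketch of the ifqueue lifecycle the new tx_free_queue[] field relies
on; the name example_q and the depth of 1000 are placeholders, not part of
this commit:

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <sys/mutex.h>
    #include <net/if.h>
    #include <net/if_var.h>

    static struct ifqueue example_q;

    static void
    example_q_init(void)
    {
            /* IF_ENQUEUE/IF_DEQUEUE take ifq_mtx internally. */
            mtx_init(&example_q.ifq_mtx, "example", "example free queue",
                MTX_DEF);
            example_q.ifq_maxlen = 1000;
    }

    static void
    example_q_put(struct mbuf *m)
    {
            if (_IF_QFULL(&example_q))      /* unlocked length check */
                    m_freem(m);
            else
                    IF_ENQUEUE(&example_q, m);
    }

    static void
    example_q_teardown(void)
    {
            IF_DRAIN(&example_q);   /* dequeues and m_freem()s everything */
            mtx_destroy(&example_q.ifq_mtx);
    }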
Modified: user/jmallett/octeon/sys/mips/cavium/octe/ethernet-common.c
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/ethernet-common.c Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/ethernet-common.c Sat Apr 24 22:09:13 2010 (r207165)
@@ -253,11 +253,9 @@ int cvm_oct_common_init(struct ifnet *if
#if 0
if (priv->queue != -1) {
- ifp->hard_start_xmit = cvm_oct_xmit;
if (USE_HW_TCPUDP_CHECKSUM)
ifp->features |= NETIF_F_IP_CSUM;
- } else
- ifp->hard_start_xmit = cvm_oct_xmit_pow;
+ }
#endif
count++;
Modified: user/jmallett/octeon/sys/mips/cavium/octe/ethernet-defines.h
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/ethernet-defines.h Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/ethernet-defines.h Sat Apr 24 22:09:13 2010 (r207165)
@@ -76,11 +76,15 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS
#else
#define USE_32BIT_SHARED 0
#define USE_MBUFS_IN_HW 1
+#if 0
#ifdef CONFIG_NETFILTER
#define REUSE_MBUFS_WITHOUT_FREE 0
#else
#define REUSE_MBUFS_WITHOUT_FREE 1
#endif
+#else
+ #define REUSE_MBUFS_WITHOUT_FREE 0
+#endif
#endif
#define INTERRUPT_LIMIT 10000 /* Max interrupts per second per core */
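Net effect of the wrapper added above: the CONFIG_NETFILTER test becomes dead
code and, in this configuration branch, the preprocessor now sees

    #define USE_32BIT_SHARED 0
    #define USE_MBUFS_IN_HW 1
    #define REUSE_MBUFS_WITHOUT_FREE 0

so the REUSE_MBUFS_WITHOUT_FREE blocks in ethernet-tx.c compile out instead
of being #if 0'd by hand.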
Modified: user/jmallett/octeon/sys/mips/cavium/octe/ethernet-tx.c
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/ethernet-tx.c Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/ethernet-tx.c Sat Apr 24 22:09:13 2010 (r207165)
@@ -72,10 +72,8 @@ int cvm_oct_xmit(struct mbuf *m, struct
int32_t in_use;
int32_t buffers_to_free;
#if REUSE_MBUFS_WITHOUT_FREE
-#if 0
unsigned char *fpa_head;
#endif
-#endif
/* Prefetch the private data structure.
It is larger than one cache line */
@@ -115,7 +113,7 @@ int cvm_oct_xmit(struct mbuf *m, struct
68 bytes whenever we are in half duplex mode. We don't handle
the case of having a small packet but no room to add the padding.
The kernel should always give us at least a cache line */
- if ((m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
@@ -124,27 +122,25 @@ int cvm_oct_xmit(struct mbuf *m, struct
/* We only need to pad the packet in half duplex mode */
gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
if (gmx_prt_cfg.s.duplex == 0) {
- panic("%s: small packet padding not yet implemented.", __func__);
-#if 0
- int add_bytes = 64 - m->len;
- if ((m_tail_pointer(m) + add_bytes) <= m_end_pointer(m))
- memset(__m_put(m, add_bytes), 0, add_bytes);
-#endif
+ static uint8_t pad[64];
+
+ if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
+ printf("%s: unable to pad small packet.", __func__);
}
}
}
/* Build the PKO buffer pointer */
- /*
- * XXX/juli
- * Implement mbuf loading.
- */
-#if 0
+ if (m->m_pkthdr.len != m->m_len) {
+ m = m_defrag(m, M_DONTWAIT);
+ if (m->m_pkthdr.len != m->m_len)
+ panic("%s: need to load multiple segments.", __func__);
+ }
+
hw_buffer.u64 = 0;
hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
hw_buffer.s.pool = 0;
- hw_buffer.s.size = (unsigned long)m_end_pointer(m) - (unsigned long)m->head;
-#endif
+ hw_buffer.s.size = m->m_len;
/* Build the PKO command */
pko_command.u64 = 0;
@@ -164,7 +160,6 @@ int cvm_oct_xmit(struct mbuf *m, struct
the define REUSE_MBUFS_WITHOUT_FREE. The reuse of buffers has
shown a 25% increase in performance under some loads */
#if REUSE_MBUFS_WITHOUT_FREE
-#if 0
fpa_head = m->head + 128 - ((unsigned long)m->head&0x7f);
if (__predict_false(m->data < fpa_head)) {
/*
@@ -248,9 +243,6 @@ int cvm_oct_xmit(struct mbuf *m, struct
#endif /* CONFIG_NET_SCHED */
dont_put_mbuf_in_hw:
-#else
-#endif
- panic("%s: not ready for REUSE_MBUFS_WITHOUT_FREE yet.", __func__);
#endif /* REUSE_MBUFS_WITHOUT_FREE */
/* Check if we can use the hardware checksumming */
@@ -285,19 +277,12 @@ dont_put_mbuf_in_hw:
cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);
/* Drop this packet if we have too many already queued to the HW */
-#if 0
- if ((m_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
- /*
- DEBUGPRINT("%s: Tx dropped. Too many queued\n", if_name(ifp));
- */
+ if (_IF_QFULL(&priv->tx_free_queue[qos])) {
dropped = 1;
}
/* Send the packet to the output queue */
else
-#else
- panic("%s: free queues really not implemented.", __func__);
-#endif
- if ((cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
+ if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
dropped = 1;
}
@@ -308,7 +293,7 @@ dont_put_mbuf_in_hw:
cvmx_scratch_write64(CVMX_SCR_SCRATCH+8, old_scratch2);
}
- if ((dropped)) {
+ if (__predict_false(dropped)) {
m_freem(m);
cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
ifp->if_oerrors++;
@@ -316,30 +301,26 @@ dont_put_mbuf_in_hw:
if (USE_MBUFS_IN_HW) {
/* Put this packet on the queue to be freed later */
if (pko_command.s.dontfree)
- panic("%s: need to queue mbuf to free it later.", __func__);
+ IF_ENQUEUE(&priv->tx_free_queue[qos], m);
else {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
}
} else {
/* Put this packet on the queue to be freed later */
- panic("%s: need to queue mbuf to free it later. (2)", __func__);
+ IF_ENQUEUE(&priv->tx_free_queue[qos], m);
}
}
- /* Free mbufs not in use by the hardware, possibly two at a time */
- panic("%s: need to free queued mbufs.", __func__);
-#if 0
- if (m_queue_len(&priv->tx_free_list[qos]) > in_use) {
- spin_lock(&priv->tx_free_list[qos].lock);
- /* Check again now that we have the lock. It might have changed */
- if (m_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_m(__m_dequeue(&priv->tx_free_list[qos]));
- if (m_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_m(__m_dequeue(&priv->tx_free_list[qos]));
- spin_unlock(&priv->tx_free_list[qos].lock);
+ /* Free mbufs not in use by the hardware */
+ if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
+ IF_LOCK(&priv->tx_free_queue[qos]);
+ while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
+ _IF_DEQUEUE(&priv->tx_free_queue[qos], m);
+ m_freem(m);
+ }
+ IF_UNLOCK(&priv->tx_free_queue[qos]);
}
-#endif
return 0;
}
@@ -360,7 +341,7 @@ int cvm_oct_xmit_pow(struct mbuf *m, str
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
- if ((work == NULL)) {
+ if (__predict_false(work == NULL)) {
DEBUGPRINT("%s: Failed to allocate a work queue entry\n", if_name(ifp));
ifp->if_oerrors++;
m_freem(m);
@@ -369,7 +350,7 @@ int cvm_oct_xmit_pow(struct mbuf *m, str
/* Get a packet buffer */
packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
- if ((packet_buffer == NULL)) {
+ if (__predict_false(packet_buffer == NULL)) {
DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
if_name(ifp));
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
@@ -389,11 +370,7 @@ int cvm_oct_xmit_pow(struct mbuf *m, str
/* We have to copy the packet since whoever processes this packet
will free it to a hardware pool. We can't use the trick of
counting outstanding packets like in cvm_oct_xmit */
-#if 0
- memcpy(copy_location, m->data, m->len);
-#else
- panic("%s: need to implement mbuf loading.", __func__);
-#endif
+ m_copydata(m, 0, m->m_pkthdr.len, copy_location);
/* Fill in some of the work queue fields. We may need to add more
if the software at the other end needs them */
@@ -414,6 +391,7 @@ int cvm_oct_xmit_pow(struct mbuf *m, str
work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
work->packet_ptr.s.back = (copy_location - packet_buffer)>>7;
+ panic("%s: POW transmit not quite implemented yet.", __func__);
#if 0
if (m->protocol == htons(ETH_P_IP)) {
work->word2.s.ip_offset = 14;
@@ -540,19 +518,19 @@ int cvm_oct_transmit_qos(struct ifnet *i
pko_command.s.total_bytes = work->len;
/* Check if we can use the hardware checksumming */
- if ((work->word2.s.not_IP || work->word2.s.IP_exc))
+ if (__predict_false(work->word2.s.not_IP || work->word2.s.IP_exc))
pko_command.s.ipoffp1 = 0;
else
pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
/* Send the packet to the output queue */
- if ((cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
+ if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
dropped = -1;
}
critical_exit();
- if ((dropped)) {
+ if (__predict_false(dropped)) {
if (do_free)
cvm_oct_free_work(work);
ifp->if_oerrors++;
@@ -571,18 +549,10 @@ int cvm_oct_transmit_qos(struct ifnet *i
*/
void cvm_oct_tx_shutdown(struct ifnet *ifp)
{
-#if 0
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
- unsigned long flags;
int qos;
for (qos = 0; qos < 16; qos++) {
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (m_queue_len(&priv->tx_free_list[qos]))
- dev_kfree_m_any(__m_dequeue(&priv->tx_free_list[qos]));
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ IF_DRAIN(&priv->tx_free_queue[qos]);
}
-#else
- panic("%s: not yet implemented.", __func__);
-#endif
}
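The rewritten transmit path loads a single PKO buffer pointer
(hw_buffer.s.addr/size), so mbuf chains are flattened with m_defrag() and
runt frames are padded in place with m_append(). The same preparation as a
standalone helper, as a sketch only: the name example_tx_prepare is
hypothetical, and unlike the committed code it fails gracefully where the
commit still panics on a multi-segment result:

    static struct mbuf *
    example_tx_prepare(struct mbuf *m)
    {
            static uint8_t pad[64];         /* zeroed padding source */
            struct mbuf *n;

            /* Pad runt frames to the 64-byte minimum in software. */
            if (m->m_pkthdr.len < 64 &&
                !m_append(m, sizeof pad - m->m_pkthdr.len, pad)) {
                    m_freem(m);
                    return (NULL);
            }

            /* The PKO descriptor addresses one contiguous buffer, so
               collapse the chain to a single segment. */
            if (m->m_pkthdr.len != m->m_len) {
                    n = m_defrag(m, M_DONTWAIT);
                    if (n == NULL) {
                            m_freem(m);     /* m_defrag() keeps the original */
                            return (NULL);
                    }
                    m = n;
                    if (m->m_pkthdr.len != m->m_len) {
                            /* Still chained: packet exceeds one cluster. */
                            m_freem(m);
                            return (NULL);
                    }
            }
            return (m);
    }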
Modified: user/jmallett/octeon/sys/mips/cavium/octe/ethernet.c
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/ethernet.c Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/ethernet.c Sat Apr 24 22:09:13 2010 (r207165)
@@ -396,12 +396,7 @@ int cvm_oct_init_module(device_t bus)
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
- if_initname(ifp, "pow", 0);
device_set_desc(dev, "Cavium Octeon POW Ethernet\n");
-#if 0
- for (qos = 0; qos < 16; qos++)
- m_queue_head_init(&priv->tx_free_list[qos]);
-#endif
ifp->if_softc = priv;
@@ -452,10 +447,6 @@ int cvm_oct_init_module(device_t bus)
priv->queue = cvmx_pko_get_base_queue(priv->port);
priv->intercept_cb = NULL;
priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
-#if 0
- for (qos = 0; qos < 16; qos++)
- m_queue_head_init(&priv->tx_free_list[qos]);
-#endif
for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
Modified: user/jmallett/octeon/sys/mips/cavium/octe/octe.c
==============================================================================
--- user/jmallett/octeon/sys/mips/cavium/octe/octe.c Sat Apr 24 22:01:15 2010 (r207164)
+++ user/jmallett/octeon/sys/mips/cavium/octe/octe.c Sat Apr 24 22:09:13 2010 (r207165)
@@ -48,6 +48,7 @@
#include <sys/sockio.h>
#include <sys/sysctl.h>
+#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
@@ -60,7 +61,9 @@
#include "cavium-ethernet.h"
#include "ethernet-common.h"
+#include "ethernet-defines.h"
#include "ethernet-mdio.h"
+#include "ethernet-tx.h"
#include "miibus_if.h"
@@ -74,6 +77,7 @@ static int octe_miibus_writereg(device_
static void octe_init(void *);
static void octe_stop(void *);
+static void octe_start(struct ifnet *);
static int octe_mii_medchange(struct ifnet *);
static void octe_mii_medstat(struct ifnet *, struct ifmediareq *);
@@ -129,6 +133,7 @@ octe_attach(device_t dev)
{
struct ifnet *ifp;
cvm_oct_private_t *priv;
+ unsigned qos;
int error;
priv = device_get_softc(dev);
@@ -153,11 +158,21 @@ octe_attach(device_t dev)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = octe_init;
ifp->if_ioctl = octe_ioctl;
+ ifp->if_start = octe_start;
priv->if_flags = ifp->if_flags;
+ for (qos = 0; qos < 16; qos++) {
+ mtx_init(&priv->tx_free_queue[qos].ifq_mtx, ifp->if_xname, "octe tx free queue", MTX_DEF);
+ IFQ_SET_MAXLEN(&priv->tx_free_queue[qos], MAX_OUT_QUEUE_DEPTH);
+ }
+
ether_ifattach(ifp, priv->mac);
+ IFQ_SET_MAXLEN(&ifp->if_snd, 16);
+ ifp->if_snd.ifq_drv_maxlen = 16; /* XXX */
+ IFQ_SET_READY(&ifp->if_snd);
+
return (0);
}
@@ -211,6 +226,9 @@ octe_init(void *arg)
if (priv->miibus != NULL)
mii_mediachg(device_get_softc(priv->miibus));
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
@@ -224,6 +242,46 @@ octe_stop(void *arg)
if (priv->stop != NULL)
priv->stop(ifp);
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+}
+
+static void
+octe_start(struct ifnet *ifp)
+{
+ cvm_oct_private_t *priv;
+ struct mbuf *m;
+ int error;
+
+ priv = ifp->if_softc;
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
+ return;
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+
+ if (priv->queue != -1) {
+ error = cvm_oct_xmit(m, ifp);
+ } else {
+ error = cvm_oct_xmit_pow(m, ifp);
+ }
+
+ if (error != 0) {
+ /*
+ * XXX
+ * Need to implement freeing and clearing of
+ * OACTIVE at some point.
+ *
+ * XXX
+ * Increment errors? Maybe make xmit functions
+ * not free the packets?
+ */
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ }
+
+ BPF_MTAP(ifp, m);
+ }
}
static int
@@ -283,7 +341,7 @@ octe_medstat(struct ifnet *ifp, struct i
link_info.u64 = priv->link_info;
- if (!link_info.s.link_up)
+ if (!link_info.s.link_up)
return;
ifm->ifm_status |= IFM_ACTIVE;
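The XXX block in octe_start() above leaves open what to do when the xmit
routines fail. A sketch of the conventional if_start() resolution, under the
assumption (not true of this commit, whose xmit paths free the mbuf and
return 0 even on drops) that the transmit routine reports failure without
consuming the packet:

    static void
    example_start(struct ifnet *ifp)
    {
            struct mbuf *m;

            if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING)
                    return;

            for (;;) {
                    IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                    if (m == NULL)
                            break;
                    if (cvm_oct_xmit(m, ifp) != 0) {
                            /* Requeue at the head and back off until the
                               hardware drains; clear IFF_DRV_OACTIVE from
                               the tx-done path and rerun if_start then. */
                            IFQ_DRV_PREPEND(&ifp->if_snd, m);
                            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                            break;
                    }
            }
    }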