svn commit: r345587 - in stable/12: contrib/ofed/libcxgb4 sys/dev/cxgbe sys/dev/cxgbe/iw_cxgbe
Navdeep Parhar
np at FreeBSD.org
Wed Mar 27 18:46:51 UTC 2019
Author: np
Date: Wed Mar 27 18:46:47 2019
New Revision: 345587
URL: https://svnweb.freebsd.org/changeset/base/345587
Log:
MFC some iw_cxgbe fixes (r339667, r339891, r340063, r342266, r342270,
r342272, r342288-r342289, r343569, r344617, and r345307).
r339667:
cxgbe/iw_cxgbe: save the ep in the driver-private provider_data field.
Submitted by: Lily Wang @ NetApp
r339891:
cxgbe/iw_cxgbe: Install the socket upcall before calling soconnect to
ensure that it always runs when soisconnected does.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r340063:
cxgbe/iw_cxgbe: Suppress spurious "Unexpected streaming data ..."
messages.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r342266:
cxgbe/iw_cxgbe: Use DSGLs to write to card's memory when appropriate.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r342270:
cxgbe/iw_cxgbe: Add a knob for testing that lets iWARP connections cycle
through 4-tuples quickly.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r342272:
cxgbe/iw_cxgbe: Use negative errno values when interfacing with linuxkpi/OFED.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r342288:
cxgbe/iw_cxgbe: Do not terminate CTRx messages with \n.
r342289:
cxgbe/iw_cxgbe: Remove redundant CTRs from c4iw_alloc/c4iw_rdev_open.
This information is readily available elsewhere.
Sponsored by: Chelsio Communications
r343569:
cxgbe/iw_cxgbe: Fix an address calculation in the memory registration code that
was added in r342266.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Sponsored by: Chelsio Communications
r344617:
libcxgb4: Don't spam stderr. Write combining is not enabled by default
by the FreeBSD driver.
r345307:
iw_cxgbe: Remove unused smac_idx from the ep structure.
Submitted by: Krishnamraju Eraparaju @ Chelsio
Modified:
stable/12/contrib/ofed/libcxgb4/dev.c
stable/12/sys/dev/cxgbe/iw_cxgbe/cm.c
stable/12/sys/dev/cxgbe/iw_cxgbe/cq.c
stable/12/sys/dev/cxgbe/iw_cxgbe/device.c
stable/12/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
stable/12/sys/dev/cxgbe/iw_cxgbe/mem.c
stable/12/sys/dev/cxgbe/iw_cxgbe/provider.c
stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c
stable/12/sys/dev/cxgbe/iw_cxgbe/t4.h
stable/12/sys/dev/cxgbe/t4_main.c
Directory Properties:
stable/12/ (props changed)
Modified: stable/12/contrib/ofed/libcxgb4/dev.c
==============================================================================
--- stable/12/contrib/ofed/libcxgb4/dev.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/contrib/ofed/libcxgb4/dev.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -202,8 +202,6 @@ static struct ibv_context *c4iw_alloc_context(struct i
* loader tunable "hw.cxl.write_combine=0"
*/
if (t5_en_wc && !context->status_page->wc_supported) {
- fprintf(stderr, "iw_cxgb4 driver doesn't support Write "
- "Combine, so regular DB writes will be used\n");
t5_en_wc = 0;
}
}
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/cm.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/cm.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/cm.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -174,7 +174,6 @@ static void process_newconn(struct c4iw_listen_ep *mas
free(__a, M_SONAME); \
} while (0)
-#ifdef KTR
static char *states[] = {
"idle",
"listen",
@@ -190,7 +189,6 @@ static char *states[] = {
"dead",
NULL,
};
-#endif
static void deref_cm_id(struct c4iw_ep_common *epc)
{
@@ -431,7 +429,7 @@ static void process_timeout(struct c4iw_ep *ep)
abort = 0;
break;
default:
- CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u\n"
+ CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
, __func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
@@ -843,7 +841,7 @@ setiwsockopt(struct socket *so)
sopt.sopt_val = (caddr_t)&on;
sopt.sopt_valsize = sizeof on;
sopt.sopt_td = NULL;
- rc = sosetopt(so, &sopt);
+ rc = -sosetopt(so, &sopt);
if (rc) {
log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
__func__, so, rc);
@@ -883,7 +881,9 @@ uninit_iwarp_socket(struct socket *so)
static void
process_data(struct c4iw_ep *ep)
{
+ int ret = 0;
int disconnect = 0;
+ struct c4iw_qp_attributes attrs = {0};
CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
@@ -898,9 +898,16 @@ process_data(struct c4iw_ep *ep)
/* Refered in process_newconn() */
c4iw_put_ep(&ep->parent_ep->com);
break;
+ case FPDU_MODE:
+ MPASS(ep->com.qp != NULL);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ if (ret != -EINPROGRESS)
+ disconnect = 1;
+ break;
default:
- if (sbused(&ep->com.so->so_rcv))
- log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
+ log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
"state %d, so %p, so_state 0x%x, sbused %u\n",
__func__, ep, ep->com.state, ep->com.so,
ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
@@ -1013,7 +1020,7 @@ process_newconn(struct c4iw_listen_ep *master_lep, str
ret = soaccept(new_so, (struct sockaddr **)&remote);
if (ret != 0) {
CTR4(KTR_IW_CXGBE,
- "%s:listen sock:%p, new sock:%p, ret:%d\n",
+ "%s:listen sock:%p, new sock:%p, ret:%d",
__func__, master_lep->com.so, new_so, ret);
if (remote != NULL)
free(remote, M_SONAME);
@@ -1180,7 +1187,24 @@ process_socket_event(struct c4iw_ep *ep)
}
/* rx data */
- process_data(ep);
+ if (sbused(&ep->com.so->so_rcv)) {
+ process_data(ep);
+ return;
+ }
+
+ /* Socket events for 'MPA Request Received' and 'Close Complete'
+ * were already processed earlier in their previous events handlers.
+ * Hence, these socket events are skipped.
+ * And any other socket events must have handled above.
+ */
+ MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));
+
+ if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
+ log(LOG_ERR, "%s: Unprocessed socket event so %p, "
+ "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
+ __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
+ ep, states[state]);
+
}
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
@@ -1241,6 +1265,18 @@ static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
"TCP send window in bytes (default = 128KB)");
+int use_dsgl = 1;
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
+ "Use DSGL for PBL/FastReg (default=1)");
+
+int inline_threshold = 128;
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
+ "inline vs dsgl threshold (default=128)");
+
+static int reuseaddr = 0;
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
+ "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections(default = 0)");
+
static void
start_ep_timer(struct c4iw_ep *ep)
{
@@ -1615,7 +1651,7 @@ send_abort(struct c4iw_ep *ep)
sopt.sopt_val = (caddr_t)&l;
sopt.sopt_valsize = sizeof l;
sopt.sopt_td = NULL;
- rc = sosetopt(so, &sopt);
+ rc = -sosetopt(so, &sopt);
if (rc != 0) {
log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
__func__, so, rc);
@@ -1633,6 +1669,7 @@ send_abort(struct c4iw_ep *ep)
* handler(not yet implemented) of iw_cxgbe driver.
*/
release_ep_resources(ep);
+ ep->com.state = DEAD;
return (0);
}
@@ -2272,7 +2309,7 @@ process_mpa_request(struct c4iw_ep *ep)
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
- CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u\n",
+ CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
__func__, ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
@@ -2426,7 +2463,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_c
ep->ird = 1;
}
- CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__,
+ CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
ep->ird, ep->ord);
ep->com.cm_id = cm_id;
@@ -2485,8 +2522,9 @@ static int
c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
{
int ret;
- int size;
+ int size, on;
struct socket *sock = NULL;
+ struct sockopt sopt;
ret = sock_create_kern(laddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
@@ -2496,7 +2534,34 @@ c4iw_sock_create(struct sockaddr_storage *laddr, struc
return ret;
}
- ret = sobind(sock, (struct sockaddr *)laddr, curthread);
+ if (reuseaddr) {
+ bzero(&sopt, sizeof(struct sockopt));
+ sopt.sopt_dir = SOPT_SET;
+ sopt.sopt_level = SOL_SOCKET;
+ sopt.sopt_name = SO_REUSEADDR;
+ on = 1;
+ sopt.sopt_val = &on;
+ sopt.sopt_valsize = sizeof(on);
+ ret = -sosetopt(sock, &sopt);
+ if (ret != 0) {
+ log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
+ "failed with %d.\n", __func__, sock, ret);
+ }
+ bzero(&sopt, sizeof(struct sockopt));
+ sopt.sopt_dir = SOPT_SET;
+ sopt.sopt_level = SOL_SOCKET;
+ sopt.sopt_name = SO_REUSEPORT;
+ on = 1;
+ sopt.sopt_val = &on;
+ sopt.sopt_valsize = sizeof(on);
+ ret = -sosetopt(sock, &sopt);
+ if (ret != 0) {
+ log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
+ "failed with %d.\n", __func__, sock, ret);
+ }
+ }
+
+ ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
if (ret) {
CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p",
__func__, ret);
@@ -2540,6 +2605,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_
goto out;
}
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ cm_id->provider_data = ep;
init_timer(&ep->timer);
ep->plen = conn_param->private_data_len;
@@ -2600,22 +2666,24 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_
goto fail;
setiwsockopt(ep->com.so);
+ init_iwarp_socket(ep->com.so, &ep->com);
err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
ep->com.thread);
- if (!err) {
- init_iwarp_socket(ep->com.so, &ep->com);
- goto out;
- } else
+ if (err)
goto fail_free_so;
+ CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
+ return 0;
fail_free_so:
+ uninit_iwarp_socket(ep->com.so);
+ ep->com.state = DEAD;
sock_release(ep->com.so);
fail:
deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
ep = NULL;
out:
- CTR2(KTR_IW_CXGBE, "%s:ccE ret:%d", __func__, err);
+ CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
return err;
}
@@ -2677,7 +2745,7 @@ c4iw_create_listen(struct iw_cm_id *cm_id, int backlog
goto fail;
}
- rc = solisten(lep->com.so, backlog, curthread);
+ rc = -solisten(lep->com.so, backlog, curthread);
if (rc) {
CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
__func__, lep->com.so, rc);
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/cq.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/cq.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/cq.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -671,7 +671,7 @@ proc_cqe:
BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
- CTR2(KTR_IW_CXGBE, "%s completing sq idx %u\n",
+ CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
__func__, wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
t4_sq_consume(wq);
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/device.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/device.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/device.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -122,24 +122,7 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = udb_density - 1;
rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp;
rdev->cqmask = ucq_density - 1;
- CTR5(KTR_IW_CXGBE, "%s dev %s stag start 0x%0x size 0x%0x num stags %d",
- __func__, device_get_nameunit(sc->dev), sc->vres.stag.start,
- sc->vres.stag.size, c4iw_num_stags(rdev));
- CTR5(KTR_IW_CXGBE, "%s pbl start 0x%0x size 0x%0x"
- " rq start 0x%0x size 0x%0x", __func__,
- sc->vres.pbl.start, sc->vres.pbl.size,
- sc->vres.rq.start, sc->vres.rq.size);
- CTR5(KTR_IW_CXGBE, "%s:qp qid start %u size %u cq qid start %u size %u",
- __func__, sc->vres.qp.start, sc->vres.qp.size,
- sc->vres.cq.start, sc->vres.cq.size);
- /*TODO
- CTR5(KTR_IW_CXGBE, "%s udb %pR db_reg %p gts_reg %p"
- "qpmask 0x%x cqmask 0x%x", __func__,
- db_reg,gts_reg,rdev->qpmask, rdev->cqmask);
- */
-
-
if (c4iw_num_stags(rdev) == 0) {
rc = -EINVAL;
goto err1;
@@ -235,11 +218,6 @@ c4iw_alloc(struct adapter *sc)
iwsc->rdev.adap = sc;
/* init various hw-queue params based on lld info */
- CTR3(KTR_IW_CXGBE, "%s: Ing. padding boundary is %d, "
- "egrsstatuspagesize = %d", __func__,
- sc->params.sge.pad_boundary,
- sc->params.sge.spg_len);
-
iwsc->rdev.hw_queue.t4_eq_status_entries =
sc->params.sge.spg_len / EQ_ESIZE;
iwsc->rdev.hw_queue.t4_max_eq_size = 65520;
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Wed Mar 27 18:46:47 2019 (r345587)
@@ -70,6 +70,9 @@
#define KTR_IW_CXGBE KTR_SPARE3
extern int c4iw_debug;
+extern int use_dsgl;
+extern int inline_threshold;
+
#define PDBG(fmt, args...) \
do { \
if (c4iw_debug) \
@@ -856,7 +859,6 @@ struct c4iw_ep {
unsigned int mpa_pkt_len;
u32 ird;
u32 ord;
- u32 smac_idx;
u32 tx_chan;
u32 mtu;
u16 mss;
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/mem.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/mem.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/mem.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -45,9 +45,9 @@ __FBSDID("$FreeBSD$");
#include <common/t4_msg.h>
#include "iw_cxgbe.h"
-int use_dsgl = 1;
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
@@ -57,10 +57,60 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
}
static int
-write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data, int wait)
{
struct adapter *sc = rdev->adap;
struct ulp_mem_io *ulpmc;
+ struct ulptx_sgl *sgl;
+ u8 wr_len;
+ int ret = 0;
+ struct c4iw_wr_wait wr_wait;
+ struct wrqe *wr;
+
+ addr &= 0x7FFFFFF;
+
+ if (wait)
+ c4iw_init_wr_wait(&wr_wait);
+ wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+
+ wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
+ if (wr == NULL)
+ return -ENOMEM;
+ ulpmc = wrtod(wr);
+
+ memset(ulpmc, 0, wr_len);
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
+ ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
+ (wait ? F_FW_WR_COMPL : 0));
+ ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
+ ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ V_T5_ULP_MEMIO_ORDER(1) |
+ V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
+ ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
+ ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(ulpmc + 1);
+ sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(1));
+ sgl->len0 = cpu_to_be32(len);
+ sgl->addr0 = cpu_to_be64((u64)data);
+
+ t4_wrq_tx(sc, wr);
+
+ if (wait)
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
+ return ret;
+}
+
+
+static int
+_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+ struct adapter *sc = rdev->adap;
+ struct ulp_mem_io *ulpmc;
struct ulptx_idata *ulpsc;
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
@@ -84,7 +134,7 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u3
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
- return (0);
+ return -ENOMEM;
ulpmc = wrtod(wr);
memset(ulpmc, 0, wr_len);
@@ -93,7 +143,8 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u3
if (i == (num_wqe-1)) {
ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
F_FW_WR_COMPL);
- ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+ ulpmc->wr.wr_lo =
+ (__force __be64)(unsigned long) &wr_wait;
} else
ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
ulpmc->wr.wr_mid = cpu_to_be32(
@@ -126,6 +177,69 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u3
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
+
+static int
+_c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+ struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
+ u32 remain = len;
+ u32 dmalen;
+ int ret = 0;
+ dma_addr_t daddr;
+ dma_addr_t save;
+
+ daddr = dma_map_single(rhp->ibdev.dma_device, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(rhp->ibdev.dma_device, daddr))
+ return -1;
+ save = daddr;
+
+ while (remain > inline_threshold) {
+ if (remain < T4_ULPTX_MAX_DMA) {
+ if (remain & ~T4_ULPTX_MIN_IO)
+ dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+ else
+ dmalen = remain;
+ } else
+ dmalen = T4_ULPTX_MAX_DMA;
+ remain -= dmalen;
+ ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
+ (void *)daddr, !remain);
+ if (ret)
+ goto out;
+ addr += dmalen >> 5;
+ data = (u8 *)data + dmalen;
+ daddr = daddr + dmalen;
+ }
+ if (remain)
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+ dma_unmap_single(rhp->ibdev.dma_device, save, len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len byte of memory to zero.
+ */
+static int
+write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
+ if (len > inline_threshold) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ log(LOG_ERR, "%s: dma map "
+ "failure (non fatal)\n", __func__);
+ return _c4iw_write_mem_inline(rdev, addr, len,
+ data);
+ } else
+ return 0;
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
/*
* Build and write a TPT entry.
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/provider.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/provider.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/provider.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -46,7 +46,7 @@ __FBSDID("$FreeBSD$");
#include "iw_cxgbe.h"
#include "user.h"
-extern int use_dsgl;
+
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -65,7 +65,7 @@ struct cpl_set_tcb_rpl;
#include "iw_cxgbe.h"
#include "user.h"
-extern int use_dsgl;
+
static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
static int max_fr_immd = T4_MAX_FR_IMMD;//SYSCTL parameter later...
@@ -576,7 +576,7 @@ static void free_qp_work(struct work_struct *work)
ucontext = qhp->ucontext;
rhp = qhp->rhp;
- CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p\n", __func__,
+ CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p", __func__,
qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -1475,6 +1475,22 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_q
if (qhp->attr.state == attrs->next_state)
goto out;
+ /* Return EINPROGRESS if QP is already in transition state.
+ * Eg: CLOSING->IDLE transition or *->ERROR transition.
+ * This can happen while connection is switching(due to rdma_fini)
+ * from iWARP/RDDP to TOE mode and any inflight RDMA RX data will
+ * reach TOE driver -> TCP stack -> iWARP driver. In this way
+ * iWARP driver keep receiving inflight RDMA RX data until socket
+ * is closed or aborted. And if iWARP CM is in FPDU sate, then
+ * it tries to put QP in TERM state and disconnects endpoint.
+ * But as QP is already in transition state, this event is ignored.
+ */
+ if ((qhp->attr.state >= C4IW_QP_STATE_ERROR) &&
+ (attrs->next_state == C4IW_QP_STATE_TERMINATE)) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
switch (qhp->attr.state) {
case C4IW_QP_STATE_IDLE:
switch (attrs->next_state) {
@@ -1862,10 +1878,10 @@ c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_att
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
- CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u\n",
+ CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u",
__func__, qhp->wq.sq.qid,
qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
- CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u\n",
+ CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u",
__func__, qhp->wq.rq.qid,
qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/t4.h
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/t4.h Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/t4.h Wed Mar 27 18:46:47 2019 (r345587)
@@ -490,13 +490,13 @@ t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *
/* Flush host queue memory writes. */
wmb();
if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
- CTR2(KTR_IW_CXGBE, "%s: WC wq->sq.pidx = %d\n",
+ CTR2(KTR_IW_CXGBE, "%s: WC wq->sq.pidx = %d",
__func__, wq->sq.pidx);
pio_copy((u64 __iomem *)
((u64)wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe);
} else {
- CTR2(KTR_IW_CXGBE, "%s: DB wq->sq.pidx = %d\n",
+ CTR2(KTR_IW_CXGBE, "%s: DB wq->sq.pidx = %d",
__func__, wq->sq.pidx);
writel(V_PIDX_T5(inc) | V_QID(wq->sq.bar2_qid),
(void __iomem *)((u64)wq->sq.bar2_va +
@@ -515,12 +515,12 @@ t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv
/* Flush host queue memory writes. */
wmb();
if (wc && inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
- CTR2(KTR_IW_CXGBE, "%s: WC wq->rq.pidx = %d\n",
+ CTR2(KTR_IW_CXGBE, "%s: WC wq->rq.pidx = %d",
__func__, wq->rq.pidx);
pio_copy((u64 __iomem *)((u64)wq->rq.bar2_va +
SGE_UDB_WCDOORBELL), (u64 *)wqe);
} else {
- CTR2(KTR_IW_CXGBE, "%s: DB wq->rq.pidx = %d\n",
+ CTR2(KTR_IW_CXGBE, "%s: DB wq->rq.pidx = %d",
__func__, wq->rq.pidx);
writel(V_PIDX_T5(inc) | V_QID(wq->rq.bar2_qid),
(void __iomem *)((u64)wq->rq.bar2_va +
@@ -604,7 +604,7 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
- CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u\n",
+ CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
__func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
@@ -676,7 +676,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, str
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
if (cq->sw_in_use == cq->size) {
- CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u\n",
+ CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
__func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
Modified: stable/12/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/12/sys/dev/cxgbe/t4_main.c Wed Mar 27 18:22:08 2019 (r345586)
+++ stable/12/sys/dev/cxgbe/t4_main.c Wed Mar 27 18:46:47 2019 (r345587)
@@ -4058,6 +4058,18 @@ get_params__post_init(struct adapter *sc)
else
sc->params.filter2_wr_support = 0;
+ /*
+ * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
+ * This is queried separately for the same reason as other params above.
+ */
+ param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ val[0] = 0;
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0)
+ sc->params.ulptx_memwrite_dsgl = val[0] != 0;
+ else
+ sc->params.ulptx_memwrite_dsgl = false;
+
/* get capabilites */
bzero(&caps, sizeof(caps));
caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
More information about the svn-src-stable
mailing list