svn commit: r314775 - stable/11/sys/dev/cxgbe/iw_cxgbe
Navdeep Parhar
np at FreeBSD.org
Mon Mar 6 15:15:01 UTC 2017
Author: np
Date: Mon Mar 6 15:14:59 2017
New Revision: 314775
URL: https://svnweb.freebsd.org/changeset/base/314775
Log:
MFC r314509 and r314578.
r314509:
cxgbe/iw_cxgbe: Do not check the size of the memory region being
registered. T4/5/6 have no internal limit on this size. This is
probably a copy-paste from the T3 iw_cxgb driver.
r314578:
cxgbe/iw_cxgbe: Implement sq/rq drain operation.
ULPs can set a qp's state to ERROR and then post a work request on the
sq and/or rq. When the reply for that work request comes back it is
guaranteed that all previous work requests posted on that queue have
been drained.
Sponsored by: Chelsio Communications
Modified:
stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c
stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c
stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c
stable/11/sys/dev/cxgbe/iw_cxgbe/t4.h
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c Mon Mar 6 14:56:57 2017 (r314774)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c Mon Mar 6 15:14:59 2017 (r314775)
@@ -450,6 +450,15 @@ static int poll_cq(struct t4_wq *wq, str
}
/*
+ * Special cqe for drain WR completions...
+ */
+ if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+ *cqe = *hw_cqe;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -665,6 +674,9 @@ static int c4iw_poll_cq_one(struct c4iw_
case FW_RI_FAST_REGISTER:
wc->opcode = IB_WC_FAST_REG_MR;
break;
+ case C4IW_DRAIN_OPCODE:
+ wc->opcode = IB_WC_SEND;
+ break;
default:
printf("Unexpected opcode %d "
"in the CQE received for QPID = 0x%0x\n",
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Mon Mar 6 14:56:57 2017 (r314774)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Mon Mar 6 15:14:59 2017 (r314775)
@@ -559,6 +559,8 @@ static inline int to_ib_qp_state(int c4i
return IB_QPS_ERR;
}
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c Mon Mar 6 14:56:57 2017 (r314774)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c Mon Mar 6 15:14:59 2017 (r314775)
@@ -341,9 +341,6 @@ static int build_phys_page_list(struct i
PAGE_SIZE - 1) & PAGE_MASK;
}
- if (*total_size > 0xFFFFFFFFULL)
- return -ENOMEM;
-
/* Find largest page shift we can use to cover buffers */
for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
if ((1ULL << *shift) & mask)
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c Mon Mar 6 14:56:57 2017 (r314774)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c Mon Mar 6 15:14:59 2017 (r314775)
@@ -577,6 +577,66 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
wake_up(&(to_c4iw_qp(qp)->wait));
}
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *schp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ cq = &schp->cq;
+
+ PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+ V_CQE_TYPE(1) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&schp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&schp->lock, flag);
+
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *rchp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ cq = &rchp->cq;
+
+ PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+ V_CQE_TYPE(0) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&rchp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -595,7 +655,8 @@ int c4iw_post_send(struct ib_qp *ibqp, s
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ complete_sq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -708,7 +769,8 @@ int c4iw_post_receive(struct ib_qp *ibqp
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ complete_rq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -1303,7 +1365,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp,
}
break;
case C4IW_QP_STATE_CLOSING:
- if (!internal) {
+
+ /*
+ * Allow kernel users to move to ERROR for qp draining.
+ */
+ if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+ C4IW_QP_STATE_ERROR)) {
ret = -EINVAL;
goto out;
}
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/t4.h
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/t4.h Mon Mar 6 14:56:57 2017 (r314774)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/t4.h Mon Mar 6 15:14:59 2017 (r314775)
@@ -203,6 +203,7 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ u64 drain_cookie;
} u;
__be64 reserved;
__be64 bits_type_ts;
@@ -261,6 +262,7 @@ struct t4_cqe {
/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+#define CQE_DRAIN_COOKIE(x) (x)->u.drain_cookie;
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
More information about the svn-src-stable
mailing list