svn commit: r316122 - in stable/11/sys/dev/cxgbe: . iw_cxgbe
Navdeep Parhar
np at FreeBSD.org
Wed Mar 29 02:20:09 UTC 2017
Author: np
Date: Wed Mar 29 02:20:07 2017
New Revision: 316122
URL: https://svnweb.freebsd.org/changeset/base/316122
Log:
MFC r315201, r315920, r315921, r315922, r316008, and r316062.
r315201:
cxgbe(4): Fix an always-true assertion (reported by PVS-Studio).
sys/dev/cxgbe/t4_main.c: PVS-Studio: Expression is Always True (CWE-571) (3)
r315920:
cxgbe/iw_cxgbe: c4iw_connect should always return a -ve errno on failure.
r315921:
cxgbe/iw_cxgbe: alloc_ep expects a gfp_t, and it's always ok to sleep during
alloc_ep.
r315922:
cxgbe/iw_cxgbe: allocations that use GFP_KERNEL (which is M_WAITOK on
FreeBSD) cannot fail.
r316008:
cxgbe/iw_cxgbe: Remove unused code.
r316062:
cxgbe/iw_cxgbe: Defer the handling of error CQEs and RDMA_TERMINATE to
the thread that deals with socket state changes. This eliminates
various bad races with the ithread.
Modified:
stable/11/sys/dev/cxgbe/iw_cxgbe/cm.c
stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
stable/11/sys/dev/cxgbe/t4_main.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/cm.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/cm.c Wed Mar 29 01:32:34 2017 (r316121)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/cm.c Wed Mar 29 02:20:07 2017 (r316122)
@@ -76,20 +76,19 @@ static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
-static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static LIST_HEAD(err_cqe_list);
+static spinlock_t err_cqe_lock;
static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
-static void process_timedout_eps(void);
+static void process_err_cqes(void);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
-void __free_ep(struct c4iw_ep_common *epc);
static int find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
__be16 peer_port, u8 tos, struct nhop4_extended *pnh4);
static void close_socket(struct socket *so);
@@ -116,7 +115,10 @@ static void process_connected(struct c4i
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
-
+static int process_terminate(struct c4iw_ep *ep);
+static int terminate(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m);
+static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
#define START_EP_TIMER(ep) \
do { \
CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
@@ -225,22 +227,32 @@ static void process_timeout(struct c4iw_
return;
}
-static void process_timedout_eps(void)
+struct cqe_list_entry {
+ struct list_head entry;
+ struct c4iw_dev *rhp;
+ struct t4_cqe err_cqe;
+};
+
+static void
+process_err_cqes(void)
{
- struct c4iw_ep *ep;
+ unsigned long flag;
+ struct cqe_list_entry *cle;
- spin_lock(&timeout_lock);
- while (!list_empty(&timeout_list)) {
+ spin_lock_irqsave(&err_cqe_lock, flag);
+ while (!list_empty(&err_cqe_list)) {
struct list_head *tmp;
- tmp = timeout_list.next;
+ tmp = err_cqe_list.next;
list_del(tmp);
tmp->next = tmp->prev = NULL;
- spin_unlock(&timeout_lock);
- ep = list_entry(tmp, struct c4iw_ep, entry);
- process_timeout(ep);
- spin_lock(&timeout_lock);
+ spin_unlock_irqrestore(&err_cqe_lock, flag);
+ cle = list_entry(tmp, struct cqe_list_entry, entry);
+ c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
+ free(cle, M_CXGBE);
+ spin_lock_irqsave(&err_cqe_lock, flag);
}
- spin_unlock(&timeout_lock);
+ spin_unlock_irqrestore(&err_cqe_lock, flag);
+
return;
}
@@ -248,23 +260,31 @@ static void
process_req(struct work_struct *ctx)
{
struct c4iw_ep_common *epc;
+ unsigned long flag;
+ int ep_events;
- process_timedout_eps();
- spin_lock(&req_lock);
+ process_err_cqes();
+ spin_lock_irqsave(&req_lock, flag);
while (!TAILQ_EMPTY(&req_list)) {
epc = TAILQ_FIRST(&req_list);
TAILQ_REMOVE(&req_list, epc, entry);
epc->entry.tqe_prev = NULL;
- spin_unlock(&req_lock);
- CTR3(KTR_IW_CXGBE, "%s so :%p, ep:%p", __func__,
- epc->so, epc);
- if (epc->so)
+ ep_events = epc->ep_events;
+ epc->ep_events = 0;
+ spin_unlock_irqrestore(&req_lock, flag);
+ CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, events 0x%x", __func__,
+ epc->so, epc, ep_events);
+ if (ep_events & C4IW_EVENT_TERM)
+ process_terminate((struct c4iw_ep *)epc);
+ if (ep_events & C4IW_EVENT_TIMEOUT)
+ process_timeout((struct c4iw_ep *)epc);
+ if (ep_events & C4IW_EVENT_SOCKET)
process_socket_event((struct c4iw_ep *)epc);
c4iw_put_ep(epc);
- process_timedout_eps();
- spin_lock(&req_lock);
+ process_err_cqes();
+ spin_lock_irqsave(&req_lock, flag);
}
- spin_unlock(&req_lock);
+ spin_unlock_irqrestore(&req_lock, flag);
}
/*
@@ -695,7 +715,7 @@ process_newconn(struct iw_cm_id *parent_
MPASS(child_so != NULL);
- child_ep = alloc_ep(sizeof(*child_ep), M_WAITOK);
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
CTR5(KTR_IW_CXGBE,
"%s: parent so %p, parent ep %p, child so %p, child ep %p",
@@ -734,28 +754,62 @@ process_newconn(struct iw_cm_id *parent_
}
static int
+add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&req_lock, flag);
+ if (ep && ep->com.so) {
+ ep->com.ep_events |= new_ep_event;
+ if (!ep->com.entry.tqe_prev) {
+ c4iw_get_ep(&ep->com);
+ TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
+ queue_work(c4iw_taskq, &c4iw_task);
+ }
+ }
+ spin_unlock_irqrestore(&req_lock, flag);
+
+ return (0);
+}
+
+static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
struct c4iw_ep *ep = arg;
- spin_lock(&req_lock);
-
CTR6(KTR_IW_CXGBE,
"%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
__func__, so, so->so_state, ep, states[ep->com.state],
ep->com.entry.tqe_prev);
- if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
- KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
- c4iw_get_ep(&ep->com);
- TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
- queue_work(c4iw_taskq, &c4iw_task);
- }
+ MPASS(ep->com.so == so);
+ add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
- spin_unlock(&req_lock);
return (SU_OK);
}
+
+static int
+terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ struct adapter *sc = iq->adapter;
+ const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
+ unsigned int tid = GET_TID(cpl);
+ struct toepcb *toep = lookup_tid(sc, tid);
+ struct socket *so;
+ struct c4iw_ep *ep;
+
+ INP_WLOCK(toep->inp);
+ so = inp_inpcbtosocket(toep->inp);
+ ep = so->so_rcv.sb_upcallarg;
+ INP_WUNLOCK(toep->inp);
+
+ CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
+ add_ep_to_req_list(ep, C4IW_EVENT_TERM);
+
+ return 0;
+}
+
static void
process_socket_event(struct c4iw_ep *ep)
{
@@ -938,16 +992,6 @@ alloc_ep(int size, gfp_t gfp)
return (epc);
}
-void
-__free_ep(struct c4iw_ep_common *epc)
-{
- CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
- KASSERT(!epc->so, ("%s warning ep->so %p \n", __func__, epc->so));
- KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc));
- free(epc, M_DEVBUF);
- CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
-}
-
void _c4iw_free_ep(struct kref *kref)
{
struct c4iw_ep *ep;
@@ -2134,15 +2178,7 @@ int c4iw_connect(struct iw_cm_id *cm_id,
err = -EINVAL;
goto out;
}
- ep = alloc_ep(sizeof(*ep), M_NOWAIT);
-
- if (!ep) {
-
- CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
- err = -ENOMEM;
- goto out;
- }
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
init_timer(&ep->timer);
ep->plen = conn_param->private_data_len;
@@ -2202,7 +2238,7 @@ int c4iw_connect(struct iw_cm_id *cm_id,
ep->tos = 0;
ep->com.local_addr = cm_id->local_addr;
ep->com.remote_addr = cm_id->remote_addr;
- err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
+ err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
ep->com.thread);
if (!err) {
@@ -2229,21 +2265,11 @@ out:
int
c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog)
{
- int rc;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
struct socket *so = cm_id->so;
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
- cm_id, so, ep, so->so_pcb);
- if (ep == NULL) {
- log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
- __func__);
- rc = ENOMEM;
- goto failed;
- }
-
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
ep->com.dev = dev;
@@ -2255,10 +2281,6 @@ c4iw_create_listen_ep(struct iw_cm_id *c
cm_id->provider_data = ep;
return (0);
-
-failed:
- CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
- return (-rc);
}
void
@@ -2421,29 +2443,17 @@ int c4iw_ep_redirect(void *ctx, struct d
static void ep_timeout(unsigned long arg)
{
struct c4iw_ep *ep = (struct c4iw_ep *)arg;
- int kickit = 0;
-
- CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
- spin_lock(&timeout_lock);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
/*
* Only insert if it is not already on the list.
*/
- if (!ep->entry.next) {
- list_add_tail(&ep->entry, &timeout_list);
- kickit = 1;
+ if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
+ CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
+ add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
}
}
- spin_unlock(&timeout_lock);
-
- if (kickit) {
-
- CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
- queue_work(c4iw_taskq, &c4iw_task);
- }
- CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
}
static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
@@ -2463,40 +2473,38 @@ static int fw6_wr_rpl(struct adapter *sc
static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
{
- struct t4_cqe cqe =*(const struct t4_cqe *)(&rpl[0]);
+ struct cqe_list_entry *cle;
+ unsigned long flag;
- CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
- c4iw_ev_dispatch(sc->iwarp_softc, &cqe);
+ cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
+ cle->rhp = sc->iwarp_softc;
+ cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
+
+ spin_lock_irqsave(&err_cqe_lock, flag);
+ list_add_tail(&cle->entry, &err_cqe_list);
+ queue_work(c4iw_taskq, &c4iw_task);
+ spin_unlock_irqrestore(&err_cqe_lock, flag);
return (0);
}
-static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+static int
+process_terminate(struct c4iw_ep *ep)
{
- struct adapter *sc = iq->adapter;
- const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
- unsigned int tid = GET_TID(cpl);
struct c4iw_qp_attributes attrs;
- struct toepcb *toep = lookup_tid(sc, tid);
- struct socket *so;
- struct c4iw_ep *ep;
-
- INP_WLOCK(toep->inp);
- so = inp_inpcbtosocket(toep->inp);
- ep = so->so_rcv.sb_upcallarg;
- INP_WUNLOCK(toep->inp);
CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep);
if (ep && ep->com.qp) {
- printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
- ep->com.qp->wq.sq.qid);
+ printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
+ ep->hwtid, ep->com.qp->wq.sq.qid);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
1);
} else
- printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+ printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
+ ep->hwtid);
CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep);
return 0;
@@ -2512,8 +2520,8 @@ int __init c4iw_cm_init(void)
TAILQ_INIT(&req_list);
spin_lock_init(&req_lock);
- INIT_LIST_HEAD(&timeout_list);
- spin_lock_init(&timeout_lock);
+ INIT_LIST_HEAD(&err_cqe_list);
+ spin_lock_init(&err_cqe_lock);
INIT_WORK(&c4iw_task, process_req);
@@ -2527,7 +2535,7 @@ int __init c4iw_cm_init(void)
void __exit c4iw_cm_term(void)
{
WARN_ON(!TAILQ_EMPTY(&req_list));
- WARN_ON(!list_empty(&timeout_list));
+ WARN_ON(!list_empty(&err_cqe_list));
flush_workqueue(c4iw_taskq);
destroy_workqueue(c4iw_taskq);
Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Wed Mar 29 01:32:34 2017 (r316121)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h Wed Mar 29 02:20:07 2017 (r316122)
@@ -523,6 +523,14 @@ enum c4iw_qp_state {
C4IW_QP_STATE_TOT
};
+/*
+ * IW_CXGBE event bits.
+ * These bits are used for handling all events for a particular 'ep' serially.
+ */
+#define C4IW_EVENT_SOCKET 0x0001
+#define C4IW_EVENT_TIMEOUT 0x0002
+#define C4IW_EVENT_TERM 0x0004
+
static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
switch (ib_state) {
@@ -756,6 +764,7 @@ struct c4iw_ep_common {
int rpl_done;
struct thread *thread;
struct socket *so;
+ int ep_events;
};
struct c4iw_listen_ep {
@@ -768,7 +777,6 @@ struct c4iw_ep {
struct c4iw_ep_common com;
struct c4iw_ep *parent_ep;
struct timer_list timer;
- struct list_head entry;
unsigned int atid;
u32 hwtid;
u32 snd_seq;
Modified: stable/11/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_main.c Wed Mar 29 01:32:34 2017 (r316121)
+++ stable/11/sys/dev/cxgbe/t4_main.c Wed Mar 29 02:20:07 2017 (r316122)
@@ -7995,9 +7995,9 @@ sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
- reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
- reg == A_TP_KEEP_IDLE || A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT ||
- reg == A_TP_FINWAIT2_TIMER);
+ reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
+ reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
+ reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
tp_tick_us = (cclk_ps << tre) / 1000000;
More information about the svn-src-stable
mailing list