svn commit: r339403 - stable/11/sys/dev/cxgbe/common
From: Navdeep Parhar <np@FreeBSD.org>
Date: Wed Oct 17 02:05:32 UTC 2018
Author: np
Date: Wed Oct 17 02:05:31 2018
New Revision: 339403
URL: https://svnweb.freebsd.org/changeset/base/339403
Log:
  MFC r335352:

  cxgbe(4): Some mailbox commands require access to the Tx pipeline and
  can time out if it's backed up due to a non-stop deluge of PAUSE frames
  from a misbehaving peer.  Detect this situation and toggle MPS TxEn
  to allow forward progress.
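
In outline: sample each port's RX PAUSE and TX frame counters when the
mailbox command is posted, re-sample periodically while waiting for the
reply, and if PAUSE frames kept arriving while the port transmitted
nothing with Tx enabled, pulse TxEn off and back on so the pipeline can
drain.  A minimal standalone sketch of that check follows; the
read_rx_pause()/read_tx_frames()/port_tx_enabled()/toggle_tx_enable()
helpers are hypothetical stand-ins, not the driver's real register
accessors:

#include <stdbool.h>
#include <stdint.h>

struct tx_sample {
        uint64_t rx_pause;      /* PAUSE frames received from the peer */
        uint64_t tx_frames;     /* frames the port actually transmitted */
};

/* Hypothetical accessors; the driver reads MPS port statistics regs. */
uint64_t read_rx_pause(int port);
uint64_t read_tx_frames(int port);
bool port_tx_enabled(int port);
void toggle_tx_enable(int port);        /* clear TxEn, wait ~1ms, set it */

static void
check_port(int port, struct tx_sample *prev)
{
        struct tx_sample cur = {
                .rx_pause = read_rx_pause(port),
                .tx_frames = read_tx_frames(port),
        };

        /*
         * The PAUSE counter moved but no frame left the port: the Tx
         * pipeline is stalled by flow control.  Toggle TxEn to unwedge.
         */
        if (port_tx_enabled(port) && cur.rx_pause != prev->rx_pause &&
            cur.tx_frames == prev->tx_frames)
                toggle_tx_enable(port);

        *prev = cur;    /* the next check compares against this sample */
}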
Modified:
stable/11/sys/dev/cxgbe/common/t4_hw.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/dev/cxgbe/common/t4_hw.c
==============================================================================
--- stable/11/sys/dev/cxgbe/common/t4_hw.c Wed Oct 17 01:59:45 2018 (r339402)
+++ stable/11/sys/dev/cxgbe/common/t4_hw.c Wed Oct 17 02:05:31 2018 (r339403)
@@ -237,6 +237,63 @@ static void fw_asrt(struct adapter *adap, struct fw_de
 		  be32_to_cpu(asrt->u.assert.y));
 }
 
+struct port_tx_state {
+        uint64_t rx_pause;
+        uint64_t tx_frames;
+};
+
+static void
+read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
+{
+        uint32_t rx_pause_reg, tx_frames_reg;
+
+        if (is_t4(sc)) {
+                tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
+                rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
+        } else {
+                tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
+                rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
+        }
+
+        tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
+        tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
+}
+
+static void
+read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
+{
+        int i;
+
+        for_each_port(sc, i)
+                read_tx_state_one(sc, i, &tx_state[i]);
+}
+
+static void
+check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
+{
+        uint32_t port_ctl_reg;
+        uint64_t tx_frames, rx_pause;
+        int i;
+
+        for_each_port(sc, i) {
+                rx_pause = tx_state[i].rx_pause;
+                tx_frames = tx_state[i].tx_frames;
+                read_tx_state_one(sc, i, &tx_state[i]);        /* update */
+
+                if (is_t4(sc))
+                        port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
+                else
+                        port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
+                if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
+                    rx_pause != tx_state[i].rx_pause &&
+                    tx_frames == tx_state[i].tx_frames) {
+                        t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
+                        mdelay(1);
+                        t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
+                }
+        }
+}
+
 #define X_CIM_PF_NOACCESS 0xeeeeeeee
 /**
  *      t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
@@ -278,13 +335,14 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int
         };
         u32 v;
         u64 res;
-        int i, ms, delay_idx, ret;
+        int i, ms, delay_idx, ret, next_tx_check;
         const __be64 *p = cmd;
         u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
         u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
         u32 ctl;
         __be64 cmd_rpl[MBOX_LEN/8];
         u32 pcie_fw;
+        struct port_tx_state tx_state[MAX_NPORTS];
 
         if (adap->flags & CHK_MBOX_ACCESS)
                 ASSERT_SYNCHRONIZED_OP(adap);
@@ -373,8 +431,8 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int
         CH_DUMP_MBOX(adap, mbox, data_reg);
 
         t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
-        t4_read_reg(adap, ctl_reg);    /* flush write */
-
+        read_tx_state(adap, &tx_state[0]);      /* also flushes the write_reg */
+        next_tx_check = 1000;
         delay_idx = 0;
         ms = delay[0];
 
@@ -389,6 +447,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int
                         if (pcie_fw & F_PCIE_FW_ERR)
                                 break;
                 }
+
+                if (i >= next_tx_check) {
+                        check_tx_state(adap, &tx_state[0]);
+                        next_tx_check = i + 1000;
+                }
+
                 if (sleep_ok) {
                         ms = delay[delay_idx];  /* last element may repeat */
                         if (delay_idx < ARRAY_SIZE(delay) - 1)
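
In the hunk above, the loop variable i accumulates the elapsed wait in
milliseconds, so the Tx-state check fires roughly once per second while
the reply is outstanding, and the initial read_tx_state() call doubles
as the posted-write flush that the removed t4_read_reg() used to
provide.  A sketch of that cadence under assumed names (reply_ready(),
wait_ms(), sample_ports(), and check_ports() are hypothetical, not the
driver's API; struct tx_sample is from the sketch above):

bool reply_ready(void);
void wait_ms(int ms);
void sample_ports(struct tx_sample *state, int nports);
void check_ports(struct tx_sample *state, int nports);

/* Poll for a mailbox reply, nudging wedged ports about once a second. */
int
wait_for_reply(int timeout_ms, struct tx_sample *state, int nports)
{
        int i, next_check = 1000;

        sample_ports(state, nports);    /* baseline; also flushes the post */
        for (i = 0; i < timeout_ms; i += 5) {   /* 5ms poll interval */
                if (reply_ready())
                        return (0);
                if (i >= next_check) {          /* ~once per second */
                        check_ports(state, nports);
                        next_check = i + 1000;
                }
                wait_ms(5);
        }
        return (-1);    /* timed out; caller reports mailbox failure */
}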