svn commit: r248925 - in head: sys/dev/cxgbe sys/dev/cxgbe/common sys/dev/cxgbe/tom sys/modules/cxgbe sys/modules/cxgbe/firmware sys/modules/cxgbe/t4_firmware tools/tools/cxgbetool
Navdeep Parhar <np at FreeBSD.org>
Sat Mar 30 02:26:21 UTC 2013
Author: np
Date: Sat Mar 30 02:26:20 2013
New Revision: 248925
URL: http://svnweb.freebsd.org/changeset/base/248925
Log:
cxgbe(4): Add support for Chelsio's Terminator 5 (aka T5) ASIC. This
includes support for the NIC and TOE features of the 40G, 10G, and
1G/100M cards based on the T5.
The ASIC is mostly backward compatible with the Terminator 4, so cxgbe(4)
has been updated instead of writing a brand new driver. T5 cards will
show up as cxl (short for cxlgb) ports attached to the t5nex bus driver.
Sponsored by: Chelsio
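
[The diff below adds chip_id(), is_t4(), and is_t5() helpers to common.h and
keys chip-specific register choices off them throughout t4_hw.c. A minimal,
self-contained sketch of that dispatch pattern, using the CHELSIO_T4/T5 ids
and MEMWIN2 bases shown in the diff; the trimmed structs and the
memwin2_base() caller are illustrative only, not part of the commit:]

/* Sketch only: mirrors the chip-id helpers this commit adds to common.h. */
#define CHELSIO_T4	0x4
#define CHELSIO_T5	0x5

struct adapter_params {
	unsigned int chipid:4;	/* chip ID: 4 = T4, 5 = T5 */
	unsigned int rev:4;	/* chip revision */
};

struct adapter {
	struct adapter_params params;
};

static inline int is_t4(struct adapter *adap)
{
	return (adap->params.chipid == CHELSIO_T4);
}

static inline int is_t5(struct adapter *adap)
{
	return (adap->params.chipid == CHELSIO_T5);
}

/* Illustrative caller: select the T4 or T5 memory window 2 base address. */
static inline unsigned int memwin2_base(struct adapter *adap)
{
	return (is_t4(adap) ? 0x30000 : 0x60000);	/* MEMWIN2_BASE_T4 : _T5 */
}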
Added:
head/sys/modules/cxgbe/t4_firmware/
head/sys/modules/cxgbe/t4_firmware/Makefile (contents, props changed)
head/tools/tools/cxgbetool/reg_defs_t5.c (contents, props changed)
Deleted:
head/sys/modules/cxgbe/firmware/
Modified:
head/sys/dev/cxgbe/adapter.h
head/sys/dev/cxgbe/common/common.h
head/sys/dev/cxgbe/common/t4_hw.c
head/sys/dev/cxgbe/common/t4_hw.h
head/sys/dev/cxgbe/common/t4_msg.h
head/sys/dev/cxgbe/common/t4_regs.h
head/sys/dev/cxgbe/osdep.h
head/sys/dev/cxgbe/t4_ioctl.h
head/sys/dev/cxgbe/t4_main.c
head/sys/dev/cxgbe/t4_sge.c
head/sys/dev/cxgbe/tom/t4_connect.c
head/sys/dev/cxgbe/tom/t4_cpl_io.c
head/sys/dev/cxgbe/tom/t4_ddp.c
head/sys/dev/cxgbe/tom/t4_listen.c
head/sys/dev/cxgbe/tom/t4_tom.c
head/sys/dev/cxgbe/tom/t4_tom.h
head/sys/modules/cxgbe/Makefile
head/tools/tools/cxgbetool/cxgbetool.c
Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h Sat Mar 30 00:33:46 2013 (r248924)
+++ head/sys/dev/cxgbe/adapter.h Sat Mar 30 02:26:20 2013 (r248925)
@@ -50,9 +50,6 @@
#include "offload.h"
#include "firmware/t4fw_interface.h"
-#define T4_CFGNAME "t4fw_cfg"
-#define T4_FWNAME "t4fw"
-
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
@@ -144,12 +141,6 @@ enum {
TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};
-#ifdef T4_PKT_TIMESTAMP
-#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
-#else
-#define RX_COPY_THRESHOLD MINCLSIZE
-#endif
-
enum {
/* adapter intr_type */
INTR_INTX = (1 << 0),
@@ -327,6 +318,9 @@ enum {
EQ_STALLED = (1 << 6), /* out of hw descriptors or dmamaps */
};
+/* Listed in order of preference. Update t4_sysctls too if you change these */
+enum {DOORBELL_UDB, DOORBELL_WRWC, DOORBELL_UDBWC, DOORBELL_KDB};
+
/*
* Egress Queue: driver is producer, T4 is consumer.
*
@@ -344,6 +338,9 @@ struct sge_eq {
struct tx_desc *desc; /* KVA of descriptor ring */
bus_addr_t ba; /* bus address of descriptor ring */
struct sge_qstat *spg; /* status page, for convenience */
+ int doorbells;
+ volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
+ u_int udb_qid; /* relative qid within the doorbell page */
uint16_t cap; /* max # of desc, for convenience */
uint16_t avail; /* available descriptors, for convenience */
uint16_t qsize; /* size (# of entries) of the queue */
@@ -496,6 +493,7 @@ struct sge {
int timer_val[SGE_NTIMERS];
int counter_val[SGE_NCOUNTERS];
int fl_starve_threshold;
+ int s_qpp;
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx tx queues */
@@ -541,6 +539,9 @@ struct adapter {
bus_space_handle_t bh;
bus_space_tag_t bt;
bus_size_t mmio_len;
+ int udbs_rid;
+ struct resource *udbs_res;
+ volatile uint8_t *udbs_base;
unsigned int pf;
unsigned int mbox;
@@ -570,6 +571,7 @@ struct adapter {
struct l2t_data *l2t; /* L2 table */
struct tid_info tids;
+ int doorbells;
int open_device_map;
#ifdef TCP_OFFLOAD
int offload_map;
@@ -748,13 +750,15 @@ t4_os_set_hw_addr(struct adapter *sc, in
bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}
-static inline bool is_10G_port(const struct port_info *pi)
+static inline bool
+is_10G_port(const struct port_info *pi)
{
return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}
-static inline int tx_resume_threshold(struct sge_eq *eq)
+static inline int
+tx_resume_threshold(struct sge_eq *eq)
{
return (eq->qsize / 4);
@@ -778,7 +782,9 @@ void end_synchronized_op(struct adapter
/* t4_sge.c */
void t4_sge_modload(void);
-int t4_sge_init(struct adapter *);
+void t4_init_sge_cpl_handlers(struct adapter *);
+void t4_tweak_chip_settings(struct adapter *);
+int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
Modified: head/sys/dev/cxgbe/common/common.h
==============================================================================
--- head/sys/dev/cxgbe/common/common.h Sat Mar 30 00:33:46 2013 (r248924)
+++ head/sys/dev/cxgbe/common/common.h Sat Mar 30 02:26:20 2013 (r248925)
@@ -42,15 +42,19 @@ enum {
MACADDR_LEN = 12, /* MAC Address length */
};
-enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
enum {
MEMWIN0_APERTURE = 2048,
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
- MEMWIN2_APERTURE = 65536,
- MEMWIN2_BASE = 0x30000,
+
+ MEMWIN2_APERTURE_T4 = 65536,
+ MEMWIN2_BASE_T4 = 0x30000,
+
+ MEMWIN2_APERTURE_T5 = 128 * 1024,
+ MEMWIN2_BASE_T5 = 0x60000,
};
enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
@@ -63,15 +67,20 @@ enum {
PAUSE_AUTONEG = 1 << 2
};
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 8
-#define FW_VERSION_MICRO 4
-#define FW_VERSION_BUILD 0
-
-#define FW_VERSION (V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR) | \
- V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR) | \
- V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO) | \
- V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD))
+#define FW_VERSION_MAJOR_T4 1
+#define FW_VERSION_MINOR_T4 8
+#define FW_VERSION_MICRO_T4 4
+#define FW_VERSION_BUILD_T4 0
+
+#define FW_VERSION_MAJOR_T5 0
+#define FW_VERSION_MINOR_T5 5
+#define FW_VERSION_MICRO_T5 18
+#define FW_VERSION_BUILD_T5 0
+
+struct memwin {
+ uint32_t base;
+ uint32_t aperture;
+};
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
@@ -267,18 +276,20 @@ struct adapter_params {
unsigned int cim_la_size;
- /* Used as int in sysctls, do not reduce size */
- unsigned int nports; /* # of ethernet ports */
- unsigned int portvec;
- unsigned int rev; /* chip revision */
- unsigned int offload;
+ uint8_t nports; /* # of ethernet ports */
+ uint8_t portvec;
+ unsigned int chipid:4; /* chip ID. T4 = 4, T5 = 5, ... */
+ unsigned int rev:4; /* chip revision */
+ unsigned int fpga:1; /* this is an FPGA */
+ unsigned int offload:1; /* hw is TOE capable, fw has divvied up card
+ resources for TOE operation. */
+ unsigned int bypass:1; /* this is a bypass card */
unsigned int ofldq_wr_cred;
};
-enum { /* chip revisions */
- T4_REV_A = 0,
-};
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
struct trace_params {
u32 data[TRACE_LEN / 4];
@@ -316,6 +327,31 @@ static inline int is_offload(const struc
return adap->params.offload;
}
+static inline int chip_id(struct adapter *adap)
+{
+ return adap->params.chipid;
+}
+
+static inline int chip_rev(struct adapter *adap)
+{
+ return adap->params.rev;
+}
+
+static inline int is_t4(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T4;
+}
+
+static inline int is_t5(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T5;
+}
+
+static inline int is_fpga(struct adapter *adap)
+{
+ return adap->params.fpga;
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
@@ -437,7 +473,8 @@ int t4_cim_read_la(struct adapter *adap,
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr,
+ __be32 *data, u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity);
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size,
__be32 *data);
Modified: head/sys/dev/cxgbe/common/t4_hw.c
==============================================================================
--- head/sys/dev/cxgbe/common/t4_hw.c Sat Mar 30 00:33:46 2013 (r248924)
+++ head/sys/dev/cxgbe/common/t4_hw.c Sat Mar 30 02:26:20 2013 (r248925)
@@ -312,6 +312,7 @@ int t4_wr_mbox_meat(struct adapter *adap
/**
* t4_mc_read - read from MC through backdoor accesses
* @adap: the adapter
+ * @idx: which MC to access
* @addr: address of first byte requested
* @data: 64 bytes of data containing the requested address
* @ecc: where to store the corresponding 64-bit ECC word
@@ -320,22 +321,40 @@ int t4_wr_mbox_meat(struct adapter *adap
* that covers the requested address @addr. If @parity is not %NULL it
* is assigned the 64-bit ECC word for the read data.
*/
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+ u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
- if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
+ if (is_t4(adap)) {
+ mc_bist_cmd_reg = A_MC_BIST_CMD;
+ mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
+ mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
+ mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
+ mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
+ } else {
+ mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
+ idx);
+ mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
+ idx);
+ }
+
+ if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
return -EBUSY;
- t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
- t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
- t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
- t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
- V_BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
+ t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
+ t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
+ t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
+ t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
+ F_START_BIST | V_BIST_CMD_GAP(1));
+ i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
for (i = 15; i >= 0; i--)
*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
@@ -360,20 +379,47 @@ int t4_mc_read(struct adapter *adap, u32
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+ u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+
+ if (is_t4(adap)) {
+ edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
+ idx);
+ edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
+ idx);
+ } else {
+/*
+ * These macro are missing in t4_regs.h file.
+ * Added temporarily for testing.
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+ edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
+ idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
+ idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+ }
- idx *= EDC_STRIDE;
- if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
return -EBUSY;
- t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
- t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
- t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
- t4_write_reg(adap, A_EDC_BIST_CMD + idx,
+ t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
+ t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
+ t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
+ t4_write_reg(adap, edc_bist_cmd_reg,
V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
- i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
for (i = 15; i >= 0; i--)
*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
@@ -425,8 +471,8 @@ int t4_mem_read(struct adapter *adap, in
/*
* Read the chip's memory block and bail if there's an error.
*/
- if (mtype == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ if ((mtype == MEM_MC) || (mtype == MEM_MC1))
+ ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mtype, pos, data, NULL);
if (ret)
@@ -464,7 +510,7 @@ struct t4_vpd_hdr {
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
-#define VPD_LEN 512
+#define VPD_LEN 1024
#define VPD_INFO_FLD_HDR_SIZE 3
/**
@@ -914,6 +960,7 @@ int t4_get_tp_version(struct adapter *ad
int t4_check_fw_version(struct adapter *adapter)
{
int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
if (!ret)
@@ -925,13 +972,30 @@ int t4_check_fw_version(struct adapter *
minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ switch (chip_id(adapter)) {
+ case CHELSIO_T4:
+ exp_major = FW_VERSION_MAJOR_T4;
+ exp_minor = FW_VERSION_MINOR_T4;
+ exp_micro = FW_VERSION_MICRO_T4;
+ break;
+ case CHELSIO_T5:
+ exp_major = FW_VERSION_MAJOR_T5;
+ exp_minor = FW_VERSION_MINOR_T5;
+ exp_micro = FW_VERSION_MICRO_T5;
+ break;
+ default:
+ CH_ERR(adapter, "Unsupported chip type, %x\n",
+ chip_id(adapter));
+ return -EINVAL;
+ }
+
+ if (major != exp_major) { /* major mismatch - fail */
CH_ERR(adapter, "card FW has major version %u, driver wants "
- "%u\n", major, FW_VERSION_MAJOR);
+ "%u\n", major, exp_major);
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == exp_minor && micro == exp_micro)
return 0; /* perfect match */
/* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1407,6 +1471,7 @@ out:
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
unsigned int i, v;
+ int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
for (i = 0; i < CIM_NUM_IBQ; i++) {
t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
@@ -1416,7 +1481,7 @@ void t4_read_cimq_cfg(struct adapter *ad
*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
}
- for (i = 0; i < CIM_NUM_OBQ; i++) {
+ for (i = 0; i < cim_num_obq; i++) {
t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
V_QUENUMSELECT(i));
v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
@@ -1452,8 +1517,12 @@ int t4_read_cim_ibq(struct adapter *adap
for (i = 0; i < n; i++, addr++) {
t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
F_IBQDBGEN);
+ /*
+ * It might take 3-10ms before the IBQ debug read access is
+ * allowed. Wait for 1 Sec with a delay of 1 usec.
+ */
err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
- 2, 1);
+ 1000000, 1);
if (err)
return err;
*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
@@ -1477,8 +1546,9 @@ int t4_read_cim_obq(struct adapter *adap
{
int i, err;
unsigned int addr, v, nwords;
+ int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
- if (qid > 5 || (n & 3))
+ if (qid >= cim_num_obq || (n & 3))
return -EINVAL;
t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
@@ -1933,6 +2003,47 @@ static void pcie_intr_handler(struct ada
{ 0 }
};
+ static struct intr_info t5_pcie_intr_info[] = {
+ { F_MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { F_DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { F_FIDPERR, "PCI FID parity error", -1, 1 },
+ { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+ -1, 1 },
+ { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { F_READRSPERR, "Outbound read error", -1,
+ 0 },
+ { 0 }
+ };
+
int fat;
fat = t4_handle_intr_status(adapter,
@@ -1941,7 +2052,9 @@ static void pcie_intr_handler(struct ada
t4_handle_intr_status(adapter,
A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
- t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
+ t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
+ is_t4(adapter) ?
+ pcie_intr_info : t5_pcie_intr_info);
if (fat)
t4_fatal_err(adapter);
}
@@ -2368,9 +2481,15 @@ static void ncsi_intr_handler(struct ada
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
- u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
+ u32 v, int_cause_reg;
- v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ if (is_t4(adap))
+ int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
+ else
+ int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
+
+ v = t4_read_reg(adap, int_cause_reg);
+ v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
if (!v)
return;
@@ -2378,7 +2497,7 @@ static void xgmac_intr_handler(struct ad
CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
if (v & F_RXFIFO_PRTY_ERR)
CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
- t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
+ t4_write_reg(adap, int_cause_reg, v);
t4_fatal_err(adap);
}
@@ -3531,7 +3650,10 @@ int t4_set_trace_filter(struct adapter *
V_TFMINPKTSIZE(tp->min_len));
t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
- V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
+ is_t4(adap) ?
+ V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
+ V_T5_TFPORT(tp->port) | F_T5_TFEN |
+ V_T5_TFINVERTMATCH(tp->invert));
return 0;
}
@@ -3555,13 +3677,18 @@ void t4_get_trace_filter(struct adapter
ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
- *enabled = !!(ctla & F_TFEN);
+ if (is_t4(adap)) {
+ *enabled = !!(ctla & F_TFEN);
+ tp->port = G_TFPORT(ctla);
+ } else {
+ *enabled = !!(ctla & F_T5_TFEN);
+ tp->port = G_T5_TFPORT(ctla);
+ }
tp->snap_len = G_TFCAPTUREMAX(ctlb);
tp->min_len = G_TFMINPKTSIZE(ctlb);
tp->skip_ofst = G_TFOFFSET(ctla);
tp->skip_len = G_TFLENGTH(ctla);
tp->invert = !!(ctla & F_TFINVERTMATCH);
- tp->port = G_TFPORT(ctla);
ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
@@ -3584,11 +3711,19 @@ void t4_get_trace_filter(struct adapter
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
int i;
+ u32 data[2];
for (i = 0; i < PM_NSTATS; i++) {
t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
- cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+ if (is_t4(adap))
+ cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+ else {
+ t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
+ A_PM_TX_DBG_DATA, data, 2,
+ A_PM_TX_DBG_STAT_MSB);
+ cycles[i] = (((u64)data[0] << 32) | data[1]);
+ }
}
}
@@ -3603,11 +3738,19 @@ void t4_pmtx_get_stats(struct adapter *a
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
int i;
+ u32 data[2];
for (i = 0; i < PM_NSTATS; i++) {
t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
- cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+ if (is_t4(adap))
+ cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+ else {
+ t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
+ A_PM_RX_DBG_DATA, data, 2,
+ A_PM_RX_DBG_STAT_MSB);
+ cycles[i] = (((u64)data[0] << 32) | data[1]);
+ }
}
}
@@ -3666,7 +3809,9 @@ void t4_get_port_stats(struct adapter *a
u32 bgmap = get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
+ T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
p->tx_pause = GET_STAT(TX_PORT_PAUSE);
@@ -3745,13 +3890,19 @@ void t4_clr_port_stats(struct adapter *a
{
unsigned int i;
u32 bgmap = get_mps_bg_map(adap, idx);
+ u32 port_base_addr;
+
+ if (is_t4(adap))
+ port_base_addr = PORT_BASE(idx);
+ else
+ port_base_addr = T5_PORT_BASE(idx);
for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
- t4_write_reg(adap, PORT_REG(idx, i), 0);
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
- t4_write_reg(adap, PORT_REG(idx, i), 0);
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
for (i = 0; i < 4; i++)
if (bgmap & (1 << i)) {
t4_write_reg(adap,
@@ -3774,7 +3925,10 @@ void t4_get_lb_stats(struct adapter *ada
u32 bgmap = get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap) ? \
+ PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
+ T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
p->octets = GET_STAT(BYTES);
@@ -3791,8 +3945,7 @@ void t4_get_lb_stats(struct adapter *ada
p->frames_512_1023 = GET_STAT(512B_1023B);
p->frames_1024_1518 = GET_STAT(1024B_1518B);
p->frames_1519_max = GET_STAT(1519B_MAX);
- p->drop = t4_read_reg(adap, PORT_REG(idx,
- A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+ p->drop = GET_STAT(DROP_FRAMES);
p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
@@ -3818,14 +3971,26 @@ void t4_get_lb_stats(struct adapter *ada
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
+ u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
+ if (is_t4(adap)) {
+ mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ }
+
if (addr) {
- t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
+ t4_write_reg(adap, mag_id_reg_l,
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
- t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
+ t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
+ t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
V_MAGICEN(addr != NULL));
}
@@ -3848,16 +4013,23 @@ int t4_wol_pat_enable(struct adapter *ad
u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
int i;
+ u32 port_cfg_reg;
+
+ if (is_t4(adap))
+ port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
+ else
+ port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
if (!enable) {
- t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
- F_PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
-#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+ (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
+ T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -3883,7 +4055,7 @@ int t4_wol_pat_enable(struct adapter *ad
}
#undef EPIO_REG
- t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
+ t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
return 0;
}
@@ -4763,9 +4935,12 @@ int t4_alloc_mac_filt(struct adapter *ad
int offset, ret = 0;
struct fw_vi_mac_cmd c;
unsigned int nfilters = 0;
+ unsigned int max_naddr = is_t4(adap) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int rem = naddr;
- if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
+ if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr ; /**/) {
@@ -4806,10 +4981,10 @@ int t4_alloc_mac_filt(struct adapter *ad
u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
if (idx)
- idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
+ idx[offset+i] = (index >= max_naddr
? 0xffff
: index);
- if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
+ if (index < max_naddr)
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -4853,6 +5028,9 @@ int t4_change_mac(struct adapter *adap,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
+ unsigned int max_mac_addr = is_t4(adap) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -4867,10 +5045,10 @@ int t4_change_mac(struct adapter *adap,
V_FW_VI_MAC_CMD_IDX(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
- ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
- if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -5188,21 +5366,6 @@ static void __devinit init_link_config(s
}
}
-static int __devinit wait_dev_ready(struct adapter *adap)
-{
- u32 whoami;
-
- whoami = t4_read_reg(adap, A_PL_WHOAMI);
-
- if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
- return 0;
-
- msleep(500);
- whoami = t4_read_reg(adap, A_PL_WHOAMI);
- return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
- ? 0 : -EIO);
-}
-
static int __devinit get_flash_params(struct adapter *adapter)
{
int ret;
@@ -5255,21 +5418,26 @@ static void __devinit set_pcie_completio
int __devinit t4_prep_adapter(struct adapter *adapter)
{
int ret;
-
- ret = wait_dev_ready(adapter);
- if (ret < 0)
- return ret;
+ uint16_t device_id;
+ uint32_t pl_rev;
get_pci_mode(adapter, &adapter->params.pci);
- adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
- /* T4A1 chip is no longer supported */
- if (adapter->params.rev == 1) {
- CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
- return -EINVAL;
+ pl_rev = t4_read_reg(adapter, A_PL_REV);
+ adapter->params.chipid = G_CHIPID(pl_rev);
+ adapter->params.rev = G_REV(pl_rev);
+ if (adapter->params.chipid == 0) {
+ /* T4 did not have chipid in PL_REV (T5 onwards do) */
+ adapter->params.chipid = CHELSIO_T4;
+
+ /* T4A1 chip is not supported */
+ if (adapter->params.rev == 1) {
+ CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
+ return -EINVAL;
+ }
}
adapter->params.pci.vpd_cap_addr =
- t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+ t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
ret = get_flash_params(adapter);
if (ret < 0)
@@ -5279,12 +5447,14 @@ int __devinit t4_prep_adapter(struct ada
if (ret < 0)
return ret;
- if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
+ /* Cards with real ASICs have the chipid in the PCIe device id */
+ t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
+ if (device_id >> 12 == adapter->params.chipid)
+ adapter->params.cim_la_size = CIMLA_SIZE;
+ else {
/* FPGA */
+ adapter->params.fpga = 1;
adapter->params.cim_la_size = 2 * CIMLA_SIZE;
- } else {
- /* ASIC */
- adapter->params.cim_la_size = CIMLA_SIZE;
}
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
Modified: head/sys/dev/cxgbe/common/t4_hw.h
==============================================================================
--- head/sys/dev/cxgbe/common/t4_hw.h Sat Mar 30 00:33:46 2013 (r248924)
+++ head/sys/dev/cxgbe/common/t4_hw.h Sat Mar 30 02:26:20 2013 (r248925)
@@ -33,27 +33,32 @@
#include "osdep.h"
enum {
- NCHAN = 4, /* # of HW channels */
- MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
- EEPROMSIZE = 17408, /* Serial EEPROM physical size */
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
- EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
- RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- NTX_SCHED = 8, /* # of HW Tx scheduling queues */
- PM_NSTATS = 5, /* # of PM stats */
- MBOX_LEN = 64, /* mailbox size in bytes */
- TRACE_LEN = 112, /* length of trace data and mask */
- FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
- NWOL_PAT = 8, /* # of WoL patterns */
- WOL_PAT_LEN = 128, /* length of WoL patterns */
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width of optional components */
+ NWOL_PAT = 8, /* # of WoL patterns */
+ WOL_PAT_LEN = 128, /* length of WoL patterns */
+ UDBS_SEG_SIZE = 128, /* Segment size of BAR2 doorbells */
+ UDBS_SEG_SHIFT = 7, /* log2(UDBS_SEG_SIZE) */
+ UDBS_DB_OFFSET = 8, /* offset of the 4B doorbell in a segment */
+ UDBS_WR_OFFSET = 64, /* offset of the work request in a segment */
};
enum {
CIM_NUM_IBQ = 6, /* # of CIM IBQs */
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
+ CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */
@@ -80,6 +85,7 @@ enum {
SGE_CTXT_SIZE = 24, /* size of SGE context */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_MAX_IQ_SIZE = 65520,
};
struct sge_qstat { /* data written to SGE queue status entries */
@@ -221,7 +227,7 @@ enum {
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 8,
+ FLASH_FW_NSECS = 16,
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
Modified: head/sys/dev/cxgbe/common/t4_msg.h
==============================================================================
--- head/sys/dev/cxgbe/common/t4_msg.h Sat Mar 30 00:33:46 2013 (r248924)
+++ head/sys/dev/cxgbe/common/t4_msg.h Sat Mar 30 02:26:20 2013 (r248925)
@@ -104,6 +104,7 @@ enum {
CPL_RX_ISCSI_DDP = 0x49,
CPL_RX_FCOE_DIF = 0x4A,
CPL_RX_DATA_DIF = 0x4B,
+ CPL_ERR_NOTIFY = 0x4D,
CPL_RDMA_READ_REQ = 0x60,
CPL_RX_ISCSI_DIF = 0x60,
@@ -125,6 +126,7 @@ enum {
CPL_RDMA_IMM_DATA_SE = 0xAD,
CPL_TRACE_PKT = 0xB0,
+ CPL_TRACE_PKT_T5 = 0x48,
CPL_RX2TX_DATA = 0xB1,
CPL_ISCSI_DATA = 0xB2,
CPL_FCOE_DATA = 0xB3,
@@ -478,6 +480,11 @@ struct work_request_hdr {
#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+#define S_FILT_INFO 28
+#define M_FILT_INFO 0xfffffffffULL
+#define V_FILT_INFO(x) ((x) << S_FILT_INFO)
+#define G_FILT_INFO(x) (((x) >> S_FILT_INFO) & M_FILT_INFO)
+
/* option 2 fields */
#define S_RSS_QUEUE 0
#define M_RSS_QUEUE 0x3FF
@@ -552,6 +559,10 @@ struct work_request_hdr {
#define V_SACK_EN(x) ((x) << S_SACK_EN)
#define F_SACK_EN V_SACK_EN(1U)
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
@@ -679,6 +690,10 @@ struct cpl_act_open_req {
__be32 opt2;
};
+#define S_FILTER_TUPLE 24
+#define M_FILTER_TUPLE 0xFFFFFFFFFF
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -1053,6 +1068,12 @@ struct cpl_tx_pkt {
#define V_TXPKT_OVLAN_IDX(x) ((x) << S_TXPKT_OVLAN_IDX)
#define G_TXPKT_OVLAN_IDX(x) (((x) >> S_TXPKT_OVLAN_IDX) & M_TXPKT_OVLAN_IDX)
+#define S_TXPKT_T5_OVLAN_IDX 12
+#define M_TXPKT_T5_OVLAN_IDX 0x7
+#define V_TXPKT_T5_OVLAN_IDX(x) ((x) << S_TXPKT_T5_OVLAN_IDX)
+#define G_TXPKT_T5_OVLAN_IDX(x) (((x) >> S_TXPKT_T5_OVLAN_IDX) & \
+ M_TXPKT_T5_OVLAN_IDX)
+
#define S_TXPKT_INTF 16
#define M_TXPKT_INTF 0xF
#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
@@ -1062,10 +1083,18 @@ struct cpl_tx_pkt {
#define V_TXPKT_SPECIAL_STAT(x) ((x) << S_TXPKT_SPECIAL_STAT)
#define F_TXPKT_SPECIAL_STAT V_TXPKT_SPECIAL_STAT(1U)
+#define S_TXPKT_T5_FCS_DIS 21
+#define V_TXPKT_T5_FCS_DIS(x) ((x) << S_TXPKT_T5_FCS_DIS)
+#define F_TXPKT_T5_FCS_DIS V_TXPKT_T5_FCS_DIS(1U)
+
#define S_TXPKT_INS_OVLAN 21
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***