svn commit: r296050 - stable/9/sys/dev/bxe
David C Somayajulu
davidcs at FreeBSD.org
Thu Feb 25 18:28:58 UTC 2016
Author: davidcs
Date: Thu Feb 25 18:28:56 2016
New Revision: 296050
URL: https://svnweb.freebsd.org/changeset/base/296050
Log:
MFC r295830
Remove dead code. Clean up code. Improve clarity in debug messages.
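
The debug-message changes below all follow one pattern: when an operation
fails, log every input involved and the last observed value instead of a
bare static string. A small, self-contained sketch of that pattern in plain
C follows (the LOG_ERR macro, poll helper, and constants are illustrative
stand-ins only, not the driver's BLOGE()/REG_RD() API; see the actual hunks
such as bxe_nvram_read_dword() further down):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the driver's BLOGE() error-log macro. */
    #define LOG_ERR(...)   fprintf(stderr, __VA_ARGS__)
    #define POLL_ATTEMPTS  10

    /* Poll a register until its 'done' bit is set; on timeout, report the
     * offset, the command flags, and the last value read. */
    static int
    poll_done(uint32_t offset, uint32_t cmd_flags,
              uint32_t (*read_reg)(uint32_t))
    {
        uint32_t val = 0;
        int i;

        for (i = 0; i < POLL_ATTEMPTS; i++) {
            val = read_reg(offset);
            if (val & 0x1)   /* done bit */
                return (0);
        }

        /* Old style: LOG_ERR("read timeout expired\n");
         * New style: include the inputs and last observed value. */
        LOG_ERR("read timeout expired (offset 0x%x cmd_flags 0x%x val 0x%x)\n",
                offset, cmd_flags, val);
        return (-1);
    }

    /* Fake register read that never reports 'done', to hit the timeout path. */
    static uint32_t
    fake_read(uint32_t offset)
    {
        (void)offset;
        return (0);
    }

    int
    main(void)
    {
        return ((poll_done(0x100, 0x3, fake_read) == -1) ? 0 : 1);
    }
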
Modified:
stable/9/sys/dev/bxe/bxe.c
stable/9/sys/dev/bxe/bxe.h
stable/9/sys/dev/bxe/bxe_stats.c
stable/9/sys/dev/bxe/ecore_init.h
Directory Properties:
stable/9/ (props changed)
stable/9/sys/ (props changed)
stable/9/sys/dev/ (props changed)
Modified: stable/9/sys/dev/bxe/bxe.c
==============================================================================
--- stable/9/sys/dev/bxe/bxe.c Thu Feb 25 18:27:22 2016 (r296049)
+++ stable/9/sys/dev/bxe/bxe.c Thu Feb 25 18:28:56 2016 (r296050)
@@ -124,14 +124,6 @@ static struct bxe_device_type bxe_devs[]
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57712 MF 10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57712_VF,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57712 VF 10GbE"
- },
-#endif
{
BRCM_VENDORID,
CHIP_NUM_57800,
@@ -144,14 +136,6 @@ static struct bxe_device_type bxe_devs[]
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57800 MF 10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57800_VF,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57800 VF 10GbE"
- },
-#endif
{
BRCM_VENDORID,
CHIP_NUM_57810,
@@ -164,14 +148,6 @@ static struct bxe_device_type bxe_devs[]
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57810 MF 10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57810_VF,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57810 VF 10GbE"
- },
-#endif
{
BRCM_VENDORID,
CHIP_NUM_57811,
@@ -184,42 +160,18 @@ static struct bxe_device_type bxe_devs[]
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57811 MF 10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57811_VF,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57811 VF 10GbE"
- },
-#endif
{
BRCM_VENDORID,
CHIP_NUM_57840_4_10,
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57840 4x10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57840_2_20,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57840 2x20GbE"
- },
-#endif
{
BRCM_VENDORID,
CHIP_NUM_57840_MF,
PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM57840 MF 10GbE"
},
-#if 0
- {
- BRCM_VENDORID,
- CHIP_NUM_57840_VF,
- PCI_ANY_ID, PCI_ANY_ID,
- "QLogic NetXtreme II BCM57840 VF 10GbE"
- },
-#endif
{
0, 0, 0, 0, NULL
}
@@ -245,10 +197,6 @@ static device_method_t bxe_methods[] = {
DEVMETHOD(device_attach, bxe_attach),
DEVMETHOD(device_detach, bxe_detach),
DEVMETHOD(device_shutdown, bxe_shutdown),
-#if 0
- DEVMETHOD(device_suspend, bxe_suspend),
- DEVMETHOD(device_resume, bxe_resume),
-#endif
/* Bus interface (bus_if.h) */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
@@ -469,12 +417,6 @@ static const struct {
8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
{ STATS_OFFSET32(total_tpa_bytes_hi),
8, STATS_FLAGS_FUNC, "tpa_bytes"},
-#if 0
- { STATS_OFFSET32(recoverable_error),
- 4, STATS_FLAGS_FUNC, "recoverable_errors" },
- { STATS_OFFSET32(unrecoverable_error),
- 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
-#endif
{ STATS_OFFSET32(eee_tx_lpi),
4, STATS_FLAGS_PORT, "eee_tx_lpi"},
{ STATS_OFFSET32(rx_calls),
@@ -527,12 +469,6 @@ static const struct {
4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
{ STATS_OFFSET32(tx_window_violation_tso),
4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
-#if 0
- { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
- 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"},
- { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
- 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"},
-#endif
{ STATS_OFFSET32(tx_chain_lost_mbuf),
4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
{ STATS_OFFSET32(tx_frames_deferred),
@@ -644,12 +580,6 @@ static const struct {
4, "tx_window_violation_std"},
{ Q_STATS_OFFSET32(tx_window_violation_tso),
4, "tx_window_violation_tso"},
-#if 0
- { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
- 4, "tx_unsupported_tso_request_ipv6"},
- { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
- 4, "tx_unsupported_tso_request_not_tcp"},
-#endif
{ Q_STATS_OFFSET32(tx_chain_lost_mbuf),
4, "tx_chain_lost_mbuf"},
{ Q_STATS_OFFSET32(tx_frames_deferred),
@@ -917,12 +847,6 @@ bxe_dma_map_addr(void *arg, bus_dma_segm
} else {
dma->paddr = segs->ds_addr;
dma->nseg = nseg;
-#if 0
- BLOGD(dma->sc, DBG_LOAD,
- "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
- dma->msg, dma->vaddr, (void *)dma->paddr,
- dma->nseg, dma->size);
-#endif
}
}
@@ -1007,13 +931,6 @@ bxe_dma_free(struct bxe_softc *sc,
struct bxe_dma *dma)
{
if (dma->size > 0) {
-#if 0
- BLOGD(sc, DBG_LOAD,
- "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
- dma->msg, dma->vaddr, (void *)dma->paddr,
- dma->nseg, dma->size);
-#endif
-
DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
bus_dmamap_sync(dma->tag, dma->map,
@@ -1054,69 +971,6 @@ bxe_reg_rd_ind(struct bxe_softc *sc,
return (val);
}
-#if 0
-void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
-{
- uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;
-
- switch (dmae->opcode & DMAE_COMMAND_DST) {
- case DMAE_CMD_DST_PCI:
- if (src_type == DMAE_CMD_SRC_PCI)
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
- "comp_addr [%x:%08x], comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
- dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
- dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- else
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src [%08x], len [%d*4], dst [%x:%08x]\n"
- "comp_addr [%x:%08x], comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_lo >> 2,
- dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
- dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- break;
- case DMAE_CMD_DST_GRC:
- if (src_type == DMAE_CMD_SRC_PCI)
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
- "comp_addr [%x:%08x], comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
- dmae->len, dmae->dst_addr_lo >> 2,
- dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- else
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src [%08x], len [%d*4], dst [%08x]\n"
- "comp_addr [%x:%08x], comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_lo >> 2,
- dmae->len, dmae->dst_addr_lo >> 2,
- dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- break;
- default:
- if (src_type == DMAE_CMD_SRC_PCI)
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
- "comp_addr [%x:%08x] comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
- dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- else
- DP(msglvl, "DMAE: opcode 0x%08x\n"
- "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
- "comp_addr [%x:%08x] comp_val 0x%08x\n",
- dmae->opcode, dmae->src_addr_lo >> 2,
- dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
- dmae->comp_val);
- break;
- }
-
-}
-#endif
-
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
uint32_t resource)
@@ -1129,7 +983,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
+ BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
+ " resource_bit 0x%x\n", resource, resource_bit);
return (-1);
}
@@ -1143,8 +998,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc
/* validate the resource is not already taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (lock_status & resource_bit) {
- BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
- lock_status, resource_bit);
+ BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
+ resource, lock_status, resource_bit);
return (-1);
}
@@ -1158,7 +1013,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc
DELAY(5000);
}
- BLOGE(sc, "Resource lock timeout!\n");
+ BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
+ resource, resource_bit);
return (-1);
}
@@ -1173,7 +1029,8 @@ bxe_release_hw_lock(struct bxe_softc *sc
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
+ BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
+ " resource_bit 0x%x\n", resource, resource_bit);
return (-1);
}
@@ -1187,8 +1044,8 @@ bxe_release_hw_lock(struct bxe_softc *sc
/* validate the resource is currently taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
- lock_status, resource_bit);
+ BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
+ resource, lock_status, resource_bit);
return (-1);
}
@@ -1250,7 +1107,9 @@ bxe_acquire_nvram_lock(struct bxe_softc
}
if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
- BLOGE(sc, "Cannot get access to nvram interface\n");
+ BLOGE(sc, "Cannot get access to nvram interface "
+ "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
+ port, val);
return (-1);
}
@@ -1284,7 +1143,9 @@ bxe_release_nvram_lock(struct bxe_softc
}
if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
- BLOGE(sc, "Cannot free access to nvram interface\n");
+ BLOGE(sc, "Cannot free access to nvram interface "
+ "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
+ port, val);
return (-1);
}
@@ -1367,7 +1228,9 @@ bxe_nvram_read_dword(struct bxe_softc *s
}
if (rc == -1) {
- BLOGE(sc, "nvram read timeout expired\n");
+ BLOGE(sc, "nvram read timeout expired "
+ "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
+ offset, cmd_flags, val);
}
return (rc);
@@ -1473,7 +1336,9 @@ bxe_nvram_write_dword(struct bxe_softc *
}
if (rc == -1) {
- BLOGE(sc, "nvram write timeout expired\n");
+ BLOGE(sc, "nvram write timeout expired "
+ "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
+ offset, cmd_flags, val);
}
return (rc);
@@ -1707,7 +1572,8 @@ bxe_issue_dmae_with_comp(struct bxe_soft
if (!timeout ||
(sc->recovery_state != BXE_RECOVERY_DONE &&
sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
- BLOGE(sc, "DMAE timeout!\n");
+ BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
+ *wb_comp, sc->recovery_state);
BXE_DMAE_UNLOCK(sc);
return (DMAE_TIMEOUT);
}
@@ -1717,7 +1583,8 @@ bxe_issue_dmae_with_comp(struct bxe_soft
}
if (*wb_comp & DMAE_PCI_ERR_FLAG) {
- BLOGE(sc, "DMAE PCI error!\n");
+ BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
+ *wb_comp, sc->recovery_state);
BXE_DMAE_UNLOCK(sc);
return (DMAE_PCI_ERROR);
}
@@ -1952,12 +1819,6 @@ elink_cb_event_log(struct bxe_softc
...)
{
/* XXX */
-#if 0
- //va_list ap;
- va_start(ap, elink_log_id);
- _XXX_(sc, lm_log_id, ap);
- va_end(ap);
-#endif
BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}
@@ -1970,7 +1831,7 @@ bxe_set_spio(struct bxe_softc *sc,
/* Only 2 SPIOs are configurable */
if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
- BLOGE(sc, "Invalid SPIO 0x%x\n", spio);
+ BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
return (-1);
}
@@ -2024,7 +1885,9 @@ bxe_gpio_read(struct bxe_softc *sc,
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
+ BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
+ " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
+ gpio_mask);
return (-1);
}
@@ -2050,7 +1913,9 @@ bxe_gpio_write(struct bxe_softc *sc,
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
+ BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
+ " gpio_shift %d gpio_mask 0x%x\n",
+ gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
return (-1);
}
@@ -2133,7 +1998,8 @@ bxe_gpio_mult_write(struct bxe_softc *sc
break;
default:
- BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode);
+ BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
+ " gpio_reg 0x%x\n", pins, mode, gpio_reg);
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
return (-1);
}
@@ -2159,7 +2025,9 @@ bxe_gpio_int_write(struct bxe_softc *sc,
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
+ BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
+ " gpio_shift %d gpio_mask 0x%x\n",
+ gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
return (-1);
}
@@ -2578,29 +2446,6 @@ bxe_sp_post(struct bxe_softc *sc,
* @sc: driver hanlde
* @p: pointer to rss configuration
*/
-#if 0
-static void
-bxe_debug_print_ind_table(struct bxe_softc *sc,
- struct ecore_config_rss_params *p)
-{
- int i;
-
- BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
- BLOGD(sc, DBG_LOAD, " 0x0000: ");
- for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
- BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);
-
- /* Print 4 bytes in a line */
- if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
- (((i + 1) & 0x3) == 0)) {
- BLOGD(sc, DBG_LOAD, "\n");
- BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
- }
- }
-
- BLOGD(sc, DBG_LOAD, "\n");
-}
-#endif
/*
* FreeBSD Device probe function.
@@ -2775,13 +2620,6 @@ bxe_tx_avail(struct bxe_softc *sc,
used = SUB_S16(prod, cons);
-#if 0
- KASSERT((used < 0), ("used tx bds < 0"));
- KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size"));
- KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL),
- ("invalid number of tx bds used"));
-#endif
-
return (int16_t)(sc->tx_ring_size) - used;
}
@@ -2827,16 +2665,6 @@ bxe_sp_event(struct bxe_softc *sc,
BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
-#if 0
- /*
- * If cid is within VF range, replace the slowpath object with the
- * one corresponding to this VF
- */
- if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) {
- bxe_iov_set_queue_sp_obj(sc, cid, &q_obj);
- }
-#endif
-
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -2888,34 +2716,10 @@ bxe_sp_event(struct bxe_softc *sc,
return;
}
-#if 0
- /* SRIOV: reschedule any 'in_progress' operations */
- bxe_iov_sp_event(sc, cid, TRUE);
-#endif
-
atomic_add_acq_long(&sc->cq_spq_left, 1);
BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
atomic_load_acq_long(&sc->cq_spq_left));
-
-#if 0
- if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
- (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) {
- /*
- * If Queue update ramrod is completed for last Queue in AFEX VIF set
- * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to
- * prevent case that both bits are cleared. At the end of load/unload
- * driver checks that sp_state is cleared and this order prevents
- * races.
- */
- bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state);
- wmb();
- bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state);
-
- /* schedule the sp task as MCP ack is required */
- bxe_schedule_sp_task(sc);
- }
-#endif
}
/*
@@ -2956,8 +2760,15 @@ bxe_tpa_start(struct bxe_softc
tmp_bd = tpa_info->bd;
if (tmp_bd.m == NULL) {
- BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n",
- fp->index, queue);
+ uint32_t *tmp;
+
+ tmp = (uint32_t *)cqe;
+
+ BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
+ fp->index, queue, cons, prod);
+ BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
+
/* XXX Error handling? */
return;
}
@@ -3038,10 +2849,17 @@ bxe_fill_frag_mbuf(struct bxe_softc
/* make sure the aggregated frame is not too big to handle */
if (pages > 8 * PAGES_PER_SGE) {
+
+ uint32_t *tmp = (uint32_t *)cqe;
+
BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
"pkt_len=%d len_on_bd=%d frag_size=%d\n",
fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
tpa_info->len_on_bd, frag_size);
+
+ BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
+
bxe_panic(sc, ("sge page count error\n"));
return (EINVAL);
}
@@ -3402,15 +3220,6 @@ bxe_rxeof(struct bxe_softc *sc,
uint16_t frag_size, pages;
uint8_t queue;
-#if 0
- /* sanity check */
- if (!fp->tpa_enable &&
- (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) {
- BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n",
- CQE_TYPE(cqe_fp_type));
- }
-#endif
-
if (CQE_TYPE_START(cqe_fp_type)) {
bxe_tpa_start(sc, fp, cqe_fp->queue_index,
bd_cons, bd_prod, cqe_fp);
@@ -3616,44 +3425,8 @@ bxe_free_tx_pkt(struct bxe_softc *sc,
tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
nbd = le16toh(tx_start_bd->nbd) - 1;
-#if 0
- if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) {
- bxe_panic(sc, ("BAD nbd!\n"));
- }
-#endif
-
new_cons = (tx_buf->first_bd + nbd);
-#if 0
- struct eth_tx_bd *tx_data_bd;
-
- /*
- * The following code doesn't do anything but is left here
- * for clarity on what the new value of new_cons skipped.
- */
-
- /* get the next bd */
- bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
-
- /* skip the parse bd */
- --nbd;
- bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
-
- /* skip the TSO split header bd since they have no mapping */
- if (tx_buf->flags & BXE_TSO_SPLIT_BD) {
- --nbd;
- bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
- }
-
- /* now free frags */
- while (nbd > 0) {
- tx_data_bd = &fp->tx_chain[bd_idx].reg_bd;
- if (--nbd) {
- bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
- }
- }
-#endif
-
/* free the mbuf */
if (__predict_true(tx_buf->m != NULL)) {
m_freem(tx_buf->m);
@@ -3798,7 +3571,8 @@ bxe_del_all_macs(struct bxe_softc
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc < 0) {
- BLOGE(sc, "Failed to delete MACs (%d)\n", rc);
+ BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
+ rc, mac_type, wait_for_comp);
}
return (rc);
@@ -3870,7 +3644,7 @@ bxe_fill_accept_flags(struct bxe_softc *
break;
default:
- BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode);
+ BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
return (-1);
}
@@ -3918,7 +3692,11 @@ bxe_set_q_rx_mode(struct bxe_softc *sc,
rc = ecore_config_rx_mode(sc, &ramrod_param);
if (rc < 0) {
- BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode);
+ BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
+ "rx_accept_flags 0x%x tx_accept_flags 0x%x "
+ "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
+ (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
+ (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
return (rc);
}
@@ -4001,52 +3779,11 @@ bxe_send_unload_req(struct bxe_softc *sc
int unload_mode)
{
uint32_t reset_code = 0;
-#if 0
- int port = SC_PORT(sc);
- int path = SC_PATH(sc);
-#endif
/* Select the UNLOAD request mode */
if (unload_mode == UNLOAD_NORMAL) {
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- }
-#if 0
- else if (sc->flags & BXE_NO_WOL_FLAG) {
- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
- } else if (sc->wol) {
- uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- uint8_t *mac_addr = sc->dev->dev_addr;
- uint32_t val;
- uint16_t pmc;
-
- /*
- * The mac address is written to entries 1-4 to
- * preserve entry 0 which is used by the PMF
- */
- uint8_t entry = (SC_VN(sc) + 1)*8;
-
- val = (mac_addr[0] << 8) | mac_addr[1];
- EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
- val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
- (mac_addr[4] << 8) | mac_addr[5];
- EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
- /* Enable the PME and clear the status */
- pmc = pci_read_config(sc->dev,
- (sc->devinfo.pcie_pm_cap_reg +
- PCIR_POWER_STATUS),
- 2);
- pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME;
- pci_write_config(sc->dev,
- (sc->devinfo.pcie_pm_cap_reg +
- PCIR_POWER_STATUS),
- pmc, 4);
-
- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
- }
-#endif
- else {
+ } else {
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
}
@@ -4221,7 +3958,7 @@ bxe_func_stop(struct bxe_softc *sc)
rc = ecore_func_state_change(sc, &func_params);
if (rc) {
BLOGE(sc, "FUNC_STOP ramrod failed. "
- "Running a dry transaction\n");
+ "Running a dry transaction (%d)\n", rc);
bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
return (ecore_func_state_change(sc, &func_params));
}
@@ -4332,7 +4069,7 @@ bxe_chip_cleanup(struct bxe_softc *sc,
*/
rc = bxe_func_wait_started(sc);
if (rc) {
- BLOGE(sc, "bxe_func_wait_started failed\n");
+ BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
}
/*
@@ -4350,14 +4087,14 @@ bxe_chip_cleanup(struct bxe_softc *sc,
* very wrong has happen.
*/
if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
- BLOGE(sc, "Common slow path ramrods got stuck!\n");
+ BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
}
unload_error:
rc = bxe_func_stop(sc);
if (rc) {
- BLOGE(sc, "Function stop failed!\n");
+ BLOGE(sc, "Function stop failed!(%d)\n", rc);
}
/* disable HW interrupts */
@@ -4369,7 +4106,7 @@ unload_error:
/* Reset the chip */
rc = bxe_reset_hw(sc, reset_code);
if (rc) {
- BLOGE(sc, "Hardware reset failed\n");
+ BLOGE(sc, "Hardware reset failed(%d)\n", rc);
}
/* Report UNLOAD_DONE to MCP */
@@ -4495,7 +4232,8 @@ bxe_nic_unload(struct bxe_softc *sc,
mb();
BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
- BLOGE(sc, "Can't unload in closed or error state\n");
+ BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
+ " state = 0x%x\n", sc->recovery_state, sc->state);
return (-1);
}
@@ -4702,7 +4440,8 @@ bxe_ioctl_nvram(struct bxe_softc *sc,
if ((nvdata = (struct bxe_nvram_data *)
malloc(len, M_DEVBUF,
(M_NOWAIT | M_ZERO))) == NULL) {
- BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n");
+ BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x "
+ " len = 0x%x\n", priv_op, len);
return (1);
}
memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
@@ -5401,11 +5140,6 @@ bxe_set_pbd_lso_e2(struct mbuf *m,
ETH_TX_PARSE_BD_E2_LSO_MSS);
/* XXX test for IPv6 with extension header... */
-#if 0
- struct ip6_hdr *ip6;
- if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header')
- *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
-#endif
}
static void
@@ -5666,17 +5400,6 @@ bxe_tx_encap_continue:
} else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
-#if 0
- /*
- * If NPAR-SD is active then FW should do the tagging regardless
- * of value of priority. Otherwise, if priority indicates this is
- * a control packet we need to indicate to FW to avoid tagging.
- */
- if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) {
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_FORCE_VLAN_MODE, 1);
- }
-#endif
}
}
@@ -5716,25 +5439,6 @@ bxe_tx_encap_continue:
hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
}
-#if 0
- /*
- * Add the MACs to the parsing BD if the module param was
- * explicitly set, if this is a vf, or in switch independent
- * mode.
- */
- if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
- eh = mtod(m0, struct ether_vlan_header *);
- bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
- &pbd_e2->data.mac_addr.src_mid,
- &pbd_e2->data.mac_addr.src_lo,
- eh->evl_shost);
- bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
- &pbd_e2->data.mac_addr.dst_mid,
- &pbd_e2->data.mac_addr.dst_lo,
- eh->evl_dhost);
- }
-#endif
-
SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
mac_type);
} else {
@@ -6362,13 +6066,6 @@ bxe_free_mem(struct bxe_softc *sc)
{
int i;
-#if 0
- if (!CONFIGURE_NIC_MODE(sc)) {
- /* free searcher T2 table */
- bxe_dma_free(sc, &sc->t2);
- }
-#endif
-
for (i = 0; i < L2_ILT_LINES(sc); i++) {
bxe_dma_free(sc, &sc->context[i].vcxt_dma);
sc->context[i].vcxt = NULL;
@@ -6379,9 +6076,6 @@ bxe_free_mem(struct bxe_softc *sc)
bxe_free_ilt_lines_mem(sc);
-#if 0
- bxe_iov_free_mem(sc);
-#endif
}
static int
@@ -6391,16 +6085,6 @@ bxe_alloc_mem(struct bxe_softc *sc)
int allocated;
int i;
-#if 0
- if (!CONFIGURE_NIC_MODE(sc)) {
- /* allocate searcher T2 table */
- if (bxe_dma_alloc(sc, SRC_T2_SZ,
- &sc->t2, "searcher t2 table") != 0) {
- return (-1);
- }
- }
-#endif
-
/*
* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
@@ -6455,14 +6139,6 @@ bxe_alloc_mem(struct bxe_softc *sc)
return (-1);
}
-#if 0
- if (bxe_iov_alloc_mem(sc)) {
- BLOGE(sc, "Failed to allocate memory for SRIOV\n");
- bxe_free_mem(sc);
- return (-1);
- }
-#endif
-
return (0);
}
@@ -8340,27 +8016,9 @@ bxe_attn_int_deasserted3(struct bxe_soft
if (val & DRV_STATUS_DRV_INFO_REQ)
bxe_handle_drv_info_req(sc);
-#if 0
- if (val & DRV_STATUS_VF_DISABLED)
- bxe_vf_handle_flr_event(sc);
-#endif
-
if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
bxe_pmf_update(sc);
-#if 0
- if (sc->port.pmf &&
- (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
- (sc->dcbx_enabled > 0))
- /* start dcbx state machine */
- bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED);
-#endif
-
-#if 0
- if (val & DRV_STATUS_AFEX_EVENT_MASK)
- bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK);
-#endif
-
if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
bxe_handle_eee_event(sc);
@@ -8761,8 +8419,7 @@ bxe_handle_mcast_eqe(struct bxe_softc *s
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
if (rc < 0) {
BLOGD(sc, DBG_SP,
- "ERROR: Failed to send pending mcast commands (%d)\n",
- rc);
+ "ERROR: Failed to send pending mcast commands (%d)\n", rc);
}
}
@@ -8822,16 +8479,6 @@ bxe_handle_rx_mode_eqe(struct bxe_softc
&sc->sp_state)) {
bxe_set_storm_rx_mode(sc);
}
-#if 0
- else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
- &sc->sp_state)) {
- bxe_set_iscsi_eth_rx_mode(sc, TRUE);
- }
- else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
- &sc->sp_state)) {
- bxe_set_iscsi_eth_rx_mode(sc, FALSE);
- }
-#endif
}
static void
@@ -8883,27 +8530,12 @@ bxe_eq_int(struct bxe_softc *sc)
elem = &sc->eq[EQ_DESC(sw_cons)];
-#if 0
- int rc;
- rc = bxe_iov_eq_sp_event(sc, elem);
- if (!rc) {
- BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc);
- goto next_spqe;
- }
-#endif
-
/* elem CID originates from FW, actually LE */
cid = SW_CID(elem->message.data.cfc_del_event.cid);
opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
-#if 0
- case EVENT_RING_OPCODE_VF_PF_CHANNEL:
- BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n");
- bxe_vf_mbx(sc, &elem->message.data.vf_pf_event);
- continue;
-#endif
case EVENT_RING_OPCODE_STAT_QUERY:
BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
@@ -8949,25 +8581,9 @@ bxe_eq_int(struct bxe_softc *sc)
else {
BLOGD(sc, DBG_SP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
-#if 0
- f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE);
- /*
- * We will perform the queues update from the sp_core_task as
- * all queue SP operations should run with CORE_LOCK.
- */
- bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state);
- taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
-#endif
}
goto next_spqe;
-#if 0
- case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
- f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS);
- bxe_after_afex_vif_lists(sc, elem);
- goto next_spqe;
-#endif
-
case EVENT_RING_OPCODE_FORWARD_SETUP:
q_obj = &bxe_fwd_sp_obj(sc, q_obj);
if (q_obj->complete_cmd(sc, q_obj,
@@ -9094,14 +8710,6 @@ bxe_handle_sp_tq(void *context,
*/
// XXX bxe_iov_sp_task(sc);
-#if 0
- /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */
- if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
- &sc->sp_state)) {
- bxe_link_report(sc);
- bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
- }
-#endif
}
static void
@@ -9210,13 +8818,6 @@ bxe_intr_legacy(void *xsc)
BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***