git: de65f94d760d - stable/13 - ixgbe: Style pass on FreeBSD part of driver
Date: Wed, 27 Nov 2024 02:14:25 UTC
The branch stable/13 has been updated by kbowling:

URL: https://cgit.FreeBSD.org/src/commit/?id=de65f94d760d57153b7b8a107bac7c5f5a26c03c

commit de65f94d760d57153b7b8a107bac7c5f5a26c03c
Author:     Kevin Bowling <kbowling@FreeBSD.org>
AuthorDate: 2024-11-24 07:18:33 +0000
Commit:     Kevin Bowling <kbowling@FreeBSD.org>
CommitDate: 2024-11-27 01:07:22 +0000

    ixgbe: Style pass on FreeBSD part of driver

    Fix up some indentation and reflow long lines

    Sponsored by:   BBOX.io

    (cherry picked from commit c58d34dd67a419866ee50f152044e49cecbae261)
---
 sys/dev/ixgbe/if_bypass.c | 110 ++++---
 sys/dev/ixgbe/if_fdir.c   |  24 +-
 sys/dev/ixgbe/if_ix.c     | 825 +++++++++++++++++++++++++++-------------------
 sys/dev/ixgbe/if_ixv.c    | 274 ++++++++-------
 sys/dev/ixgbe/if_sriov.c  |  59 ++--
 sys/dev/ixgbe/ix_txrx.c   | 106 +++---
 sys/dev/ixgbe/ixgbe.h     |   8 +-
 7 files changed, 790 insertions(+), 616 deletions(-)

diff --git a/sys/dev/ixgbe/if_bypass.c b/sys/dev/ixgbe/if_bypass.c
index 166150d75cc6..138b4e17db0d 100644
--- a/sys/dev/ixgbe/if_bypass.c
+++ b/sys/dev/ixgbe/if_bypass.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************

   Copyright (c) 2001-2017, Intel Corporation
   All rights reserved.
@@ -29,7 +29,7 @@
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.

-******************************************************************************/
+*****************************************************************************/

 #include "ixgbe.h"

@@ -114,11 +114,11 @@ ixgbe_get_bypass_time(u32 *year, u32 *sec)
 static int
 ixgbe_bp_version(SYSCTL_HANDLER_ARGS)
 {
-	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
 	struct ixgbe_hw *hw = &sc->hw;
-	int error = 0;
-	static int version = 0;
-	u32 cmd;
+	int error = 0;
+	static int version = 0;
+	u32 cmd;

 	ixgbe_bypass_mutex_enter(sc);
 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
@@ -154,15 +154,14 @@ err:
 static int
 ixgbe_bp_set_state(SYSCTL_HANDLER_ARGS)
 {
-	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
 	struct ixgbe_hw *hw = &sc->hw;
-	int error = 0;
-	static int state = 0;
+	int error = 0;
+	static int state = 0;

 	/* Get the current state */
 	ixgbe_bypass_mutex_enter(sc);
-	error = hw->mac.ops.bypass_rw(hw,
-	    BYPASS_PAGE_CTL0, &state);
+	error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &state);
 	ixgbe_bypass_mutex_clear(sc);
 	if (error != 0)
 		return (error);
@@ -216,10 +215,10 @@
 static int
 ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
 {
-	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
 	struct ixgbe_hw *hw = &sc->hw;
-	int error = 0;
-	static int timeout = 0;
+	int error = 0;
+	static int timeout = 0;

 	/* Get the current value */
 	ixgbe_bypass_mutex_enter(sc);
@@ -259,10 +258,10 @@ ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
 static int
 ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
 {
-	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
 	struct ixgbe_hw *hw = &sc->hw;
-	int error = 0;
-	static int main_on = 0;
+	int error = 0;
+	static int main_on = 0;

 	ixgbe_bypass_mutex_enter(sc);
 	error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_on);
@@ -301,10 +300,10 @@ ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
 static int
 ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
 {
-	struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; struct ixgbe_hw *hw = &sc->hw; - int error = 0; - static int main_off = 0; + int error = 0; + static int main_off = 0; ixgbe_bypass_mutex_enter(sc); error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_off); @@ -343,10 +342,10 @@ ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS) static int ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS) { - struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; + struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; struct ixgbe_hw *hw = &sc->hw; - int error = 0; - static int aux_on = 0; + int error = 0; + static int aux_on = 0; ixgbe_bypass_mutex_enter(sc); error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_on); @@ -385,10 +384,10 @@ ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS) static int ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS) { - struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; + struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; struct ixgbe_hw *hw = &sc->hw; - int error = 0; - static int aux_off = 0; + int error = 0; + static int aux_off = 0; ixgbe_bypass_mutex_enter(sc); error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_off); @@ -432,11 +431,11 @@ ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS) static int ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS) { - struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; + struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; struct ixgbe_hw *hw = &sc->hw; - int error, tmp; - static int timeout = 0; - u32 mask, arg; + int error, tmp; + static int timeout = 0; + u32 mask, arg; /* Get the current hardware value */ ixgbe_bypass_mutex_enter(sc); @@ -503,11 +502,11 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS) static int ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS) { - struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; + struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; struct ixgbe_hw *hw = &sc->hw; - u32 sec, year; - int cmd, count = 0, error = 0; - int reset_wd = 0; + u32 sec, year; + int cmd, count = 0, error = 0; + int reset_wd = 0; error = sysctl_handle_int(oidp, &reset_wd, 0, req); if ((error) || (req->newptr == NULL)) @@ -549,14 +548,14 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS) static int ixgbe_bp_log(SYSCTL_HANDLER_ARGS) { - struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; - struct ixgbe_hw *hw = &sc->hw; - u32 cmd, base, head; - u32 log_off, count = 0; - static int status = 0; - u8 data; + struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1; + struct ixgbe_hw *hw = &sc->hw; + u32 cmd, base, head; + u32 log_off, count = 0; + static int status = 0; + u8 data; struct ixgbe_bypass_eeprom eeprom[BYPASS_MAX_LOGS]; - int i, error = 0; + int i, error = 0; error = sysctl_handle_int(oidp, &status, 0, req); if ((error) || (req->newptr == NULL)) @@ -639,12 +638,15 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS) BYPASS_LOG_EVENT_SHIFT; u8 action = eeprom[count].actions & BYPASS_LOG_ACTION_M; u16 day_mon[2][13] = { - {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, - {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} + {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, + 334, 365}, + {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, + 335, 366} }; char *event_str[] = {"unknown", "main on", "aux on", "main off", "aux off", "WDT", "user" }; - char *action_str[] = {"ignore", "normal", "bypass", "isolate",}; + char *action_str[] = + {"ignore", "normal", "bypass", "isolate",}; /* verify vaild data 1 - 6 */ if (event < BYPASS_EVENT_MAIN_ON || event > BYPASS_EVENT_USR) @@ -711,11 +713,11 @@ unlock_err: void ixgbe_bypass_init(struct ixgbe_softc *sc) { - struct ixgbe_hw *hw = &sc->hw; - 
device_t dev = sc->dev; - struct sysctl_oid *bp_node; + struct ixgbe_hw *hw = &sc->hw; + device_t dev = sc->dev; + struct sysctl_oid *bp_node; struct sysctl_oid_list *bp_list; - u32 mask, value, sec, year; + u32 mask, value, sec, year; if (!(sc->feat_cap & IXGBE_FEATURE_BYPASS)) return; @@ -723,13 +725,13 @@ ixgbe_bypass_init(struct ixgbe_softc *sc) /* First set up time for the hardware */ ixgbe_get_bypass_time(&year, &sec); - mask = BYPASS_CTL1_TIME_M - | BYPASS_CTL1_VALID_M - | BYPASS_CTL1_OFFTRST_M; + mask = BYPASS_CTL1_TIME_M | + BYPASS_CTL1_VALID_M | + BYPASS_CTL1_OFFTRST_M; - value = (sec & BYPASS_CTL1_TIME_M) - | BYPASS_CTL1_VALID - | BYPASS_CTL1_OFFTRST; + value = (sec & BYPASS_CTL1_TIME_M) | + BYPASS_CTL1_VALID | + BYPASS_CTL1_OFFTRST; ixgbe_bypass_mutex_enter(sc); hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value); diff --git a/sys/dev/ixgbe/if_fdir.c b/sys/dev/ixgbe/if_fdir.c index e5abd7795fcd..f25d29fa91bf 100644 --- a/sys/dev/ixgbe/if_fdir.c +++ b/sys/dev/ixgbe/if_fdir.c @@ -1,4 +1,4 @@ -/****************************************************************************** +/***************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. @@ -29,7 +29,7 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -******************************************************************************/ +*****************************************************************************/ #include "ixgbe.h" @@ -51,9 +51,9 @@ ixgbe_init_fdir(struct ixgbe_softc *sc) void ixgbe_reinit_fdir(void *context) { - if_ctx_t ctx = context; + if_ctx_t ctx = context; struct ixgbe_softc *sc = iflib_get_softc(ctx); - struct ifnet *ifp = iflib_get_ifp(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); if (!(sc->feat_en & IXGBE_FEATURE_FDIR)) return; @@ -79,16 +79,16 @@ ixgbe_reinit_fdir(void *context) void ixgbe_atr(struct tx_ring *txr, struct mbuf *mp) { - struct ixgbe_softc *sc = txr->sc; - struct ix_queue *que; - struct ip *ip; - struct tcphdr *th; - struct udphdr *uh; - struct ether_vlan_header *eh; + struct ixgbe_softc *sc = txr->sc; + struct ix_queue *que; + struct ip *ip; + struct tcphdr *th; + struct udphdr *uh; + struct ether_vlan_header *eh; union ixgbe_atr_hash_dword input = {.dword = 0}; union ixgbe_atr_hash_dword common = {.dword = 0}; - int ehdrlen, ip_hlen; - u16 etype; + int ehdrlen, ip_hlen; + u16 etype; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c index 993aa48d51ef..d79972e0788d 100644 --- a/sys/dev/ixgbe/if_ix.c +++ b/sys/dev/ixgbe/if_ix.c @@ -1,4 +1,4 @@ -/****************************************************************************** +/***************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. @@ -29,7 +29,7 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-******************************************************************************/ +*****************************************************************************/ #include "opt_inet.h" #include "opt_inet6.h" @@ -58,53 +58,94 @@ static const char ixgbe_driver_version[] = "4.0.1-k"; ************************************************************************/ static const pci_vendor_info_t ixgbe_vendor_info_array[] = { - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR 
Backplane)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"), - PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, + "Intel(R) 82598EB AF (Dual Fiber)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, + "Intel(R) 82598EB AF (Fiber)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, + "Intel(R) 82598EB AT (CX4)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, + "Intel(R) 82598EB AT"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, + "Intel(R) 82598EB AT2"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, + "Intel(R) 82598EB AF DA (Dual Fiber)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, + "Intel(R) 82598EB AT (Dual CX4)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, + "Intel(R) 82598EB AF (Dual Fiber LR)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, + "Intel(R) 82598EB AF (Dual Fiber SR)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, + "Intel(R) 82598EB LOM"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, + "Intel(R) X520 82599 (KX4)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, + "Intel(R) X520 82599 (KX4 Mezzanine)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, + "Intel(R) X520 82599ES (SFI/SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, + "Intel(R) X520 82599 (XAUI/BX4)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, + "Intel(R) X520 82599 (Dual CX4)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, + "Intel(R) X520-T 82599 LOM"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, + "Intel(R) X520 82599 LS"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, + "Intel(R) X520 82599 (Combined Backplane)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, + "Intel(R) X520 82599 (Backplane w/FCoE)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, + "Intel(R) X520 82599 (Dual SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, + "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, + "Intel(R) X520-1 82599EN (SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, + "Intel(R) X520-4 82599 (Quad SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, + "Intel(R) X520-Q1 82599 (QSFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, + "Intel(R) X540-AT2"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) 
X550-T1"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, + "Intel(R) X552 (KR Backplane)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, + "Intel(R) X552 (KX4 Backplane)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, + "Intel(R) X552/X557-AT (10GBASE-T)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, + "Intel(R) X552 (1000BASE-T)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, + "Intel(R) X552 (SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, + "Intel(R) X553 (KR Backplane)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, + "Intel(R) X553 L (KR Backplane)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, + "Intel(R) X553 (SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, + "Intel(R) X553 N (SFP+)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, + "Intel(R) X553 (1GbE SGMII)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, + "Intel(R) X553 L (1GbE SGMII)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, + "Intel(R) X553/X557-AT (10GBASE-T)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, + "Intel(R) X553 (1GbE)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, + "Intel(R) X553 L (1GbE)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, + "Intel(R) X540-T2 (Bypass)"), + PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, + "Intel(R) X520 82599 (Bypass)"), /* required last entry */ - PVID_END + PVID_END }; static void *ixgbe_register(device_t); @@ -127,8 +168,10 @@ static int ixgbe_if_mtu_set(if_ctx_t, uint32_t); static void ixgbe_if_crcstrip_set(if_ctx_t, int, int); static void ixgbe_if_multi_set(if_ctx_t); static int ixgbe_if_promisc_set(if_ctx_t, int); -static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); -static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, + int); +static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, + int); static void ixgbe_if_queues_free(if_ctx_t); static void ixgbe_if_timer(if_ctx_t, uint16_t); static void ixgbe_if_update_admin_status(if_ctx_t); @@ -319,7 +362,8 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on; * but this allows it to be forced off for testing. */ static int ixgbe_enable_msix = 1; -SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, +SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, + 0, "Enable MSI-X interrupts"); /* @@ -337,12 +381,14 @@ SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN, * so we'll default to turning it off. 
*/ static int ixgbe_enable_fdir = 0; -SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, +SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, + 0, "Enable Flow Director"); /* Receive-Side Scaling */ static int ixgbe_enable_rss = 1; -SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, +SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, + 0, "Enable Receive-Side Scaling (RSS)"); /* @@ -352,7 +398,8 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, * traffic for that interrupt vector */ static int ixgbe_enable_aim = false; -SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0, +SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, + 0, "Enable adaptive interrupt moderation"); #if 0 @@ -408,9 +455,9 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct ixgbe_softc *sc = iflib_get_softc(ctx); - if_softc_ctx_t scctx = sc->shared; + if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que; - int i, j, error; + int i, j, error; MPASS(sc->num_tx_queues > 0); MPASS(sc->num_tx_queues == ntxqsets); @@ -418,8 +465,8 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, /* Allocate queue structure memory */ sc->tx_queues = - (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, - M_IXGBE, M_NOWAIT | M_ZERO); + (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * + ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO); if (!sc->tx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); @@ -430,20 +477,20 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, struct tx_ring *txr = &que->txr; /* In case SR-IOV is enabled, align the index properly */ - txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, - i); + txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); txr->sc = que->sc = sc; /* Allocate report status array */ - txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); + txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * + scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); if (txr->tx_rsq == NULL) { error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; - /* get the virtual and physical address of the hardware queues */ + /* get virtual and physical address of the hardware queues */ txr->tail = IXGBE_TDT(txr->me); txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; txr->tx_paddr = paddrs[i]; @@ -475,9 +522,9 @@ static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_softc *sc = iflib_get_softc(ctx); struct ix_rx_queue *que; - int i; + int i; MPASS(sc->num_rx_queues > 0); MPASS(sc->num_rx_queues == nrxqsets); @@ -486,7 +533,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, /* Allocate queue structure memory */ sc->rx_queues = (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, - M_IXGBE, M_NOWAIT | M_ZERO); + M_IXGBE, M_NOWAIT | M_ZERO); if (!sc->rx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); @@ -497,8 +544,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, struct rx_ring *rxr = &que->rxr; /* In case SR-IOV is enabled, align the index properly */ - rxr->me = 
ixgbe_vf_que_index(sc->iov_mode, sc->pool, - i); + rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); rxr->sc = que->sc = sc; @@ -522,10 +568,10 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, static void ixgbe_if_queues_free(if_ctx_t ctx) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_softc *sc = iflib_get_softc(ctx); struct ix_tx_queue *tx_que = sc->tx_queues; struct ix_rx_queue *rx_que = sc->rx_queues; - int i; + int i; if (tx_que != NULL) { for (i = 0; i < sc->num_tx_queues; i++, tx_que++) { @@ -553,10 +599,10 @@ static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) { struct ixgbe_hw *hw = &sc->hw; - u32 reta = 0, mrqc, rss_key[10]; - int queue_id, table_size, index_mult; - int i, j; - u32 rss_hash_config; + u32 reta = 0, mrqc, rss_key[10]; + int queue_id, table_size, index_mult; + int i, j; + u32 rss_hash_config; if (sc->feat_en & IXGBE_FEATURE_RSS) { /* Fetch the configured RSS key */ @@ -608,8 +654,8 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else - IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), - reta); + IXGBE_WRITE_REG(hw, + IXGBE_ERETA((i >> 2) - 32), reta); reta = 0; } } @@ -627,12 +673,12 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) * and so we end up with a mix of 2-tuple and 4-tuple * traffic. */ - rss_hash_config = RSS_HASHTYPE_RSS_IPV4 - | RSS_HASHTYPE_RSS_TCP_IPV4 - | RSS_HASHTYPE_RSS_IPV6 - | RSS_HASHTYPE_RSS_TCP_IPV6 - | RSS_HASHTYPE_RSS_IPV6_EX - | RSS_HASHTYPE_RSS_TCP_IPV6_EX; + rss_hash_config = RSS_HASHTYPE_RSS_IPV4 | + RSS_HASHTYPE_RSS_TCP_IPV4 | + RSS_HASHTYPE_RSS_IPV6 | + RSS_HASHTYPE_RSS_TCP_IPV6 | + RSS_HASHTYPE_RSS_IPV6_EX | + RSS_HASHTYPE_RSS_TCP_IPV6_EX; } mrqc = IXGBE_MRQC_RSSEN; @@ -666,14 +712,14 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) static void ixgbe_initialize_receive_units(if_ctx_t ctx) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); - if_softc_ctx_t scctx = sc->shared; - struct ixgbe_hw *hw = &sc->hw; - struct ifnet *ifp = iflib_get_ifp(ctx); + struct ixgbe_softc *sc = iflib_get_softc(ctx); + if_softc_ctx_t scctx = sc->shared; + struct ixgbe_hw *hw = &sc->hw; + struct ifnet *ifp = iflib_get_ifp(ctx); struct ix_rx_queue *que; - int i, j; - u32 bufsz, fctrl, srrctl, rxcsum; - u32 hlreg; + int i, j; + u32 bufsz, fctrl, srrctl, rxcsum; + u32 hlreg; /* * Make sure receives are disabled while @@ -704,7 +750,7 @@ ixgbe_initialize_receive_units(if_ctx_t ctx) /* Setup the Base and Length of the Rx Descriptor Ring */ for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; - u64 rdba = rxr->rx_paddr; + u64 rdba = rxr->rx_paddr; j = rxr->me; @@ -746,10 +792,10 @@ ixgbe_initialize_receive_units(if_ctx_t ctx) } if (sc->hw.mac.type != ixgbe_mac_82598EB) { - u32 psrtype = IXGBE_PSRTYPE_TCPHDR - | IXGBE_PSRTYPE_UDPHDR - | IXGBE_PSRTYPE_IPV4HDR - | IXGBE_PSRTYPE_IPV6HDR; + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); } @@ -779,9 +825,9 @@ ixgbe_initialize_receive_units(if_ctx_t ctx) static void ixgbe_initialize_transmit_units(if_ctx_t ctx) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); - struct ixgbe_hw *hw = &sc->hw; - if_softc_ctx_t scctx = sc->shared; + struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_hw *hw = &sc->hw; + if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que; int i; @@ -822,7 +868,8 @@ 
ixgbe_initialize_transmit_units(if_ctx_t ctx) txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); break; default: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); + txctrl = + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); break; } txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; @@ -831,7 +878,8 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx) IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); break; default: - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), + txctrl); break; } @@ -876,12 +924,12 @@ ixgbe_register(device_t dev) static int ixgbe_if_attach_pre(if_ctx_t ctx) { - struct ixgbe_softc *sc; - device_t dev; - if_softc_ctx_t scctx; + struct ixgbe_softc *sc; + device_t dev; + if_softc_ctx_t scctx; struct ixgbe_hw *hw; - int error = 0; - u32 ctrl_ext; + int error = 0; + u32 ctrl_ext; size_t i; INIT_DEBUGOUT("ixgbe_attach: begin"); @@ -923,8 +971,10 @@ ixgbe_if_attach_pre(if_ctx_t ctx) goto err_pci; } - if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { - device_printf(dev, "Firmware recovery mode detected. Limiting " + if (hw->mac.ops.fw_recovery_mode && + hw->mac.ops.fw_recovery_mode(hw)) { + device_printf(dev, + "Firmware recovery mode detected. Limiting " "functionality.\nRefer to the Intel(R) Ethernet Adapters " "and Devices User Guide for details on firmware recovery " "mode."); @@ -991,7 +1041,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx) error = ixgbe_start_hw(hw); switch (error) { case IXGBE_ERR_EEPROM_VERSION: - device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); + device_printf(dev, + "This device is a pre-production adapter/LOM. Please be" + " aware there may be issues associated with your" + " hardware.\nIf you are experiencing problems please" + " contact your Intel or hardware representative who" + " provided you with this hardware.\n"); break; case IXGBE_ERR_SFP_NOT_SUPPORTED: device_printf(dev, "Unsupported SFP+ Module\n"); @@ -1073,15 +1128,14 @@ static int ixgbe_if_attach_post(if_ctx_t ctx) { device_t dev; - struct ixgbe_softc *sc; + struct ixgbe_softc *sc; struct ixgbe_hw *hw; - int error = 0; + int error = 0; dev = iflib_get_dev(ctx); sc = iflib_get_softc(ctx); hw = &sc->hw; - if (sc->intr_type == IFLIB_INTR_LEGACY && (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) { device_printf(dev, "Device does not support legacy interrupts"); @@ -1090,10 +1144,11 @@ ixgbe_if_attach_post(if_ctx_t ctx) } /* Allocate multicast array memory. 
*/ - sc->mta = malloc(sizeof(*sc->mta) * - MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT); + sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES, + M_IXGBE, M_NOWAIT); if (sc->mta == NULL) { - device_printf(dev, "Can not allocate multicast setup array\n"); + device_printf(dev, + "Can not allocate multicast setup array\n"); error = ENOMEM; goto err; } @@ -1173,7 +1228,7 @@ static void ixgbe_check_wol_support(struct ixgbe_softc *sc) { struct ixgbe_hw *hw = &sc->hw; - u16 dev_caps = 0; + u16 dev_caps = 0; /* Find out WoL support for port */ sc->wol_support = hw->wol_enabled = 0; @@ -1197,7 +1252,7 @@ ixgbe_check_wol_support(struct ixgbe_softc *sc) static int ixgbe_setup_interface(if_ctx_t ctx) { - struct ifnet *ifp = iflib_get_ifp(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); struct ixgbe_softc *sc = iflib_get_softc(ctx); INIT_DEBUGOUT("ixgbe_setup_interface: begin"); @@ -1223,7 +1278,7 @@ static uint64_t ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct ixgbe_softc *sc = iflib_get_softc(ctx); - if_t ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: @@ -1257,10 +1312,9 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); - struct ixgbe_hw *hw = &sc->hw; - int i; - + struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_hw *hw = &sc->hw; + int i; if (hw->phy.ops.read_i2c_byte == NULL) return (ENXIO); @@ -1270,7 +1324,8 @@ ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) return (0); } /* ixgbe_if_i2c_req */ -/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized +/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be + * reinitialized * @ctx: iflib context * @event: event code to check * @@ -1294,10 +1349,10 @@ ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) static void ixgbe_add_media_types(if_ctx_t ctx) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_softc *sc = iflib_get_softc(ctx); struct ixgbe_hw *hw = &sc->hw; - device_t dev = iflib_get_dev(ctx); - u64 layer; + device_t dev = iflib_get_dev(ctx); + u64 layer; layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw); @@ -1417,10 +1472,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw) static void ixgbe_config_link(if_ctx_t ctx) { - struct ixgbe_softc *sc = iflib_get_softc(ctx); + struct ixgbe_softc *sc = iflib_get_softc(ctx); struct ixgbe_hw *hw = &sc->hw; - u32 autoneg, err = 0; - bool sfp, negotiate; + u32 autoneg, err = 0; + bool sfp, negotiate; sfp = ixgbe_is_sfp(hw); @@ -1481,11 +1536,11 @@ ixgbe_config_link(if_ctx_t ctx) static void ixgbe_update_stats_counters(struct ixgbe_softc *sc) { - struct ixgbe_hw *hw = &sc->hw; + struct ixgbe_hw *hw = &sc->hw; struct ixgbe_hw_stats *stats = &sc->stats.pf; - u32 missed_rx = 0, bprc, lxon, lxoff, total; - u32 lxoffrxc; - u64 total_missed_rx = 0; + u32 missed_rx = 0, bprc, lxon, lxoff, total; + u32 lxoffrxc; + u64 total_missed_rx = 0; stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); @@ -1630,19 +1685,19 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc) static void ixgbe_add_hw_stats(struct ixgbe_softc *sc) { - device_t dev = iflib_get_dev(sc->ctx); - struct ix_rx_queue *rx_que; - struct ix_tx_queue *tx_que; + device_t dev = iflib_get_dev(sc->ctx); + struct ix_rx_queue *rx_que; + struct ix_tx_queue *tx_que; struct sysctl_ctx_list *ctx = 
device_get_sysctl_ctx(dev); - struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct ixgbe_hw_stats *stats = &sc->stats.pf; - struct sysctl_oid *stat_node, *queue_node; + struct ixgbe_hw_stats *stats = &sc->stats.pf; + struct sysctl_oid *stat_node, *queue_node; struct sysctl_oid_list *stat_list, *queue_list; - int i; + int i; #define QUEUE_NAME_LEN 32 - char namebuf[QUEUE_NAME_LEN]; + char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", @@ -1652,7 +1707,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc) SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); - for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { + for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; + i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, @@ -1661,10 +1717,12 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc) SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, txr, 0, - ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); + ixgbe_sysctl_tdh_handler, "IU", + "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, txr, 0, - ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); + ixgbe_sysctl_tdt_handler, "IU", + "Transmit Descriptor Tail"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &txr->tso_tx, "TSO"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", @@ -1672,7 +1730,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc) "Queue Packets Transmitted"); } - for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { + for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; + i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, @@ -1689,10 +1748,12 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc) "irqs on this queue"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, rxr, 0, - ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); + ixgbe_sysctl_rdh_handler, "IU", + "Receive Descriptor Head"); *** 2221 LINES SKIPPED ***
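
[Editor's note, not part of the commit mail: the pattern applied throughout the diff above is the style(9) reflow convention of keeping lines within 80 columns, breaking after binary operators and wrapping long argument lists onto an indented continuation line. The snippet below is a minimal standalone sketch of that convention only; the identifiers (FLAG_*, build_mask) are hypothetical and do not appear in the driver.]

/*
 * Standalone illustration of the reflow style used in this commit: when a
 * line would exceed 80 columns, it is broken after the operator and the
 * continuation is indented, and long argument lists are wrapped the same
 * way.  All identifiers here are invented for illustration.
 */
#include <stdio.h>

#define FLAG_TIME	0x01
#define FLAG_VALID	0x02
#define FLAG_OFFTRST	0x04

static unsigned int
build_mask(unsigned int seconds)
{
	/* The operator stays at the end of the broken line ... */
	unsigned int mask = FLAG_TIME |
	    FLAG_VALID |
	    FLAG_OFFTRST;

	/* ... and continuations of long calls get a small extra indent. */
	printf("mask 0x%x built for %u seconds\n",
	    mask, seconds);

	return (mask);
}

int
main(void)
{
	return (build_mask(30) == 0);
}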