D11727.id32532.diff
Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -2168,11 +2168,9 @@
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_bypass.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_netmap.c optional ix inet \
- compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_fdir.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/if_sriov.c optional ix inet | ixv inet \
+dev/ixgbe/if_sriov.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
Index: sys/dev/ixgbe/if_bypass.c
===================================================================
--- sys/dev/ixgbe/if_bypass.c
+++ sys/dev/ixgbe/if_bypass.c
@@ -165,12 +165,12 @@
error = hw->mac.ops.bypass_rw(hw,
BYPASS_PAGE_CTL0, &state);
ixgbe_bypass_mutex_clear(adapter);
- if (error)
+ if (error != 0)
return (error);
state = (state >> BYPASS_STATUS_OFF_SHIFT) & 0x3;
error = sysctl_handle_int(oidp, &state, 0, req);
- if ((error) || (req->newptr == NULL))
+ if ((error != 0) || (req->newptr == NULL))
return (error);
/* Sanity check new state */
@@ -437,7 +437,7 @@
struct ixgbe_hw *hw = &adapter->hw;
int error, tmp;
static int timeout = 0;
- u32 mask, arg = BYPASS_PAGE_CTL0;
+ u32 mask, arg;
/* Get the current hardware value */
ixgbe_bypass_mutex_enter(adapter);
@@ -456,48 +456,38 @@
if ((error) || (req->newptr == NULL))
return (error);
- mask = BYPASS_WDT_ENABLE_M;
+ arg = 0x1 << BYPASS_WDT_ENABLE_SHIFT;
+ mask = BYPASS_WDT_ENABLE_M | BYPASS_WDT_VALUE_M;
switch (timeout) {
- case 0: /* disables the timer */
- break;
- case 1:
- arg = BYPASS_WDT_1_5 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 2:
- arg = BYPASS_WDT_2 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 3:
- arg = BYPASS_WDT_3 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 4:
- arg = BYPASS_WDT_4 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 8:
- arg = BYPASS_WDT_8 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 16:
- arg = BYPASS_WDT_16 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- case 32:
- arg = BYPASS_WDT_32 << BYPASS_WDT_TIME_SHIFT;
- arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_VALUE_M;
- break;
- default:
- return (EINVAL);
+ case 0: /* disables the timer */
+ arg = BYPASS_PAGE_CTL0;
+ mask = BYPASS_WDT_ENABLE_M;
+ break;
+ case 1:
+ arg |= BYPASS_WDT_1_5 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 2:
+ arg |= BYPASS_WDT_2 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 3:
+ arg |= BYPASS_WDT_3 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 4:
+ arg |= BYPASS_WDT_4 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 8:
+ arg |= BYPASS_WDT_8 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 16:
+ arg |= BYPASS_WDT_16 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ case 32:
+ arg |= BYPASS_WDT_32 << BYPASS_WDT_TIME_SHIFT;
+ break;
+ default:
+ return (EINVAL);
}
+
/* Set the new watchdog */
ixgbe_bypass_mutex_enter(adapter);
error = hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL0, mask, arg);
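Note on the hunk above: the rewrite hoists the common enable bit and full mask ahead of the switch, so each timeout case only ORs in its time field; the disable case alone overwrites both (keeping the old BYPASS_PAGE_CTL0 value in arg, as the previous code did). A standalone sketch of the same pattern, with stand-in constants rather than the driver's real BYPASS_* values:

#include <errno.h>
#include <stdint.h>

#define WDT_ENABLE_SHIFT 0			/* stand-in values */
#define WDT_TIME_SHIFT   1
#define WDT_ENABLE_M     (0x1 << WDT_ENABLE_SHIFT)
#define WDT_VALUE_M      (0x7 << WDT_TIME_SHIFT)

static int
wdt_encode(int timeout, uint32_t *arg, uint32_t *mask)
{
	/* Defaults shared by every enable case. */
	*arg = 0x1 << WDT_ENABLE_SHIFT;
	*mask = WDT_ENABLE_M | WDT_VALUE_M;

	switch (timeout) {
	case 0:				/* disable: touch only the enable bit */
		*arg = 0;
		*mask = WDT_ENABLE_M;
		break;
	case 1:
		*arg |= 0x1 << WDT_TIME_SHIFT;
		break;
	case 2:
		*arg |= 0x2 << WDT_TIME_SHIFT;
		break;
	default:
		return (EINVAL);	/* unsupported timeout */
	}
	return (0);
}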
@@ -541,7 +531,8 @@
error = IXGBE_BYPASS_FW_WRITE_FAILURE;
break;
}
- if (hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd)) {
+ error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd);
+ if (error != 0) {
error = IXGBE_ERR_INVALID_ARGUMENT;
break;
}
@@ -615,7 +606,7 @@
&data);
ixgbe_bypass_mutex_clear(adapter);
if (error)
- return (-EINVAL);
+ return (EINVAL);
eeprom[count].logs += data << (8 * i);
}
@@ -624,7 +615,7 @@
log_off + i, &eeprom[count].actions);
ixgbe_bypass_mutex_clear(adapter);
if (error)
- return (-EINVAL);
+ return (EINVAL);
/* Quit if not a unread log */
if (!(eeprom[count].logs & BYPASS_LOG_CLEAR_M))
@@ -696,21 +687,21 @@
ixgbe_bypass_mutex_clear(adapter);
if (error)
- return (-EINVAL);
+ return (EINVAL);
}
status = 0; /* reset */
/* Another log command can now run */
while (atomic_cmpset_int(&adapter->bypass.log, 1, 0) == 0)
usec_delay(3000);
- return(error);
+ return (error);
unlock_err:
ixgbe_bypass_mutex_clear(adapter);
status = 0; /* reset */
while (atomic_cmpset_int(&adapter->bypass.log, 1, 0) == 0)
usec_delay(3000);
- return (-EINVAL);
+ return (EINVAL);
} /* ixgbe_bp_log */
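The -EINVAL to EINVAL changes throughout this file restore the FreeBSD convention: kernel functions and sysctl handlers report failure with positive errno values, unlike the negated-errno style common in Linux drivers. A minimal handler sketch showing the expected shape:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int
example_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0)
		return (EINVAL);	/* positive errno, never -EINVAL */
	return (0);
}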
/************************************************************************
@@ -802,7 +793,5 @@
adapter, 0, ixgbe_bp_wd_reset, "S", "Bypass WD Reset");
adapter->feat_en |= IXGBE_FEATURE_BYPASS;
-
- return;
} /* ixgbe_bypass_init */
Index: sys/dev/ixgbe/if_fdir.c
===================================================================
--- sys/dev/ixgbe/if_fdir.c
+++ sys/dev/ixgbe/if_fdir.c
@@ -50,10 +50,11 @@
} /* ixgbe_init_fdir */
void
-ixgbe_reinit_fdir(void *context, int pending)
+ixgbe_reinit_fdir(void *context)
{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
if (!(adapter->feat_en & IXGBE_FEATURE_FDIR))
return;
@@ -146,9 +147,9 @@
/* TASK_INIT needs this function defined regardless if it's enabled */
void
-ixgbe_reinit_fdir(void *context, int pending)
+ixgbe_reinit_fdir(void *context)
{
- UNREFERENCED_2PARAMETER(context, pending);
+ UNREFERENCED_PARAMETER(context);
} /* ixgbe_reinit_fdir */
void
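The signature change above follows from moving off taskqueue(9): TASK_INIT handlers take (void *, int pending), while the iflib config gtasks this patch registers (via iflib_config_gtask_init in if_ix.c) take only the context pointer. A sketch of the assumed new shape, relying on the driver's usual ixgbe.h environment:

static void
example_fdir_gtask(void *context)	/* gtask: single argument */
{
	if_ctx_t ctx = context;
	struct adapter *adapter = iflib_get_softc(ctx);

	(void)adapter;			/* re-init flow-director state here */
}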
Index: sys/dev/ixgbe/if_ix.c
===================================================================
--- sys/dev/ixgbe/if_ix.c
+++ sys/dev/ixgbe/if_ix.c
@@ -33,13 +33,16 @@
/*$FreeBSD$*/
-#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
-#endif
#include "ixgbe.h"
+#include "ixgbe_sriov.h"
+#include "ifdi_if.h"
+
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
/************************************************************************
* Driver version
@@ -56,180 +59,175 @@
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
************************************************************************/
-static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
+static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
/* required last entry */
- {0, 0, 0, 0, 0}
+ PVID_END
};
-/************************************************************************
- * Table of branding strings
- ************************************************************************/
-static char *ixgbe_strings[] = {
- "Intel(R) PRO/10GbE PCI-Express Network Driver"
-};
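The table rewrite folds the branding string (previously the separate ixgbe_strings[] removed just above) into each pci_vendor_info_t entry; iflib_device_probe() matches on the IDs and reports the per-entry string as the device description. A minimal sketch of the pattern, with illustrative IDs:

/* PVID() and PVID_END come from sys/net/iflib.h. */
static pci_vendor_info_t example_vendor_info[] = {
	PVID(0x8086, 0x10FB, "Example 82599 SFP entry"),	/* illustrative */
	PVID_END						/* required terminator */
};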
+static void *ixgbe_register(device_t dev);
+static int ixgbe_if_attach_pre(if_ctx_t ctx);
+static int ixgbe_if_attach_post(if_ctx_t ctx);
+static int ixgbe_if_detach(if_ctx_t ctx);
+static int ixgbe_if_shutdown(if_ctx_t ctx);
+static int ixgbe_if_suspend(if_ctx_t ctx);
+static int ixgbe_if_resume(if_ctx_t ctx);
+
+static void ixgbe_if_stop(if_ctx_t ctx);
+void ixgbe_if_enable_intr(if_ctx_t ctx);
+static void ixgbe_if_disable_intr(if_ctx_t ctx);
+static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
+static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
+static int ixgbe_if_media_change(if_ctx_t ctx);
+static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
+static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
+static void ixgbe_if_multi_set(if_ctx_t ctx);
+static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
+static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
+ uint64_t *paddrs, int nrxqs, int nrxqsets);
+static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
+ uint64_t *paddrs, int nrxqs, int nrxqsets);
+static void ixgbe_if_queues_free(if_ctx_t ctx);
+static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
+static void ixgbe_if_update_admin_status(if_ctx_t ctx);
+static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+
+int ixgbe_intr(void *arg);
/************************************************************************
* Function prototypes
************************************************************************/
-static int ixgbe_probe(device_t);
-static int ixgbe_attach(device_t);
-static int ixgbe_detach(device_t);
-static int ixgbe_shutdown(device_t);
-static int ixgbe_suspend(device_t);
-static int ixgbe_resume(device_t);
-static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixgbe_init(void *);
-static void ixgbe_init_locked(struct adapter *);
-static void ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
-static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
+static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif
-static void ixgbe_init_device_features(struct adapter *);
-static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
-static void ixgbe_add_media_types(struct adapter *);
-static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
-static int ixgbe_media_change(struct ifnet *);
-static int ixgbe_allocate_pci_resources(struct adapter *);
-static void ixgbe_get_slot_info(struct adapter *);
-static int ixgbe_allocate_msix(struct adapter *);
-static int ixgbe_allocate_legacy(struct adapter *);
-static int ixgbe_configure_interrupts(struct adapter *);
-static void ixgbe_free_pci_resources(struct adapter *);
-static void ixgbe_local_timer(void *);
-static int ixgbe_setup_interface(device_t, struct adapter *);
-static void ixgbe_config_gpie(struct adapter *);
-static void ixgbe_config_dmac(struct adapter *);
-static void ixgbe_config_delay_values(struct adapter *);
-static void ixgbe_config_link(struct adapter *);
-static void ixgbe_check_wol_support(struct adapter *);
-static int ixgbe_setup_low_power_mode(struct adapter *);
-static void ixgbe_rearm_queues(struct adapter *, u64);
-
-static void ixgbe_initialize_transmit_units(struct adapter *);
-static void ixgbe_initialize_receive_units(struct adapter *);
-static void ixgbe_enable_rx_drop(struct adapter *);
-static void ixgbe_disable_rx_drop(struct adapter *);
-static void ixgbe_initialize_rss_mapping(struct adapter *);
-
-static void ixgbe_enable_intr(struct adapter *);
-static void ixgbe_disable_intr(struct adapter *);
-static void ixgbe_update_stats_counters(struct adapter *);
-static void ixgbe_set_promisc(struct adapter *);
-static void ixgbe_set_multi(struct adapter *);
-static void ixgbe_update_link_status(struct adapter *);
-static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
-static void ixgbe_configure_ivars(struct adapter *);
-static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-
-static void ixgbe_setup_vlan_hw_support(struct adapter *);
-static void ixgbe_register_vlan(void *, struct ifnet *, u16);
-static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
-
-static void ixgbe_add_device_sysctls(struct adapter *);
-static void ixgbe_add_hw_stats(struct adapter *);
-static int ixgbe_set_flowcntl(struct adapter *, int);
-static int ixgbe_set_advertise(struct adapter *, int);
-static int ixgbe_get_advertise(struct adapter *);
+
+static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
+static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
+static void ixgbe_add_device_sysctls(if_ctx_t ctx);
+static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
+static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
+
+static void ixgbe_config_dmac(struct adapter *adapter);
+static void ixgbe_configure_ivars(struct adapter *adapter);
+static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
+ s8 type);
+static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+static bool ixgbe_sfp_probe(if_ctx_t ctx);
+
+static void ixgbe_free_pci_resources(if_ctx_t ctx);
+
+static int ixgbe_msix_link(void *arg);
+static int ixgbe_msix_que(void *arg);
+static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
+static void ixgbe_initialize_receive_units(if_ctx_t ctx);
+static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
+
+static int ixgbe_setup_interface(if_ctx_t ctx);
+static void ixgbe_init_device_features(struct adapter *adapter);
+static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
+static void ixgbe_add_media_types(if_ctx_t ctx);
+static void ixgbe_update_stats_counters(struct adapter *adapter);
+static void ixgbe_config_link(struct adapter *adapter);
+static void ixgbe_get_slot_info(struct adapter *);
+static void ixgbe_check_wol_support(struct adapter *adapter);
+static void ixgbe_enable_rx_drop(struct adapter *);
+static void ixgbe_disable_rx_drop(struct adapter *);
+
+static void ixgbe_add_hw_stats(struct adapter *adapter);
+static int ixgbe_set_flowcntl(struct adapter *, int);
+static int ixgbe_set_advertise(struct adapter *, int);
+static int ixgbe_get_advertise(struct adapter *);
+static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
+static void ixgbe_config_gpie(struct adapter *adapter);
+static void ixgbe_config_delay_values(struct adapter *adapter);
/* Sysctl handlers */
-static void ixgbe_set_sysctl_value(struct adapter *, const char *,
- const char *, int *, int);
-static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
+static void ixgbe_set_sysctl_value(struct adapter *, const char *,
+ const char *, int *, int);
+static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
-static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
-static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
-static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
-
-/* Support for pluggable optic modules */
-static bool ixgbe_sfp_probe(struct adapter *);
-
-/* Legacy (single vector) interrupt handler */
-static void ixgbe_legacy_irq(void *);
-
-/* The MSI/MSI-X Interrupt handlers */
-static void ixgbe_msix_que(void *);
-static void ixgbe_msix_link(void *);
+static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
/* Deferred interrupt tasklets */
-static void ixgbe_handle_que(void *, int);
-static void ixgbe_handle_link(void *, int);
-static void ixgbe_handle_msf(void *, int);
-static void ixgbe_handle_mod(void *, int);
-static void ixgbe_handle_phy(void *, int);
-
+static void ixgbe_handle_msf(void *);
+static void ixgbe_handle_mod(void *);
+static void ixgbe_handle_phy(void *);
/************************************************************************
* FreeBSD Device Interface Entry Points
************************************************************************/
static device_method_t ix_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ixgbe_probe),
- DEVMETHOD(device_attach, ixgbe_attach),
- DEVMETHOD(device_detach, ixgbe_detach),
- DEVMETHOD(device_shutdown, ixgbe_shutdown),
- DEVMETHOD(device_suspend, ixgbe_suspend),
- DEVMETHOD(device_resume, ixgbe_resume),
+ DEVMETHOD(device_register, ixgbe_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
- DEVMETHOD(pci_iov_init, ixgbe_init_iov),
- DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
- DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
+ DEVMETHOD(pci_iov_init, iflib_device_iov_init),
+ DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
DEVMETHOD_END
};
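With iflib, the newbus entry points above become thin delegations to the generic iflib_device_* handlers; the only driver-specific hook left is device_register, which hands iflib the driver's if_shared_ctx template, as ixgbe_register does later in this diff. Roughly:

static void *
example_register(device_t dev)
{
	/* iflib drives probe/attach from this shared-ctx template. */
	return (&ixgbe_sctx_init);
}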
@@ -243,15 +241,52 @@
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
-#ifdef DEV_NETMAP
-MODULE_DEPEND(ix, netmap, 1, 1, 1);
-#endif
+MODULE_DEPEND(ix, iflib, 1, 1, 1);
+
+static device_method_t ixgbe_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
+ DEVMETHOD(ifdi_detach, ixgbe_if_detach),
+ DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
+ DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
+ DEVMETHOD(ifdi_resume, ixgbe_if_resume),
+ DEVMETHOD(ifdi_init, ixgbe_if_init),
+ DEVMETHOD(ifdi_stop, ixgbe_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
+ DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
+ DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
+ DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
+ DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
+ DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
+ DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
+ DEVMETHOD(ifdi_timer, ixgbe_if_timer),
+ DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
+ DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
+#ifdef PCI_IOV
+ DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
+ DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
+ DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
+#endif /* PCI_IOV */
+ DEVMETHOD_END
+};
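The ifdi_* entries above are kobj methods (the interface is described by sys/net/ifdi_if.m); iflib invokes them through generated IFDI_*() wrappers. A rough sketch of how a few of them are dispatched for this driver:

static void
example_restart(if_ctx_t ctx)
{
	IFDI_INTR_DISABLE(ctx);		/* -> ixgbe_if_disable_intr() */
	IFDI_INIT(ctx);			/* -> ixgbe_if_init()         */
	IFDI_INTR_ENABLE(ctx);		/* -> ixgbe_if_enable_intr()  */
}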
/*
* TUNEABLE PARAMETERS:
*/
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
+static driver_t ixgbe_if_driver = {
+ "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
+};
/*
* AIM: Adaptive Interrupt Moderation
@@ -260,24 +295,13 @@
* traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
"Enable adaptive interrupt moderation");
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
-/* How many packets rxeof tries to clean at a time */
-static int ixgbe_rx_process_limit = 256;
-SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
- &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
-
-/* How many packets txeof tries to clean at a time */
-static int ixgbe_tx_process_limit = 256;
-SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
- &ixgbe_tx_process_limit, 0,
- "Maximum number of sent packets to process at a time, -1 means unlimited");
-
/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
@@ -306,30 +330,6 @@
"Enable MSI-X interrupts");
/*
- * Number of Queues, can be set to 0,
- * it then autoconfigures based on the
- * number of cpus with a max of 8. This
- * can be overriden manually here.
- */
-static int ixgbe_num_queues = 0;
-SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
- "Number of queues to configure, 0 indicates autoconfigure");
-
-/*
- * Number of TX descriptors per ring,
- * setting higher than RX as this seems
- * the better performing choice.
- */
-static int ixgbe_txd = PERFORM_TXD;
-SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
- "Number of transmit descriptors per queue");
-
-/* Number of RX descriptors per ring */
-static int ixgbe_rxd = PERFORM_RXD;
-SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
- "Number of receive descriptors per queue");
-
-/*
* Defining this on will allow the use
* of unsupported SFP+ modules, note that
* doing so you are on your own :)
@@ -347,24 +347,217 @@
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
"Enable Flow Director");
-/* Legacy Transmit (single queue) */
-static int ixgbe_enable_legacy_tx = 0;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
- &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
-
/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
"Enable Receive-Side Scaling (RSS)");
+#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
-
-static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
-static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
+#endif
MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
+/*
+ * For Flow Director: this is the number of TX packets we sample
+ * for the filter pool, this means every 20th packet will be probed.
+ *
+ * This feature can be disabled by setting this to 0.
+ */
+static int atr_sample_rate = 20;
+
+extern struct if_txrx ixgbe_txrx;
+
+static struct if_shared_ctx ixgbe_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
+ .isc_tx_maxsize = IXGBE_TSO_SIZE,
+
+ .isc_tx_maxsegsize = PAGE_SIZE,
+
+ .isc_rx_maxsize = PAGE_SIZE*4,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = PAGE_SIZE*4,
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ixgbe_vendor_info_array,
+ .isc_driver_version = ixgbe_driver_version,
+ .isc_driver = &ixgbe_if_driver,
+
+ .isc_nrxd_min = {MIN_RXD},
+ .isc_ntxd_min = {MIN_TXD},
+ .isc_nrxd_max = {MAX_RXD},
+ .isc_ntxd_max = {MAX_TXD},
+ .isc_nrxd_default = {DEFAULT_RXD},
+ .isc_ntxd_default = {DEFAULT_TXD},
+};
+
+if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
+
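The if_shared_ctx above is the immutable per-driver template (descriptor limits, queue counts, DMA alignment); per-instance values live in the if_softc_ctx_t that ixgbe_if_attach_pre() fills in. Typical consumption, as the queue-alloc code below does with isc_ntxd[0]:

static void
example_ring_sizes(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	int ntxd = scctx->isc_ntxd[0];	/* TX descriptors per ring */
	int nrxd = scctx->isc_nrxd[0];	/* RX descriptors per ring */
	(void)ntxd; (void)nrxd;
}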
+/************************************************************************
+ * ixgbe_if_tx_queues_alloc
+ ************************************************************************/
+static int
+ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ix_tx_queue *que;
+ int i, j, error;
+
+ MPASS(adapter->num_tx_queues > 0);
+ MPASS(adapter->num_tx_queues == ntxqsets);
+ MPASS(ntxqs == 1);
+
+ /* Allocate queue structure memory */
+ adapter->tx_queues =
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
+ M_IXGBE, M_NOWAIT | M_ZERO);
+ if (!adapter->tx_queues) {
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate TX ring memory\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ /* In case SR-IOV is enabled, align the index properly */
+ txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
+ i);
+
+ txr->adapter = que->adapter = adapter;
+ adapter->active_queues |= (u64)1 << txr->me;
+
+ /* Allocate report status array */
+ txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
+ if (txr->tx_rsq == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ /* get the virtual and physical address of the hardware queues */
+ txr->tail = IXGBE_TDT(txr->me);
+ txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
+ txr->tx_paddr = paddrs[i];
+
+ txr->bytes = 0;
+ txr->total_packets = 0;
+
+ /* Set the rate at which we sample packets */
+ if (adapter->feat_en & IXGBE_FEATURE_FDIR)
+ txr->atr_sample = atr_sample_rate;
+
+ }
+
+ iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
+ "mod_task");
+ iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
+ "msf_task");
+ iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
+ "phy_task");
+ if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
+ iflib_config_gtask_init(ctx, &adapter->mbx_task,
+ ixgbe_handle_mbx, "mbx_task");
+ if (adapter->feat_en & IXGBE_FEATURE_FDIR)
+ iflib_config_gtask_init(ctx, &adapter->fdir_task,
+ ixgbe_reinit_fdir, "fdir_task");
+
+ device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
+ adapter->num_tx_queues);
+
+ return (0);
+
+fail:
+ ixgbe_if_queues_free(ctx);
+
+ return (error);
+} /* ixgbe_if_tx_queues_alloc */
+
+/************************************************************************
+ * ixgbe_if_rx_queues_alloc
+ ************************************************************************/
+static int
+ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que;
+ int i;
+
+ MPASS(adapter->num_rx_queues > 0);
+ MPASS(adapter->num_rx_queues == nrxqsets);
+ MPASS(nrxqs == 1);
+
+ /* Allocate queue structure memory */
+ adapter->rx_queues =
+ (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
+ M_IXGBE, M_NOWAIT | M_ZERO);
+ if (!adapter->rx_queues) {
+ device_printf(iflib_get_dev(ctx),
+			    "Unable to allocate RX ring memory\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ /* In case SR-IOV is enabled, align the index properly */
+ rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
+ i);
+
+ rxr->adapter = que->adapter = adapter;
+
+ /* get the virtual and physical address of the hw queues */
+ rxr->tail = IXGBE_RDT(rxr->me);
+ rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
+ rxr->rx_paddr = paddrs[i];
+ rxr->bytes = 0;
+ rxr->que = que;
+ }
+
+ device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
+ adapter->num_rx_queues);
+
+ return (0);
+} /* ixgbe_if_rx_queues_alloc */
+
+/************************************************************************
+ * ixgbe_if_queues_free
+ ************************************************************************/
+static void
+ixgbe_if_queues_free(if_ctx_t ctx)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_tx_queue *tx_que = adapter->tx_queues;
+ struct ix_rx_queue *rx_que = adapter->rx_queues;
+ int i;
+
+ if (tx_que != NULL) {
+ for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ if (txr->tx_rsq == NULL)
+ break;
+
+ free(txr->tx_rsq, M_IXGBE);
+ txr->tx_rsq = NULL;
+ }
+
+ free(adapter->tx_queues, M_IXGBE);
+ adapter->tx_queues = NULL;
+ }
+ if (rx_que != NULL) {
+ free(adapter->rx_queues, M_IXGBE);
+ adapter->rx_queues = NULL;
+ }
+} /* ixgbe_if_queues_free */
+
/************************************************************************
* ixgbe_initialize_rss_mapping
************************************************************************/
@@ -403,17 +596,17 @@
/* Set up the redirection table */
for (i = 0, j = 0; i < table_size; i++, j++) {
- if (j == adapter->num_queues)
+ if (j == adapter->num_rx_queues)
j = 0;
if (adapter->feat_en & IXGBE_FEATURE_RSS) {
/*
* Fetch the RSS bucket id for the given indirection
* entry. Cap it at the number of configured buckets
- * (which is num_queues.)
+ * (which is num_rx_queues.)
*/
queue_id = rss_get_indirection_to_bucket(i);
- queue_id = queue_id % adapter->num_queues;
+ queue_id = queue_id % adapter->num_rx_queues;
} else
queue_id = (j * index_mult);
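Standalone sketch of the non-RSS branch of the fill above: without a kernel RSS mapping, table entries simply cycle through the RX queues, scaled by index_mult; with RSS, the bucket id from rss_get_indirection_to_bucket() is capped by modulo num_rx_queues instead.

#include <stdint.h>

static void
fill_reta(uint8_t *reta, int table_size, int nrxq, int index_mult)
{
	for (int i = 0, j = 0; i < table_size; i++, j++) {
		if (j == nrxq)		/* wrap at the queue count */
			j = 0;
		reta[i] = (uint8_t)(j * index_mult);
	}
}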
@@ -486,14 +679,16 @@
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
static void
-ixgbe_initialize_receive_units(struct adapter *adapter)
+ixgbe_initialize_receive_units(if_ctx_t ctx)
{
- struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- int i, j;
- u32 bufsz, fctrl, srrctl, rxcsum;
- u32 hlreg;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct ix_rx_queue *que;
+ int i, j;
+ u32 bufsz, fctrl, srrctl, rxcsum;
+ u32 hlreg;
/*
* Make sure receives are disabled while
@@ -516,24 +711,16 @@
hlreg |= IXGBE_HLREG0_JUMBOEN;
else
hlreg &= ~IXGBE_HLREG0_JUMBOEN;
-
-#ifdef DEV_NETMAP
- /* CRC stripping is conditional in Netmap */
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
- (ifp->if_capenable & IFCAP_NETMAP) &&
- !ix_crcstrip)
- hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
- else
-#endif /* DEV_NETMAP */
- hlreg |= IXGBE_HLREG0_RXCRCSTRP;
-
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- for (i = 0; i < adapter->num_queues; i++, rxr++) {
- u64 rdba = rxr->rxdma.dma_paddr;
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+ u64 rdba = rxr->rx_paddr;
+
j = rxr->me;
/* Setup the Base and Length of the Rx Descriptor Ring */
@@ -541,7 +728,7 @@
(rdba & 0x00000000ffffffffULL));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
- adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
/* Set up the SRRCTL register */
srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
@@ -556,7 +743,7 @@
* so we do not need to clear the bit, but do it just in case
* this code is moved elsewhere.
*/
- if (adapter->num_queues > 1 &&
+ if (adapter->num_rx_queues > 1 &&
adapter->hw.fc.requested_mode == ixgbe_fc_none) {
srrctl |= IXGBE_SRRCTL_DROP_EN;
} else {
@@ -585,7 +772,7 @@
ixgbe_initialize_rss_mapping(adapter);
- if (adapter->num_queues > 1) {
+ if (adapter->num_rx_queues > 1) {
/* RSS and RX IPP Checksum are mutually exclusive */
rxcsum |= IXGBE_RXCSUM_PCSD;
}
@@ -599,21 +786,25 @@
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- return;
} /* ixgbe_initialize_receive_units */
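Both ring setups program the 64-bit descriptor-ring physical address as a low/high register pair (RDBAL/RDBAH here, TDBAL/TDBAH below). The split, as a tiny standalone sketch:

#include <stdint.h>

static void
split_dma_addr(uint64_t paddr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(paddr & 0x00000000ffffffffULL);	/* RDBAL/TDBAL */
	*hi = (uint32_t)(paddr >> 32);				/* RDBAH/TDBAH */
}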
/************************************************************************
* ixgbe_initialize_transmit_units - Enable transmit units.
************************************************************************/
static void
-ixgbe_initialize_transmit_units(struct adapter *adapter)
+ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
- struct tx_ring *txr = adapter->tx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &adapter->hw;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ix_tx_queue *que;
+ int i;
/* Setup the Base and Length of the Tx Descriptor Ring */
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
+ for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
+ i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ u64 tdba = txr->tx_paddr;
u32 txctrl = 0;
int j = txr->me;
@@ -621,14 +812,16 @@
(tdba & 0x00000000ffffffffULL));
IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
- adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+ scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
/* Setup the HW Tx Head and Tail descriptor pointers */
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
/* Cache the tail address */
- txr->tail = IXGBE_TDT(j);
+ txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
+ for (int k = 0; k < scctx->isc_ntxd[0]; k++)
+ txr->tx_rsq[k] = QIDX_INVALID;
/* Disable Head Writeback */
/*
@@ -672,22 +865,32 @@
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}
- return;
} /* ixgbe_initialize_transmit_units */
/************************************************************************
- * ixgbe_attach - Device initialization routine
+ * ixgbe_register
+ ************************************************************************/
+static void *
+ixgbe_register(device_t dev)
+{
+ return (ixgbe_sctx);
+} /* ixgbe_register */
+
+/************************************************************************
+ * ixgbe_if_attach_pre - Device initialization routine, part 1
*
* Called when the driver is being loaded.
- * Identifies the type of hardware, allocates all resources
- * and initializes the hardware.
+ * Identifies the type of hardware, initializes the hardware,
+ * and initializes iflib structures.
*
* return 0 on success, positive on failure
************************************************************************/
static int
-ixgbe_attach(device_t dev)
+ixgbe_if_attach_pre(if_ctx_t ctx)
{
struct adapter *adapter;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
int error = 0;
u32 ctrl_ext;
@@ -695,17 +898,15 @@
INIT_DEBUGOUT("ixgbe_attach: begin");
/* Allocate, clear, and link in our adapter structure */
- adapter = device_get_softc(dev);
+ dev = iflib_get_dev(ctx);
+ adapter = iflib_get_softc(ctx);
adapter->hw.back = adapter;
+ adapter->ctx = ctx;
adapter->dev = dev;
+ scctx = adapter->shared = iflib_get_softc_ctx(ctx);
+ adapter->media = iflib_get_media(ctx);
hw = &adapter->hw;
- /* Core Lock Init*/
- IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
- /* Set up the timer callout */
- callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
/* Determine hardware revision */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
@@ -713,16 +914,10 @@
hw->subsystem_vendor_id = pci_get_subvendor(dev);
hw->subsystem_device_id = pci_get_subdevice(dev);
- /*
- * Make sure BUSMASTER is set
- */
- pci_enable_busmaster(dev);
-
/* Do base PCI setup - map BAR0 */
- if (ixgbe_allocate_pci_resources(adapter)) {
+ if (ixgbe_allocate_pci_resources(ctx)) {
device_printf(dev, "Allocation of PCI resources failed\n");
- error = ENXIO;
- goto err_out;
+ return (ENXIO);
}
/* let hardware know driver is loaded */
@@ -733,10 +928,10 @@
/*
* Initialize the shared code
*/
- if (ixgbe_init_shared_code(hw)) {
+ if (ixgbe_init_shared_code(hw) != 0) {
device_printf(dev, "Unable to initialize the shared code\n");
error = ENXIO;
- goto err_out;
+ goto err_pci;
}
if (hw->mbx.ops.init_params)
@@ -744,38 +939,14 @@
hw->allow_unsupported_sfp = allow_unsupported_sfp;
- /* Pick up the 82599 settings */
- if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.type != ixgbe_mac_82598EB)
hw->phy.smart_speed = ixgbe_smart_speed;
- adapter->num_segs = IXGBE_82599_SCATTER;
- } else
- adapter->num_segs = IXGBE_82598_SCATTER;
ixgbe_init_device_features(adapter);
- if (ixgbe_configure_interrupts(adapter)) {
- error = ENXIO;
- goto err_out;
- }
-
- /* Allocate multicast array memory. */
- adapter->mta = malloc(sizeof(*adapter->mta) *
- MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
- if (adapter->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
- error = ENOMEM;
- goto err_out;
- }
-
/* Enable WoL (if supported) */
ixgbe_check_wol_support(adapter);
- /* Register for VLAN events */
- adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
- adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
/* Verify adapter fan is still functional (if applicable) */
if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -785,57 +956,9 @@
/* Ensure SW/FW semaphore is free */
ixgbe_init_swfw_semaphore(hw);
- /* Enable EEE power saving */
- if (adapter->feat_en & IXGBE_FEATURE_EEE)
- hw->mac.ops.setup_eee(hw, TRUE);
-
/* Set an initial default flow control value */
hw->fc.requested_mode = ixgbe_flow_control;
- /* Sysctls for limiting the amount of work done in the taskqueues */
- ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
- "max number of rx packets to process",
- &adapter->rx_process_limit, ixgbe_rx_process_limit);
-
- ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
- "max number of tx packets to process",
- &adapter->tx_process_limit, ixgbe_tx_process_limit);
-
- /* Do descriptor calc and sanity checks */
- if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
- ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
- device_printf(dev, "TXD config issue, using default!\n");
- adapter->num_tx_desc = DEFAULT_TXD;
- } else
- adapter->num_tx_desc = ixgbe_txd;
-
- /*
- * With many RX rings it is easy to exceed the
- * system mbuf allocation. Tuning nmbclusters
- * can alleviate this.
- */
- if (nmbclusters > 0) {
- int s;
- s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
- if (s > nmbclusters) {
- device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
- ixgbe_rxd = DEFAULT_RXD;
- }
- }
-
- if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
- ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
- device_printf(dev, "RXD config issue, using default!\n");
- adapter->num_rx_desc = DEFAULT_RXD;
- } else
- adapter->num_rx_desc = ixgbe_rxd;
-
- /* Allocate our TX/RX Queues */
- if (ixgbe_allocate_queues(adapter)) {
- error = ENOMEM;
- goto err_out;
- }
-
hw->phy.reset_if_overtemp = TRUE;
error = ixgbe_reset_hw(hw);
hw->phy.reset_if_overtemp = FALSE;
@@ -846,35 +969,24 @@
* for later insertion.
*/
adapter->sfp_probe = TRUE;
- error = IXGBE_SUCCESS;
+ error = 0;
} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev, "Unsupported SFP+ module detected!\n");
error = EIO;
- goto err_late;
+ goto err_pci;
} else if (error) {
device_printf(dev, "Hardware initialization failed\n");
error = EIO;
- goto err_late;
+ goto err_pci;
}
/* Make sure we have a good EEPROM before we read from it */
if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
error = EIO;
- goto err_late;
+ goto err_pci;
}
- /* Setup OS specific network interface */
- if (ixgbe_setup_interface(dev, adapter) != 0)
- goto err_late;
-
- if (adapter->feat_en & IXGBE_FEATURE_MSIX)
- error = ixgbe_allocate_msix(adapter);
- else
- error = ixgbe_allocate_legacy(adapter);
- if (error)
- goto err_late;
-
error = ixgbe_start_hw(hw);
switch (error) {
case IXGBE_ERR_EEPROM_VERSION:
@@ -883,7 +995,7 @@
case IXGBE_ERR_SFP_NOT_SUPPORTED:
device_printf(dev, "Unsupported SFP+ Module\n");
error = EIO;
- goto err_late;
+ goto err_pci;
case IXGBE_ERR_SFP_NOT_PRESENT:
device_printf(dev, "No SFP+ Module found\n");
/* falls thru */
@@ -891,14 +1003,114 @@
break;
}
+ /* Most of the iflib initialization... */
+
+ iflib_set_mac(ctx, hw->mac.addr);
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ scctx->isc_rss_table_size = 512;
+ break;
+ default:
+ scctx->isc_rss_table_size = 128;
+ }
+
+ /* Allow legacy interrupts */
+ ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
+
+ scctx->isc_txqsizes[0] =
+ roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
+ sizeof(u32), DBA_ALIGN),
+ scctx->isc_rxqsizes[0] =
+ roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
+ DBA_ALIGN);
+
+ /* XXX */
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 32;
+ scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
+ CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
+ scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
+ } else {
+		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
+ scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
+ scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
+ }
+ scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
+ scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
+
+ scctx->isc_txrx = &ixgbe_txrx;
+
+ scctx->isc_capenable = IXGBE_CAPS;
+
+ return (0);
+
+err_pci:
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+ ixgbe_free_pci_resources(ctx);
+
+ return (error);
+} /* ixgbe_if_attach_pre */
+
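The attach split mirrors iflib's driving sequence: attach_pre identifies the hardware and fills in the softc ctx, iflib then allocates queues and assigns interrupts, and attach_post finishes driver-private setup. A compressed sketch of that ordering (IFDI_* wrappers are iflib's; intermediate steps elided):

static int
example_attach_order(if_ctx_t ctx)
{
	int error;

	if ((error = IFDI_ATTACH_PRE(ctx)) != 0)	/* hw ident, scctx */
		return (error);
	/* ... iflib allocates queues and assigns MSI-X vectors ... */
	return (IFDI_ATTACH_POST(ctx));			/* softc finish-up */
}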
+ /*********************************************************************
+ * ixgbe_if_attach_post - Device initialization routine, part 2
+ *
+ * Called during driver load, but after interrupts and
+ * resources have been allocated and configured.
+ * Sets up some data structures not relevant to iflib.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+static int
+ixgbe_if_attach_post(if_ctx_t ctx)
+{
+ device_t dev;
+ struct adapter *adapter;
+ struct ixgbe_hw *hw;
+ int error = 0;
+
+ dev = iflib_get_dev(ctx);
+ adapter = iflib_get_softc(ctx);
+ hw = &adapter->hw;
+
+
+ /* Allocate multicast array memory. */
+ adapter->mta = malloc(sizeof(*adapter->mta) *
+ MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
+ if (adapter->mta == NULL) {
+ device_printf(dev, "Can not allocate multicast setup array\n");
+ error = ENOMEM;
+ goto err;
+ }
+
+ /* hw.ix defaults init */
+ ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
+ adapter->enable_aim = ixgbe_enable_aim;
+
/* Enable the optics for 82599 SFP+ fiber */
ixgbe_enable_tx_laser(hw);
/* Enable power to the phy. */
ixgbe_set_phy_power(hw, TRUE);
+ ixgbe_initialize_iov(adapter);
+
+ error = ixgbe_setup_interface(ctx);
+ if (error) {
+ device_printf(dev, "Interface setup failed: %d\n", error);
+ goto err;
+ }
+
+ ixgbe_if_update_admin_status(ctx);
+
/* Initialize statistics */
ixgbe_update_stats_counters(adapter);
+ ixgbe_add_hw_stats(adapter);
/* Check PCIE slot type/speed/width */
ixgbe_get_slot_info(adapter);
@@ -918,36 +1130,13 @@
ixgbe_define_iov_schemas(dev, &error);
/* Add sysctls */
- ixgbe_add_device_sysctls(adapter);
- ixgbe_add_hw_stats(adapter);
-
- /* For Netmap */
- adapter->init_locked = ixgbe_init_locked;
- adapter->stop_locked = ixgbe_stop;
-
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
- ixgbe_netmap_attach(adapter);
-
- INIT_DEBUGOUT("ixgbe_attach: end");
+ ixgbe_add_device_sysctls(ctx);
return (0);
-
-err_late:
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
- free(adapter->queues, M_DEVBUF);
-err_out:
- if (adapter->ifp != NULL)
- if_free(adapter->ifp);
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
- ixgbe_free_pci_resources(adapter);
- free(adapter->mta, M_IXGBE);
- IXGBE_CORE_LOCK_DESTROY(adapter);
-
+err:
+ ixgbe_if_detach(ctx);
return (error);
-} /* ixgbe_attach */
+} /* ixgbe_if_attach_post */
/************************************************************************
* ixgbe_check_wol_support
@@ -984,71 +1173,19 @@
* Setup networking device structure and register an interface.
************************************************************************/
static int
-ixgbe_setup_interface(device_t dev, struct adapter *adapter)
+ixgbe_setup_interface(if_ctx_t ctx)
{
- struct ifnet *ifp;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("ixgbe_setup_interface: begin");
- ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_baudrate = IF_Gbps(10);
- ifp->if_init = ixgbe_init;
- ifp->if_softc = adapter;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixgbe_ioctl;
-#if __FreeBSD_version >= 1100036
- if_setgetcounterfn(ifp, ixgbe_get_counter);
-#endif
-#if __FreeBSD_version >= 1100045
- /* TSO parameters */
- ifp->if_hw_tsomax = 65518;
- ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
- ifp->if_hw_tsomaxsegsize = 2048;
-#endif
- if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
- ifp->if_start = ixgbe_legacy_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
- ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
- IFQ_SET_READY(&ifp->if_snd);
- ixgbe_start_locked = ixgbe_legacy_start_locked;
- ixgbe_ring_empty = ixgbe_legacy_ring_empty;
- } else {
- ifp->if_transmit = ixgbe_mq_start;
- ifp->if_qflush = ixgbe_qflush;
- ixgbe_start_locked = ixgbe_mq_start_locked;
- ixgbe_ring_empty = drbr_empty;
- }
-
- ether_ifattach(ifp, adapter->hw.mac.addr);
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
+ if_setbaudrate(ifp, IF_Gbps(10));
adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- /* Set capability flags */
- ifp->if_capabilities |= IFCAP_HWCSUM
- | IFCAP_HWCSUM_IPV6
- | IFCAP_TSO
- | IFCAP_LRO
- | IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_HWCSUM
- | IFCAP_JUMBO_MTU
- | IFCAP_VLAN_MTU
- | IFCAP_HWSTATS;
-
- /* Enable the above capabilities by default */
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
* Don't turn this on by default, if vlans are
* created on another pseudo device (eg. lagg)
* then vlan events are not passed thru, breaking
@@ -1056,36 +1193,25 @@
* using vlans directly on the ixgbe driver you can
* enable this and get full hardware tag filtering.
*/
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
- ixgbe_media_status);
-
+ if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWFILTER);
adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
- ixgbe_add_media_types(adapter);
- /* Set autoselect media by default */
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+ ixgbe_add_media_types(ctx);
+
+ /* Autoselect media by default */
+ ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
return (0);
} /* ixgbe_setup_interface */
-#if __FreeBSD_version >= 1100036
/************************************************************************
- * ixgbe_get_counter
+ * ixgbe_if_get_counter
************************************************************************/
static uint64_t
-ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
+ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
- struct adapter *adapter;
- struct tx_ring *txr;
- uint64_t rv;
-
- adapter = if_getsoftc(ifp);
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@@ -1105,103 +1231,99 @@
case IFCOUNTER_IQDROPS:
return (adapter->iqdrops);
case IFCOUNTER_OQDROPS:
- rv = 0;
- txr = adapter->tx_rings;
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- rv += txr->br->br_drops;
- return (rv);
+ return (0);
case IFCOUNTER_IERRORS:
return (adapter->ierrors);
default:
return (if_get_counter_default(ifp, cnt));
}
-} /* ixgbe_get_counter */
-#endif
+} /* ixgbe_if_get_counter */
/************************************************************************
* ixgbe_add_media_types
************************************************************************/
static void
-ixgbe_add_media_types(struct adapter *adapter)
+ixgbe_add_media_types(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
+ device_t dev = iflib_get_dev(ctx);
u64 layer;
- layer = adapter->phy_layer;
+ layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
if (hw->phy.multispeed_fiber)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
NULL);
}
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (hw->phy.multispeed_fiber)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
NULL);
} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
#ifdef IFM_ETH_XTYPE
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
device_printf(dev, "Media supported: 10GbaseKR\n");
device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
}
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
device_printf(dev, "Media supported: 10GbaseKX4\n");
device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
}
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
device_printf(dev, "Media supported: 1000baseKX\n");
device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
}
if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
device_printf(dev, "Media supported: 2500baseKX\n");
device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
}
#endif
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
device_printf(dev, "Media supported: 1000baseBX\n");
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
0, NULL);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
}
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
/************************************************************************
@@ -1213,23 +1335,23 @@
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->phy.type == ixgbe_phy_nl)
- return TRUE;
- return FALSE;
+ return (TRUE);
+ return (FALSE);
case ixgbe_mac_82599EB:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
- return TRUE;
+ return (TRUE);
default:
- return FALSE;
+ return (FALSE);
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
- return TRUE;
- return FALSE;
+ return (TRUE);
+ return (FALSE);
default:
- return FALSE;
+ return (FALSE);
}
} /* ixgbe_is_sfp */
@@ -1246,31 +1368,24 @@
sfp = ixgbe_is_sfp(hw);
if (sfp) {
- if (hw->phy.multispeed_fiber) {
- hw->mac.ops.setup_sfp(hw);
- ixgbe_enable_tx_laser(hw);
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- } else
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ GROUPTASK_ENQUEUE(&adapter->mod_task);
} else {
if (hw->mac.ops.check_link)
err = ixgbe_check_link(hw, &adapter->link_speed,
&adapter->link_up, FALSE);
if (err)
- goto out;
+ return;
autoneg = hw->phy.autoneg_advertised;
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
&negotiate);
if (err)
- goto out;
+ return;
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
adapter->link_up);
}
-out:
- return;
} /* ixgbe_config_link */
/************************************************************************
@@ -1399,15 +1514,16 @@
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
- device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
- struct rx_ring *rxr = adapter->rx_rings;
+ device_t dev = iflib_get_dev(adapter->ctx);
+ struct ix_rx_queue *rx_que;
+ struct ix_tx_queue *tx_que;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct ixgbe_hw_stats *stats = &adapter->stats.pf;
struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
+ int i;
#define QUEUE_NAME_LEN 32
char namebuf[QUEUE_NAME_LEN];
@@ -1415,27 +1531,18 @@
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
- sizeof(&adapter->queues[i]),
- ixgbe_sysctl_interrupt_rate_handler, "IU",
- "Interrupt Rate");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(adapter->queues[i].irqs),
- "irqs on this queue");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
@@ -1444,28 +1551,26 @@
ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &txr->tso_tx, "TSO");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
- CTLFLAG_RD, &txr->no_tx_dma_setup,
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &txr->no_desc_avail,
- "Queue No Descriptor Available");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &txr->total_packets,
"Queue Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
- CTLFLAG_RD, &txr->br->br_drops,
- "Packets dropped in buf_ring");
}
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- struct lro_ctrl *lro = &rxr->lro;
-
+ for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+ CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
+ sizeof(&adapter->rx_queues[i]),
+ ixgbe_sysctl_interrupt_rate_handler, "IU",
+ "Interrupt Rate");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
+ "irqs on this queue");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
@@ -1480,10 +1585,6 @@
CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
- SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
- CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
- SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
- CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
}
/* MAC stats get their own sub node */
@@ -1679,7 +1780,7 @@
} /* ixgbe_sysctl_rdt_handler */
/************************************************************************
- * ixgbe_register_vlan
+ * ixgbe_if_vlan_register
*
* Run via vlan config EVENT, it enables us to use the
* HW Filter table since we can get the vlan id. This
@@ -1687,60 +1788,45 @@
* VFTA, init will repopulate the real table.
************************************************************************/
static void
-ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
u16 index, bit;
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
- ixgbe_setup_vlan_hw_support(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-} /* ixgbe_register_vlan */
+ ixgbe_setup_vlan_hw_support(ctx);
+} /* ixgbe_if_vlan_register */
/************************************************************************
- * ixgbe_unregister_vlan
+ * ixgbe_if_vlan_unregister
*
* Run via vlan unconfig EVENT, remove our entry in the soft vfta.
************************************************************************/
static void
-ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
u16 index, bit;
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
- ixgbe_setup_vlan_hw_support(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-} /* ixgbe_unregister_vlan */
+ ixgbe_setup_vlan_hw_support(ctx);
+} /* ixgbe_if_vlan_unregister */
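The shadow VFTA bookkeeping above packs each 12-bit VLAN ID into 128 32-bit words: bits 5..11 of the tag pick the word, bits 0..4 pick the bit, covering the full 0..4095 ID space. A standalone sketch of the same arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t shadow_vfta[128] = { 0 };
	uint16_t vtag = 1234;			/* example VLAN ID */
	uint16_t index = (vtag >> 5) & 0x7F;	/* which 32-bit word */
	uint16_t bit = vtag & 0x1F;		/* which bit in that word */

	shadow_vfta[index] |= (1u << bit);	/* vlan_register */
	printf("vtag %u -> word %u, bit %u\n", vtag, index, bit);
	shadow_vfta[index] &= ~(1u << bit);	/* vlan_unregister */
	return (0);
}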
/************************************************************************
* ixgbe_setup_vlan_hw_support
************************************************************************/
static void
-ixgbe_setup_vlan_hw_support(struct adapter *adapter)
+ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
- struct ifnet *ifp = adapter->ifp;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
struct rx_ring *rxr;
int i;
@@ -1757,8 +1843,8 @@
return;
/* Setup the queues for vlans */
- for (i = 0; i < adapter->num_queues; i++) {
- rxr = &adapter->rx_rings[i];
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rxr = &adapter->rx_queues[i].rxr;
/* On 82599 the VLAN enable is per/queue in RXDCTL */
if (hw->mac.type != ixgbe_mac_82598EB) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
@@ -1799,11 +1885,11 @@
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 offset;
- u16 link;
- int bus_info_valid = TRUE;
+ device_t dev = iflib_get_dev(adapter->ctx);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int bus_info_valid = TRUE;
+ u32 offset;
+ u16 link;
/* Some devices are behind an internal bridge */
switch (hw->device_id) {
@@ -1889,84 +1975,110 @@
} /* ixgbe_get_slot_info */
/************************************************************************
- * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
+ * ixgbe_if_msix_intr_assign
+ *
+ * Setup MSI-X Interrupt resources and handlers
************************************************************************/
-static inline void
-ixgbe_enable_queue(struct adapter *adapter, u32 vector)
+static int
+ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *rx_que = adapter->rx_queues;
+ struct ix_tx_queue *tx_que;
+ int error, rid, vector = 0;
+ int cpu_id = 0;
+ char buf[16];
+
+ /*
+ * Queue interrupt vectors start at 0 (bus rid = vector + 1);
+ * the admin/link interrupt takes the vector after the last queue.
+ */
+ for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
+ rid = vector + 1;
- if (hw->mac.type == ixgbe_mac_82598EB) {
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
- } else {
- mask = (queue & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
- mask = (queue >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
- }
-} /* ixgbe_enable_queue */
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
+ IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
-/************************************************************************
- * ixgbe_disable_queue
- ************************************************************************/
-static inline void
-ixgbe_disable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
+ if (error) {
+ device_printf(iflib_get_dev(ctx),
+ "Failed to allocate que int %d err: %d", i, error);
+ adapter->num_rx_queues = i + 1;
+ goto fail;
+ }
+
+ rx_que->msix = vector;
+ adapter->active_queues |= (u64)1 << rx_que->msix;
+ if (adapter->feat_en & IXGBE_FEATURE_RSS) {
+ /*
+ * The queue ID is used as the RSS layer bucket ID.
+ * We look up the queue ID -> RSS CPU ID and select
+ * that.
+ */
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
+ } else {
+ /*
+ * Bind the msix vector, and thus the
+ * rings to the corresponding cpu.
+ *
+ * This just happens to match the default RSS
+ * round-robin bucket -> queue -> CPU allocation.
+ */
+ if (adapter->num_rx_queues > 1)
+ cpu_id = i;
+ }
- if (hw->mac.type == ixgbe_mac_82598EB) {
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
- } else {
- mask = (queue & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
- mask = (queue >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
}
-} /* ixgbe_disable_queue */
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ tx_que = &adapter->tx_queues[i];
+ tx_que->msix = i % adapter->num_rx_queues;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que,
+ tx_que->txr.me, buf);
+ }
+ rid = vector + 1;
+ error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
+ IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
+ if (error) {
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register admin handler");
+ return (error);
+ }
-/************************************************************************
+ adapter->vector = vector;
+
+ return (0);
+fail:
+ iflib_irq_free(ctx, &adapter->irq);
+ rx_que = adapter->rx_queues;
+ for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(ctx, &rx_que->que_irq);
+
+ return (error);
+} /* ixgbe_if_msix_intr_assign */
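For reference, the rid/vector layout the function above produces: RX queue i gets MSI-X vector i with bus rid i + 1 (rids are 1-based), TX queues piggyback on the RX vectors via i % num_rx_queues, and the admin/link interrupt takes the vector after the last queue. A sketch that reproduces the numbering for an assumed 4-queue adapter (plain C, not driver code):

#include <stdio.h>

int
main(void)
{
	int num_rx_queues = 4;	/* assumed queue count */
	int vector = 0;

	for (int i = 0; i < num_rx_queues; i++, vector++)
		printf("rxq%d  -> vector %d, rid %d\n", i, vector, vector + 1);
	printf("admin -> vector %d, rid %d\n", vector, vector + 1);
	return (0);
}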
+
+/************************************************************************
* ixgbe_msix_que - MSI-X Queue Interrupt Service routine
- ************************************************************************/
-void
+ ************************************************************************/
+static int
ixgbe_msix_que(void *arg)
{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = que->txr;
- struct rx_ring *rxr = que->rxr;
- bool more;
- u32 newitr = 0;
+ struct ix_rx_queue *que = arg;
+ struct adapter *adapter = que->adapter;
+#ifdef notyet
+ struct tx_ring *txr = &que->txr;
+#endif
+ struct rx_ring *rxr = &que->rxr;
+ struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
+ u32 newitr = 0;
/* Protect against spurious interrupts */
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
+ return (FILTER_HANDLED);
ixgbe_disable_queue(adapter, que->msix);
++que->irqs;
- more = ixgbe_rxeof(que);
-
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
- if (!ixgbe_ring_empty(ifp, txr->br))
- ixgbe_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
-
- /* Do AIM now? */
-
if (adapter->enable_aim == FALSE)
goto no_calc;
/*
* Do Adaptive Interrupt Moderation:
@@ -1981,11 +2093,10 @@
que->eitr_setting = 0;
/* Idle, do nothing */
- if ((txr->bytes == 0) && (rxr->bytes == 0))
- goto no_calc;
-
+#ifdef notyet
if ((txr->bytes) && (txr->packets))
newitr = txr->bytes/txr->packets;
+#endif
if ((rxr->bytes) && (rxr->packets))
newitr = max(newitr, (rxr->bytes / rxr->packets));
newitr += 24; /* account for hardware frame, crc */
@@ -2008,18 +2119,16 @@
que->eitr_setting = newitr;
/* Reset state */
+#ifdef notyet
txr->bytes = 0;
txr->packets = 0;
+#endif
rxr->bytes = 0;
rxr->packets = 0;
no_calc:
- if (more)
- taskqueue_enqueue(que->tq, &que->que_task);
- else
- ixgbe_enable_queue(adapter, que->msix);
- return;
+ return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_msix_que */
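The AIM path above estimates the average frame size seen since the last interrupt (rx bytes/packets, plus 24 bytes for hardware framing and CRC) and derives the next EITR setting from it; the mapping of that estimate onto a latency bucket happens in code outside this hunk. A standalone sketch of the visible step:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the average-frame-size step of AIM. */
static uint32_t
aim_avg_frame(uint32_t rx_bytes, uint32_t rx_packets)
{
	uint32_t newitr = 0;

	if (rx_bytes != 0 && rx_packets != 0)
		newitr = rx_bytes / rx_packets;
	newitr += 24;		/* account for hardware frame, crc */
	return (newitr);
}

int
main(void)
{
	/* e.g. 64 full-sized frames since the last interrupt */
	printf("avg estimate: %u\n", aim_avg_frame(64 * 1500, 64));
	return (0);
}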
/************************************************************************
@@ -2029,23 +2138,21 @@
* the interface using ifconfig.
************************************************************************/
static void
-ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
int layer;
- INIT_DEBUGOUT("ixgbe_media_status: begin");
- IXGBE_CORE_LOCK(adapter);
- ixgbe_update_link_status(adapter);
+ INIT_DEBUGOUT("ixgbe_if_media_status: begin");
+
+ iflib_admin_intr_deferred(ctx);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (!adapter->link_active) {
- IXGBE_CORE_UNLOCK(adapter);
+ if (!adapter->link_active)
return;
- }
ifmr->ifm_status |= IFM_ACTIVE;
layer = adapter->phy_layer;
@@ -2173,7 +2280,6 @@
if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
ifmr->ifm_active |= IFM_UNKNOWN;
-#if __FreeBSD_version >= 900025
/* Display current flow control setting used on link */
if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
hw->fc.current_mode == ixgbe_fc_full)
@@ -2181,11 +2287,6 @@
if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
hw->fc.current_mode == ixgbe_fc_full)
ifmr->ifm_active |= IFM_ETH_TXPAUSE;
-#endif
-
- IXGBE_CORE_UNLOCK(adapter);
-
- return;
-} /* ixgbe_media_status */
+} /* ixgbe_if_media_status */
/************************************************************************
@@ -2195,20 +2296,20 @@
* media/mediopt option with ifconfig.
************************************************************************/
static int
-ixgbe_media_change(struct ifnet *ifp)
+ixgbe_if_media_change(if_ctx_t ctx)
{
- struct adapter *adapter = ifp->if_softc;
- struct ifmedia *ifm = &adapter->media;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed speed = 0;
- INIT_DEBUGOUT("ixgbe_media_change: begin");
+ INIT_DEBUGOUT("ixgbe_if_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
if (hw->phy.media_type == ixgbe_media_type_backplane)
- return (ENODEV);
+ return (EPERM);
/*
* We don't actually need to check against the supported
@@ -2216,48 +2317,48 @@
* that for us.
*/
switch (IFM_SUBTYPE(ifm->ifm_media)) {
- case IFM_AUTO:
- case IFM_10G_T:
- speed |= IXGBE_LINK_SPEED_100_FULL;
- speed |= IXGBE_LINK_SPEED_1GB_FULL;
- speed |= IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IFM_10G_LRM:
- case IFM_10G_LR:
+ case IFM_AUTO:
+ case IFM_10G_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IFM_10G_LRM:
+ case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
- case IFM_10G_SR: /* KR, too */
- case IFM_10G_CX4: /* KX4 */
+ case IFM_10G_SR: /* KR, too */
+ case IFM_10G_CX4: /* KX4 */
#else
- case IFM_10G_KR:
- case IFM_10G_KX4:
+ case IFM_10G_KR:
+ case IFM_10G_KX4:
#endif
- speed |= IXGBE_LINK_SPEED_1GB_FULL;
- speed |= IXGBE_LINK_SPEED_10GB_FULL;
- break;
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
#ifndef IFM_ETH_XTYPE
- case IFM_1000_CX: /* KX */
+ case IFM_1000_CX: /* KX */
#else
- case IFM_1000_KX:
+ case IFM_1000_KX:
#endif
- case IFM_1000_LX:
- case IFM_1000_SX:
- speed |= IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IFM_1000_T:
- speed |= IXGBE_LINK_SPEED_100_FULL;
- speed |= IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IFM_10G_TWINAX:
- speed |= IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IFM_100_TX:
- speed |= IXGBE_LINK_SPEED_100_FULL;
- break;
- case IFM_10_T:
- speed |= IXGBE_LINK_SPEED_10_FULL;
- break;
- default:
- goto invalid;
+ case IFM_1000_LX:
+ case IFM_1000_SX:
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IFM_1000_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IFM_10G_TWINAX:
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IFM_100_TX:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IFM_10_T:
+ speed |= IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ goto invalid;
}
hw->mac.autotry_restart = TRUE;
@@ -2271,44 +2372,28 @@
return (0);
invalid:
- device_printf(adapter->dev, "Invalid media type!\n");
+ device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
return (EINVAL);
-} /* ixgbe_media_change */
+} /* ixgbe_if_media_change */
/************************************************************************
* ixgbe_set_promisc
************************************************************************/
-static void
-ixgbe_set_promisc(struct adapter *adapter)
+static int
+ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
- struct ifnet *ifp = adapter->ifp;
- int mcnt = 0;
- u32 rctl;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ u32 rctl;
+ int mcnt = 0;
rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
rctl &= (~IXGBE_FCTRL_UPE);
if (ifp->if_flags & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else {
- struct ifmultiaddr *ifma;
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
- break;
- mcnt++;
- }
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
+ mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
}
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
rctl &= (~IXGBE_FCTRL_MPE);
@@ -2322,12 +2407,13 @@
rctl &= ~IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
}
-} /* ixgbe_set_promisc */
+ return (0);
+} /* ixgbe_if_promisc_set */
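The FCTRL logic resolves to three states: UPE|MPE for full promiscuous mode, MPE alone when all-multicast is requested or the filter table would overflow, and neither bit for normal filtering. A hedged sketch of that decision with the flag handling reduced to booleans (the mask values are illustrative, not the ixgbe definitions):

#include <stdint.h>
#include <stdio.h>

#define FCTRL_UPE 0x00000200u	/* unicast promiscuous; value illustrative */
#define FCTRL_MPE 0x00000100u	/* multicast promiscuous; value illustrative */

static uint32_t
fctrl_for(int promisc, int allmulti_or_overflow, uint32_t rctl)
{
	rctl &= ~(FCTRL_UPE | FCTRL_MPE);
	if (promisc)
		rctl |= (FCTRL_UPE | FCTRL_MPE);
	else if (allmulti_or_overflow)
		rctl |= FCTRL_MPE;
	return (rctl);
}

int
main(void)
{
	printf("promisc:  0x%08x\n", fctrl_for(1, 0, 0));
	printf("allmulti: 0x%08x\n", fctrl_for(0, 1, 0));
	printf("normal:   0x%08x\n", fctrl_for(0, 0, 0));
	return (0);
}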
/************************************************************************
* ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
************************************************************************/
-static void
+static int
ixgbe_msix_link(void *arg)
{
struct adapter *adapter = arg;
@@ -2350,7 +2436,7 @@
/* Link status change */
if (eicr & IXGBE_EICR_LSC) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
- taskqueue_enqueue(adapter->tq, &adapter->link_task);
+ iflib_admin_intr_deferred(adapter->ctx);
}
if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
@@ -2358,17 +2444,16 @@
(eicr & IXGBE_EICR_FLOW_DIR)) {
/* This is probably overkill :) */
if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
- return;
+ return (FILTER_HANDLED);
/* Disable the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
- taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
- }
-
- if (eicr & IXGBE_EICR_ECC) {
- device_printf(adapter->dev,
- "CRITICAL: ECC ERROR!! Please Reboot!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+ GROUPTASK_ENQUEUE(&adapter->fdir_task);
+ } else if (eicr & IXGBE_EICR_ECC) {
+ device_printf(iflib_get_dev(adapter->ctx),
+ "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
/* Check for over temp condition */
if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
@@ -2383,8 +2468,10 @@
retval = hw->phy.ops.check_overtemp(hw);
if (retval != IXGBE_ERR_OVERTEMP)
break;
- device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
- device_printf(adapter->dev, "System shutdown required!\n");
+ device_printf(iflib_get_dev(adapter->ctx),
+ "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(iflib_get_dev(adapter->ctx),
+ "System shutdown required!\n");
break;
default:
if (!(eicr & IXGBE_EICR_TS))
@@ -2392,8 +2479,10 @@
retval = hw->phy.ops.check_overtemp(hw);
if (retval != IXGBE_ERR_OVERTEMP)
break;
- device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
- device_printf(adapter->dev, "System shutdown required!\n");
+ device_printf(iflib_get_dev(adapter->ctx),
+ "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(iflib_get_dev(adapter->ctx),
+ "System shutdown required!\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
break;
}
@@ -2402,7 +2491,7 @@
/* Check for VF message */
if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
(eicr & IXGBE_EICR_MAILBOX))
- taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
+ GROUPTASK_ENQUEUE(&adapter->mbx_task);
}
if (ixgbe_is_sfp(hw)) {
@@ -2414,14 +2503,14 @@
if (eicr & eicr_mask) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ GROUPTASK_ENQUEUE(&adapter->mod_task);
}
if ((hw->mac.type == ixgbe_mac_82599EB) &&
(eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
IXGBE_WRITE_REG(hw, IXGBE_EICR,
IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+ GROUPTASK_ENQUEUE(&adapter->msf_task);
}
}
@@ -2435,11 +2524,13 @@
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
- taskqueue_enqueue(adapter->tq, &adapter->phy_task);
+ GROUPTASK_ENQUEUE(&adapter->phy_task);
}
/* Re-enable other interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+ return (FILTER_HANDLED);
} /* ixgbe_msix_link */
/************************************************************************
@@ -2448,9 +2539,9 @@
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
- struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
- int error;
- unsigned int reg, usec, rate;
+ struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
+ int error;
+ unsigned int reg, usec, rate;
reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
usec = ((reg & 0x0FF8) >> 3);
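As the mask shows, the EITR interval field occupies bits 3..11 of the register, so the handler extracts it with (reg & 0x0FF8) >> 3 and any value written back must keep the low three bits clear. A quick standalone check of that round trip:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t reg = 0x0C80;			/* sample EITR contents */
	uint32_t usec = (reg & 0x0FF8) >> 3;	/* interval field, bits 3..11 */

	printf("field = %u\n", usec);		/* 400 */
	/* Re-encode: shift back and mask so bits 0..2 stay clear. */
	printf("re-encoded = 0x%04x\n", (usec << 3) & 0x0FF8);
	return (0);
}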
@@ -2478,52 +2569,54 @@
* ixgbe_add_device_sysctls
************************************************************************/
static void
-ixgbe_add_device_sysctls(struct adapter *adapter)
+ixgbe_add_device_sysctls(if_ctx_t ctx)
{
- device_t dev = adapter->dev;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &adapter->hw;
struct sysctl_oid_list *child;
- struct sysctl_ctx_list *ctx;
+ struct sysctl_ctx_list *ctx_list;
- ctx = device_get_sysctl_ctx(dev);
+ ctx_list = device_get_sysctl_ctx(dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
/* Sysctls for all devices */
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
+ CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
+ IXGBE_SYSCTL_DESC_SET_FC);
adapter->enable_aim = ixgbe_enable_aim;
- SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
+ SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
&adapter->enable_aim, 1, "Interrupt Moderation");
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
IXGBE_SYSCTL_DESC_ADV_SPEED);
#ifdef IXGBE_DEBUG
/* testing sysctls (for all devices) */
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
"I", "PCI Power State");
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
/* for X550 series devices */
if (hw->mac.type >= ixgbe_mac_X550)
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
- CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
+ CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
"I", "DMA Coalesce");
/* for WoL-capable devices */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
- CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
+ CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
"I", "Enable/Disable Wake Up Filters");
}
@@ -2532,22 +2625,22 @@
struct sysctl_oid *phy_node;
struct sysctl_oid_list *phy_list;
- phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
+ phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
CTLFLAG_RD, NULL, "External PHY sysctls");
phy_list = SYSCTL_CHILDREN(phy_node);
- SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
- CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
+ SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
+ CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
"I", "Current External PHY Temperature (Celsius)");
- SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
- CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
+ SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
+ "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
ixgbe_sysctl_phy_overtemp_occurred, "I",
"External PHY High Temperature Event Occurred");
}
if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
- SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
}
@@ -2557,10 +2650,11 @@
* ixgbe_allocate_pci_resources
************************************************************************/
static int
-ixgbe_allocate_pci_resources(struct adapter *adapter)
+ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
- device_t dev = adapter->dev;
- int rid;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
@@ -2591,83 +2685,37 @@
* return 0 on success, positive on failure
************************************************************************/
static int
-ixgbe_detach(device_t dev)
+ixgbe_if_detach(if_ctx_t ctx)
{
- struct adapter *adapter = device_get_softc(dev);
- struct ix_queue *que = adapter->queues;
- struct tx_ring *txr = adapter->tx_rings;
- u32 ctrl_ext;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ u32 ctrl_ext;
INIT_DEBUGOUT("ixgbe_detach: begin");
- /* Make sure VLANS are not using driver */
- if (adapter->ifp->if_vlantrunk != NULL) {
- device_printf(dev, "Vlan in use, detach first\n");
- return (EBUSY);
- }
-
if (ixgbe_pci_iov_detach(dev) != 0) {
device_printf(dev, "SR-IOV in use; detach first.\n");
return (EBUSY);
}
- ether_ifdetach(adapter->ifp);
- /* Stop the adapter */
- IXGBE_CORE_LOCK(adapter);
- ixgbe_setup_low_power_mode(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
- if (que->tq) {
- if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
- taskqueue_drain(que->tq, &txr->txq_task);
- taskqueue_drain(que->tq, &que->que_task);
- taskqueue_free(que->tq);
- }
- }
+ iflib_config_gtask_deinit(&adapter->mod_task);
+ iflib_config_gtask_deinit(&adapter->msf_task);
+ iflib_config_gtask_deinit(&adapter->phy_task);
+ if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
+ iflib_config_gtask_deinit(&adapter->mbx_task);
- /* Drain the Link queue */
- if (adapter->tq) {
- taskqueue_drain(adapter->tq, &adapter->link_task);
- taskqueue_drain(adapter->tq, &adapter->mod_task);
- taskqueue_drain(adapter->tq, &adapter->msf_task);
- if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
- taskqueue_drain(adapter->tq, &adapter->mbx_task);
- taskqueue_drain(adapter->tq, &adapter->phy_task);
- if (adapter->feat_en & IXGBE_FEATURE_FDIR)
- taskqueue_drain(adapter->tq, &adapter->fdir_task);
- taskqueue_free(adapter->tq);
- }
+ ixgbe_setup_low_power_mode(ctx);
/* let hardware know driver is unloading */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
- /* Unregister VLAN events */
- if (adapter->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
- if (adapter->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
- callout_drain(&adapter->timer);
-
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
- netmap_detach(adapter->ifp);
-
- ixgbe_free_pci_resources(adapter);
- bus_generic_detach(dev);
- if_free(adapter->ifp);
-
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
- free(adapter->queues, M_DEVBUF);
+ ixgbe_free_pci_resources(ctx);
free(adapter->mta, M_IXGBE);
- IXGBE_CORE_LOCK_DESTROY(adapter);
-
return (0);
-} /* ixgbe_detach */
+} /* ixgbe_if_detach */
/************************************************************************
* ixgbe_setup_low_power_mode - LPLU/WoL preparation
@@ -2675,13 +2723,15 @@
* Prepare the adapter/port for LPLU and/or WoL
************************************************************************/
static int
-ixgbe_setup_low_power_mode(struct adapter *adapter)
+ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
+ device_t dev = iflib_get_dev(ctx);
s32 error = 0;
- mtx_assert(&adapter->core_mtx, MA_OWNED);
+ if (!hw->wol_enabled)
+ ixgbe_set_phy_power(hw, FALSE);
/* Limit power management flow to X550EM baseT */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
@@ -2707,15 +2757,15 @@
IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
/* X550EM baseT adapters need a special LPLU flow */
- hw->phy.reset_disable = true;
- ixgbe_stop(adapter);
+ hw->phy.reset_disable = TRUE;
+ ixgbe_if_stop(ctx);
error = hw->phy.ops.enter_lplu(hw);
if (error)
device_printf(dev, "Error entering LPLU: %d\n", error);
- hw->phy.reset_disable = false;
+ hw->phy.reset_disable = FALSE;
} else {
/* Just stop for other adapters */
- ixgbe_stop(adapter);
+ ixgbe_if_stop(ctx);
}
return error;
@@ -2725,19 +2775,16 @@
* ixgbe_shutdown - Shutdown entry point
************************************************************************/
static int
-ixgbe_shutdown(device_t dev)
+ixgbe_if_shutdown(if_ctx_t ctx)
{
- struct adapter *adapter = device_get_softc(dev);
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixgbe_shutdown: begin");
- IXGBE_CORE_LOCK(adapter);
- error = ixgbe_setup_low_power_mode(adapter);
- IXGBE_CORE_UNLOCK(adapter);
+ error = ixgbe_setup_low_power_mode(ctx);
return (error);
-} /* ixgbe_shutdown */
+} /* ixgbe_if_shutdown */
/************************************************************************
* ixgbe_suspend
@@ -2745,21 +2792,16 @@
* From D0 to D3
************************************************************************/
static int
-ixgbe_suspend(device_t dev)
+ixgbe_if_suspend(if_ctx_t ctx)
{
- struct adapter *adapter = device_get_softc(dev);
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixgbe_suspend: begin");
- IXGBE_CORE_LOCK(adapter);
-
- error = ixgbe_setup_low_power_mode(adapter);
-
- IXGBE_CORE_UNLOCK(adapter);
+ error = ixgbe_setup_low_power_mode(ctx);
return (error);
-} /* ixgbe_suspend */
+} /* ixgbe_if_suspend */
/************************************************************************
* ixgbe_resume
@@ -2767,17 +2809,16 @@
* From D3 to D0
************************************************************************/
static int
-ixgbe_resume(device_t dev)
+ixgbe_if_resume(if_ctx_t ctx)
{
- struct adapter *adapter = device_get_softc(dev);
- struct ifnet *ifp = adapter->ifp;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct ixgbe_hw *hw = &adapter->hw;
u32 wus;
INIT_DEBUGOUT("ixgbe_resume: begin");
- IXGBE_CORE_LOCK(adapter);
-
/* Read & clear WUS register */
wus = IXGBE_READ_REG(hw, IXGBE_WUS);
if (wus)
@@ -2792,81 +2833,105 @@
* will re-advertise all previous advertised speeds
*/
if (ifp->if_flags & IFF_UP)
- ixgbe_init_locked(adapter);
-
- IXGBE_CORE_UNLOCK(adapter);
+ ixgbe_if_init(ctx);
return (0);
-} /* ixgbe_resume */
+} /* ixgbe_if_resume */
/************************************************************************
- * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
+ * ixgbe_if_mtu_set - Ioctl mtu entry point
*
- * Takes the ifnet's if_capenable flags (e.g. set by the user using
- * ifconfig) and indicates to the OS via the ifnet's if_hwassist
- * field what mbuf offload flags the driver will understand.
+ * Return 0 on success, EINVAL on failure
************************************************************************/
-static void
-ixgbe_set_if_hwassist(struct adapter *adapter)
+static int
+ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
- struct ifnet *ifp = adapter->ifp;
-
- ifp->if_hwassist = 0;
-#if __FreeBSD_version >= 1000000
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_IP_TSO;
- if (ifp->if_capenable & IFCAP_TSO6)
- ifp->if_hwassist |= CSUM_IP6_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- ifp->if_hwassist |= CSUM_IP_SCTP;
- }
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
- ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- ifp->if_hwassist |= CSUM_IP6_SCTP;
- }
-#else
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- ifp->if_hwassist |= CSUM_SCTP;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ int error = 0;
+
+ IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
+
+ if (mtu > IXGBE_MAX_MTU) {
+ error = EINVAL;
+ } else {
+ adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
}
-#endif
-} /* ixgbe_set_if_hwassist */
+
+ return (error);
+} /* ixgbe_if_mtu_set */
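ixgbe_if_mtu_set only validates the request and records the resulting max frame size; iflib reinitializes the interface afterwards. Assuming IXGBE_MTU_HDR is the usual Ethernet header plus CRC (18 bytes; the real macro may differ), the arithmetic is simply:

#include <stdio.h>

#define ETHER_HDR_LEN	14
#define ETHER_CRC_LEN	4
/* Assumption: IXGBE_MTU_HDR = header + CRC, as in ixgbe.h. */
#define IXGBE_MTU_HDR	(ETHER_HDR_LEN + ETHER_CRC_LEN)

int
main(void)
{
	unsigned mtu = 9000;	/* a jumbo-frame MTU */

	printf("max_frame_size = %u\n", mtu + IXGBE_MTU_HDR);	/* 9018 */
	return (0);
}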
/************************************************************************
- * ixgbe_init_locked - Init entry point
+ * ixgbe_if_crcstrip_set
+ ************************************************************************/
+static void
+ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
+{
+ struct adapter *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t hl, rxc;
+
+ /*
+ * CRC stripping is set in two places:
+ * IXGBE_HLREG0 (modified on ixgbe_if_init and hw reset)
+ * IXGBE_RDRXCTL (set by the original driver in
+ * ixgbe_setup_hw_rsc(), called from ixgbe_if_init;
+ * the setting is disabled when netmap is compiled in).
+ * We update the values here, but also in ixgbe.c, because
+ * ixgbe_if_init is sometimes called outside our control.
+ */
+
+ hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+#ifdef NETMAP
+ if (netmap_verbose)
+ D("%s read HLREG 0x%x rxc 0x%x",
+ onoff ? "enter" : "exit", hl, rxc);
+#endif
+ /* hw requirements ... */
+ rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ rxc |= IXGBE_RDRXCTL_RSCACKC;
+ if (onoff && !crcstrip) {
+ /* keep the CRC; fast RX path */
+ hl &= ~IXGBE_HLREG0_RXCRCSTRP;
+ rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ } else {
+ /* reset default mode */
+ hl |= IXGBE_HLREG0_RXCRCSTRP;
+ rxc |= IXGBE_RDRXCTL_CRCSTRIP;
+ }
+#ifdef NETMAP
+ if (netmap_verbose)
+ D("%s write HLREG 0x%x rxc 0x%x",
+ onoff ? "enter" : "exit", hl, rxc);
+#endif
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
+} /* ixgbe_if_crcstrip_set */
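The function toggles between exactly two register states: keeping the CRC clears HLREG0.RXCRCSTRP and RDRXCTL.CRCSTRIP, and the default restores both (the hardware requires the two settings to agree). A sketch of the transition with illustrative mask values:

#include <stdint.h>
#include <stdio.h>

#define HLREG0_RXCRCSTRP 0x00000002u	/* mask values illustrative */
#define RDRXCTL_CRCSTRIP 0x00000002u

int
main(void)
{
	uint32_t hl = HLREG0_RXCRCSTRP, rxc = RDRXCTL_CRCSTRIP;
	int onoff = 1, crcstrip = 0;	/* netmap active, keep the CRC */

	if (onoff && !crcstrip) {	/* keep the CRC: clear both bits */
		hl &= ~HLREG0_RXCRCSTRP;
		rxc &= ~RDRXCTL_CRCSTRIP;
	} else {			/* default: strip in both places */
		hl |= HLREG0_RXCRCSTRP;
		rxc |= RDRXCTL_CRCSTRIP;
	}
	printf("hl=0x%08x rxc=0x%08x\n", hl, rxc);
	return (0);
}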
+
+/************************************************************************
+ * ixgbe_if_init - Init entry point
*
* Used in two ways: It is used by the stack as an init
* entry point in network interface structure. It is also
* used by the driver as a hw/sw initialization routine to
* get to a consistent state.
*
- * return 0 on success, positive on failure
- ************************************************************************/
+ ************************************************************************/
void
-ixgbe_init_locked(struct adapter *adapter)
+ixgbe_if_init(if_ctx_t ctx)
{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- struct tx_ring *txr;
- struct rx_ring *rxr;
+ struct ix_rx_queue *rx_que;
+ struct ix_tx_queue *tx_que;
u32 txdctl, mhadd;
u32 rxdctl, rxctrl;
u32 ctrl_ext;
- int err = 0;
- mtx_assert(&adapter->core_mtx, MA_OWNED);
- INIT_DEBUGOUT("ixgbe_init_locked: begin");
+ int i, j, err;
- hw->adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
+ INIT_DEBUGOUT("ixgbe_if_init: begin");
/* Queue indices may change with IOV mode */
ixgbe_align_all_queue_indices(adapter);
@@ -2879,22 +2944,14 @@
ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
hw->addr_ctrl.rar_used_count = 1;
- /* Set hardware offload abilities from ifnet flags */
- ixgbe_set_if_hwassist(adapter);
-
- /* Prepare transmit descriptors and buffers */
- if (ixgbe_setup_transmit_structures(adapter)) {
- device_printf(dev, "Could not setup transmit structures\n");
- ixgbe_stop(adapter);
- return;
- }
-
ixgbe_init_hw(hw);
+
ixgbe_initialize_iov(adapter);
- ixgbe_initialize_transmit_units(adapter);
+
+ ixgbe_initialize_transmit_units(ctx);
/* Setup Multicast table */
- ixgbe_set_multi(adapter);
+ ixgbe_if_multi_set(ctx);
/* Determine the correct mbuf pool, based on frame size */
if (adapter->max_frame_size <= MCLBYTES)
@@ -2902,15 +2959,8 @@
else
adapter->rx_mbuf_sz = MJUMPAGESIZE;
- /* Prepare receive descriptors and buffers */
- if (ixgbe_setup_receive_structures(adapter)) {
- device_printf(dev, "Could not setup receive structures\n");
- ixgbe_stop(adapter);
- return;
- }
-
/* Configure RX settings */
- ixgbe_initialize_receive_units(adapter);
+ ixgbe_initialize_receive_units(ctx);
/* Enable SDP & MSI-X interrupts based on adapter */
ixgbe_config_gpie(adapter);
@@ -2925,8 +2975,9 @@
}
/* Now enable all the queues */
- for (int i = 0; i < adapter->num_queues; i++) {
- txr = &adapter->tx_rings[i];
+ for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
txdctl |= IXGBE_TXDCTL_ENABLE;
/* Set WTHRESH to 8, burst writeback */
@@ -2942,8 +2993,9 @@
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
}
- for (int i = 0, j = 0; i < adapter->num_queues; i++) {
- rxr = &adapter->rx_rings[i];
+ for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
+
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
if (hw->mac.type == ixgbe_mac_82598EB) {
/*
@@ -2956,7 +3008,7 @@
}
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
- for (; j < 10; j++) {
+ for (j = 0; j < 10; j++) {
if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
IXGBE_RXDCTL_ENABLE)
break;
@@ -2964,35 +3016,6 @@
msec_delay(1);
}
wmb();
-
- /*
- * In netmap mode, we must preserve the buffers made
- * available to userspace before the if_init()
- * (this is true by default on the TX side, because
- * init makes all buffers available to userspace).
- *
- * netmap_reset() and the device specific routines
- * (e.g. ixgbe_setup_receive_rings()) map these
- * buffers at the end of the NIC ring, so here we
- * must set the RDT (tail) register to make sure
- * they are not overwritten.
- *
- * In this driver the NIC ring starts at RDH = 0,
- * RDT points to the last slot available for reception (?),
- * so RDT = num_rx_desc - 1 means the whole ring is available.
- */
-#ifdef DEV_NETMAP
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
- (ifp->if_capenable & IFCAP_NETMAP)) {
- struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
- int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
-
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
- } else
-#endif /* DEV_NETMAP */
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
- adapter->num_rx_desc - 1);
}
/* Enable Receive engine */
@@ -3002,10 +3025,8 @@
rxctrl |= IXGBE_RXCTRL_RXEN;
ixgbe_enable_rx_dma(hw, rxctrl);
- callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
-
- /* Set up MSI-X routing */
- if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
+ /* Set up MSI/MSI-X routing */
+ if (ixgbe_enable_msix) {
ixgbe_configure_ivars(adapter);
/* Set up auto-mask */
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3038,6 +3059,9 @@
/* Set moderation on the Link interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
+ /* Enable power to the phy. */
+ ixgbe_set_phy_power(hw, TRUE);
+
/* Config/Enable Link */
ixgbe_config_link(adapter);
@@ -3048,13 +3072,13 @@
ixgbe_start_hw(hw);
/* Set up VLAN support and filter */
- ixgbe_setup_vlan_hw_support(adapter);
+ ixgbe_setup_vlan_hw_support(ctx);
/* Setup DMA Coalescing */
ixgbe_config_dmac(adapter);
/* And now turn on interrupts */
- ixgbe_enable_intr(adapter);
+ ixgbe_if_enable_intr(ctx);
/* Enable the use of the MBX by the VF's */
if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
@@ -3063,28 +3087,9 @@
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
- return;
-} /* ixgbe_init_locked */
+} /* ixgbe_if_init */
/************************************************************************
- * ixgbe_init
- ************************************************************************/
-static void
-ixgbe_init(void *arg)
-{
- struct adapter *adapter = arg;
-
- IXGBE_CORE_LOCK(adapter);
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- return;
-} /* ixgbe_init */
-
-/************************************************************************
* ixgbe_set_ivar
*
* Setup the correct IVAR register for a particular MSI-X interrupt
@@ -3102,7 +3107,6 @@
vector |= IXGBE_IVAR_ALLOC_VAL;
switch (hw->mac.type) {
-
case ixgbe_mac_82598EB:
if (type == -1)
entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
@@ -3114,7 +3118,6 @@
ivar |= (vector << (8 * (entry & 0x3)));
IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
break;
-
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
@@ -3133,7 +3136,6 @@
ivar |= (vector << index);
IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
}
-
default:
break;
}
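On 82598 each 32-bit IVAR register carries four 8-bit entries, so the entry's low two bits select the byte lane and the vector, tagged with IXGBE_IVAR_ALLOC_VAL, is shifted into it. A standalone rehearsal of that packing (ALLOC_VAL assumed to be 0x80 here):

#include <stdint.h>
#include <stdio.h>

#define IVAR_ALLOC_VAL 0x80u	/* "entry valid" bit; assumed value */

int
main(void)
{
	uint32_t ivar = 0;
	uint8_t entry = 2;			/* entry within the register */
	uint8_t vector = 5 | IVAR_ALLOC_VAL;	/* MSI-X vector 5, marked valid */

	ivar &= ~((uint32_t)0xFF << (8 * (entry & 0x3)));	/* clear lane */
	ivar |= ((uint32_t)vector << (8 * (entry & 0x3)));	/* install */
	printf("ivar = 0x%08x\n", ivar);	/* 0x00850000 */
	return (0);
}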
@@ -3145,8 +3147,9 @@
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
- struct ix_queue *que = adapter->queues;
- u32 newitr;
+ struct ix_rx_queue *rx_que = adapter->rx_queues;
+ struct ix_tx_queue *tx_que = adapter->tx_queues;
+ u32 newitr;
if (ixgbe_max_interrupt_rate > 0)
newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
@@ -3159,17 +3162,21 @@
newitr = 0;
}
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- struct rx_ring *rxr = &adapter->rx_rings[i];
- struct tx_ring *txr = &adapter->tx_rings[i];
+ for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
+
/* First the RX queue entry */
- ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
- /* ... and the TX */
- ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
+ ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
+
/* Set an Initial EITR value */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
}
+ for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ /* ... and the TX */
+ ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
+ }
/* For the Link interrupt */
ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
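The initial EITR value comes straight from the ixgbe_max_interrupt_rate tunable: 4000000 / rate folds the EITR time unit and field position into one constant, and the & 0x0FF8 mask confines the result to the bits 3..11 interval field. For example, at a 31250 interrupts/s target:

#include <stdio.h>

int
main(void)
{
	int rate = 31250;	/* example interrupts/sec target */
	unsigned newitr = (4000000 / rate) & 0x0FF8;

	printf("EITR = 0x%04x\n", newitr);	/* 0x0080 */
	return (0);
}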
@@ -3185,7 +3192,7 @@
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
+ if (adapter->intr_type == IFLIB_INTR_MSIX) {
/* Enable Enhanced MSI-X mode */
gpie |= IXGBE_GPIE_MSIX_MODE
| IXGBE_GPIE_EIAME
@@ -3216,7 +3223,6 @@
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- return;
} /* ixgbe_config_gpie */
/************************************************************************
@@ -3271,41 +3277,39 @@
*
* Called whenever multicast address list is updated.
************************************************************************/
+static int
+ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
+{
+ struct adapter *adapter = arg;
+ struct ixgbe_mc_addr *mta = adapter->mta;
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ return (0);
+ if (count == MAX_NUM_MULTICAST_ADDRESSES)
+ return (0);
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ mta[count].vmdq = adapter->pool;
+
+ return (1);
+} /* ixgbe_mc_filter_apply */
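The callback follows the if_multi_apply contract as used here: it runs once per interface multicast address with the running count, returns 1 when it consumed a table slot and 0 when it skipped the entry, and the summed return values become mcnt. A hedged standalone imitation of that iteration (table size and addresses are stand-ins):

#include <stdio.h>
#include <string.h>

#define MAX_MC		4	/* stand-in for MAX_NUM_MULTICAST_ADDRESSES */
#define ETH_ALEN	6

static unsigned char mta[MAX_MC][ETH_ALEN];	/* stand-in filter table */

static int
mc_filter_apply(const unsigned char *addr, int count)
{
	if (count == MAX_MC)
		return (0);	/* table full: skip, don't advance count */
	memcpy(mta[count], addr, ETH_ALEN);
	return (1);
}

int
main(void)
{
	unsigned char addrs[6][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0, 0, 1 }, { 0x01, 0x00, 0x5e, 0, 0, 2 },
		{ 0x01, 0x00, 0x5e, 0, 0, 3 }, { 0x01, 0x00, 0x5e, 0, 0, 4 },
		{ 0x01, 0x00, 0x5e, 0, 0, 5 }, { 0x01, 0x00, 0x5e, 0, 0, 6 },
	};
	int mcnt = 0;

	for (int i = 0; i < 6; i++)
		mcnt += mc_filter_apply(addrs[i], mcnt);
	printf("mcnt = %d\n", mcnt);	/* 4: the extras were skipped */
	return (0);
}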
+
static void
-ixgbe_set_multi(struct adapter *adapter)
+ixgbe_if_multi_set(if_ctx_t ctx)
{
- struct ifmultiaddr *ifma;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_mc_addr *mta;
- struct ifnet *ifp = adapter->ifp;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
u8 *update_ptr;
int mcnt = 0;
u32 fctrl;
- IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
+ IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
mta = adapter->mta;
bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
- break;
- bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
- mta[mcnt].vmdq = adapter->pool;
- mcnt++;
- }
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
+ mcnt = if_multi_apply(ifp, ixgbe_mc_filter_apply, adapter);
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
@@ -3326,8 +3330,7 @@
ixgbe_mc_array_itr, TRUE);
}
- return;
-} /* ixgbe_set_multi */
+} /* ixgbe_if_multi_set */
/************************************************************************
* ixgbe_mc_array_itr
@@ -3356,73 +3359,25 @@
* and runs the watchdog check.
************************************************************************/
static void
-ixgbe_local_timer(void *arg)
+ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
- struct adapter *adapter = arg;
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- u64 queues = 0;
- int hung = 0;
+ struct adapter *adapter = iflib_get_softc(ctx);
- mtx_assert(&adapter->core_mtx, MA_OWNED);
+ if (qid != 0)
+ return;
/* Check for pluggable optics */
if (adapter->sfp_probe)
- if (!ixgbe_sfp_probe(adapter))
- goto out; /* Nothing to do */
-
- ixgbe_update_link_status(adapter);
- ixgbe_update_stats_counters(adapter);
-
- /*
- * Check the TX queues status
- * - mark hung queues so we don't schedule on them
- * - watchdog only if all queues show hung
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- /* Keep track of queues with work for soft irq */
- if (que->txr->busy)
- queues |= ((u64)1 << que->me);
- /*
- * Each time txeof runs without cleaning, but there
- * are uncleaned descriptors it increments busy. If
- * we get to the MAX we declare it hung.
- */
- if (que->busy == IXGBE_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- adapter->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
- adapter->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXGBE_MAX_TX_BUSY) {
- device_printf(dev,
- "Warning queue %d appears to be hung!\n", i);
- que->txr->busy = IXGBE_QUEUE_HUNG;
- ++hung;
- }
- }
+ if (!ixgbe_sfp_probe(ctx))
+ return; /* Nothing to do */
- /* Only truly watchdog if all queues show hung */
- if (hung == adapter->num_queues)
- goto watchdog;
- else if (queues != 0) { /* Force an IRQ on queues with work */
- ixgbe_rearm_queues(adapter, queues);
- }
+ ixgbe_check_link(&adapter->hw, &adapter->link_speed,
+ &adapter->link_up, 0);
-out:
- callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
- return;
+ /* Fire off the adminq task */
+ iflib_admin_intr_deferred(ctx);
-watchdog:
- device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- ixgbe_init_locked(adapter);
-} /* ixgbe_local_timer */
+} /* ixgbe_if_timer */
/************************************************************************
* ixgbe_sfp_probe
@@ -3430,10 +3385,11 @@
* Determine if a port had optics inserted.
************************************************************************/
static bool
-ixgbe_sfp_probe(struct adapter *adapter)
+ixgbe_sfp_probe(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
+ device_t dev = iflib_get_dev(ctx);
bool result = FALSE;
if ((hw->phy.type == ixgbe_phy_nl) &&
@@ -3462,11 +3418,12 @@
* ixgbe_handle_mod - Tasklet for SFP module interrupts
************************************************************************/
static void
-ixgbe_handle_mod(void *context, int pending)
+ixgbe_handle_mod(void *context)
{
- struct adapter *adapter = context;
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- device_t dev = adapter->dev;
+ device_t dev = iflib_get_dev(ctx);
u32 err, cage_full = 0;
if (adapter->hw.need_crosstalk_fix) {
@@ -3501,7 +3458,7 @@
"Setup failure - unsupported SFP+ module type.\n");
return;
}
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+ GROUPTASK_ENQUEUE(&adapter->msf_task);
} /* ixgbe_handle_mod */
@@ -3509,9 +3466,10 @@
* ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
************************************************************************/
static void
-ixgbe_handle_msf(void *context, int pending)
+ixgbe_handle_msf(void *context)
{
- struct adapter *adapter = context;
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg;
bool negotiate;
@@ -3526,18 +3484,19 @@
hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Adjust media types shown in ifconfig */
- ifmedia_removeall(&adapter->media);
- ixgbe_add_media_types(adapter);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+ ifmedia_removeall(adapter->media);
+ ixgbe_add_media_types(adapter->ctx);
+ ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
/************************************************************************
* ixgbe_handle_phy - Tasklet for external PHY interrupts
************************************************************************/
static void
-ixgbe_handle_phy(void *context, int pending)
+ixgbe_handle_phy(void *context)
{
- struct adapter *adapter = context;
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
int error;
@@ -3550,28 +3509,18 @@
} /* ixgbe_handle_phy */
/************************************************************************
- * ixgbe_stop - Stop the hardware
+ * ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
* global reset on the MAC and deallocates TX/RX buffers.
************************************************************************/
static void
-ixgbe_stop(void *arg)
+ixgbe_if_stop(if_ctx_t ctx)
{
- struct ifnet *ifp;
- struct adapter *adapter = arg;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- ifp = adapter->ifp;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- INIT_DEBUGOUT("ixgbe_stop: begin\n");
- ixgbe_disable_intr(adapter);
- callout_stop(&adapter->timer);
-
- /* Let the stack know...*/
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
ixgbe_reset_hw(hw);
hw->adapter_stopped = FALSE;
@@ -3583,13 +3532,13 @@
/* Update the stack */
adapter->link_up = FALSE;
- ixgbe_update_link_status(adapter);
+ ixgbe_if_update_admin_status(ctx);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
return;
-} /* ixgbe_stop */
+} /* ixgbe_if_stop */
/************************************************************************
- * ixgbe_update_link_status - Update OS on link state
+ * ixgbe_if_update_admin_status - Update OS on link state
@@ -3599,10 +3548,10 @@
* a link interrupt.
************************************************************************/
static void
-ixgbe_update_link_status(struct adapter *adapter)
+ixgbe_if_update_admin_status(if_ctx_t ctx)
{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
if (adapter->link_up) {
if (adapter->link_active == FALSE) {
@@ -3615,7 +3564,9 @@
ixgbe_fc_enable(&adapter->hw);
/* Update DMA coalescing config */
ixgbe_config_dmac(adapter);
- if_link_state_change(ifp, LINK_STATE_UP);
+ /* TODO: report the negotiated link speed instead of hardcoding 10G */
+ iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
+
if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
ixgbe_ping_all_vfs(adapter);
}
@@ -3623,15 +3574,18 @@
if (adapter->link_active == TRUE) {
if (bootverbose)
device_printf(dev, "Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
+ iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
adapter->link_active = FALSE;
if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
ixgbe_ping_all_vfs(adapter);
}
}
- return;
-} /* ixgbe_update_link_status */
+ ixgbe_update_stats_counters(adapter);
+
+ /* Re-enable link interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
+} /* ixgbe_if_update_admin_status */
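
One way to resolve the TODO above is to map the speed reported by ixgbe_check_link() onto the baudrate handed to iflib_link_state_change(); a possible sketch using the standard ixgbe speed constants (not part of this diff):

	/* sketch: report negotiated speed instead of assuming 10G */
	u64 baudrate;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		baudrate = IF_Gbps(10);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		baudrate = IF_Gbps(1);
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		baudrate = IF_Mbps(100);
		break;
	default:
		baudrate = 0;
		break;
	}
	iflib_link_state_change(ctx, LINK_STATE_UP, baudrate);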
/************************************************************************
* ixgbe_config_dmac - Configure DMA Coalescing
@@ -3648,7 +3602,7 @@
if (dcfg->watchdog_timer ^ adapter->dmac ||
dcfg->link_speed ^ adapter->link_speed) {
dcfg->watchdog_timer = adapter->dmac;
- dcfg->fcoe_en = false;
+ dcfg->fcoe_en = FALSE;
dcfg->link_speed = adapter->link_speed;
dcfg->num_tcs = 1;
@@ -3660,14 +3614,15 @@
} /* ixgbe_config_dmac */
/************************************************************************
- * ixgbe_enable_intr
+ * ixgbe_if_enable_intr
************************************************************************/
-static void
-ixgbe_enable_intr(struct adapter *adapter)
+void
+ixgbe_if_enable_intr(if_ctx_t ctx)
{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- u32 mask, fwsm;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ix_rx_queue *que = adapter->rx_queues;
+ u32 mask, fwsm;
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
@@ -3721,7 +3676,7 @@
IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
/* With MSI-X we use auto clear */
- if (adapter->msix_mem) {
+ if (adapter->intr_type == IFLIB_INTR_MSIX) {
mask = IXGBE_EIMS_ENABLE_MASK;
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
@@ -3736,21 +3691,22 @@
* allow for handling the extended (beyond 32) MSI-X
* vectors that can be used by 82599
*/
- for (int i = 0; i < adapter->num_queues; i++, que++)
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++)
ixgbe_enable_queue(adapter, que->msix);
IXGBE_WRITE_FLUSH(hw);
- return;
-} /* ixgbe_enable_intr */
+} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
-ixgbe_disable_intr(struct adapter *adapter)
+ixgbe_if_disable_intr(if_ctx_t ctx)
{
- if (adapter->msix_mem)
+ struct adapter *adapter = iflib_get_softc(ctx);
+
+ if (adapter->intr_type == IFLIB_INTR_MSIX)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
@@ -3761,53 +3717,101 @@
}
IXGBE_WRITE_FLUSH(&adapter->hw);
- return;
-} /* ixgbe_disable_intr */
+} /* ixgbe_if_disable_intr */
+
+/************************************************************************
+ * ixgbe_if_rx_queue_intr_enable
+ ************************************************************************/
+static int
+ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
+
+ ixgbe_enable_queue(adapter, que->rxr.me);
+
+ return (0);
+} /* ixgbe_if_rx_queue_intr_enable */
/************************************************************************
- * ixgbe_legacy_irq - Legacy Interrupt Service routine
+ * ixgbe_enable_queue
************************************************************************/
static void
-ixgbe_legacy_irq(void *arg)
+ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = adapter->tx_rings;
- bool more = false;
- u32 eicr, eicr_mask;
+ u64 queue = (u64)1 << vector; /* 64-bit shift; (1 << vector) overflows for vector > 31 */
+ u32 mask;
- /* Silicon errata #26 on 82598 */
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+} /* ixgbe_enable_queue */
- eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+/************************************************************************
+ * ixgbe_disable_queue
+ ************************************************************************/
+static void
+ixgbe_disable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 queue = (u64)1 << vector; /* 64-bit shift, as in ixgbe_enable_queue() */
+ u32 mask;
- ++que->irqs;
- if (eicr == 0) {
- ixgbe_enable_intr(adapter);
- return;
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
}
+} /* ixgbe_disable_queue */
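
Both helpers split the 64-bit per-vector mask across the two EIMS/EIMC extension registers on 82599 and later parts; a worked example:

	/* vector = 35: queue = (u64)1 << 35 = 0x0000000800000000 */
	mask = queue & 0xFFFFFFFF;	/* 0x0 -> EIMS_EX(0) untouched */
	mask = queue >> 32;		/* 0x8 -> bit 3 of EIMS_EX(1)  */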
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixgbe_rxeof(que);
+/************************************************************************
+ * ixgbe_intr - Legacy Interrupt Service Routine
+ ************************************************************************/
+int
+ixgbe_intr(void *arg)
+{
+ struct adapter *adapter = arg;
+ struct ix_rx_queue *que = adapter->rx_queues;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if_ctx_t ctx = adapter->ctx;
+ u32 eicr, eicr_mask;
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
- if (!ixgbe_ring_empty(ifp, txr->br))
- ixgbe_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ ++que->irqs;
+ if (eicr == 0) {
+ ixgbe_if_enable_intr(ctx);
+ return (FILTER_HANDLED);
}
/* Check for fan failure */
- if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
- ixgbe_check_fan_failure(adapter, eicr, true);
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+ (eicr & IXGBE_EICR_GPI_SDP1)) {
+ device_printf(adapter->dev,
+ "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* Link status change */
- if (eicr & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->link_task);
+ if (eicr & IXGBE_EICR_LSC) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ iflib_admin_intr_deferred(ctx);
+ }
if (ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
@@ -3818,94 +3822,52 @@
if (eicr & eicr_mask) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ GROUPTASK_ENQUEUE(&adapter->mod_task);
}
if ((hw->mac.type == ixgbe_mac_82599EB) &&
(eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
IXGBE_WRITE_REG(hw, IXGBE_EICR,
IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+ GROUPTASK_ENQUEUE(&adapter->msf_task);
}
}
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
(eicr & IXGBE_EICR_GPI_SDP0_X540))
- taskqueue_enqueue(adapter->tq, &adapter->phy_task);
+ GROUPTASK_ENQUEUE(&adapter->phy_task);
- if (more)
- taskqueue_enqueue(que->tq, &que->que_task);
- else
- ixgbe_enable_intr(adapter);
-
- return;
-} /* ixgbe_legacy_irq */
+ return (FILTER_SCHEDULE_THREAD);
+} /* ixgbe_intr */
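
ixgbe_intr() now runs as an iflib interrupt filter: returning FILTER_SCHEDULE_THREAD tells iflib to run its RX/TX servicing task, while FILTER_HANDLED (the eicr == 0 path) ends servicing with nothing scheduled. For comparison, per-queue MSI-X filters are also registered through iflib rather than bus_setup_intr(); a sketch, with the name and intr type illustrative:

	/* sketch: queue vector registration inside ifdi_msix_intr_assign */
	error = iflib_irq_alloc_generic(ctx, &que->que_irq, rid,
	    IFLIB_INTR_RX, ixgbe_msix_que, que, que->rxr.me, "rxq");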
/************************************************************************
* ixgbe_free_pci_resources
************************************************************************/
static void
-ixgbe_free_pci_resources(struct adapter *adapter)
+ixgbe_free_pci_resources(if_ctx_t ctx)
{
- struct ix_queue *que = adapter->queues;
- device_t dev = adapter->dev;
- int rid, memrid;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que = adapter->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- memrid = PCIR_BAR(MSIX_82598_BAR);
- else
- memrid = PCIR_BAR(MSIX_82599_BAR);
+ /* Release the admin/link interrupt */
+ if (adapter->intr_type == IFLIB_INTR_MSIX)
+ iflib_irq_free(ctx, &adapter->irq);
- /*
- * There is a slight possibility of a failure mode
- * in attach that will result in entering this function
- * before interrupt resources have been initialized, and
- * in that case we do not want to execute the loops below
- * We can detect this reliably by the state of the adapter
- * res pointer.
- */
- if (adapter->res == NULL)
- goto mem;
-
- /*
- * Release all msix queue resources:
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
+ if (que != NULL) {
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
+ iflib_irq_free(ctx, &que->que_irq);
}
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
}
-
- if (adapter->tag != NULL) {
- bus_teardown_intr(dev, adapter->res, adapter->tag);
- adapter->tag = NULL;
- }
-
- /* Clean the Legacy or Link interrupt last */
- if (adapter->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
- adapter->res);
-
-mem:
- if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
- (adapter->feat_en & IXGBE_FEATURE_MSIX))
- pci_release_msi(dev);
-
- if (adapter->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, memrid,
- adapter->msix_mem);
-
+ /* Release PCI BAR0 mapping */
if (adapter->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
- adapter->pci_mem);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->pci_mem);
- return;
} /* ixgbe_free_pci_resources */
/************************************************************************
@@ -3963,12 +3925,12 @@
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
adapter->hw.fc.requested_mode = fc;
- if (adapter->num_queues > 1)
+ if (adapter->num_rx_queues > 1)
ixgbe_disable_rx_drop(adapter);
break;
case ixgbe_fc_none:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
- if (adapter->num_queues > 1)
+ if (adapter->num_rx_queues > 1)
ixgbe_enable_rx_drop(adapter);
break;
default:
@@ -3998,8 +3960,8 @@
struct rx_ring *rxr;
u32 srrctl;
- for (int i = 0; i < adapter->num_queues; i++) {
- rxr = &adapter->rx_rings[i];
+ for (int i = 0; i < adapter->num_rx_queues; i++) {
+ rxr = &adapter->rx_queues[i].rxr;
srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
srrctl |= IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
@@ -4008,8 +3970,8 @@
/* enable drop for each vf */
for (int i = 0; i < adapter->num_vfs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
- IXGBE_QDE_ENABLE));
+ (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
}
} /* ixgbe_enable_rx_drop */
@@ -4023,8 +3985,8 @@
struct rx_ring *rxr;
u32 srrctl;
- for (int i = 0; i < adapter->num_queues; i++) {
- rxr = &adapter->rx_rings[i];
+ for (int i = 0; i < adapter->num_rx_queues; i++) {
+ rxr = &adapter->rx_queues[i].rxr;
srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
srrctl &= ~IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
@@ -4070,7 +4032,7 @@
static int
ixgbe_set_advertise(struct adapter *adapter, int advertise)
{
- device_t dev;
+ device_t dev = iflib_get_dev(adapter->ctx);
struct ixgbe_hw *hw;
ixgbe_link_speed speed = 0;
ixgbe_link_speed link_caps = 0;
@@ -4081,7 +4043,6 @@
if (adapter->advertise == advertise) /* no change */
return (0);
- dev = adapter->dev;
hw = &adapter->hw;
/* No speed changes for backplane media */
@@ -4200,12 +4161,12 @@
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *)arg1;
- struct ifnet *ifp = adapter->ifp;
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
int error;
- u32 newval;
+ u16 newval;
newval = adapter->dmac;
- error = sysctl_handle_int(oidp, &newval, 0, req);
+ error = sysctl_handle_16(oidp, &newval, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
@@ -4236,7 +4197,7 @@
/* Re-initialize hardware if it's already running */
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixgbe_init(adapter);
+ ifp->if_init(ifp);
return (0);
} /* ixgbe_sysctl_dmac */
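
The switch to sysctl_handle_16() matters because the handler writes back through the supplied pointer at the declared width; calling sysctl_handle_int() with a u16 local would store past the variable. The same width matching motivates sysctl_handle_32() for the 32-bit wufc value below. Minimal pattern:

	u16 newval = adapter->dmac;	/* storage and handler widths match */
	error = sysctl_handle_16(oidp, &newval, 0, req);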
@@ -4340,7 +4301,7 @@
new_wufc = adapter->wufc;
- error = sysctl_handle_int(oidp, &new_wufc, 0, req);
+ error = sysctl_handle_32(oidp, &new_wufc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (new_wufc == adapter->wufc)
@@ -4426,14 +4387,14 @@
u16 reg;
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
- device_printf(adapter->dev,
+ device_printf(iflib_get_dev(adapter->ctx),
"Device has no supported external thermal sensor.\n");
return (ENODEV);
}
if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
- device_printf(adapter->dev,
+ device_printf(iflib_get_dev(adapter->ctx),
"Error reading from PHY's current temperature register\n");
return (EAGAIN);
}
@@ -4441,7 +4402,7 @@
/* Shift temp for output */
reg = reg >> 8;
- return (sysctl_handle_int(oidp, NULL, reg, req));
+ return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */
/************************************************************************
@@ -4458,14 +4419,14 @@
u16 reg;
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
- device_printf(adapter->dev,
+ device_printf(iflib_get_dev(adapter->ctx),
"Device has no supported external thermal sensor.\n");
return (ENODEV);
}
if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
- device_printf(adapter->dev,
+ device_printf(iflib_get_dev(adapter->ctx),
"Error reading from PHY's temperature status register\n");
return (EAGAIN);
}
@@ -4473,7 +4434,7 @@
/* Get occurrence bit */
reg = !!(reg & 0x4000);
- return (sysctl_handle_int(oidp, 0, reg, req));
+ return (sysctl_handle_16(oidp, 0, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */
/************************************************************************
@@ -4490,6 +4451,7 @@
{
struct adapter *adapter = (struct adapter *)arg1;
device_t dev = adapter->dev;
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
int curr_eee, new_eee, error = 0;
s32 retval;
@@ -4518,7 +4480,7 @@
}
/* Restart auto-neg */
- ixgbe_init(adapter);
+ ifp->if_init(ifp);
device_printf(dev, "New EEE state: %d\n", new_eee);
@@ -4541,8 +4503,7 @@
| IXGBE_FEATURE_RSS
| IXGBE_FEATURE_MSI
| IXGBE_FEATURE_MSIX
- | IXGBE_FEATURE_LEGACY_IRQ
- | IXGBE_FEATURE_LEGACY_TX;
+ | IXGBE_FEATURE_LEGACY_IRQ;
/* Set capabilities first... */
switch (adapter->hw.mac.type) {
@@ -4613,10 +4574,6 @@
else
device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
}
- /* Legacy (single queue) transmit */
- if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
- ixgbe_enable_legacy_tx)
- adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
/*
* Message Signal Interrupts - Extended (MSI-X)
* Normal MSI is only enabled if MSI-X calls fail.
@@ -4638,222 +4595,6 @@
} /* ixgbe_init_device_features */
/************************************************************************
- * ixgbe_probe - Device identification routine
- *
- * Determines if the driver should be loaded on
- * adapter based on its PCI vendor/device ID.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- ************************************************************************/
-static int
-ixgbe_probe(device_t dev)
-{
- ixgbe_vendor_info_t *ent;
-
- u16 pci_vendor_id = 0;
- u16 pci_device_id = 0;
- u16 pci_subvendor_id = 0;
- u16 pci_subdevice_id = 0;
- char adapter_name[256];
-
- INIT_DEBUGOUT("ixgbe_probe: begin");
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixgbe_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(adapter_name, "%s, Version - %s",
- ixgbe_strings[ent->index],
- ixgbe_driver_version);
- device_set_desc_copy(dev, adapter_name);
- ++ixgbe_total_ports;
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
-
- return (ENXIO);
-} /* ixgbe_probe */
-
-
-/************************************************************************
- * ixgbe_ioctl - Ioctl entry point
- *
- * Called when the user wants to configure the interface.
- *
- * return 0 on success, positive on failure
- ************************************************************************/
-static int
-ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *) data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
-#endif
- int error = 0;
- bool avoid_reset = FALSE;
-
- switch (command) {
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
- /*
- * Calling init results in link renegotiation,
- * so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixgbe_init(adapter);
-#ifdef INET
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
-#endif
- } else
- error = ether_ioctl(ifp, command, data);
- break;
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
- error = EINVAL;
- } else {
- IXGBE_CORE_LOCK(adapter);
- ifp->if_mtu = ifr->ifr_mtu;
- adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixgbe_init_locked(adapter);
- ixgbe_recalculate_max_frame(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXGBE_CORE_LOCK(adapter);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if ((ifp->if_flags ^ adapter->if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- ixgbe_set_promisc(adapter);
- }
- } else
- ixgbe_init_locked(adapter);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixgbe_stop(adapter);
- adapter->if_flags = ifp->if_flags;
- IXGBE_CORE_UNLOCK(adapter);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixgbe_disable_intr(adapter);
- ixgbe_set_multi(adapter);
- ixgbe_enable_intr(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
- break;
- case SIOCSIFCAP:
- {
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
-
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
-
- if (!mask)
- break;
-
- /* HW cannot turn these on/off separately */
- if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- ifp->if_capenable ^= IFCAP_RXCSUM;
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- }
- if (mask & IFCAP_TXCSUM)
- ifp->if_capenable ^= IFCAP_TXCSUM;
- if (mask & IFCAP_TXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
- if (mask & IFCAP_TSO4)
- ifp->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_TSO6)
- ifp->if_capenable ^= IFCAP_TSO6;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- VLAN_CAPABILITIES(ifp);
- break;
- }
-#if __FreeBSD_version >= 1100036
- case SIOCGI2C:
- {
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifi2creq i2c;
- int i;
-
- IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
- error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
- if (error != 0)
- break;
- if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
- error = EINVAL;
- break;
- }
- if (i2c.len > sizeof(i2c.data)) {
- error = EINVAL;
- break;
- }
-
- for (i = 0; i < i2c.len; i++)
- hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
- i2c.dev_addr, &i2c.data[i]);
- error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
- break;
- }
-#endif
- default:
- IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-} /* ixgbe_ioctl */
-
-/************************************************************************
* ixgbe_check_fan_failure
************************************************************************/
static void
@@ -4868,406 +4609,3 @@
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */
-/************************************************************************
- * ixgbe_handle_que
- ************************************************************************/
-static void
-ixgbe_handle_que(void *context, int pending)
-{
- struct ix_queue *que = context;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct ifnet *ifp = adapter->ifp;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ixgbe_rxeof(que);
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
- if (!ixgbe_ring_empty(ifp, txr->br))
- ixgbe_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
- }
-
- /* Re-enable this interrupt */
- if (que->res != NULL)
- ixgbe_enable_queue(adapter, que->msix);
- else
- ixgbe_enable_intr(adapter);
-
- return;
-} /* ixgbe_handle_que */
-
-
-
-/************************************************************************
- * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
- ************************************************************************/
-static int
-ixgbe_allocate_legacy(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- struct tx_ring *txr = adapter->tx_rings;
- int error;
-
- /* We allocate a single interrupt resource */
- adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res == NULL) {
- device_printf(dev,
- "Unable to allocate bus resource: interrupt\n");
- return (ENXIO);
- }
-
- /*
- * Try allocating a fast interrupt and the associated deferred
- * processing contexts.
- */
- if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
- TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
- TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
- que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
- device_get_nameunit(adapter->dev));
-
- /* Tasklets for Link, SFP and Multispeed Fiber */
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
- TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
- TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
- TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
- if (adapter->feat_en & IXGBE_FEATURE_FDIR)
- TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
- adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
- device_get_nameunit(adapter->dev));
-
- if ((error = bus_setup_intr(dev, adapter->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
- &adapter->tag)) != 0) {
- device_printf(dev,
- "Failed to register fast interrupt handler: %d\n", error);
- taskqueue_free(que->tq);
- taskqueue_free(adapter->tq);
- que->tq = NULL;
- adapter->tq = NULL;
-
- return (error);
- }
- /* For simplicity in the handlers */
- adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
-
- return (0);
-} /* ixgbe_allocate_legacy */
-
-
-/************************************************************************
- * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
- ************************************************************************/
-static int
-ixgbe_allocate_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- struct tx_ring *txr = adapter->tx_rings;
- int error, rid, vector = 0;
- int cpu_id = 0;
- unsigned int rss_buckets = 0;
- cpuset_t cpu_mask;
-
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- rss_buckets = rss_getnumbuckets();
- if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
- (adapter->num_queues != rss_buckets)) {
- device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
- __func__, adapter->num_queues, rss_buckets);
- }
-
- for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
- rid = vector + 1;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
- vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
- &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register QUE handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "q%d", i);
-#endif
- que->msix = vector;
- adapter->active_queues |= (u64)(1 << que->msix);
-
- if (adapter->feat_en & IXGBE_FEATURE_RSS) {
- /*
- * The queue ID is used as the RSS layer bucket ID.
- * We look up the queue ID -> RSS CPU ID and select
- * that.
- */
- cpu_id = rss_getcpu(i % rss_buckets);
- CPU_SETOF(cpu_id, &cpu_mask);
- } else {
- /*
- * Bind the MSI-X vector, and thus the
- * rings to the corresponding CPU.
- *
- * This just happens to match the default RSS
- * round-robin bucket -> queue -> CPU allocation.
- */
- if (adapter->num_queues > 1)
- cpu_id = i;
- }
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, que->res, cpu_id);
-#ifdef IXGBE_DEBUG
- if (adapter->feat_en & IXGBE_FEATURE_RSS)
- device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
- cpu_id);
- else
- device_printf(dev, "Bound queue %d to cpu %d\n", i,
- cpu_id);
-#endif /* IXGBE_DEBUG */
-
-
- if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
- TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
- txr);
- TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
- que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#if __FreeBSD_version < 1100000
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
- device_get_nameunit(adapter->dev), i);
-#else
- if (adapter->feat_en & IXGBE_FEATURE_RSS)
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(adapter->dev), cpu_id);
- else
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- NULL, "%s:q%d", device_get_nameunit(adapter->dev),
- i);
-#endif
- }
-
- /* and Link */
- adapter->link_rid = vector + 1;
- adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res) {
- device_printf(dev,
- "Unable to allocate bus resource: Link interrupt [%d]\n",
- adapter->link_rid);
- return (ENXIO);
- }
- /* Set the link handler function */
- error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
- NULL, ixgbe_msix_link, adapter, &adapter->tag);
- if (error) {
- adapter->res = NULL;
- device_printf(dev, "Failed to register LINK handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, adapter->res, adapter->tag, "link");
-#endif
- adapter->vector = vector;
- /* Tasklets for Link, SFP and Multispeed Fiber */
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
- TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
- TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
- if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
- TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
- TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
- if (adapter->feat_en & IXGBE_FEATURE_FDIR)
- TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
- adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
- device_get_nameunit(adapter->dev));
-
- return (0);
-} /* ixgbe_allocate_msix */
-
-/************************************************************************
- * ixgbe_configure_interrupts
- *
- * Setup MSI-X, MSI, or legacy interrupts (in that order).
- * This will also depend on user settings.
- ************************************************************************/
-static int
-ixgbe_configure_interrupts(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int rid, want, queues, msgs;
-
- /* Default to 1 queue if MSI-X setup fails */
- adapter->num_queues = 1;
-
- /* Override by tuneable */
- if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
- goto msi;
-
- /* First try MSI-X */
- msgs = pci_msix_count(dev);
- if (msgs == 0)
- goto msi;
- rid = PCIR_BAR(MSIX_82598_BAR);
- adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (adapter->msix_mem == NULL) {
- rid += 4; /* 82599 maps in higher BAR */
- adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
- }
- if (adapter->msix_mem == NULL) {
- /* May not be enabled */
- device_printf(adapter->dev, "Unable to map MSI-X table.\n");
- goto msi;
- }
-
- /* Figure out a reasonable auto config value */
- queues = min(mp_ncpus, msgs - 1);
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (adapter->feat_en & IXGBE_FEATURE_RSS)
- queues = min(queues, rss_getnumbuckets());
- if (ixgbe_num_queues > queues) {
- device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
- ixgbe_num_queues = queues;
- }
-
- if (ixgbe_num_queues != 0)
- queues = ixgbe_num_queues;
- /* Set max queues to 8 when autoconfiguring */
- else
- queues = min(queues, 8);
-
- /* reflect correct sysctl value */
- ixgbe_num_queues = queues;
-
- /*
- * Want one vector (RX/TX pair) per queue
- * plus an additional for Link.
- */
- want = queues + 1;
- if (msgs >= want)
- msgs = want;
- else {
- device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
- msgs, want);
- goto msi;
- }
- if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
- device_printf(adapter->dev,
- "Using MSI-X interrupts with %d vectors\n", msgs);
- adapter->num_queues = queues;
- adapter->feat_en |= IXGBE_FEATURE_MSIX;
- return (0);
- }
- /*
- * MSI-X allocation failed or provided us with
- * less vectors than needed. Free MSI-X resources
- * and we'll try enabling MSI.
- */
- pci_release_msi(dev);
-
-msi:
- /* Without MSI-X, some features are no longer supported */
- adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
- adapter->feat_en &= ~IXGBE_FEATURE_RSS;
- adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
- adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
-
- if (adapter->msix_mem != NULL) {
- bus_release_resource(dev, SYS_RES_MEMORY, rid,
- adapter->msix_mem);
- adapter->msix_mem = NULL;
- }
- msgs = 1;
- if (pci_alloc_msi(dev, &msgs) == 0) {
- adapter->feat_en |= IXGBE_FEATURE_MSI;
- adapter->link_rid = 1;
- device_printf(adapter->dev, "Using an MSI interrupt\n");
- return (0);
- }
-
- if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
- device_printf(adapter->dev,
- "Device does not support legacy interrupts.\n");
- return 1;
- }
-
- adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
- adapter->link_rid = 0;
- device_printf(adapter->dev, "Using a Legacy interrupt\n");
-
- return (0);
-} /* ixgbe_configure_interrupts */
-
-
-/************************************************************************
- * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
- *
- * Done outside of interrupt context since the driver might sleep
- ************************************************************************/
-static void
-ixgbe_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ixgbe_hw *hw = &adapter->hw;
-
- ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
- ixgbe_update_link_status(adapter);
-
- /* Re-enable link interrupts */
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
-} /* ixgbe_handle_link */
-
-/************************************************************************
- * ixgbe_rearm_queues
- ************************************************************************/
-static void
-ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
-{
- u32 mask;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- mask = (IXGBE_EIMS_RTX_QUEUE & queues);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_X550EM_a:
- mask = (queues & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
- mask = (queues >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
- break;
- default:
- break;
- }
-} /* ixgbe_rearm_queues */
-
Index: sys/dev/ixgbe/if_ixv.c
===================================================================
--- sys/dev/ixgbe/if_ixv.c
+++ sys/dev/ixgbe/if_ixv.c
@@ -33,12 +33,14 @@
/*$FreeBSD$*/
-#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
-#endif
#include "ixgbe.h"
+#include "ifdi_if.h"
+
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
/************************************************************************
* Driver version
@@ -54,89 +56,86 @@
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
************************************************************************/
-static ixgbe_vendor_info_t ixv_vendor_info_array[] =
+static pci_vendor_info_t ixv_vendor_info_array[] =
{
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
/* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/************************************************************************
- * Table of branding strings
- ************************************************************************/
-static char *ixv_strings[] = {
- "Intel(R) PRO/10GbE Virtual Function Network Driver"
+PVID_END
};
/************************************************************************
* Function prototypes
************************************************************************/
-static int ixv_probe(device_t);
-static int ixv_attach(device_t);
-static int ixv_detach(device_t);
-static int ixv_shutdown(device_t);
-static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixv_init(void *);
-static void ixv_init_locked(struct adapter *);
-static void ixv_stop(void *);
-static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
+static void *ixv_register(device_t dev);
+static int ixv_if_attach_pre(if_ctx_t ctx);
+static int ixv_if_attach_post(if_ctx_t ctx);
+static int ixv_if_detach(if_ctx_t ctx);
+
+static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
+static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+static void ixv_if_queues_free(if_ctx_t ctx);
+static void ixv_identify_hardware(if_ctx_t ctx);
static void ixv_init_device_features(struct adapter *);
-static void ixv_media_status(struct ifnet *, struct ifmediareq *);
-static int ixv_media_change(struct ifnet *);
-static int ixv_allocate_pci_resources(struct adapter *);
-static int ixv_allocate_msix(struct adapter *);
-static int ixv_configure_interrupts(struct adapter *);
-static void ixv_free_pci_resources(struct adapter *);
-static void ixv_local_timer(void *);
-static void ixv_setup_interface(device_t, struct adapter *);
+static int ixv_allocate_pci_resources(if_ctx_t ctx);
+static void ixv_free_pci_resources(if_ctx_t ctx);
+static int ixv_setup_interface(if_ctx_t ctx);
+static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
+static int ixv_if_media_change(if_ctx_t ctx);
+static void ixv_if_update_admin_status(if_ctx_t ctx);
+static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
+
+static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void ixv_if_init(if_ctx_t ctx);
+static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
+static void ixv_if_stop(if_ctx_t ctx);
static int ixv_negotiate_api(struct adapter *);
-static void ixv_initialize_transmit_units(struct adapter *);
-static void ixv_initialize_receive_units(struct adapter *);
+static void ixv_initialize_transmit_units(if_ctx_t ctx);
+static void ixv_initialize_receive_units(if_ctx_t ctx);
static void ixv_initialize_rss_mapping(struct adapter *);
-static void ixv_check_link(struct adapter *);
-static void ixv_enable_intr(struct adapter *);
-static void ixv_disable_intr(struct adapter *);
-static void ixv_set_multi(struct adapter *);
-static void ixv_update_link_status(struct adapter *);
-static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
-static void ixv_set_ivar(struct adapter *, u8, u8, s8);
+static void ixv_setup_vlan_support(if_ctx_t ctx);
static void ixv_configure_ivars(struct adapter *);
-static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+static void ixv_if_enable_intr(if_ctx_t ctx);
+static void ixv_if_disable_intr(if_ctx_t ctx);
+static void ixv_if_multi_set(if_ctx_t ctx);
-static void ixv_setup_vlan_support(struct adapter *);
-static void ixv_register_vlan(void *, struct ifnet *, u16);
-static void ixv_unregister_vlan(void *, struct ifnet *, u16);
+static void ixv_if_register_vlan(if_ctx_t, u16);
+static void ixv_if_unregister_vlan(if_ctx_t, u16);
+
+static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
-static void ixv_add_stats_sysctls(struct adapter *);
+static void ixv_add_stats_sysctls(struct adapter *);
+
+static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
+static void ixv_set_ivar(struct adapter *, u8, u8, s8);
+
+static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void ixv_set_sysctl_value(struct adapter *, const char *,
const char *, int *, int);
/* The MSI-X Interrupt handlers */
-static void ixv_msix_que(void *);
-static void ixv_msix_mbx(void *);
-
-/* Deferred interrupt tasklets */
-static void ixv_handle_que(void *, int);
-static void ixv_handle_link(void *, int);
+static int ixv_msix_que(void *);
+static int ixv_msix_mbx(void *);
/************************************************************************
* FreeBSD Device Interface Entry Points
************************************************************************/
static device_method_t ixv_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ixv_probe),
- DEVMETHOD(device_attach, ixv_attach),
- DEVMETHOD(device_detach, ixv_detach),
- DEVMETHOD(device_shutdown, ixv_shutdown),
+ DEVMETHOD(device_register, ixv_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
DEVMETHOD_END
};
@@ -148,16 +147,44 @@
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
+#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
+static device_method_t ixv_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
+ DEVMETHOD(ifdi_detach, ixv_if_detach),
+ DEVMETHOD(ifdi_init, ixv_if_init),
+ DEVMETHOD(ifdi_stop, ixv_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
+ DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
+ DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
+ DEVMETHOD(ifdi_media_status, ixv_if_media_status),
+ DEVMETHOD(ifdi_media_change, ixv_if_media_change),
+ DEVMETHOD(ifdi_timer, ixv_if_local_timer),
+ DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
+ DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
+ DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
+ DEVMETHOD_END
+};
+
+static driver_t ixv_if_driver = {
+ "ixv_if", ixv_if_methods, sizeof(struct adapter)
+};
/*
* TUNEABLE PARAMETERS:
*/
-/* Number of Queues - do not exceed MSI-X vectors - 1 */
-static int ixv_num_queues = 1;
-TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
-
/*
* AIM: Adaptive Interrupt Moderation
* which means that the interrupt rate
@@ -167,14 +194,6 @@
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
-/* How many packets rxeof tries to clean at a time */
-static int ixv_rx_process_limit = 256;
-TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
-
-/* How many packets txeof tries to clean at a time */
-static int ixv_tx_process_limit = 256;
-TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
-
/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
@@ -189,79 +208,189 @@
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
- * Number of TX descriptors per ring,
- * setting higher than RX as this seems
- * the better performing choice.
- */
-static int ixv_txd = DEFAULT_TXD;
-TUNABLE_INT("hw.ixv.txd", &ixv_txd);
-
-/* Number of RX descriptors per ring */
-static int ixv_rxd = DEFAULT_RXD;
-TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
-
-/* Legacy Transmit (single queue) */
-static int ixv_enable_legacy_tx = 0;
-TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
-
-/*
* Shadow VFTA table, this is needed because
* the real filter table gets cleared during
* a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
+extern struct if_txrx ixgbe_txrx;
+
+static struct if_shared_ctx ixv_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
+ .isc_tx_maxsize = IXGBE_TSO_SIZE,
+
+ .isc_tx_maxsegsize = PAGE_SIZE,
+
+ .isc_rx_maxsize = MJUM16BYTES,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = MJUM16BYTES,
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ixv_vendor_info_array,
+ .isc_driver_version = ixv_driver_version,
+ .isc_driver = &ixv_if_driver,
+
+ .isc_nrxd_min = {MIN_RXD},
+ .isc_ntxd_min = {MIN_TXD},
+ .isc_nrxd_max = {MAX_RXD},
+ .isc_ntxd_max = {MAX_TXD},
+ .isc_nrxd_default = {DEFAULT_RXD},
+ .isc_ntxd_default = {DEFAULT_TXD},
+};
-static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
-static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
+if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
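
The static if_shared_ctx template replaces the ring-size tunables deleted above; iflib consumes it at register/probe time. A comment-only sketch of what stock iflib derives from these fields (assumptions, not spelled out in this diff):

	/*
	 * isc_n[tr]xd_{min,max,default}[0] -> descriptor counts, clamped by iflib
	 * isc_admin_intrcnt                -> one extra MSI-X vector (mailbox/link)
	 * isc_vendor_info                  -> PVID table walked by iflib_device_probe
	 */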
+
+static void *
+ixv_register(device_t dev)
+{
+ return (ixv_sctx);
+}
/************************************************************************
- * ixv_probe - Device identification routine
- *
- * Determines if the driver should be loaded on
- * adapter based on its PCI vendor/device ID.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
+ * ixv_if_tx_queues_alloc
************************************************************************/
static int
-ixv_probe(device_t dev)
+ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
{
- ixgbe_vendor_info_t *ent;
- u16 pci_vendor_id = 0;
- u16 pci_device_id = 0;
- u16 pci_subvendor_id = 0;
- u16 pci_subdevice_id = 0;
- char adapter_name[256];
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ix_tx_queue *que;
+ int i, j, error;
+
+ MPASS(adapter->num_tx_queues == ntxqsets);
+ MPASS(ntxqs == 1);
+
+ /* Allocate queue structure memory */
+ adapter->tx_queues =
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!adapter->tx_queues) {
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate TX ring memory\n");
+ return (ENOMEM);
+ }
+ for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
+ struct tx_ring *txr = &que->txr;
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
- return (ENXIO);
+ txr->me = i;
+ txr->adapter = que->adapter = adapter;
+ adapter->active_queues |= (u64)1 << txr->me;
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixv_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(adapter_name, "%s, Version - %s",
- ixv_strings[ent->index], ixv_driver_version);
- device_set_desc_copy(dev, adapter_name);
- return (BUS_PROBE_DEFAULT);
+ /* Allocate report status array */
+ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ error = ENOMEM;
+ goto fail;
}
- ent++;
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ /* get the virtual and physical address of the hardware queues */
+ txr->tail = IXGBE_VFTDT(txr->me);
+ txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
+ txr->tx_paddr = paddrs[i*ntxqs];
+
+ txr->bytes = 0;
+ txr->total_packets = 0;
}
- return (ENXIO);
-} /* ixv_probe */
+ device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
+ adapter->num_tx_queues);
+
+ return (0);
+
+fail:
+ ixv_if_queues_free(ctx);
+
+ return (error);
+} /* ixv_if_tx_queues_alloc */
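
iflib allocates the descriptor DMA itself and hands over flat vaddrs/paddrs arrays: with ntxqs hardware rings per queue set, ring j of set i lives at index i * ntxqs + j (here ntxqs == 1, so the stride collapses). The general form of the indexing, with `ring` a hypothetical per-set ring array:

	/* sketch: general layout; this driver uses ntxqs == 1 */
	for (j = 0; j < ntxqs; j++) {
		ring[j].tx_base  = (union ixgbe_adv_tx_desc *)vaddrs[i * ntxqs + j];
		ring[j].tx_paddr = paddrs[i * ntxqs + j];
	}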
+
+/************************************************************************
+ * ixv_if_rx_queues_alloc
+ ************************************************************************/
+static int
+ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que;
+ int i, error;
+
+ MPASS(adapter->num_rx_queues == nrxqsets);
+ MPASS(nrxqs == 1);
+
+ /* Allocate queue structure memory */
+ adapter->rx_queues =
+ (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!adapter->rx_queues) {
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate TX ring memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+ rxr->me = i;
+ rxr->adapter = que->adapter = adapter;
+
+ /* get the virtual and physical address of the hw queues */
+ rxr->tail = IXGBE_VFRDT(rxr->me);
+ rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i*nrxqs];
+ rxr->rx_paddr = paddrs[i*nrxqs];
+ rxr->bytes = 0;
+ rxr->que = que;
+ }
+
+ device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
+ adapter->num_rx_queues);
+
+ return (0);
+
+fail:
+ ixv_if_queues_free(ctx);
+
+ return (error);
+} /* ixv_if_rx_queues_alloc */
+
+/************************************************************************
+ * ixv_if_queues_free
+ ************************************************************************/
+static void
+ixv_if_queues_free(if_ctx_t ctx)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_tx_queue *que = adapter->tx_queues;
+ int i;
+
+ if (que == NULL)
+ goto free;
+
+ for (i = 0; i < adapter->num_tx_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ if (txr->tx_rsq == NULL)
+ break;
+
+ free(txr->tx_rsq, M_DEVBUF);
+ txr->tx_rsq = NULL;
+ }
+ if (adapter->tx_queues != NULL)
+ free(adapter->tx_queues, M_DEVBUF);
+free:
+ if (adapter->rx_queues != NULL)
+ free(adapter->rx_queues, M_DEVBUF);
+ adapter->tx_queues = NULL;
+ adapter->rx_queues = NULL;
+} /* ixv_if_queues_free */
/************************************************************************
- * ixv_attach - Device initialization routine
+ * ixv_if_attach_pre - Device initialization routine
*
* Called when the driver is being loaded.
* Identifies the type of hardware, allocates all resources
@@ -270,34 +399,28 @@
* return 0 on success, positive on failure
************************************************************************/
static int
-ixv_attach(device_t dev)
+ixv_if_attach_pre(if_ctx_t ctx)
{
struct adapter *adapter;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
int error = 0;
INIT_DEBUGOUT("ixv_attach: begin");
- /*
- * Make sure BUSMASTER is set, on a VM under
- * KVM it may not be and will break things.
- */
- pci_enable_busmaster(dev);
-
/* Allocate, clear, and link in our adapter structure */
- adapter = device_get_softc(dev);
+ dev = iflib_get_dev(ctx);
+ adapter = iflib_get_softc(ctx);
adapter->dev = dev;
+ adapter->ctx = ctx;
adapter->hw.back = adapter;
+ scctx = adapter->shared = iflib_get_softc_ctx(ctx);
+ adapter->media = iflib_get_media(ctx);
hw = &adapter->hw;
- adapter->init_locked = ixv_init_locked;
- adapter->stop_locked = ixv_stop;
-
- /* Core Lock Init*/
- IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
/* Do base PCI setup - map BAR0 */
- if (ixv_allocate_pci_resources(adapter)) {
+ if (ixv_allocate_pci_resources(ctx)) {
device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
error = ENXIO;
goto err_out;
@@ -314,41 +437,8 @@
"enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
"Interrupt Moderation");
- /* Set up the timer callout */
- callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
- /* Save off the information about this board */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_get_revid(dev);
- hw->subsystem_vendor_id = pci_get_subvendor(dev);
- hw->subsystem_device_id = pci_get_subdevice(dev);
-
- /* A subset of set_mac_type */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_VF:
- hw->mac.type = ixgbe_mac_82599_vf;
- break;
- case IXGBE_DEV_ID_X540_VF:
- hw->mac.type = ixgbe_mac_X540_vf;
- break;
- case IXGBE_DEV_ID_X550_VF:
- hw->mac.type = ixgbe_mac_X550_vf;
- break;
- case IXGBE_DEV_ID_X550EM_X_VF:
- hw->mac.type = ixgbe_mac_X550EM_x_vf;
- break;
- case IXGBE_DEV_ID_X550EM_A_VF:
- hw->mac.type = ixgbe_mac_X550EM_a_vf;
- break;
- default:
- /* Shouldn't get here since probe succeeded */
- device_printf(dev, "Unknown device ID!\n");
- error = ENXIO;
- goto err_out;
- break;
- }
-
+ /* Determine hardware revision */
+ ixv_identify_hardware(ctx);
ixv_init_device_features(adapter);
/* Initialize the shared code */
@@ -362,9 +452,6 @@
/* Setup the mailbox */
ixgbe_init_mbx_params_vf(hw);
- /* Set the right number of segments */
- adapter->num_segs = IXGBE_82599_SCATTER;
-
error = hw->mac.ops.reset_hw(hw);
if (error == IXGBE_ERR_RESET_FAILED)
device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
@@ -402,55 +489,59 @@
bcopy(addr, hw->mac.perm_addr, sizeof(addr));
}
- /* Register for VLAN events */
- adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
- adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
- /* Sysctls for limiting the amount of work done in the taskqueues */
- ixv_set_sysctl_value(adapter, "rx_processing_limit",
- "max number of rx packets to process",
- &adapter->rx_process_limit, ixv_rx_process_limit);
-
- ixv_set_sysctl_value(adapter, "tx_processing_limit",
- "max number of tx packets to process",
- &adapter->tx_process_limit, ixv_tx_process_limit);
-
- /* Do descriptor calc and sanity checks */
- if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
- ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
- device_printf(dev, "TXD config issue, using default!\n");
- adapter->num_tx_desc = DEFAULT_TXD;
- } else
- adapter->num_tx_desc = ixv_txd;
-
- if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
- ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
- device_printf(dev, "RXD config issue, using default!\n");
- adapter->num_rx_desc = DEFAULT_RXD;
- } else
- adapter->num_rx_desc = ixv_rxd;
-
- /* Setup MSI-X */
- error = ixv_configure_interrupts(adapter);
- if (error)
- goto err_out;
+ /* Most of the iflib initialization... */
+
+ iflib_set_mac(ctx, hw->mac.addr);
+ scctx->isc_txqsizes[0] =
+ roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
+ sizeof(u32), DBA_ALIGN);
+ scctx->isc_rxqsizes[0] =
+ roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
+ DBA_ALIGN);
+ /* XXX */
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
+ scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
+ CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
+ scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
+ scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
+ scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
+ scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
+
+ scctx->isc_txrx = &ixgbe_txrx;
- /* Allocate our TX/RX Queues */
- if (ixgbe_allocate_queues(adapter)) {
- device_printf(dev, "ixgbe_allocate_queues() failed!\n");
- error = ENOMEM;
- goto err_out;
- }
+ /*
+ * Tell the upper layer(s) we support everything the PF
+ * driver does except...
+ * hardware stats
+ * VLAN tag filtering
+ * Wake-on-LAN
+ */
+ scctx->isc_capenable = IXGBE_CAPS;
+ scctx->isc_capenable ^= IFCAP_HWSTATS | IFCAP_VLAN_HWFILTER | IFCAP_WOL;
- /* Setup OS specific network interface */
- ixv_setup_interface(dev, adapter);
+ INIT_DEBUGOUT("ixv_if_attach_pre: end");
+
+ return (0);
+
+err_out:
+ ixv_free_pci_resources(ctx);
- error = ixv_allocate_msix(adapter);
+ return (error);
+} /* ixv_if_attach_pre */
+
+static int
+ixv_if_attach_post(if_ctx_t ctx)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ int error = 0;
+
+ /* Setup OS specific network interface */
+ error = ixv_setup_interface(ctx);
if (error) {
- device_printf(dev, "ixv_allocate_msix() failed!\n");
- goto err_late;
+ device_printf(dev, "Interface setup failed: %d\n", error);
+ goto end;
}
/* Do the stats setup */
@@ -458,23 +549,9 @@
ixv_init_stats(adapter);
ixv_add_stats_sysctls(adapter);
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
- ixgbe_netmap_attach(adapter);
-
- INIT_DEBUGOUT("ixv_attach: end");
-
- return (0);
-
-err_late:
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
- free(adapter->queues, M_DEVBUF);
-err_out:
- ixv_free_pci_resources(adapter);
- IXGBE_CORE_LOCK_DESTROY(adapter);
-
- return (error);
-} /* ixv_attach */
+end:
+ return (error);
+} /* ixv_if_attach_post */
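The attach path is now split in two: ixv_if_attach_pre does everything up through describing the queues to iflib, and ixv_if_attach_post runs after iflib has allocated them. Both are expected to be wired up through an iflib device-method table elsewhere in this diff; a minimal sketch, assuming the conventional ifdi_* method names (the actual table in the diff may carry more entries):

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD_END
};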
/************************************************************************
* ixv_detach - Device removal routine
@@ -486,65 +563,38 @@
* return 0 on success, positive on failure
************************************************************************/
static int
-ixv_detach(device_t dev)
+ixv_if_detach(if_ctx_t ctx)
{
- struct adapter *adapter = device_get_softc(dev);
- struct ix_queue *que = adapter->queues;
-
INIT_DEBUGOUT("ixv_detach: begin");
- /* Make sure VLANS are not using driver */
- if (adapter->ifp->if_vlantrunk != NULL) {
- device_printf(dev, "Vlan in use, detach first\n");
- return (EBUSY);
- }
-
- ether_ifdetach(adapter->ifp);
- IXGBE_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- if (que->tq) {
- struct tx_ring *txr = que->txr;
- taskqueue_drain(que->tq, &txr->txq_task);
- taskqueue_drain(que->tq, &que->que_task);
- taskqueue_free(que->tq);
- }
- }
-
- /* Drain the Mailbox(link) queue */
- if (adapter->tq) {
- taskqueue_drain(adapter->tq, &adapter->link_task);
- taskqueue_free(adapter->tq);
- }
-
- /* Unregister VLAN events */
- if (adapter->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
- if (adapter->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
- callout_drain(&adapter->timer);
+ ixv_free_pci_resources(ctx);
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
- netmap_detach(adapter->ifp);
-
- ixv_free_pci_resources(adapter);
- bus_generic_detach(dev);
- if_free(adapter->ifp);
+ return (0);
+} /* ixv_if_detach */
- ixgbe_free_transmit_structures(adapter);
- ixgbe_free_receive_structures(adapter);
- free(adapter->queues, M_DEVBUF);
+/************************************************************************
+ * ixv_if_mtu_set
+ ************************************************************************/
+static int
+ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ int error = 0;
- IXGBE_CORE_LOCK_DESTROY(adapter);
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = mtu;
+ adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
+ }
- return (0);
-} /* ixv_detach */
+ return (error);
+} /* ixv_if_mtu_set */
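The bound above is the wire limit less Ethernet overhead. Assuming the usual ixgbe.h values (IXGBE_MAX_FRAME_SIZE = 9728 and IXGBE_MTU_HDR = ETHER_HDR_LEN + ETHER_CRC_LEN = 18 -- see ixgbe.h for the authoritative definitions), the largest MTU this accepts works out to:

	/* assumed constants, for illustration */
	max_mtu = IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR
	        = 9728 - 18
	        = 9710 bytes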
/************************************************************************
- * ixv_init_locked - Init entry point
+ * ixv_if_init - Init entry point
*
* Used in two ways: It is used by the stack as an init entry
 * point in the network interface structure. It is also used
@@ -553,48 +603,39 @@
*
* return 0 on success, positive on failure
************************************************************************/
-void
-ixv_init_locked(struct adapter *adapter)
+static void
+ixv_if_init(if_ctx_t ctx)
{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &adapter->hw;
int error = 0;
- INIT_DEBUGOUT("ixv_init_locked: begin");
- mtx_assert(&adapter->core_mtx, MA_OWNED);
+ INIT_DEBUGOUT("ixv_if_init: begin");
hw->adapter_stopped = FALSE;
hw->mac.ops.stop_adapter(hw);
- callout_stop(&adapter->timer);
/* reprogram the RAR[0] in case user changed it. */
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
/* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
- IXGBE_ETH_LENGTH_OF_ADDRESS);
+ bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
- /* Prepare transmit descriptors and buffers */
- if (ixgbe_setup_transmit_structures(adapter)) {
- device_printf(dev, "Could not setup transmit structures\n");
- ixv_stop(adapter);
- return;
- }
-
/* Reset VF and renegotiate mailbox API version */
hw->mac.ops.reset_hw(hw);
error = ixv_negotiate_api(adapter);
if (error) {
device_printf(dev,
- "Mailbox API negotiation failed in init_locked!\n");
+ "Mailbox API negotiation failed in if_init!\n");
return;
}
- ixv_initialize_transmit_units(adapter);
+ ixv_initialize_transmit_units(ctx);
/* Setup Multicast table */
- ixv_set_multi(adapter);
+ ixv_if_multi_set(ctx);
/*
* Determine the correct mbuf pool
@@ -605,29 +646,11 @@
else
adapter->rx_mbuf_sz = MCLBYTES;
- /* Prepare receive descriptors and buffers */
- if (ixgbe_setup_receive_structures(adapter)) {
- device_printf(dev, "Could not setup receive structures\n");
- ixv_stop(adapter);
- return;
- }
-
/* Configure RX settings */
- ixv_initialize_receive_units(adapter);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
+ ixv_initialize_receive_units(ctx);
/* Set up VLAN offload and filter */
- ixv_setup_vlan_support(adapter);
+ ixv_setup_vlan_support(ctx);
/* Set up MSI-X routing */
ixv_configure_ivars(adapter);
@@ -645,23 +668,19 @@
hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
FALSE);
- /* Start watchdog */
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
-
/* And now turn on interrupts */
- ixv_enable_intr(adapter);
+ ixv_if_enable_intr(ctx);
/* Now inform the stack we're ready */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
return;
-} /* ixv_init_locked */
-
-/*
- * MSI-X Interrupt Handlers and Tasklets
- */
+} /* ixv_if_init */
+/************************************************************************
+ * ixv_enable_queue
+ ************************************************************************/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
@@ -673,6 +692,9 @@
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */
+/************************************************************************
+ * ixv_disable_queue
+ ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
@@ -684,44 +706,24 @@
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */
-static inline void
-ixv_rearm_queues(struct adapter *adapter, u64 queues)
-{
- u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
-} /* ixv_rearm_queues */
-
/************************************************************************
- * ixv_msix_que - MSI Queue Interrupt Service routine
+ * ixv_msix_que - MSI-X Queue Interrupt Service routine
************************************************************************/
-void
+static int
ixv_msix_que(void *arg)
{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = que->txr;
- struct rx_ring *rxr = que->rxr;
- bool more;
- u32 newitr = 0;
+ struct ix_rx_queue *que = arg;
+ struct adapter *adapter = que->adapter;
+ struct rx_ring *rxr = &que->rxr;
+ u32 newitr = 0;
+#ifdef notyet
+ struct tx_ring *txr = &que->txr;
+#endif
ixv_disable_queue(adapter, que->msix);
++que->irqs;
- more = ixgbe_rxeof(que);
-
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
- /*
- * Make certain that if the stack
- * has anything queued the task gets
- * scheduled to handle it.
- */
- if (!ixv_ring_empty(adapter->ifp, txr->br))
- ixv_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
-
/* Do AIM now? */
if (ixv_enable_aim == FALSE)
@@ -738,12 +740,13 @@
que->eitr_setting = 0;
- /* Idle, do nothing */
- if ((txr->bytes == 0) && (rxr->bytes == 0))
- goto no_calc;
-
+#ifdef notyet
if ((txr->bytes) && (txr->packets))
newitr = txr->bytes/txr->packets;
+#endif
+ if (rxr->bytes == 0)
+ goto no_calc;
+
if ((rxr->bytes) && (rxr->packets))
newitr = max(newitr, (rxr->bytes / rxr->packets));
newitr += 24; /* account for hardware frame, crc */
@@ -763,24 +766,22 @@
que->eitr_setting = newitr;
/* Reset state */
+#ifdef notyet
txr->bytes = 0;
txr->packets = 0;
+#endif
rxr->bytes = 0;
rxr->packets = 0;
no_calc:
- if (more)
- taskqueue_enqueue(que->tq, &que->que_task);
- else /* Re-enable this interrupt */
- ixv_enable_queue(adapter, que->msix);
- return;
+ return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */
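To make the AIM arithmetic concrete, here is one interval worked through with illustrative numbers; the final scaling of the average into an EITR value happens in the part of the function elided by this hunk:

/*
 * Suppose the last interval saw 100 frames totalling 150000 bytes
 * on the RX ring:
 *
 *   newitr  = rxr->bytes / rxr->packets = 150000 / 100 = 1500
 *   newitr += 24   (frame + CRC overhead)              -> 1524
 *
 * The elided tail scales this average into an interrupt interval,
 * stores it in que->eitr_setting, and a later pass writes it to
 * the queue's VTEITR register.
 */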
/************************************************************************
* ixv_msix_mbx
************************************************************************/
-static void
+static int
ixv_msix_mbx(void *arg)
{
struct adapter *adapter = arg;
@@ -796,11 +797,11 @@
/* Link status change */
if (reg & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->link_task);
+ iflib_admin_intr_deferred(adapter->ctx);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
- return;
+ return (FILTER_HANDLED);
} /* ixv_msix_mbx */
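The return values follow iflib's fast-interrupt contract: FILTER_HANDLED means the filter finished all work at interrupt level, while FILTER_SCHEDULE_THREAD (see ixv_msix_que above) asks iflib to run the queue's deferred task. A minimal sketch of the pattern, using a hypothetical handler name:

static int
example_msix_filter(void *arg)	/* hypothetical, for illustration */
{
	struct ix_rx_queue *que = arg;

	/* Mask this vector so the deferred task runs undisturbed. */
	ixv_disable_queue(que->adapter, que->msix);
	++que->irqs;

	/* Defer the heavy lifting to iflib's taskqueue context. */
	return (FILTER_SCHEDULE_THREAD);
}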
/************************************************************************
@@ -810,21 +811,19 @@
* the interface using ifconfig.
************************************************************************/
static void
-ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("ixv_media_status: begin");
- IXGBE_CORE_LOCK(adapter);
- ixv_update_link_status(adapter);
+
+ iflib_admin_intr_deferred(ctx);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (!adapter->link_active) {
- IXGBE_CORE_UNLOCK(adapter);
+ if (!adapter->link_active)
return;
- }
ifmr->ifm_status |= IFM_ACTIVE;
@@ -842,23 +841,19 @@
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
-
- IXGBE_CORE_UNLOCK(adapter);
-
- return;
-} /* ixv_media_status */
+} /* ixv_if_media_status */
/************************************************************************
- * ixv_media_change - Media Ioctl callback
+ * ixv_if_media_change - Media Ioctl callback
*
* Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
************************************************************************/
static int
-ixv_media_change(struct ifnet *ifp)
+ixv_if_media_change(if_ctx_t ctx)
{
- struct adapter *adapter = ifp->if_softc;
- struct ifmedia *ifm = &adapter->media;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
INIT_DEBUGOUT("ixv_media_change: begin");
@@ -874,7 +869,7 @@
}
return (0);
-} /* ixv_media_change */
+} /* ixv_if_media_change */
/************************************************************************
@@ -903,26 +898,22 @@
/************************************************************************
- * ixv_set_multi - Multicast Update
+ * ixv_if_multi_set - Multicast Update
*
* Called whenever multicast address list is updated.
************************************************************************/
static void
-ixv_set_multi(struct adapter *adapter)
+ixv_if_multi_set(if_ctx_t ctx)
{
u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ struct adapter *adapter = iflib_get_softc(ctx);
u8 *update_ptr;
struct ifmultiaddr *ifma;
- struct ifnet *ifp = adapter->ifp;
+ if_t ifp = iflib_get_ifp(ctx);
int mcnt = 0;
- IOCTL_DEBUGOUT("ixv_set_multi: begin");
+ IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
@@ -931,19 +922,12 @@
IXGBE_ETH_LENGTH_OF_ADDRESS);
mcnt++;
}
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
update_ptr = mta;
adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
ixv_mc_array_itr, TRUE);
-
- return;
-} /* ixv_set_multi */
+} /* ixv_if_multi_set */
/************************************************************************
* ixv_mc_array_itr
@@ -957,6 +941,7 @@
{
u8 *addr = *update_ptr;
u8 *newptr;
+
*vmdq = 0;
newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
@@ -966,157 +951,197 @@
} /* ixv_mc_array_itr */
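update_mc_addr_list() consumes the flat mta[] array by calling this iterator once per entry; each call hands back the current address and advances *update_ptr by one 6-byte step. Roughly, the walk the shared code performs (a sketch, not the shared-code source):

	u8 *ptr = mta;
	u32 vmdq;

	for (int i = 0; i < mcnt; i++) {
		u8 *addr = ixv_mc_array_itr(&adapter->hw, &ptr, &vmdq);
		/* program 'addr' into the next multicast filter slot */
	}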
/************************************************************************
- * ixv_local_timer - Timer routine
+ * ixv_if_local_timer - Timer routine
*
* Checks for link status, updates statistics,
* and runs the watchdog check.
************************************************************************/
static void
-ixv_local_timer(void *arg)
+ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
- struct adapter *adapter = arg;
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- u64 queues = 0;
- int hung = 0;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- ixv_check_link(adapter);
-
- /* Stats Update */
- ixv_update_stats(adapter);
-
- /*
- * Check the TX queues status
- * - mark hung queues so we don't schedule on them
- * - watchdog only if all queues show hung
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- /* Keep track of queues with work for soft irq */
- if (que->txr->busy)
- queues |= ((u64)1 << que->me);
- /*
- * Each time txeof runs without cleaning, but there
- * are uncleaned descriptors it increments busy. If
- * we get to the MAX we declare it hung.
- */
- if (que->busy == IXGBE_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- adapter->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
- adapter->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXGBE_MAX_TX_BUSY) {
- device_printf(dev,
- "Warning queue %d appears to be hung!\n", i);
- que->txr->busy = IXGBE_QUEUE_HUNG;
- ++hung;
- }
-
- }
-
- /* Only truly watchdog if all queues show hung */
- if (hung == adapter->num_queues)
- goto watchdog;
- else if (queues != 0) { /* Force an IRQ on queues with work */
- ixv_rearm_queues(adapter, queues);
- }
-
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
-
- return;
-
-watchdog:
+ if (qid != 0)
+ return;
- device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- ixv_init_locked(adapter);
-} /* ixv_local_timer */
+ /* Fire off the adminq task */
+ iflib_admin_intr_deferred(ctx);
+} /* ixv_if_local_timer */
/************************************************************************
- * ixv_update_link_status - Update OS on link state
+ * ixv_if_update_admin_status - Update OS on link state
*
* Note: Only updates the OS on the cached link state.
* The real check of the hardware only happens with
* a link interrupt.
************************************************************************/
static void
-ixv_update_link_status(struct adapter *adapter)
+ixv_if_update_admin_status(if_ctx_t ctx)
{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+
+ adapter->hw.mac.get_link_status = TRUE;
+ ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up,
+ FALSE);
if (adapter->link_up) {
if (adapter->link_active == FALSE) {
if (bootverbose)
- device_printf(dev,"Link is up %d Gbps %s \n",
+ device_printf(dev, "Link is up %d Gbps %s \n",
((adapter->link_speed == 128) ? 10 : 1),
"Full Duplex");
adapter->link_active = TRUE;
- if_link_state_change(ifp, LINK_STATE_UP);
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Gbps(10));
}
} else { /* Link down */
if (adapter->link_active == TRUE) {
if (bootverbose)
- device_printf(dev,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
+ device_printf(dev, "Link is Down\n");
+ iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
adapter->link_active = FALSE;
}
}
- return;
-} /* ixv_update_link_status */
+ /* Stats Update */
+ ixv_update_stats(adapter);
+} /* ixv_if_update_admin_status */
/************************************************************************
- * ixv_stop - Stop the hardware
+ * ixv_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
* global reset on the MAC and deallocates TX/RX buffers.
************************************************************************/
static void
-ixv_stop(void *arg)
+ixv_if_stop(if_ctx_t ctx)
{
- struct ifnet *ifp;
- struct adapter *adapter = arg;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- ifp = adapter->ifp;
+ INIT_DEBUGOUT("ixv_stop: begin\n");
- mtx_assert(&adapter->core_mtx, MA_OWNED);
+ ixv_if_disable_intr(ctx);
- INIT_DEBUGOUT("ixv_stop: begin\n");
- ixv_disable_intr(adapter);
+ hw->mac.ops.reset_hw(hw);
+ adapter->hw.adapter_stopped = FALSE;
+ hw->mac.ops.stop_adapter(hw);
+
+ /* Update the stack */
+ adapter->link_up = FALSE;
+ ixv_if_update_admin_status(ctx);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+} /* ixv_if_stop */
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- hw->mac.ops.reset_hw(hw);
- adapter->hw.adapter_stopped = FALSE;
- hw->mac.ops.stop_adapter(hw);
- callout_stop(&adapter->timer);
+/************************************************************************
+ * ixv_identify_hardware - Determine hardware revision.
+ ************************************************************************/
+static void
+ixv_identify_hardware(if_ctx_t ctx)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_get_revid(dev);
+ hw->subsystem_vendor_id = pci_get_subvendor(dev);
+ hw->subsystem_device_id = pci_get_subdevice(dev);
+
+ /* We need this to determine device-specific things */
+ ixgbe_set_mac_type(hw);
+} /* ixv_identify_hardware */
+
+/************************************************************************
+ * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
+ ************************************************************************/
+static int
+ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ struct ix_rx_queue *rx_que = adapter->rx_queues;
+ struct ix_tx_queue *tx_que;
+ int error, rid, vector = 0;
+ char buf[16];
+
+ for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
+ rid = vector + 1;
+
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
+ IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
+
+ if (error) {
+ device_printf(iflib_get_dev(ctx),
+ "Failed to allocate que int %d err: %d", i, error);
+ adapter->num_rx_queues = i + 1;
+ goto fail;
+ }
+
+ rx_que->msix = vector;
+ adapter->active_queues |= (u64)(1 << rx_que->msix);
+
+ }
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ tx_que = &adapter->tx_queues[i];
+ tx_que->msix = i % adapter->num_rx_queues;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que,
+ tx_que->txr.me, buf);
+ }
+ rid = vector + 1;
+ error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
+ IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
+ if (error) {
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register admin handler");
+ return (error);
+ }
+
+ adapter->vector = vector;
+ /*
+ * Due to a broken design, QEMU will fail to properly
+ * enable the guest for MSI-X unless the vectors in
+ * the table are all set up, so we must rewrite the
+ * ENABLE bit in the MSI-X control register again at
+ * this point to cause it to successfully initialize us.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
+ int msix_ctrl;
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ return (0);
- return;
-} /* ixv_stop */
+fail:
+ iflib_irq_free(ctx, &adapter->irq);
+ rx_que = adapter->rx_queues;
+ for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(ctx, &rx_que->que_irq);
+ return (error);
+} /* ixv_if_msix_intr_assign */
/************************************************************************
* ixv_allocate_pci_resources
************************************************************************/
static int
-ixv_allocate_pci_resources(struct adapter *adapter)
+ixv_allocate_pci_resources(if_ctx_t ctx)
{
- device_t dev = adapter->dev;
- int rid;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
@@ -1132,9 +1157,6 @@
rman_get_bushandle(adapter->pci_mem);
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
- /* Pick up the tuneable queues */
- adapter->num_queues = ixv_num_queues;
-
return (0);
} /* ixv_allocate_pci_resources */
@@ -1142,61 +1164,26 @@
* ixv_free_pci_resources
************************************************************************/
static void
-ixv_free_pci_resources(struct adapter * adapter)
+ixv_free_pci_resources(if_ctx_t ctx)
{
- struct ix_queue *que = adapter->queues;
- device_t dev = adapter->dev;
- int rid, memrid;
-
- memrid = PCIR_BAR(MSIX_82598_BAR);
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que = adapter->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
- /*
- * There is a slight possibility of a failure mode
- * in attach that will result in entering this function
- * before interrupt resources have been initialized, and
- * in that case we do not want to execute the loops below
- * We can detect this reliably by the state of the adapter
- * res pointer.
- */
- if (adapter->res == NULL)
- goto mem;
+ /* Release all msix queue resources */
+ if (adapter->intr_type == IFLIB_INTR_MSIX)
+ iflib_irq_free(ctx, &adapter->irq);
- /*
- * Release all msix queue resources:
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
+ if (que != NULL) {
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
+ iflib_irq_free(ctx, &que->que_irq);
}
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
-
-
- /* Clean the Mailbox interrupt last */
- rid = adapter->vector + 1;
-
- if (adapter->tag != NULL) {
- bus_teardown_intr(dev, adapter->res, adapter->tag);
- adapter->tag = NULL;
}
- if (adapter->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
-
-mem:
- pci_release_msi(dev);
-
- if (adapter->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, memrid,
- adapter->msix_mem);
+ /* Release the BAR0 memory mapping last */
if (adapter->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
- adapter->pci_mem);
-
- return;
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->pci_mem);
} /* ixv_free_pci_resources */
/************************************************************************
@@ -1204,121 +1191,105 @@
*
* Setup networking device structure and register an interface.
************************************************************************/
-static void
-ixv_setup_interface(device_t dev, struct adapter *adapter)
+static int
+ixv_setup_interface(if_ctx_t ctx)
{
- struct ifnet *ifp;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
INIT_DEBUGOUT("ixv_setup_interface: begin");
- ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_baudrate = 1000000000;
- ifp->if_init = ixv_init;
- ifp->if_softc = adapter;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixv_ioctl;
- if_setgetcounterfn(ifp, ixv_get_counter);
- /* TSO parameters */
- ifp->if_hw_tsomax = 65518;
- ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
- ifp->if_hw_tsomaxsegsize = 2048;
- if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
- ifp->if_start = ixgbe_legacy_start;
- ixv_start_locked = ixgbe_legacy_start_locked;
- ixv_ring_empty = ixgbe_legacy_ring_empty;
- } else {
- ifp->if_transmit = ixgbe_mq_start;
- ifp->if_qflush = ixgbe_qflush;
- ixv_start_locked = ixgbe_mq_start_locked;
- ixv_ring_empty = drbr_empty;
- }
- IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
+ if_setbaudrate(ifp, IF_Gbps(10));
+ ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
- ether_ifattach(ifp, adapter->hw.mac.addr);
adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- /* Set capability flags */
- ifp->if_capabilities |= IFCAP_HWCSUM
- | IFCAP_HWCSUM_IPV6
- | IFCAP_TSO
- | IFCAP_LRO
- | IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_HWCSUM
- | IFCAP_JUMBO_MTU
- | IFCAP_VLAN_MTU;
-
- /* Enable the above capabilities by default */
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
- ixv_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
-
- return;
+ return (0);
} /* ixv_setup_interface */
+/************************************************************************
+ * ixv_if_get_counter
+ ************************************************************************/
+static uint64_t
+ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (adapter->ipackets);
+ case IFCOUNTER_OPACKETS:
+ return (adapter->opackets);
+ case IFCOUNTER_IBYTES:
+ return (adapter->ibytes);
+ case IFCOUNTER_OBYTES:
+ return (adapter->obytes);
+ case IFCOUNTER_IMCASTS:
+ return (adapter->imcasts);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+} /* ixv_if_get_counter */
/************************************************************************
* ixv_initialize_transmit_units - Enable transmit unit.
************************************************************************/
static void
-ixv_initialize_transmit_units(struct adapter *adapter)
+ixv_initialize_transmit_units(if_ctx_t ctx)
{
- struct tx_ring *txr = adapter->tx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
-
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
- u32 txctrl, txdctl;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &adapter->hw;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ix_tx_queue *que = adapter->tx_queues;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ u64 tdba = txr->tx_paddr;
+ u32 txctrl, txdctl;
+ int j = txr->me;
/* Set WTHRESH to 8, burst writeback */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
/* Set the HW Tx Head and Tail indices */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
/* Set Tx Tail register */
- txr->tail = IXGBE_VFTDT(i);
+ txr->tail = IXGBE_VFTDT(j);
+
+ txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
+ for (int k = 0; k < scctx->isc_ntxd[0]; k++)
+ txr->tx_rsq[k] = QIDX_INVALID;
/* Set Ring parameters */
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
(tdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
- adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
+ scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
/* Now enable */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
}
return;
} /* ixv_initialize_transmit_units */
-
/************************************************************************
* ixv_initialize_rss_mapping
************************************************************************/
@@ -1345,17 +1316,17 @@
/* Set up the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
- if (j == adapter->num_queues)
+ if (j == adapter->num_rx_queues)
j = 0;
if (adapter->feat_en & IXGBE_FEATURE_RSS) {
/*
* Fetch the RSS bucket id for the given indirection
* entry. Cap it at the number of configured buckets
- * (which is num_queues.)
+ * (which is num_rx_queues.)
*/
queue_id = rss_get_indirection_to_bucket(i);
- queue_id = queue_id % adapter->num_queues;
+ queue_id = queue_id % adapter->num_rx_queues;
} else
queue_id = j;
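Worked example for the redirection table: with num_rx_queues == 4 and RSS disabled, the 64 entries simply round-robin (entry 0 -> queue 0, entry 1 -> queue 1, ..., entry 4 -> queue 0; i.e. i % 4). With IXGBE_FEATURE_RSS enabled, the kernel's bucket for each entry is fetched instead and clamped the same way:

	queue_id = rss_get_indirection_to_bucket(i) % adapter->num_rx_queues;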
@@ -1419,12 +1390,14 @@
* ixv_initialize_receive_units - Setup receive registers and features.
************************************************************************/
static void
-ixv_initialize_receive_units(struct adapter *adapter)
+ixv_initialize_receive_units(if_ctx_t ctx)
{
- struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- u32 bufsz, rxcsum, psrtype;
+ struct adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct ix_rx_queue *que = adapter->rx_queues;
+ u32 bufsz, rxcsum, psrtype;
if (ifp->if_mtu > ETHERMTU)
bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
@@ -1437,7 +1410,7 @@
| IXGBE_PSRTYPE_IPV6HDR
| IXGBE_PSRTYPE_L2HDR;
- if (adapter->num_queues > 1)
+ if (adapter->num_rx_queues > 1)
psrtype |= 1 << 29;
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
@@ -1446,17 +1419,20 @@
if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
}
+ scctx = adapter->shared;
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- u64 rdba = rxr->rxdma.dma_paddr;
- u32 reg, rxdctl;
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+ u64 rdba = rxr->rx_paddr;
+ u32 reg, rxdctl;
+ int j = rxr->me;
/* Disable the queue */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
- for (int j = 0; j < 10; j++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ for (int k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
IXGBE_RXDCTL_ENABLE)
msec_delay(1);
else
@@ -1464,32 +1440,32 @@
}
wmb();
/* Setup the Base and Length of the Rx Descriptor Ring */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
(rdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
- adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
+ scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
/* Reset the ring indices */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
/* Set up the SRRCTL register */
- reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
reg |= bufsz;
reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
/* Capture Rx Tail index */
rxr->tail = IXGBE_VFRDT(rxr->me);
/* Do the queue enabling last */
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
- for (int k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ for (int l = 0; l < 10; l++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
IXGBE_RXDCTL_ENABLE)
break;
msec_delay(1);
@@ -1497,6 +1473,7 @@
wmb();
/* Set the Tail Pointer */
+#ifdef DEV_NETMAP
/*
* In netmap mode, we must preserve the buffers made
* available to userspace before the if_init()
@@ -1513,25 +1490,23 @@
* RDT points to the last slot available for reception (?),
* so RDT = num_rx_desc - 1 means the whole ring is available.
*/
-#ifdef DEV_NETMAP
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
- (ifp->if_capenable & IFCAP_NETMAP)) {
- struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
+ if (ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(ifp);
+ struct netmap_kring *kring = &na->rx_rings[j];
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
} else
#endif /* DEV_NETMAP */
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
- adapter->num_rx_desc - 1);
+ scctx->isc_nrxd[0] - 1);
}
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
ixv_initialize_rss_mapping(adapter);
- if (adapter->num_queues > 1) {
+ if (adapter->num_rx_queues > 1) {
/* RSS and RX IPP Checksum are mutually exclusive */
rxcsum |= IXGBE_RXCSUM_PCSD;
}
@@ -1543,21 +1518,20 @@
rxcsum |= IXGBE_RXCSUM_IPPCSE;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
- return;
} /* ixv_initialize_receive_units */
/************************************************************************
* ixv_setup_vlan_support
************************************************************************/
static void
-ixv_setup_vlan_support(struct adapter *adapter)
+ixv_setup_vlan_support(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, vid, vfta, retry;
/*
- * We get here thru init_locked, meaning
+ * We get here through if_init, meaning
* a soft reset, this has already cleared
* the VFTA and other state, so if there
 * have been no vlans registered, do nothing.
@@ -1566,7 +1540,7 @@
return;
/* Enable the queues */
- for (int i = 0; i < adapter->num_queues; i++) {
+ for (int i = 0; i < adapter->num_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
@@ -1574,7 +1548,7 @@
* Let Rx path know that it needs to store VLAN tag
* as part of extra mbuf info.
*/
- adapter->rx_rings[i].vtag_strip = TRUE;
+ adapter->rx_queues[i].rxr.vtag_strip = TRUE;
}
/*
@@ -1605,7 +1579,7 @@
} /* ixv_setup_vlan_support */
/************************************************************************
- * ixv_register_vlan
+ * ixv_if_register_vlan
*
* Run via a vlan config EVENT, it enables us to use the
* HW Filter table since we can get the vlan id. This just
@@ -1613,92 +1587,83 @@
* will repopulate the real table.
************************************************************************/
static void
-ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
u16 index, bit;
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
ixv_shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-} /* ixv_register_vlan */
+} /* ixv_if_register_vlan */
/************************************************************************
- * ixv_unregister_vlan
+ * ixv_if_unregister_vlan
*
* Run via a vlan unconfig EVENT, remove our entry
* in the soft vfta.
************************************************************************/
static void
-ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = iflib_get_softc(ctx);
u16 index, bit;
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
ixv_shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-} /* ixv_unregister_vlan */
+} /* ixv_if_unregister_vlan */
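The shadow-VFTA indexing above packs the 4096 possible VLAN IDs into 128 32-bit words. Worked for vtag 100:

	index = (100 >> 5) & 0x7F = 3	/* which 32-bit word */
	bit   =  100 & 0x1F       = 4	/* which bit in that word */

so registering VLAN 100 sets ixv_shadow_vfta[3] |= (1 << 4), and ixv_setup_vlan_support() replays the shadow table into the hardware VFTA after the next reset.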
/************************************************************************
- * ixv_enable_intr
+ * ixv_if_enable_intr
************************************************************************/
static void
-ixv_enable_intr(struct adapter *adapter)
+ixv_if_enable_intr(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
+ struct ix_rx_queue *que = adapter->rx_queues;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
mask = IXGBE_EIMS_ENABLE_MASK;
mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
- for (int i = 0; i < adapter->num_queues; i++, que++)
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++)
ixv_enable_queue(adapter, que->msix);
IXGBE_WRITE_FLUSH(hw);
-
- return;
-} /* ixv_enable_intr */
+} /* ixv_if_enable_intr */
/************************************************************************
- * ixv_disable_intr
+ * ixv_if_disable_intr
************************************************************************/
static void
-ixv_disable_intr(struct adapter *adapter)
+ixv_if_disable_intr(if_ctx_t ctx)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+} /* ixv_if_disable_intr */
- return;
-} /* ixv_disable_intr */
+/************************************************************************
+ * ixv_if_rx_queue_intr_enable
+ ************************************************************************/
+static int
+ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct adapter *adapter = iflib_get_softc(ctx);
+ struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
+
+ ixv_enable_queue(adapter, que->rxr.me);
+
+ return (0);
+} /* ixv_if_rx_queue_intr_enable */
/************************************************************************
* ixv_set_ivar
@@ -1736,9 +1701,11 @@
static void
ixv_configure_ivars(struct adapter *adapter)
{
- struct ix_queue *que = adapter->queues;
+ struct ix_rx_queue *que = adapter->rx_queues;
+
+ MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
- for (int i = 0; i < adapter->num_queues; i++, que++) {
+ for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
/* First the RX queue entry */
ixv_set_ivar(adapter, i, que->msix, 0);
/* ... and the TX */
@@ -1752,33 +1719,6 @@
ixv_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixv_configure_ivars */
-
-/************************************************************************
- * ixv_get_counter
- ************************************************************************/
-static uint64_t
-ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
-{
- struct adapter *adapter;
-
- adapter = if_getsoftc(ifp);
-
- switch (cnt) {
- case IFCOUNTER_IPACKETS:
- return (adapter->ipackets);
- case IFCOUNTER_OPACKETS:
- return (adapter->opackets);
- case IFCOUNTER_IBYTES:
- return (adapter->ibytes);
- case IFCOUNTER_OBYTES:
- return (adapter->obytes);
- case IFCOUNTER_IMCASTS:
- return (adapter->imcasts);
- default:
- return (if_get_counter_default(ifp, cnt));
- }
-} /* ixv_get_counter */
-
/************************************************************************
* ixv_save_stats
*
@@ -1862,15 +1802,15 @@
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
- UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
+ UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
adapter->stats.vf.vfgprc);
- UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
+ UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
adapter->stats.vf.vfgptc);
- UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
- UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
- UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
+ UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
adapter->stats.vf.vfmprc);
/* Fill out the OS statistics structure */
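UPDATE_STAT_32 and UPDATE_STAT_36 (defined earlier in this file) fold the hardware's rolling 32-bit and 36-bit counters into monotonic 64-bit software totals by detecting wraparound. A sketch of the 32-bit variant under that assumption -- the authoritative macro lives above this hunk, and the details here are reconstructed, not quoted:

#define UPDATE_STAT_32(reg, last, count)                        \
{                                                               \
	u32 current = IXGBE_READ_REG(hw, reg);                  \
	if (current < (last))	/* counter wrapped */           \
		(count) += 0x100000000LL;                       \
	(last) = current;                                       \
	(count) &= 0xFFFFFFFF00000000LL;                        \
	(count) |= current;                                     \
}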
@@ -1888,8 +1828,8 @@
ixv_add_stats_sysctls(struct adapter *adapter)
{
device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
- struct rx_ring *rxr = adapter->rx_rings;
+ struct ix_tx_queue *tx_que = adapter->tx_queues;
+ struct ix_rx_queue *rx_que = adapter->rx_queues;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
@@ -1901,42 +1841,33 @@
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
- CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
- CTLFLAG_RD, &(txr->no_tx_dma_setup),
- "Driver Tx DMA failure in Tx");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
- CTLFLAG_RD, &(txr->no_desc_avail),
- "Not-enough-descriptors count: TX");
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets), "TX Packets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
- CTLFLAG_RD, &(txr->br->br_drops),
- "Packets dropped in buf_ring");
}
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
@@ -1986,34 +1917,10 @@
{
device_t dev = adapter->dev;
struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- struct rx_ring *rxr;
- struct tx_ring *txr;
- struct lro_ctrl *lro;
device_printf(dev, "Error Byte Count = %u \n",
IXGBE_READ_REG(hw, IXGBE_ERRBC));
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- txr = que->txr;
- rxr = que->rxr;
- lro = &rxr->lro;
- device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
- que->msix, (long)que->irqs);
- device_printf(dev, "RX(%d) Packets Received: %lld\n",
- rxr->me, (long long)rxr->rx_packets);
- device_printf(dev, "RX(%d) Bytes Received: %lu\n",
- rxr->me, (long)rxr->rx_bytes);
- device_printf(dev, "RX(%d) LRO Queued= %lld\n",
- rxr->me, (long long)lro->lro_queued);
- device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
- rxr->me, (long long)lro->lro_flushed);
- device_printf(dev, "TX(%d) Packets Sent: %lu\n",
- txr->me, (long)txr->total_packets);
- device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
- txr->me, (long)txr->no_desc_avail);
- }
-
device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
} /* ixv_print_debug_info */
@@ -2079,365 +1986,5 @@
/* Needs advanced context descriptor regardless of offloads req'd */
if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
-
- /* Enabled via sysctl... */
- /* Legacy (single queue) transmit */
- if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
- ixv_enable_legacy_tx)
- adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
} /* ixv_init_device_features */
-/************************************************************************
- * ixv_shutdown - Shutdown entry point
- ************************************************************************/
-static int
-ixv_shutdown(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- IXGBE_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- return (0);
-} /* ixv_shutdown */
-
-
-/************************************************************************
- * ixv_ioctl - Ioctl entry point
- *
- * Called when the user wants to configure the interface.
- *
- * return 0 on success, positive on failure
- ************************************************************************/
-static int
-ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *)data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- * Calling init results in link renegotiation,
- * so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixv_init(adapter);
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
- error = EINVAL;
- } else {
- IXGBE_CORE_LOCK(adapter);
- ifp->if_mtu = ifr->ifr_mtu;
- adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixv_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXGBE_CORE_LOCK(adapter);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- ixv_init_locked(adapter);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixv_stop(adapter);
- adapter->if_flags = ifp->if_flags;
- IXGBE_CORE_UNLOCK(adapter);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixv_disable_intr(adapter);
- ixv_set_multi(adapter);
- ixv_enable_intr(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
- if (mask & IFCAP_HWCSUM)
- ifp->if_capenable ^= IFCAP_HWCSUM;
- if (mask & IFCAP_TSO4)
- ifp->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
- VLAN_CAPABILITIES(ifp);
- break;
- }
-
- default:
- IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-} /* ixv_ioctl */
-
-/************************************************************************
- * ixv_init
- ************************************************************************/
-static void
-ixv_init(void *arg)
-{
- struct adapter *adapter = arg;
-
- IXGBE_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
-
- return;
-} /* ixv_init */
-
-
-/************************************************************************
- * ixv_handle_que
- ************************************************************************/
-static void
-ixv_handle_que(void *context, int pending)
-{
- struct ix_queue *que = context;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct ifnet *ifp = adapter->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixgbe_rxeof(que);
- IXGBE_TX_LOCK(txr);
- ixgbe_txeof(txr);
- if (!ixv_ring_empty(ifp, txr->br))
- ixv_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->que_task);
- return;
- }
- }
-
- /* Re-enable this interrupt */
- ixv_enable_queue(adapter, que->msix);
-
- return;
-} /* ixv_handle_que */
-
-/************************************************************************
- * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
- ************************************************************************/
-static int
-ixv_allocate_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- struct tx_ring *txr = adapter->tx_rings;
- int error, msix_ctrl, rid, vector = 0;
-
- for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
- rid = vector + 1;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
- vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixv_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register QUE handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
-#endif
- que->msix = vector;
- adapter->active_queues |= (u64)(1 << que->msix);
- /*
- * Bind the MSI-X vector, and thus the
- * ring to the corresponding CPU.
- */
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, que->res, i);
- TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
- TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
- que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(adapter->dev));
- }
-
- /* and Mailbox */
- rid = vector + 1;
- adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res) {
- device_printf(dev,
- "Unable to allocate bus resource: MBX interrupt [%d]\n",
- rid);
- return (ENXIO);
- }
- /* Set the mbx handler function */
- error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
- NULL, ixv_msix_mbx, adapter, &adapter->tag);
- if (error) {
- adapter->res = NULL;
- device_printf(dev, "Failed to register LINK handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
-#endif
- adapter->vector = vector;
- /* Tasklets for Mailbox */
- TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
- adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
- device_get_nameunit(adapter->dev));
- /*
- * Due to a broken design QEMU will fail to properly
- * enable the guest for MSI-X unless the vectors in
- * the table are all set up, so we must rewrite the
- * ENABLE in the MSI-X control register again at this
- * point to cause it to successfully initialize us.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- return (0);
-} /* ixv_allocate_msix */
-
-/************************************************************************
- * ixv_configure_interrupts - Setup MSI-X resources
- *
- * Note: The VF device MUST use MSI-X, there is no fallback.
- ************************************************************************/
-static int
-ixv_configure_interrupts(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int rid, want, msgs;
-
- /* Must have at least 2 MSI-X vectors */
- msgs = pci_msix_count(dev);
- if (msgs < 2)
- goto out;
- rid = PCIR_BAR(3);
- adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (adapter->msix_mem == NULL) {
- device_printf(adapter->dev, "Unable to map MSI-X table \n");
- goto out;
- }
-
- /*
- * Want vectors for the queues,
- * plus an additional for mailbox.
- */
- want = adapter->num_queues + 1;
- if (want > msgs) {
- want = msgs;
- adapter->num_queues = msgs - 1;
- } else
- msgs = want;
- if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
- device_printf(adapter->dev,
- "Using MSI-X interrupts with %d vectors\n", want);
- /* reflect correct sysctl value */
- ixv_num_queues = adapter->num_queues;
-
- return (0);
- }
- /* Release in case alloc was insufficient */
- pci_release_msi(dev);
-out:
- if (adapter->msix_mem != NULL) {
- bus_release_resource(dev, SYS_RES_MEMORY, rid,
- adapter->msix_mem);
- adapter->msix_mem = NULL;
- }
- device_printf(adapter->dev, "MSI-X config error\n");
-
- return (ENXIO);
-} /* ixv_configure_interrupts */
-
-
-/************************************************************************
- * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
- *
- * Done outside of interrupt context since the driver might sleep
- ************************************************************************/
-static void
-ixv_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
-
- adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
- &adapter->link_up, FALSE);
- ixv_update_link_status(adapter);
-} /* ixv_handle_link */
-
-/************************************************************************
- * ixv_check_link - Used in the local timer to poll for link changes
- ************************************************************************/
-static void
-ixv_check_link(struct adapter *adapter)
-{
- adapter->hw.mac.get_link_status = TRUE;
-
- adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
- &adapter->link_up, FALSE);
- ixv_update_link_status(adapter);
-} /* ixv_check_link */
-
Index: sys/dev/ixgbe/if_sriov.c
===================================================================
--- sys/dev/ixgbe/if_sriov.c
+++ sys/dev/ixgbe/if_sriov.c
@@ -33,6 +33,7 @@
/*$FreeBSD$*/
#include "ixgbe.h"
+#include "ixgbe_sriov.h"
#ifdef PCI_IOV
@@ -80,10 +81,14 @@
int i;
int index;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
- adapter->rx_rings[i].me = index;
- adapter->tx_rings[i].me = index;
+ adapter->rx_queues[i].rxr.me = index;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
+ adapter->tx_queues[i].txr.me = index;
}
}
@@ -233,7 +238,7 @@
if (tag == 0) {
/* Accept non-vlan tagged traffic. */
- //vmolr |= IXGBE_VMOLR_AUPE;
+ vmolr |= IXGBE_VMOLR_AUPE;
/* Allow VM to tag outgoing traffic; no default tag. */
vmvir = 0;
@@ -269,7 +274,7 @@
* frames on either the PF or the VF.
*/
if (adapter->max_frame_size > ETHER_MAX_LEN ||
- vf->max_frame_size > ETHER_MAX_LEN)
+ vf->maximum_frame_size > ETHER_MAX_LEN)
return (FALSE);
return (TRUE);
@@ -281,7 +286,7 @@
* 1.1 or later VF versions always work if they aren't using
* jumbo frames.
*/
- if (vf->max_frame_size <= ETHER_MAX_LEN)
+ if (vf->maximum_frame_size <= ETHER_MAX_LEN)
return (TRUE);
/*
@@ -292,7 +297,6 @@
return (TRUE);
return (FALSE);
-
}
} /* ixgbe_vf_frame_size_compatible */
@@ -451,7 +455,7 @@
}
/* It is illegal to enable vlan tag 0. */
- if (tag == 0 && enable != 0){
+ if (tag == 0 && enable != 0) {
ixgbe_send_vf_nack(adapter, vf, msg[0]);
return;
}
@@ -484,8 +488,8 @@
return;
}
- vf->max_frame_size = vf_max_size;
- ixgbe_update_max_frame(adapter, vf->max_frame_size);
+ vf->maximum_frame_size = vf_max_size;
+ ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
/*
* We might have to disable reception to this VF if the frame size is
@@ -565,8 +569,12 @@
static void
-ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
+ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
+ struct adapter *adapter = iflib_get_softc(ctx);
+#ifdef KTR
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+#endif
struct ixgbe_hw *hw;
uint32_t msg[IXGBE_VFMAILBOX_SIZE];
int error;
@@ -578,8 +586,8 @@
if (error != 0)
return;
- CTR3(KTR_MALLOC, "%s: received msg %x from %d",
- adapter->ifp->if_xname, msg[0], vf->pool);
+ CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
+ msg[0], vf->pool);
if (msg[0] == IXGBE_VF_RESET) {
ixgbe_vf_reset_msg(adapter, vf, msg);
return;
@@ -620,17 +628,16 @@
/* Tasklet for handling VF -> PF mailbox messages */
void
-ixgbe_handle_mbx(void *context, int pending)
+ixgbe_handle_mbx(void *context)
{
- struct adapter *adapter;
+ if_ctx_t ctx = context;
+ struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw;
struct ixgbe_vf *vf;
int i;
- adapter = context;
hw = &adapter->hw;
- IXGBE_CORE_LOCK(adapter);
for (i = 0; i < adapter->num_vfs; i++) {
vf = &adapter->vfs[i];
@@ -639,22 +646,21 @@
ixgbe_process_vf_reset(adapter, vf);
if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
- ixgbe_process_vf_msg(adapter, vf);
+ ixgbe_process_vf_msg(ctx, vf);
if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
ixgbe_process_vf_ack(adapter, vf);
}
}
- IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */
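
With the taskqueue gone, ixgbe_handle_mbx() becomes a plain function: the old `pending` argument existed only to satisfy the taskqueue callback signature, and the core lock can be dropped because iflib serializes its admin work per port. A hypothetical call site, assumed for illustration (the actual dispatch is wired up elsewhere in this diff):

static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);

	/* ... link-state processing ... */

	/* Runs single-threaded in iflib's admin context, so no
	 * IXGBE_CORE_LOCK is required around the VF scan. */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		ixgbe_handle_mbx(ctx);
}
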
int
-ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
+ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
struct adapter *adapter;
int retval = 0;
- adapter = device_get_softc(dev);
+ adapter = iflib_get_softc(ctx);
adapter->iov_mode = IXGBE_NO_VM;
if (num_vfs == 0) {
@@ -682,45 +688,38 @@
goto err_init_iov;
}
- IXGBE_CORE_LOCK(adapter);
-
adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
M_NOWAIT | M_ZERO);
if (adapter->vfs == NULL) {
retval = ENOMEM;
- IXGBE_CORE_UNLOCK(adapter);
goto err_init_iov;
}
adapter->num_vfs = num_vfs;
- adapter->init_locked(adapter);
+ ixgbe_if_init(adapter->ctx);
adapter->feat_en |= IXGBE_FEATURE_SRIOV;
- IXGBE_CORE_UNLOCK(adapter);
-
- return retval;
+ return (retval);
err_init_iov:
adapter->num_vfs = 0;
adapter->pool = 0;
adapter->iov_mode = IXGBE_NO_VM;
- return retval;
-} /* ixgbe_init_iov */
+ return (retval);
+} /* ixgbe_if_iov_init */
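
The rename from ixgbe_init_iov() to ixgbe_if_iov_init() follows the iflib convention: the SR-IOV entry points become ifdi device-interface methods dispatched through the driver's kobj method table rather than being called on a raw device_t. A sketch of the expected wiring (the table itself lives in if_ix.c, outside this excerpt):

static device_method_t ixgbe_if_methods[] = {
	/* ... attach/detach, init/stop, intr assign ... */
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif
	DEVMETHOD_END
};
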
void
-ixgbe_uninit_iov(device_t dev)
+ixgbe_if_iov_uninit(if_ctx_t ctx)
{
struct ixgbe_hw *hw;
struct adapter *adapter;
uint32_t pf_reg, vf_reg;
- adapter = device_get_softc(dev);
+ adapter = iflib_get_softc(ctx);
hw = &adapter->hw;
- IXGBE_CORE_LOCK(adapter);
-
/* Enable rx/tx for the PF and disable it for all VFs. */
pf_reg = IXGBE_VF_INDEX(adapter->pool);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
@@ -739,9 +738,7 @@
adapter->vfs = NULL;
adapter->num_vfs = 0;
adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
-
- IXGBE_CORE_UNLOCK(adapter);
-} /* ixgbe_uninit_iov */
+} /* ixgbe_if_iov_uninit */
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
@@ -749,8 +746,6 @@
struct ixgbe_hw *hw;
uint32_t vf_index, pfmbimr;
- IXGBE_CORE_LOCK_ASSERT(adapter);
-
hw = &adapter->hw;
if (!(vf->flags & IXGBE_VF_ACTIVE))
@@ -786,8 +781,6 @@
if (adapter->iov_mode == IXGBE_NO_VM)
return;
- IXGBE_CORE_LOCK_ASSERT(adapter);
-
/* RMW appropriate registers based on IOV mode */
/* Read... */
mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
@@ -844,36 +837,33 @@
{
struct ixgbe_vf *vf;
- IXGBE_CORE_LOCK_ASSERT(adapter);
-
for (int i = 0; i < adapter->num_vfs; i++) {
vf = &adapter->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE)
- ixgbe_update_max_frame(adapter, vf->max_frame_size);
+ ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
}
} /* ixgbe_recalculate_max_frame */
int
-ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
+ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
struct adapter *adapter;
struct ixgbe_vf *vf;
const void *mac;
- adapter = device_get_softc(dev);
+ adapter = iflib_get_softc(ctx);
KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
vfnum, adapter->num_vfs));
- IXGBE_CORE_LOCK(adapter);
vf = &adapter->vfs[vfnum];
vf->pool= vfnum;
/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
vf->rar_index = vfnum + 1;
vf->default_vlan = 0;
- vf->max_frame_size = ETHER_MAX_LEN;
- ixgbe_update_max_frame(adapter, vf->max_frame_size);
+ vf->maximum_frame_size = ETHER_MAX_LEN;
+ ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
if (nvlist_exists_binary(config, "mac-addr")) {
mac = nvlist_get_binary(config, "mac-addr", NULL);
@@ -890,25 +880,16 @@
vf->flags |= IXGBE_VF_ACTIVE;
ixgbe_init_vf(adapter, vf);
- IXGBE_CORE_UNLOCK(adapter);
return (0);
-} /* ixgbe_add_vf */
+} /* ixgbe_if_iov_vf_add */
#else
void
-ixgbe_handle_mbx(void *context, int pending)
+ixgbe_handle_mbx(void *context)
{
- UNREFERENCED_2PARAMETER(context, pending);
+ UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */
-inline int
-ixgbe_vf_que_index(int mode, int vfnum, int num)
-{
- UNREFERENCED_2PARAMETER(mode, vfnum);
-
- return num;
-} /* ixgbe_vf_que_index */
-
#endif
Index: sys/dev/ixgbe/ix_txrx.c
===================================================================
--- sys/dev/ixgbe/ix_txrx.c
+++ sys/dev/ixgbe/ix_txrx.c
@@ -41,1878 +41,435 @@
#include "ixgbe.h"
-/*
- * HW RSC control:
- * this feature only works with
- * IPv4, and only on 82599 and later.
- * Also this will cause IP forwarding to
- * fail and that can't be controlled by
- * the stack as LRO can. For all these
- * reasons I've deemed it best to leave
- * this off and not bother with a tuneable
- * interface, this would need to be compiled
- * to enable.
- */
-static bool ixgbe_rsc_enable = FALSE;
-
-/*
- * For Flow Director: this is the
- * number of TX packets we sample
- * for the filter pool, this means
- * every 20th packet will be probed.
- *
- * This feature can be disabled by
- * setting this to 0.
- */
-static int atr_sample_rate = 20;
-
-/************************************************************************
- * Local Function prototypes
- ************************************************************************/
-static void ixgbe_setup_transmit_ring(struct tx_ring *);
-static void ixgbe_free_transmit_buffers(struct tx_ring *);
-static int ixgbe_setup_receive_ring(struct rx_ring *);
-static void ixgbe_free_receive_buffers(struct rx_ring *);
-static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
-static void ixgbe_refresh_mbufs(struct rx_ring *, int);
-static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
-static int ixgbe_tx_ctx_setup(struct tx_ring *,
- struct mbuf *, u32 *, u32 *);
-static int ixgbe_tso_setup(struct tx_ring *,
- struct mbuf *, u32 *, u32 *);
-static __inline void ixgbe_rx_discard(struct rx_ring *, int);
-static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u32);
-static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
- struct ixgbe_dma_alloc *, int);
-static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
-
-/************************************************************************
- * ixgbe_legacy_start_locked - Transmit entry point
- *
- * Called by the stack to initiate a transmit.
- * The driver will remain in this routine as long as there are
- * packets to transmit and transmit resources are available.
- * In case resources are not available, the stack is notified
- * and the packet is requeued.
- ************************************************************************/
-int
-ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
-{
- struct mbuf *m_head;
- struct adapter *adapter = txr->adapter;
-
- IXGBE_TX_LOCK_ASSERT(txr);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return (ENETDOWN);
- if (!adapter->link_active)
- return (ENETDOWN);
-
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
- if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
- break;
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
- if (m_head == NULL)
- break;
-
- if (ixgbe_xmit(txr, &m_head)) {
- if (m_head != NULL)
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
- }
-
- return IXGBE_SUCCESS;
-} /* ixgbe_legacy_start_locked */
/************************************************************************
- * ixgbe_legacy_start
- *
- * Called by the stack, this always uses the first tx ring,
- * and should not be used with multiqueue tx enabled.
+ * Local Function prototypes
************************************************************************/
-void
-ixgbe_legacy_start(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_TX_LOCK(txr);
- ixgbe_legacy_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
- }
-} /* ixgbe_legacy_start */
-
-/************************************************************************
- * ixgbe_mq_start - Multiqueue Transmit Entry Point
- *
- * (if_transmit function)
- ************************************************************************/
-int
-ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ix_queue *que;
- struct tx_ring *txr;
- int i, err = 0;
- uint32_t bucket_id;
-
- /*
- * When doing RSS, map it to the same outbound queue
- * as the incoming flow would be mapped to.
- *
- * If everything is setup correctly, it should be the
- * same bucket that the current CPU we're on is.
- */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
- if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
- (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
- &bucket_id) == 0)) {
- i = bucket_id % adapter->num_queues;
-#ifdef IXGBE_DEBUG
- if (bucket_id > adapter->num_queues)
- if_printf(ifp,
- "bucket_id (%d) > num_queues (%d)\n",
- bucket_id, adapter->num_queues);
-#endif
- } else
- i = m->m_pkthdr.flowid % adapter->num_queues;
- } else
- i = curcpu % adapter->num_queues;
-
- /* Check for a hung queue and pick alternative */
- if (((1 << i) & adapter->active_queues) == 0)
- i = ffsl(adapter->active_queues);
+static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
+static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
- txr = &adapter->tx_rings[i];
- que = &adapter->queues[i];
-
- err = drbr_enqueue(ifp, txr->br, m);
- if (err)
- return (err);
- if (IXGBE_TX_TRYLOCK(txr)) {
- ixgbe_mq_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
- } else
- taskqueue_enqueue(que->tq, &txr->txq_task);
-
- return (0);
-} /* ixgbe_mq_start */
-
-/************************************************************************
- * ixgbe_mq_start_locked
- ************************************************************************/
-int
-ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
-{
- struct mbuf *next;
- int enqueued = 0, err = 0;
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return (ENETDOWN);
- if (txr->adapter->link_active == 0)
- return (ENETDOWN);
-
- /* Process the queue */
-#if __FreeBSD_version < 901504
- next = drbr_dequeue(ifp, txr->br);
- while (next != NULL) {
- if ((err = ixgbe_xmit(txr, &next)) != 0) {
- if (next != NULL)
- err = drbr_enqueue(ifp, txr->br, next);
-#else
- while ((next = drbr_peek(ifp, txr->br)) != NULL) {
- err = ixgbe_xmit(txr, &next);
- if (err != 0) {
- if (next == NULL)
- drbr_advance(ifp, txr->br);
- else
- drbr_putback(ifp, txr->br, next);
-#endif
- break;
- }
-#if __FreeBSD_version >= 901504
- drbr_advance(ifp, txr->br);
-#endif
- enqueued++;
-#if __FreeBSD_version >= 1100036
- /*
- * Since we're looking at the tx ring, we can check
- * to see if we're a VF by examing our tail register
- * address.
- */
- if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
- (next->m_flags & M_MCAST))
- if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
-#endif
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-#if __FreeBSD_version < 901504
- next = drbr_dequeue(ifp, txr->br);
-#endif
- }
+static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx,
+ uint8_t flidx __unused, qidx_t pidx);
+static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
+ qidx_t budget);
+static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
- if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
- ixgbe_txeof(txr);
+static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
- return (err);
-} /* ixgbe_mq_start_locked */
+extern void ixgbe_if_enable_intr(if_ctx_t ctx);
+static int ixgbe_determine_rsstype(u16 pkt_info);
-/************************************************************************
- * ixgbe_deferred_mq_start
- *
- * Called from a taskqueue to drain queued transmit packets.
- ************************************************************************/
-void
-ixgbe_deferred_mq_start(void *arg, int pending)
-{
- struct tx_ring *txr = arg;
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
+struct if_txrx ixgbe_txrx = {
+ ixgbe_isc_txd_encap,
+ ixgbe_isc_txd_flush,
+ ixgbe_isc_txd_credits_update,
+ ixgbe_isc_rxd_available,
+ ixgbe_isc_rxd_pkt_get,
+ ixgbe_isc_rxd_refill,
+ ixgbe_isc_rxd_flush,
+ NULL
+};
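
This ops table is the heart of the conversion: everything below it replaces the old interrupt-driven TX/RX path with stateless callbacks that iflib invokes around its own descriptor bookkeeping. iflib finds the table through the softc context; a sketch of that hookup, assuming it happens in the driver's attach_pre method:

	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	scctx->isc_txrx = &ixgbe_txrx;	/* descriptor ops from this file */

The trailing NULL slot is the legacy-interrupt member of struct if_txrx, which this driver leaves unset.
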
- IXGBE_TX_LOCK(txr);
- if (!drbr_empty(ifp, txr->br))
- ixgbe_mq_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
-} /* ixgbe_deferred_mq_start */
-
-/************************************************************************
- * ixgbe_qflush - Flush all ring buffers
- ************************************************************************/
-void
-ixgbe_qflush(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
- struct mbuf *m;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXGBE_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-} /* ixgbe_qflush */
-
-
-/************************************************************************
- * ixgbe_xmit
- *
- * Maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- *
- * Return 0 on success, positive on failure
- ************************************************************************/
-static int
-ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *txbuf;
- union ixgbe_adv_tx_desc *txd = NULL;
- struct mbuf *m_head;
- int i, j, error, nsegs;
- int first;
- u32 olinfo_status = 0, cmd_type_len;
- bool remap = TRUE;
- bus_dma_segment_t segs[adapter->num_segs];
- bus_dmamap_t map;
-
- m_head = *m_headp;
-
- /* Basic descriptor defines */
- cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
-
- if (m_head->m_flags & M_VLANTAG)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail_desc;
- txbuf = &txr->tx_buffers[first];
- map = txbuf->map;
-
- /*
- * Map the packet for DMA.
- */
-retry:
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
- &nsegs, BUS_DMA_NOWAIT);
-
- if (__predict_false(error)) {
- struct mbuf *m;
-
- switch (error) {
- case EFBIG:
- /* Try it again? - one try */
- if (remap == TRUE) {
- remap = FALSE;
- /*
- * XXX: m_defrag will choke on
- * non-MCLBYTES-sized clusters
- */
- m = m_defrag(*m_headp, M_NOWAIT);
- if (m == NULL) {
- adapter->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
- goto retry;
- } else
- return (error);
- case ENOMEM:
- txr->no_tx_dma_setup++;
- return (error);
- default:
- txr->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- }
-
- /* Make certain there are enough descriptors */
- if (txr->tx_avail < (nsegs + 2)) {
- txr->no_desc_avail++;
- bus_dmamap_unload(txr->txtag, map);
- return (ENOBUFS);
- }
- m_head = *m_headp;
-
- /*
- * Set up the appropriate offload context
- * this will consume the first descriptor
- */
- error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
- if (__predict_false(error)) {
- if (error == ENOBUFS)
- *m_headp = NULL;
- return (error);
- }
-
- /* Do the flow director magic */
- if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
- (txr->atr_sample) && (!adapter->fdir_reinit)) {
- ++txr->atr_count;
- if (txr->atr_count >= atr_sample_rate) {
- ixgbe_atr(txr, m_head);
- txr->atr_count = 0;
- }
- }
-
- olinfo_status |= IXGBE_ADVTXD_CC;
- i = txr->next_avail_desc;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
- bus_addr_t segaddr;
-
- txbuf = &txr->tx_buffers[i];
- txd = &txr->tx_base[i];
- seglen = segs[j].ds_len;
- segaddr = htole64(segs[j].ds_addr);
-
- txd->read.buffer_addr = segaddr;
- txd->read.cmd_type_len = htole32(txr->txd_cmd |
- cmd_type_len | seglen);
- txd->read.olinfo_status = htole32(olinfo_status);
-
- if (++i == txr->num_desc)
- i = 0;
- }
-
- txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
- txr->tx_avail -= nsegs;
- txr->next_avail_desc = i;
-
- txbuf->m_head = m_head;
- /*
- * Here we swap the map so the last descriptor,
- * which gets the completion interrupt has the
- * real map, and the first descriptor gets the
- * unused map from this descriptor.
- */
- txr->tx_buffers[first].map = txbuf->map;
- txbuf->map = map;
- bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the EOP descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop = txd;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
-
- /* Mark queue as having work */
- if (txr->busy == 0)
- txr->busy = 1;
-
- return (0);
-} /* ixgbe_xmit */
-
-
-/************************************************************************
- * ixgbe_allocate_transmit_buffers
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- ************************************************************************/
-static int
-ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_tx_buf *txbuf;
- int error, i;
-
- /*
- * Setup DMA descriptor areas.
- */
- error = bus_dma_tag_create(
- /* parent */ bus_get_dma_tag(adapter->dev),
- /* alignment */ 1,
- /* bounds */ 0,
- /* lowaddr */ BUS_SPACE_MAXADDR,
- /* highaddr */ BUS_SPACE_MAXADDR,
- /* filter */ NULL,
- /* filterarg */ NULL,
- /* maxsize */ IXGBE_TSO_SIZE,
- /* nsegments */ adapter->num_segs,
- /* maxsegsize */ PAGE_SIZE,
- /* flags */ 0,
- /* lockfunc */ NULL,
- /* lockfuncarg */ NULL,
- &txr->txtag);
- if (error != 0) {
- device_printf(dev, "Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- txr->tx_buffers =
- (struct ixgbe_tx_buf *)malloc(sizeof(struct ixgbe_tx_buf) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (txr->tx_buffers == NULL) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer dma maps */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-
- return 0;
-fail:
- /* We free all, it handles case where we are in the middle */
- ixgbe_free_transmit_structures(adapter);
-
- return (error);
-} /* ixgbe_allocate_transmit_buffers */
-
-/************************************************************************
- * ixgbe_setup_transmit_ring - Initialize a transmit ring.
- ************************************************************************/
-static void
-ixgbe_setup_transmit_ring(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *txbuf;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
-
- /* Clear the old ring contents */
- IXGBE_TX_LOCK(txr);
-
-#ifdef DEV_NETMAP
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
- /*
- * (under lock): if in netmap mode, do some consistency
- * checks and set slot to entry 0 of the netmap ring.
- */
- slot = netmap_reset(na, NR_TX, txr->me, 0);
- }
-#endif /* DEV_NETMAP */
-
- bzero((void *)txr->tx_base,
- (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
- /* Reset indices */
- txr->next_avail_desc = 0;
- txr->next_to_clean = 0;
-
- /* Free any existing tx buffers. */
- txbuf = txr->tx_buffers;
- for (int i = 0; i < txr->num_desc; i++, txbuf++) {
- if (txbuf->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, txbuf->map);
- m_freem(txbuf->m_head);
- txbuf->m_head = NULL;
- }
-
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, set the map for the packet buffer.
- * NOTE: Some drivers (not this one) also need to set
- * the physical buffer address in the NIC ring.
- * Slots in the netmap ring (indexed by "si") are
- * kring->nkr_hwofs positions "ahead" wrt the
- * corresponding slot in the NIC ring. In some drivers
- * (not here) nkr_hwofs can be negative. Function
- * netmap_idx_n2k() handles wraparounds properly.
- */
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
- int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
- netmap_load_map(na, txr->txtag,
- txbuf->map, NMB(na, slot + si));
- }
-#endif /* DEV_NETMAP */
-
- /* Clear the EOP descriptor pointer */
- txbuf->eop = NULL;
- }
-
- /* Set the rate at which we sample packets */
- if (adapter->feat_en & IXGBE_FEATURE_FDIR)
- txr->atr_sample = atr_sample_rate;
-
- /* Set number of descriptors available */
- txr->tx_avail = adapter->num_tx_desc;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXGBE_TX_UNLOCK(txr);
-} /* ixgbe_setup_transmit_ring */
-
-/************************************************************************
- * ixgbe_setup_transmit_structures - Initialize all transmit rings.
- ************************************************************************/
-int
-ixgbe_setup_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- ixgbe_setup_transmit_ring(txr);
-
- return (0);
-} /* ixgbe_setup_transmit_structures */
-
-/************************************************************************
- * ixgbe_free_transmit_structures - Free all transmit rings.
- ************************************************************************/
-void
-ixgbe_free_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- ixgbe_free_transmit_buffers(txr);
- ixgbe_dma_free(adapter, &txr->txdma);
- IXGBE_TX_UNLOCK(txr);
- IXGBE_TX_LOCK_DESTROY(txr);
- }
- free(adapter->tx_rings, M_DEVBUF);
-} /* ixgbe_free_transmit_structures */
-
-/************************************************************************
- * ixgbe_free_transmit_buffers
- *
- * Free transmit ring related data structures.
- ************************************************************************/
-static void
-ixgbe_free_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *tx_buffer;
- int i;
-
- INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
-
- if (txr->tx_buffers == NULL)
- return;
-
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txtag, tx_buffer->map);
- tx_buffer->map = NULL;
- }
- } else if (tx_buffer->map != NULL) {
- bus_dmamap_unload(txr->txtag, tx_buffer->map);
- bus_dmamap_destroy(txr->txtag, tx_buffer->map);
- tx_buffer->map = NULL;
- }
- }
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- if (txr->txtag != NULL) {
- bus_dma_tag_destroy(txr->txtag);
- txr->txtag = NULL;
- }
-} /* ixgbe_free_transmit_buffers */
+extern if_shared_ctx_t ixgbe_sctx;
/************************************************************************
* ixgbe_tx_ctx_setup
*
* Advanced Context Descriptor setup for VLAN, CSUM or TSO
+ *
************************************************************************/
static int
-ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
- u32 *cmd_type_len, u32 *olinfo_status)
+ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ether_vlan_header *eh;
-#ifdef INET
- struct ip *ip;
-#endif
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
- int ehdrlen, ip_hlen = 0;
- int offload = TRUE;
- int ctxd = txr->next_avail_desc;
- u32 vlan_macip_lens = 0;
- u32 type_tucmd_mlhl = 0;
- u16 vtag = 0;
- u16 etype;
- u8 ipproto = 0;
- caddr_t l3d;
-
-
- /* First check if TSO is to be used */
- if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
- return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
-
- if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
- offload = FALSE;
-
- /* Indicate the whole packet as payload when not doing TSO */
- *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
+ u32 vlan_macip_lens, type_tucmd_mlhl;
+ u32 olinfo_status, mss_l4len_idx, pktlen, offload;
+ u8 ehdrlen;
- /* Now ready a context descriptor */
- TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
-
- /*
- * In advanced descriptors the vlan tag must
- * be placed into the context descriptor. Hence
- * we need to make one even if not doing offloads.
- */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
- (offload == FALSE))
- return (0);
+ offload = TRUE;
+ olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);
/*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
+ * Some of our VF devices need a context descriptor for every
+ * packet, so ehdrlen must be non-zero here or the PF will flag
+ * a malicious driver event. For every other reason this
+ * function is called, the stack will already have populated
+ * ipi_ehdrlen for us.
*/
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
+ if (pi->ipi_ehdrlen == 0) {
ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Set the ether header length */
+ ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
+ } else
+ ehdrlen = pi->ipi_ehdrlen;
vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- if (offload == FALSE)
- goto no_offloads;
+ pktlen = pi->ipi_len;
+ /* First check if TSO is to be used */
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ /* TSO: the payload length reported in olinfo excludes all headers */
+ pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+ mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+ }
- /*
- * If the first mbuf only includes the ethernet header,
- * jump to the next one
- * XXX: This assumes the stack splits mbufs containing headers
- * on header boundaries
- * XXX: And assumes the entire IP header is contained in one mbuf
- */
- if (mp->m_len == ehdrlen && mp->m_next)
- l3d = mtod(mp->m_next, caddr_t);
+ olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+ if (pi->ipi_flags & IPI_TX_IPV4) {
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ /* Tell transmit desc to also do IPv4 checksum. */
+ if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+ } else if (pi->ipi_flags & IPI_TX_IPV6)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
else
- l3d = mtod(mp, caddr_t) + ehdrlen;
-
- switch (etype) {
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(l3d);
- ip_hlen = ip->ip_hl << 2;
- ipproto = ip->ip_p;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- /* Insert IPv4 checksum into data descriptors */
- if (mp->m_pkthdr.csum_flags & CSUM_IP) {
- ip->ip_sum = 0;
- *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- }
- break;
-#endif
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(l3d);
- ip_hlen = sizeof(struct ip6_hdr);
- ipproto = ip6->ip6_nxt;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
-#endif
- default:
- offload = FALSE;
- break;
- }
+ offload = FALSE;
- vlan_macip_lens |= ip_hlen;
+ vlan_macip_lens |= pi->ipi_ip_hlen;
- /* No support for offloads for non-L4 next headers */
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags &
- (CSUM_IP_TCP | CSUM_IP6_TCP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- else
- offload = false;
- break;
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags &
- (CSUM_IP_UDP | CSUM_IP6_UDP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- else
- offload = false;
- break;
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags &
- (CSUM_IP_SCTP | CSUM_IP6_SCTP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- else
- offload = false;
- break;
- default:
- offload = false;
- break;
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ else
+ offload = FALSE;
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ else
+ offload = FALSE;
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ else
+ offload = FALSE;
+ break;
+ default:
+ offload = FALSE;
+ break;
}
+ /* Insert L4 checksum into data descriptors */
+ if (offload)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- if (offload) /* Insert L4 checksum into data descriptors */
- *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
-no_offloads:
type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* Now copy bits into descriptor */
TXD->vlan_macip_lens = htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
-
- /* We've consumed the first desc, adjust counters */
- if (++ctxd == txr->num_desc)
- ctxd = 0;
- txr->next_avail_desc = ctxd;
- --txr->tx_avail;
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
- return (0);
+ return (olinfo_status);
} /* ixgbe_tx_ctx_setup */
/************************************************************************
- * ixgbe_tso_setup
- *
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
+ * ixgbe_isc_txd_encap
************************************************************************/
static int
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
- u32 *olinfo_status)
-{
+ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ union ixgbe_adv_tx_desc *txd = NULL;
struct ixgbe_adv_tx_context_desc *TXD;
- struct ether_vlan_header *eh;
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
-#ifdef INET
- struct ip *ip;
-#endif
- struct tcphdr *th;
- int ctxd, ehdrlen, ip_hlen, tcp_hlen;
- u32 vlan_macip_lens = 0;
- u32 type_tucmd_mlhl = 0;
- u32 mss_l4len_idx = 0, paylen;
- u16 vtag = 0, eh_type;
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- eh_type = eh->evl_proto;
+ int i, j, first, pidx_last;
+ u32 olinfo_status, cmd, flags;
+ qidx_t ntxd;
+
+ cmd = (IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+ if (pi->ipi_mflags & M_VLANTAG)
+ cmd |= IXGBE_ADVTXD_DCMD_VLE;
+
+ i = first = pi->ipi_pidx;
+ flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
+ ntxd = scctx->isc_ntxd[0];
+
+ TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
+ if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
+ (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
+ pi->ipi_vtag) {
+ /*********************************************
+ * Set up the appropriate offload context
+ * this will consume the first descriptor
+ *********************************************/
+ olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ cmd |= IXGBE_ADVTXD_DCMD_TSE;
+ ++txr->tso_tx;
+ }
+
+ if (++i == scctx->isc_ntxd[0])
+ i = 0;
} else {
- ehdrlen = ETHER_HDR_LEN;
- eh_type = eh->evl_encap_proto;
- }
-
- switch (ntohs(eh_type)) {
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return (ENXIO);
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- /* Tell transmit desc to also do IPv4 checksum. */
- *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- break;
-#endif
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- /* XXX-BZ For now we do not pretend to support ext. hdrs. */
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return (ENXIO);
- ip_hlen = sizeof(struct ip6_hdr);
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
- th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
-#endif
- default:
- panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(eh_type));
- break;
+ /* Indicate the whole packet as payload when not doing TSO */
+ olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
}
- ctxd = txr->next_avail_desc;
- TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
-
- tcp_hlen = th->th_off << 2;
-
- /* This is used in the transmit desc in encap */
- paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
-
- /* VLAN MACLEN IPLEN */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- }
-
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= ip_hlen;
- TXD->vlan_macip_lens = htole32(vlan_macip_lens);
-
- /* ADV DTYPE TUCMD */
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
- TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
- TXD->seqnum_seed = htole32(0);
-
- if (++ctxd == txr->num_desc)
- ctxd = 0;
-
- txr->tx_avail--;
- txr->next_avail_desc = ctxd;
- *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++txr->tso_tx;
-
- return (0);
-} /* ixgbe_tso_setup */
-
-
-/************************************************************************
- * ixgbe_txeof
- *
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
- ************************************************************************/
-void
-ixgbe_txeof(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *buf;
- union ixgbe_adv_tx_desc *txd;
- u32 work, processed = 0;
- u32 limit = adapter->tx_process_limit;
-
- mtx_assert(&txr->tx_mtx, MA_OWNED);
-
-#ifdef DEV_NETMAP
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
- (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
- struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_kring *kring = &na->tx_rings[txr->me];
- txd = txr->tx_base;
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- /*
- * In netmap mode, all the work is done in the context
- * of the client thread. Interrupt handlers only wake up
- * clients, which may be sleeping on individual rings
- * or on a global resource for all rings.
- * To implement tx interrupt mitigation, we wake up the client
- * thread roughly every half ring, even if the NIC interrupts
- * more frequently. This is implemented as follows:
- * - ixgbe_txsync() sets kring->nr_kflags with the index of
- * the slot that should wake up the thread (nkr_num_slots
- * means the user thread should not be woken up);
- * - the driver ignores tx interrupts unless netmap_mitigate=0
- * or the slot has the DD bit set.
- */
- if (!netmap_mitigate ||
- (kring->nr_kflags < kring->nkr_num_slots &&
- txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
- netmap_tx_irq(adapter->ifp, txr->me);
- }
- return;
- }
-#endif /* DEV_NETMAP */
-
- if (txr->tx_avail == txr->num_desc) {
- txr->busy = 0;
- return;
- }
-
- /* Get work starting point */
- work = txr->next_to_clean;
- buf = &txr->tx_buffers[work];
- txd = &txr->tx_base[work];
- work -= txr->num_desc; /* The distance to ring end */
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
-
- do {
- union ixgbe_adv_tx_desc *eop = buf->eop;
- if (eop == NULL) /* No work */
- break;
-
- if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
- break; /* I/O not complete */
-
- if (buf->m_head) {
- txr->bytes += buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(txr->txtag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- }
- buf->eop = NULL;
- ++txr->tx_avail;
-
- /* We clean the range if multi segment */
- while (txd != eop) {
- ++txd;
- ++buf;
- ++work;
- /* wrap the ring? */
- if (__predict_false(!work)) {
- work -= txr->num_desc;
- buf = txr->tx_buffers;
- txd = txr->tx_base;
- }
- if (buf->m_head) {
- txr->bytes += buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(txr->txtag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- }
- ++txr->tx_avail;
- buf->eop = NULL;
-
- }
- ++txr->packets;
- ++processed;
-
- /* Try the next packet */
- ++txd;
- ++buf;
- ++work;
- /* reset with a wrap */
- if (__predict_false(!work)) {
- work -= txr->num_desc;
- buf = txr->tx_buffers;
- txd = txr->tx_base;
- }
- prefetch(txd);
- } while (__predict_true(--limit));
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- work += txr->num_desc;
- txr->next_to_clean = work;
-
- /*
- * Queue Hang detection, we know there's
- * work outstanding or the first return
- * would have been taken, so increment busy
- * if nothing managed to get cleaned, then
- * in local_timer it will be checked and
- * marked as HUNG if it exceeds a MAX attempt.
- */
- if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
- ++txr->busy;
- /*
- * If anything gets cleaned we reset state to 1,
- * note this will turn off HUNG if its set.
- */
- if (processed)
- txr->busy = 1;
-
- if (txr->tx_avail == txr->num_desc)
- txr->busy = 0;
-
- return;
-} /* ixgbe_txeof */
-
-/************************************************************************
- * ixgbe_rsc_count
- *
- * Used to detect a descriptor that has been merged by Hardware RSC.
- ************************************************************************/
-static inline u32
-ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
-{
- return (le32toh(rx->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
-} /* ixgbe_rsc_count */
-
-/************************************************************************
- * ixgbe_setup_hw_rsc
- *
- * Initialize Hardware RSC (LRO) feature on 82599
- * for an RX ring, this is toggled by the LRO capability
- * even though it is transparent to the stack.
- *
- * NOTE: Since this HW feature only works with IPv4 and
- * testing has shown soft LRO to be as effective,
- * this feature will be disabled by default.
- ************************************************************************/
-static void
-ixgbe_setup_hw_rsc(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rscctrl, rdrxctl;
-
- /* If turning LRO/RSC off we need to disable it */
- if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
- rscctrl &= ~IXGBE_RSCCTL_RSCEN;
- return;
- }
-
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
-#ifdef DEV_NETMAP
- /* Always strip CRC unless Netmap disabled it */
- if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
- !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
- ix_crcstrip)
-#endif /* DEV_NETMAP */
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
- /*
- * Limit the total number of descriptors that
- * can be combined, so it does not exceed 64K
- */
- if (rxr->mbuf_sz == MCLBYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rxr->mbuf_sz == MJUMPAGESIZE)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else if (rxr->mbuf_sz == MJUM9BYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- else /* Using 16K cluster */
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
-
- /* Enable TCP header recognition */
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
- (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
+ olinfo_status |= IXGBE_ADVTXD_CC;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
- /* Disable RSC for ACK packets */
- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ txd = &txr->tx_base[i];
+ seglen = segs[j].ds_len;
- rxr->hw_rsc = TRUE;
-} /* ixgbe_setup_hw_rsc */
+ txd->read.buffer_addr = htole64(segs[j].ds_addr);
+ txd->read.cmd_type_len = htole32(cmd | seglen);
+ txd->read.olinfo_status = htole32(olinfo_status);
-/************************************************************************
- * ixgbe_refresh_mbufs
- *
- * Refresh mbuf buffers for RX descriptor rings
- * - now keeps its own state so discards due to resource
- * exhaustion are unnecessary, if an mbuf cannot be obtained
- * it just returns, keeping its placeholder, thus it can simply
- * be recalled to try again.
- ************************************************************************/
-static void
-ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_rx_buf *rxbuf;
- struct mbuf *mp;
- bus_dma_segment_t seg[1];
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_to_refresh;
- /* Control the loop with one beyond */
- if (++j == rxr->num_desc)
- j = 0;
-
- while (j != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->buf == NULL) {
- mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
- rxr->mbuf_sz);
- if (mp == NULL)
- goto update;
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(mp, ETHER_ALIGN);
- } else
- mp = rxbuf->buf;
-
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
-
- /* If we're dealing with an mbuf that was copied rather
- * than replaced, there's no need to go through busdma.
- */
- if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
- /* Get the memory mapping */
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap,
- mp, seg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
- m_free(mp);
- rxbuf->buf = NULL;
- goto update;
- }
- rxbuf->buf = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
- htole64(seg[0].ds_addr);
- } else {
- rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
- rxbuf->flags &= ~IXGBE_RX_COPY;
+ pidx_last = i;
+ if (++i == scctx->isc_ntxd[0]) {
+ i = 0;
}
-
- refreshed = TRUE;
- /* Next is precalculated */
- i = j;
- rxr->next_to_refresh = i;
- if (++j == rxr->num_desc)
- j = 0;
}
-update:
- if (refreshed) /* Update hardware tail index */
- IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
-
- return;
-} /* ixgbe_refresh_mbufs */
-
-/************************************************************************
- * ixgbe_allocate_receive_buffers
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
- ************************************************************************/
-static int
-ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_rx_buf *rxbuf;
- int bsize, error;
-
- bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
- rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (rxr->rx_buffers == NULL) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- goto fail;
+ if (flags) {
+ txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
+ txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
}
+ txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);
- error = bus_dma_tag_create(
- /* parent */ bus_get_dma_tag(dev),
- /* alignment */ 1,
- /* bounds */ 0,
- /* lowaddr */ BUS_SPACE_MAXADDR,
- /* highaddr */ BUS_SPACE_MAXADDR,
- /* filter */ NULL,
- /* filterarg */ NULL,
- /* maxsize */ MJUM16BYTES,
- /* nsegments */ 1,
- /* maxsegsize */ MJUM16BYTES,
- /* flags */ 0,
- /* lockfunc */ NULL,
- /* lockfuncarg */ NULL,
- &rxr->ptag);
- if (error != 0) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
+ txr->bytes += pi->ipi_len;
+ pi->ipi_new_pidx = i;
- for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
- rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX dma map\n");
- goto fail;
- }
- }
+ ++txr->total_packets;
return (0);
-
-fail:
- /* Frees all, but can handle partial completion */
- ixgbe_free_receive_structures(adapter);
-
- return (error);
-} /* ixgbe_allocate_receive_buffers */
+} /* ixgbe_isc_txd_encap */
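
One subtlety above: tx_rsq[] is a small ring that remembers which descriptor indices carry the RS (report status) bit, and its producer index is advanced with `& (ntxd - 1)`, which is only correct because iflib restricts isc_ntxd to powers of two. A standalone toy model of that bookkeeping (hypothetical ring size, userspace C):

#include <assert.h>
#include <stdint.h>

#define NTXD 1024			/* must be a power of two */

static uint16_t tx_rsq[NTXD];
static uint16_t tx_rs_pidx;

static void
record_rs_slot(uint16_t pidx_last)
{
	assert((NTXD & (NTXD - 1)) == 0);	/* mask-trick precondition */
	tx_rsq[tx_rs_pidx] = pidx_last;		/* remember the EOP slot */
	tx_rs_pidx = (tx_rs_pidx + 1) & (NTXD - 1);
}

credits_update below consumes these entries in FIFO order, polling each recorded descriptor's DD bit.
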
/************************************************************************
- * ixgbe_free_receive_ring
+ * ixgbe_isc_txd_flush
************************************************************************/
static void
-ixgbe_free_receive_ring(struct rx_ring *rxr)
+ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- for (int i = 0; i < rxr->num_desc; i++) {
- ixgbe_rx_discard(rxr, i);
- }
-} /* ixgbe_free_receive_ring */
+ struct adapter *sc = arg;
+ struct ix_tx_queue *que = &sc->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
+
+ IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
+} /* ixgbe_isc_txd_flush */
/************************************************************************
- * ixgbe_setup_receive_ring
- *
- * Initialize a receive ring and its buffers.
+ * ixgbe_isc_txd_credits_update
************************************************************************/
static int
-ixgbe_setup_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ifnet *ifp;
- device_t dev;
- struct ixgbe_rx_buf *rxbuf;
- struct lro_ctrl *lro = &rxr->lro;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(rxr->adapter->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
- bus_dma_segment_t seg[1];
- int rsize, nsegs, error = 0;
-
- adapter = rxr->adapter;
- ifp = adapter->ifp;
- dev = adapter->dev;
-
- /* Clear the ring contents */
- IXGBE_RX_LOCK(rxr);
-
-#ifdef DEV_NETMAP
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
- slot = netmap_reset(na, NR_RX, rxr->me, 0);
-#endif /* DEV_NETMAP */
-
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
- /* Cache the size */
- rxr->mbuf_sz = adapter->rx_mbuf_sz;
-
- /* Free current RX buffer structs and their mbufs */
- ixgbe_free_receive_ring(rxr);
-
- /* Now replenish the mbufs */
- for (int j = 0; j != rxr->num_desc; ++j) {
- struct mbuf *mp;
-
- rxbuf = &rxr->rx_buffers[j];
-
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, fill the map and set the buffer
- * address in the NIC ring, considering the offset
- * between the netmap and NIC rings (see comment in
- * ixgbe_setup_transmit_ring() ). No need to allocate
- * an mbuf, so end the block with a continue;
- */
- if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
- uint64_t paddr;
- void *addr;
-
- addr = PNMB(na, slot + sj, &paddr);
- netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
- /* Update descriptor and the cached value */
- rxr->rx_base[j].read.pkt_addr = htole64(paddr);
- rxbuf->addr = htole64(paddr);
- continue;
- }
-#endif /* DEV_NETMAP */
-
- rxbuf->flags = 0;
- rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
- adapter->rx_mbuf_sz);
- if (rxbuf->buf == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = rxbuf->buf;
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, seg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD);
- /* Update the descriptor and the cached value */
- rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
- rxbuf->addr = htole64(seg[0].ds_addr);
- }
-
+ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+{
+ struct adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_tx_queue *que = &sc->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
+
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx == txr->tx_rs_pidx)
+ return (0);
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->rx_copies = 0;
- rxr->rx_bytes = 0;
- rxr->vtag_strip = FALSE;
+ cur = txr->tx_rsq[rs_cidx];
+ status = txr->tx_base[cur].wb.status;
+ updated = !!(status & IXGBE_TXD_STAT_DD);
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (clear == false || updated == 0)
+ return (updated);
- /*
- * Now set up the LRO interface
- */
- if (ixgbe_rsc_enable)
- ixgbe_setup_hw_rsc(rxr);
- else if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev, "LRO Initialization failed!\n");
- goto fail;
- }
- INIT_DEBUGOUT("RX Soft LRO Initialized\n");
- rxr->lro_enabled = TRUE;
- lro->ifp = adapter->ifp;
- }
-
- IXGBE_RX_UNLOCK(rxr);
+ prev = txr->tx_cidx_processed;
+ ntxd = scctx->isc_ntxd[0];
+ do {
+ delta = (int32_t)cur - (int32_t)prev;
+ if (delta < 0)
+ delta += ntxd;
+
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd - 1);
+ if (rs_cidx == txr->tx_rs_pidx)
+ break;
- return (0);
+ cur = txr->tx_rsq[rs_cidx];
+ status = txr->tx_base[cur].wb.status;
+ } while ((status & IXGBE_TXD_STAT_DD));
-fail:
- ixgbe_free_receive_ring(rxr);
- IXGBE_RX_UNLOCK(rxr);
+ txr->tx_rs_cidx = rs_cidx;
+ txr->tx_cidx_processed = prev;
- return (error);
-} /* ixgbe_setup_receive_ring */
+ return (processed);
+} /* ixgbe_isc_txd_credits_update */
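
The delta computation handles ring wrap with plain signed arithmetic. Worked example with hypothetical numbers: ntxd = 1024, prev = 1000, cur = 31 gives delta = 31 - 1000 = -969, and adding ntxd yields 55, i.e. the consumer advanced 55 slots (1000 + 55 - 1024 = 31). As a standalone function:

#include <stdint.h>

static int32_t
ring_delta(int32_t cur, int32_t prev, int32_t ntxd)
{
	int32_t delta = cur - prev;

	if (delta < 0)
		delta += ntxd;	/* consumer index wrapped past ring end */
	return (delta);
}
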
/************************************************************************
- * ixgbe_setup_receive_structures - Initialize all receive rings.
+ * ixgbe_isc_rxd_refill
************************************************************************/
-int
-ixgbe_setup_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- int j;
-
- for (j = 0; j < adapter->num_queues; j++, rxr++)
- if (ixgbe_setup_receive_ring(rxr))
- goto fail;
-
- return (0);
-fail:
- /*
- * Free RX buffers allocated so far, we will only handle
- * the rings that completed, the failing case will have
- * cleaned up for itself. 'j' failed, so its the terminus.
- */
- for (int i = 0; i < j; ++i) {
- rxr = &adapter->rx_rings[i];
- IXGBE_RX_LOCK(rxr);
- ixgbe_free_receive_ring(rxr);
- IXGBE_RX_UNLOCK(rxr);
- }
-
- return (ENOBUFS);
-} /* ixgbe_setup_receive_structures */
-
-
-/************************************************************************
- * ixgbe_free_receive_structures - Free all receive rings.
- ************************************************************************/
-void
-ixgbe_free_receive_structures(struct adapter *adapter)
+static void
+ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
- struct rx_ring *rxr = adapter->rx_rings;
+ struct adapter *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ uint64_t *paddrs;
+ int i;
+ uint32_t next_pidx, pidx;
+ uint16_t count;
- INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- ixgbe_free_receive_buffers(rxr);
- /* Free LRO memory */
- tcp_lro_free(&rxr->lro);
- /* Free the ring memory as well */
- ixgbe_dma_free(adapter, &rxr->rxdma);
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == sc->shared->isc_nrxd[0])
+ next_pidx = 0;
}
-
- free(adapter->rx_rings, M_DEVBUF);
-} /* ixgbe_free_receive_structures */
-
+} /* ixgbe_isc_rxd_refill */
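iflib hands the refill callback a batch of physical addresses through the if_rxd_update structure; the driver only has to copy them into the descriptor ring and wrap at the ring end. A self-contained sketch of the same loop follows (mock_desc and the fixed NRXD are illustrative stand-ins, not driver types):

#include <stdint.h>
#include <stdio.h>

#define NRXD 8

struct mock_desc { uint64_t pkt_addr; };

static void
refill(struct mock_desc *ring, uint32_t pidx, const uint64_t *paddrs,
    uint16_t count)
{
	uint32_t next_pidx = pidx;

	for (uint16_t i = 0; i < count; i++) {
		ring[next_pidx].pkt_addr = paddrs[i];	/* htole64() in the driver */
		if (++next_pidx == NRXD)		/* wrap at ring end */
			next_pidx = 0;
	}
}

int
main(void)
{
	struct mock_desc ring[NRXD] = {{ 0 }};
	uint64_t paddrs[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	refill(ring, 6, paddrs, 4);	/* fills slots 6, 7, 0, 1 */
	printf("slot0 %#lx slot7 %#lx\n",
	    (unsigned long)ring[0].pkt_addr, (unsigned long)ring[7].pkt_addr);
	return (0);
}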
/************************************************************************
- * ixgbe_free_receive_buffers - Free receive ring data structures
+ * ixgbe_isc_rxd_flush
************************************************************************/
static void
-ixgbe_free_receive_buffers(struct rx_ring *rxr)
+ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_rx_buf *rxbuf;
-
- INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
-
- /* Cleanup any existing buffers */
- if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- ixgbe_rx_discard(rxr, i);
- if (rxbuf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
- rxbuf->pmap = NULL;
- }
- }
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
- }
- }
-
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
+ struct adapter *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[qsidx];
+ struct rx_ring *rxr = &que->rxr;
- return;
-} /* ixgbe_free_receive_buffers */
-
-/************************************************************************
- * ixgbe_rx_input
- ************************************************************************/
-static __inline void
-ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
- u32 ptype)
-{
- /*
- * ATM LRO is only for IP/TCP packets and TCP checksum of the packet
- * should be computed by hardware. Also it should not have VLAN tag in
- * ethernet header. In case of IPv6 we do not yet support ext. hdrs.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
- * Send to the stack if:
- * - LRO not enabled, or
- * - no LRO resources, or
- * - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
- (*ifp->if_input)(ifp, m);
-} /* ixgbe_rx_input */
+ IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
+} /* ixgbe_isc_rxd_flush */
/************************************************************************
- * ixgbe_rx_discard
+ * ixgbe_isc_rxd_available
************************************************************************/
-static __inline void
-ixgbe_rx_discard(struct rx_ring *rxr, int i)
+static int
+ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
- struct ixgbe_rx_buf *rbuf;
-
- rbuf = &rxr->rx_buffers[i];
+ struct adapter *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ union ixgbe_adv_rx_desc *rxd;
+ u32 staterr;
+ int cnt, i, nrxd;
- /*
- * With advanced descriptors the writeback
- * clobbers the buffer addrs, so its easier
- * to just free the existing mbufs and take
- * the normal refresh path to get new buffers
- * and mapping.
- */
+ if (budget == 1) {
+ rxd = &rxr->rx_base[pidx];
+ staterr = le32toh(rxd->wb.upper.status_error);
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
- } else if (rbuf->buf) {
- bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
- m_free(rbuf->buf);
- rbuf->buf = NULL;
+ return (staterr & IXGBE_RXD_STAT_DD);
}
- bus_dmamap_unload(rxr->ptag, rbuf->pmap);
- rbuf->flags = 0;
+ nrxd = sc->shared->isc_nrxd[0];
+	/* XXX: em(4) scans with cnt < nrxd; check for an off-by-one here */
+	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt <= budget;) {
+ rxd = &rxr->rx_base[i];
+ staterr = le32toh(rxd->wb.upper.status_error);
- return;
-} /* ixgbe_rx_discard */
+ if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+ break;
+ if (++i == nrxd)
+ i = 0;
+ if (staterr & IXGBE_RXD_STAT_EOP)
+ cnt++;
+ }
+ return (cnt);
+} /* ixgbe_isc_rxd_available */
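The scan above counts complete packets rather than descriptors: it advances through consecutive done (DD) descriptors and increments the count only when EOP is set, stopping once the budget is exhausted. A userland sketch of the same logic with mocked status bits (STAT_DD/STAT_EOP are illustrative values, not the hardware encoding):

#include <assert.h>
#include <stdint.h>

#define STAT_DD		0x01
#define STAT_EOP	0x02

static int
rxd_available(const uint8_t *staterr, int nrxd, int pidx, int budget)
{
	int cnt, i;

	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt <= budget;) {
		uint8_t s = staterr[i];

		if ((s & STAT_DD) == 0)
			break;		/* hw has not written this one yet */
		if (++i == nrxd)
			i = 0;		/* wrap at ring end */
		if (s & STAT_EOP)
			cnt++;		/* one complete packet */
	}
	return (cnt);
}

int
main(void)
{
	/* Two complete packets done; the third descriptor not yet written. */
	uint8_t ring[4] = { STAT_DD | STAT_EOP, STAT_DD | STAT_EOP, 0, 0 };

	assert(rxd_available(ring, 4, 0, 16) == 2);
	return (0);
}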
/************************************************************************
- * ixgbe_rxeof
+ * ixgbe_isc_rxd_pkt_get
*
- * Executes in interrupt context. It replenishes the
- * mbufs in the descriptor and sends data which has
- * been dma'ed into host memory to upper layer.
+ * Sends data which has been dma'ed into host memory to the
+ * upper layer and initializes the ri structure with its metadata.
*
- * Return TRUE for more work, FALSE for all clean.
+ * Returns 0 upon success, errno on failure
************************************************************************/
-bool
-ixgbe_rxeof(struct ix_queue *que)
-{
- struct adapter *adapter = que->adapter;
- struct rx_ring *rxr = que->rxr;
- struct ifnet *ifp = adapter->ifp;
- struct lro_ctrl *lro = &rxr->lro;
- union ixgbe_adv_rx_desc *cur;
- struct ixgbe_rx_buf *rbuf, *nbuf;
- int i, nextp, processed = 0;
- u32 staterr = 0;
- u32 count = adapter->rx_process_limit;
- u16 pkt_info;
-
- IXGBE_RX_LOCK(rxr);
-
-#ifdef DEV_NETMAP
- if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
- /* Same as the txeof routine: wakeup clients on intr. */
- if (netmap_rx_irq(ifp, rxr->me, &processed)) {
- IXGBE_RX_UNLOCK(rxr);
- return (FALSE);
- }
- }
-#endif /* DEV_NETMAP */
- for (i = rxr->next_to_check; count != 0;) {
- struct mbuf *sendmp, *mp;
- u32 rsc, ptype;
- u16 len;
- u16 vtag = 0;
- bool eop;
+static int
+ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+{
+ struct adapter *adapter = arg;
+ struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
+ union ixgbe_adv_rx_desc *rxd;
+
+ u16 pkt_info, len, cidx, i;
+ u16 vtag = 0;
+ u32 ptype;
+ u32 staterr = 0;
+ bool eop;
+
+ i = 0;
+ cidx = ri->iri_cidx;
+ do {
+ rxd = &rxr->rx_base[cidx];
+ staterr = le32toh(rxd->wb.upper.status_error);
+ pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
- /* Sync the ring. */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		/* The descriptor must have completed (DD set) */
+		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);
- cur = &rxr->rx_base[i];
- staterr = le32toh(cur->wb.upper.status_error);
- pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
+ len = le16toh(rxd->wb.upper.length);
+ ptype = le32toh(rxd->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_PKTTYPE_MASK;
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
+ ri->iri_len += len;
+ rxr->bytes += len;
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
- cur->wb.upper.status_error = 0;
- rbuf = &rxr->rx_buffers[i];
- mp = rbuf->buf;
-
- len = le16toh(cur->wb.upper.length);
- ptype = le32toh(cur->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_PKTTYPE_MASK;
+ rxd->wb.upper.status_error = 0;
eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+ if (staterr & IXGBE_RXD_STAT_VP) {
+ vtag = le16toh(rxd->wb.upper.vlan);
+ } else {
+ vtag = 0;
+ }
/* Make sure bad packets are discarded */
if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
if (adapter->feat_en & IXGBE_FEATURE_VF)
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
- rxr->rx_discarded++;
- ixgbe_rx_discard(rxr, i);
- goto next_desc;
- }
-
- bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
-
- /*
- * On 82599 which supports a hardware
- * LRO (called HW RSC), packets need
- * not be fragmented across sequential
- * descriptors, rather the next descriptor
- * is indicated in bits of the descriptor.
- * This also means that we might proceses
- * more than one packet at a time, something
- * that has never been true before, it
- * required eliminating global chain pointers
- * in favor of what we are doing here. -jfv
- */
- if (!eop) {
- /*
- * Figure out the next descriptor
- * of this frame.
- */
- if (rxr->hw_rsc == TRUE) {
- rsc = ixgbe_rsc_count(cur);
- rxr->rsc_num += (rsc - 1);
- }
- if (rsc) { /* Get hardware index */
- nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
- IXGBE_RXDADV_NEXTP_SHIFT);
- } else { /* Just sequential */
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- }
- nbuf = &rxr->rx_buffers[nextp];
- prefetch(nbuf);
- }
- /*
- * Rather than using the fmp/lmp global pointers
- * we now keep the head of a packet chain in the
- * buffer struct and pass this along from one
- * descriptor to the next, until we get EOP.
- */
- mp->m_len = len;
- /*
- * See if there is a stored head
- * that determines what we are
- */
- sendmp = rbuf->fmp;
- if (sendmp != NULL) { /* secondary frag */
- rbuf->buf = rbuf->fmp = NULL;
- mp->m_flags &= ~M_PKTHDR;
- sendmp->m_pkthdr.len += mp->m_len;
- } else {
- /*
- * Optimize. This might be a small packet,
- * maybe just a TCP ACK. Do a fast copy that
- * is cache aligned into a new mbuf, and
- * leave the old mbuf+cluster for re-use.
- */
- if (eop && len <= IXGBE_RX_COPY_LEN) {
- sendmp = m_gethdr(M_NOWAIT, MT_DATA);
- if (sendmp != NULL) {
- sendmp->m_data += IXGBE_RX_COPY_ALIGN;
- ixgbe_bcopy(mp->m_data, sendmp->m_data,
- len);
- sendmp->m_len = len;
- rxr->rx_copies++;
- rbuf->flags |= IXGBE_RX_COPY;
- }
- }
- if (sendmp == NULL) {
- rbuf->buf = rbuf->fmp = NULL;
- sendmp = mp;
- }
-
- /* first desc of a non-ps chain */
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- }
- ++processed;
-
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->buf;
- } else { /* Sending this frame */
- sendmp->m_pkthdr.rcvif = ifp;
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- /* Process vlan info */
- if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
- vtag = le16toh(cur->wb.upper.vlan);
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixgbe_rx_checksum(staterr, sendmp, ptype);
-
- /*
- * In case of multiqueue, we have RXCSUM.PCSD bit set
- * and never cleared. This means we have RSS hash
- * available to be used.
- */
- if (adapter->num_queues > 1) {
- sendmp->m_pkthdr.flowid =
- le32toh(cur->wb.lower.hi_dword.rss);
- switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
- case IXGBE_RXDADV_RSSTYPE_IPV4:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_TCP_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_TCP_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_IPV6_EX);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_TCP_IPV6_EX);
- break;
-#if __FreeBSD_version > 1100000
- case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_UDP_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_UDP_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_RSS_UDP_IPV6_EX);
- break;
-#endif
- default:
- M_HASHTYPE_SET(sendmp,
- M_HASHTYPE_OPAQUE_HASH);
- }
- } else {
- sendmp->m_pkthdr.flowid = que->msix;
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
- }
- }
-next_desc:
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == rxr->num_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL) {
- rxr->next_to_check = i;
- IXGBE_RX_UNLOCK(rxr);
- ixgbe_rx_input(rxr, ifp, sendmp, ptype);
- IXGBE_RX_LOCK(rxr);
- i = rxr->next_to_check;
- }
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixgbe_refresh_mbufs(rxr, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixgbe_rx_unrefreshed(rxr))
- ixgbe_refresh_mbufs(rxr, i);
-
- rxr->next_to_check = i;
-
- IXGBE_RX_UNLOCK(rxr);
-
- /*
- * Flush any outstanding LRO work
- */
- tcp_lro_flush_all(lro);
-
- /*
- * Still have cleaning to do?
- */
- if ((staterr & IXGBE_RXD_STAT_DD) != 0)
- return (TRUE);
-
- return (FALSE);
-} /* ixgbe_rxeof */
+ rxr->rx_discarded++;
+ return (EBADMSG);
+ }
+ ri->iri_frags[i].irf_flid = 0;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = len;
+ if (++cidx == adapter->shared->isc_nrxd[0])
+ cidx = 0;
+ i++;
+ /* even a 16K packet shouldn't consume more than 8 clusters */
+ MPASS(i < 9);
+ } while (!eop);
+
+ rxr->rx_packets++;
+ rxr->packets++;
+ rxr->rx_bytes += ri->iri_len;
+
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixgbe_rx_checksum(staterr, ri, ptype);
+
+ ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
+ ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
+ ri->iri_vtag = vtag;
+ ri->iri_nfrags = i;
+ if (vtag)
+ ri->iri_flags |= M_VLANTAG;
+ return (0);
+} /* ixgbe_isc_rxd_pkt_get */
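A packet that spans several descriptors yields one iri_frags[] entry per descriptor until EOP is seen. The bookkeeping can be traced with a small mock (struct frag and the fixed ring size are illustrative, not the iflib types):

#include <stdint.h>
#include <stdio.h>

struct frag { uint16_t idx; uint16_t len; };

int
main(void)
{
	/* Descriptor lengths of one 3-fragment packet starting at cidx 5. */
	uint16_t desc_len[3] = { 2048, 2048, 1000 };
	struct frag frags[8];
	uint16_t cidx = 5, nrxd = 8;
	uint32_t pkt_len = 0;
	int i;

	for (i = 0; i < 3; i++) {	/* up to the EOP descriptor */
		frags[i].idx = cidx;
		frags[i].len = desc_len[i];
		pkt_len += desc_len[i];
		if (++cidx == nrxd)	/* wrap at ring end */
			cidx = 0;
	}
	printf("nfrags=%d len=%u last idx=%u\n", i, pkt_len, frags[i - 1].idx);
	return (0);
}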
/************************************************************************
* ixgbe_rx_checksum
@@ -1922,7 +479,7 @@
* doesn't spend time verifying the checksum.
************************************************************************/
static void
-ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
u16 status = (u16)staterr;
u8 errors = (u8)(staterr >> 24);
@@ -1930,270 +487,59 @@
if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = true;
+ sctp = TRUE;
/* IPv4 checksum */
if (status & IXGBE_RXD_STAT_IPCS) {
- mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;
- /* IP Checksum Good */
- if (!(errors & IXGBE_RXD_ERR_IPE))
- mp->m_pkthdr.csum_flags |= CSUM_L3_VALID;
+ if (!(errors & IXGBE_RXD_ERR_IPE)) {
+ /* IP Checksum Good */
+ ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
+ } else
+ ri->iri_csum_flags = 0;
}
/* TCP/UDP/SCTP checksum */
if (status & IXGBE_RXD_STAT_L4CS) {
- mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
+ u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+ if (sctp)
+ type = CSUM_SCTP_VALID;
+#endif
if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
+ ri->iri_csum_flags |= type;
if (!sctp)
- mp->m_pkthdr.csum_data = htons(0xffff);
+ ri->iri_csum_data = htons(0xffff);
}
}
} /* ixgbe_rx_checksum */
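The mapping above reduces to: report a layer's checksum as good only when the hardware both checked it (status bit set) and found no error (error bit clear). A compact sketch with illustrative bit values (the driver uses the IXGBE_RXD_* and CSUM_* macros; these stand-ins are not the hardware encoding):

#include <assert.h>
#include <stdint.h>

#define STAT_IPCS	0x01	/* hw checked the IP header checksum */
#define STAT_L4CS	0x02	/* hw checked the L4 checksum */
#define ERR_IPE		0x01	/* IP checksum failed */
#define ERR_TCPE	0x02	/* L4 checksum failed */

#define F_IP_OK		0x0001	/* stands in for CSUM_IP_CHECKED|CSUM_IP_VALID */
#define F_L4_OK		0x0002	/* stands in for CSUM_DATA_VALID|CSUM_PSEUDO_HDR */

static uint32_t
csum_flags(uint8_t status, uint8_t errors)
{
	uint32_t flags = 0;

	if ((status & STAT_IPCS) && !(errors & ERR_IPE))
		flags |= F_IP_OK;
	if ((status & STAT_L4CS) && !(errors & ERR_TCPE))
		flags |= F_L4_OK;
	return (flags);
}

int
main(void)
{
	/* Both layers checked and good. */
	assert(csum_flags(STAT_IPCS | STAT_L4CS, 0) == (F_IP_OK | F_L4_OK));
	/* L4 checked but bad: only the IP result is reported. */
	assert(csum_flags(STAT_IPCS | STAT_L4CS, ERR_TCPE) == F_IP_OK);
	return (0);
}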
/************************************************************************
- * ixgbe_dmamap_cb - Manage DMA'able memory.
- ************************************************************************/
-static void
-ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
-{
- if (error)
- return;
- *(bus_addr_t *)arg = segs->ds_addr;
-
- return;
-} /* ixgbe_dmamap_cb */
-
-/************************************************************************
- * ixgbe_dma_malloc
- ************************************************************************/
-static int
-ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
- struct ixgbe_dma_alloc *dma, int mapflags)
-{
- device_t dev = adapter->dev;
- int r;
-
- r = bus_dma_tag_create(
- /* parent */ bus_get_dma_tag(adapter->dev),
- /* alignment */ DBA_ALIGN,
- /* bounds */ 0,
- /* lowaddr */ BUS_SPACE_MAXADDR,
- /* highaddr */ BUS_SPACE_MAXADDR,
- /* filter */ NULL,
- /* filterarg */ NULL,
- /* maxsize */ size,
- /* nsegments */ 1,
- /* maxsegsize */ size,
- /* flags */ BUS_DMA_ALLOCNOW,
- /* lockfunc */ NULL,
- /* lockfuncarg */ NULL,
- &dma->dma_tag);
- if (r != 0) {
- device_printf(dev,
- "ixgbe_dma_malloc: bus_dma_tag_create failed; error %u\n",
- r);
- goto fail_0;
- }
- r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
- BUS_DMA_NOWAIT, &dma->dma_map);
- if (r != 0) {
- device_printf(dev,
- "ixgbe_dma_malloc: bus_dmamem_alloc failed; error %u\n", r);
- goto fail_1;
- }
- r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
- ixgbe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
- if (r != 0) {
- device_printf(dev,
- "ixgbe_dma_malloc: bus_dmamap_load failed; error %u\n", r);
- goto fail_2;
- }
- dma->dma_size = size;
-
- return (0);
-fail_2:
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
- bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
- dma->dma_tag = NULL;
-
- return (r);
-} /* ixgbe_dma_malloc */
-
-/************************************************************************
- * ixgbe_dma_free
- ************************************************************************/
-static void
-ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
-{
- bus_dmamap_sync(dma->dma_tag, dma->dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->dma_tag, dma->dma_map);
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- bus_dma_tag_destroy(dma->dma_tag);
-} /* ixgbe_dma_free */
-
-
-/************************************************************************
- * ixgbe_allocate_queues
+ * ixgbe_determine_rsstype
*
- * Allocate memory for the transmit and receive rings, and then
- * the descriptors associated with each, called only once at attach.
+ * Parse the packet type to determine the appropriate hash
************************************************************************/
-int
-ixgbe_allocate_queues(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize, error = IXGBE_SUCCESS;
- int txconf = 0, rxconf = 0;
-
- /* First, allocate the top level queue structs */
- adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->queues == NULL) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Second, allocate the TX ring struct memory */
- adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->tx_rings == NULL) {
- device_printf(dev, "Unable to allocate TX ring memory\n");
- error = ENOMEM;
- goto tx_fail;
- }
-
- /* Third, allocate the RX ring */
- adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->rx_rings == NULL) {
- device_printf(dev, "Unable to allocate RX ring memory\n");
- error = ENOMEM;
- goto rx_fail;
- }
-
- /* For the ring itself */
- tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
- DBA_ALIGN);
-
- /*
- * Now set up the TX queues, txconf is needed to handle the
- * possibility that things fail midcourse and we need to
- * undo memory gracefully
- */
- for (int i = 0; i < adapter->num_queues; i++, txconf++) {
- /* Set up some basics */
- txr = &adapter->tx_rings[i];
- txr->adapter = adapter;
- txr->br = NULL;
- /* In case SR-IOV is enabled, align the index properly */
- txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
- i);
- txr->num_desc = adapter->num_tx_desc;
-
- /* Initialize the TX side lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
- BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
- bzero((void *)txr->tx_base, tsize);
-
- /* Now allocate transmit buffers for the ring */
- if (ixgbe_allocate_transmit_buffers(txr)) {
- device_printf(dev,
- "Critical Failure setting up transmit buffers\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
- M_WAITOK, &txr->tx_mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up buf ring\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- }
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
- DBA_ALIGN);
- for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
- rxr = &adapter->rx_rings[i];
- /* Set up some basics */
- rxr->adapter = adapter;
- /* In case SR-IOV is enabled, align the index properly */
- rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
- i);
- rxr->num_desc = adapter->num_rx_desc;
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
- BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate RxDescriptor memory\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
- bzero((void *)rxr->rx_base, rsize);
-
- /* Allocate receive buffers for the ring */
- if (ixgbe_allocate_receive_buffers(rxr)) {
- device_printf(dev,
- "Critical Failure setting up receive buffers\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- }
-
- /*
- * Finally set up the queue holding structs
- */
- for (int i = 0; i < adapter->num_queues; i++) {
- que = &adapter->queues[i];
- que->adapter = adapter;
- que->me = i;
- que->txr = &adapter->tx_rings[i];
- que->rxr = &adapter->rx_rings[i];
+static int
+ixgbe_determine_rsstype(u16 pkt_info)
+{
+ switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
+ case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case IXGBE_RXDADV_RSSTYPE_IPV4:
+ return M_HASHTYPE_RSS_IPV4;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
+ return M_HASHTYPE_RSS_IPV6_EX;
+ case IXGBE_RXDADV_RSSTYPE_IPV6:
+ return M_HASHTYPE_RSS_IPV6;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
+ return M_HASHTYPE_RSS_TCP_IPV6_EX;
+ case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
+ return M_HASHTYPE_RSS_UDP_IPV6_EX;
+ default:
+ return M_HASHTYPE_OPAQUE;
}
-
- return (0);
-
-err_rx_desc:
- for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
- ixgbe_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
- for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
- ixgbe_dma_free(adapter, &txr->txdma);
- free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
- free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
- free(adapter->queues, M_DEVBUF);
-fail:
- return (error);
-} /* ixgbe_allocate_queues */
+} /* ixgbe_determine_rsstype */
Index: sys/dev/ixgbe/ixgbe.h
===================================================================
--- sys/dev/ixgbe/ixgbe.h
+++ sys/dev/ixgbe/ixgbe.h
@@ -57,20 +57,13 @@
#include <net/if_dl.h>
#include <net/if_media.h>
-#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
+#include <net/iflib.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
-#include <netinet/udp.h>
-
-#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <machine/bus.h>
@@ -84,7 +77,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
-#include <sys/taskqueue.h>
+#include <sys/gtaskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
@@ -105,7 +98,7 @@
 * bytes. Performance tests have shown the 2K value to be optimal for top
* performance.
*/
-#define DEFAULT_TXD 1024
+#define DEFAULT_TXD 2048
#define PERFORM_TXD 2048
#define MAX_TXD 4096
#define MIN_TXD 64
@@ -120,7 +113,7 @@
* against the system mbuf pool limit, you can tune nmbclusters
* to adjust for this.
*/
-#define DEFAULT_RXD 1024
+#define DEFAULT_RXD 2048
#define PERFORM_RXD 2048
#define MAX_RXD 4096
#define MIN_RXD 64
@@ -218,6 +211,11 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
+#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
+ IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
+ IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
+ IFCAP_HWSTATS | IFCAP_VLAN_HWFILTER | IFCAP_WOL)
+
/* Backward compatibility items for very old versions */
#ifndef pci_find_cap
#define pci_find_cap pci_find_extcap
@@ -240,7 +238,6 @@
IXGBE_EITR_ITR_INT_MASK)
-
/************************************************************************
* vendor_info_array
*
@@ -261,23 +258,8 @@
u32 log;
};
-struct ixgbe_tx_buf {
- union ixgbe_adv_tx_desc *eop;
- struct mbuf *m_head;
- bus_dmamap_t map;
-};
-
-struct ixgbe_rx_buf {
- struct mbuf *buf;
- struct mbuf *fmp;
- bus_dmamap_t pmap;
- u_int flags;
-#define IXGBE_RX_COPY 0x01
- uint64_t addr;
-};
/*
- * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free
*/
struct ixgbe_dma_alloc {
bus_addr_t dma_paddr;
@@ -295,46 +277,18 @@
};
/*
- * Driver queue struct: this is the interrupt container
- * for the associated tx and rx ring.
- */
-struct ix_queue {
- struct adapter *adapter;
- u32 msix; /* This queue's MSI-X vector */
- u32 eims; /* This queue's EIMS bit */
- u32 eitr_setting;
- u32 me;
- struct resource *res;
- void *tag;
- int busy;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- struct task que_task;
- struct taskqueue *tq;
- u64 irqs;
-};
-
-/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
- struct mtx tx_mtx;
- u32 me;
- u32 tail;
- int busy;
union ixgbe_adv_tx_desc *tx_base;
- struct ixgbe_tx_buf *tx_buffers;
- struct ixgbe_dma_alloc txdma;
- volatile u16 tx_avail;
- u16 next_avail_desc;
- u16 next_to_clean;
- u16 num_desc;
- u32 txd_cmd;
- bus_dma_tag_t txtag;
- char mtx_name[16];
- struct buf_ring *br;
- struct task txq_task;
+ uint64_t tx_paddr;
+ u32 tail;
+ qidx_t *tx_rsq;
+ qidx_t tx_rs_cidx;
+ qidx_t tx_rs_pidx;
+ qidx_t tx_cidx_processed;
+ uint8_t me;
/* Flow Director */
u16 atr_sample;
@@ -344,9 +298,6 @@
u32 packets;
/* Soft Stats */
u64 tso_tx;
- u64 no_tx_map_avail;
- u64 no_tx_dma_setup;
- u64 no_desc_avail;
u64 total_packets;
};
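The new tx_rsq members above implement a small ring of descriptor indices at which the RS (report status) bit was requested: the encap path produces at tx_rs_pidx and ixgbe_isc_txd_credits_update() consumes at tx_rs_cidx. A minimal model of that producer/consumer queue, assuming a power-of-two ring size (names here are illustrative, not driver code):

#include <assert.h>
#include <stdint.h>

#define NTXD 16			/* power of two, so wrap is a mask */

struct rsq {
	uint16_t q[NTXD];
	uint16_t pidx;		/* where encap records the next RS descriptor */
	uint16_t cidx;		/* where credits_update resumes scanning */
};

static void
rsq_push(struct rsq *r, uint16_t desc_idx)
{
	r->q[r->pidx] = desc_idx;
	r->pidx = (r->pidx + 1) & (NTXD - 1);
}

static int
rsq_pop(struct rsq *r, uint16_t *desc_idx)
{
	if (r->cidx == r->pidx)
		return (0);	/* nothing outstanding */
	*desc_idx = r->q[r->cidx];
	r->cidx = (r->cidx + 1) & (NTXD - 1);
	return (1);
}

int
main(void)
{
	struct rsq r = { .pidx = 0, .cidx = 0 };
	uint16_t idx;

	rsq_push(&r, 7);
	rsq_push(&r, 12);
	assert(rsq_pop(&r, &idx) && idx == 7);
	assert(rsq_pop(&r, &idx) && idx == 12);
	assert(!rsq_pop(&r, &idx));
	return (0);
}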
@@ -355,22 +306,14 @@
* The Receive ring, one per rx queue
*/
struct rx_ring {
+ struct ix_rx_queue *que;
struct adapter *adapter;
- struct mtx rx_mtx;
u32 me;
u32 tail;
union ixgbe_adv_rx_desc *rx_base;
- struct ixgbe_dma_alloc rxdma;
- struct lro_ctrl lro;
- bool lro_enabled;
bool hw_rsc;
bool vtag_strip;
- u16 next_to_refresh;
- u16 next_to_check;
- u16 num_desc;
- u16 mbuf_sz;
- char mtx_name[16];
- struct ixgbe_rx_buf *rx_buffers;
+ uint64_t rx_paddr;
bus_dma_tag_t ptag;
u32 bytes; /* Used for AIM calc */
@@ -388,12 +331,35 @@
u64 flm;
};
+/*
+ * Driver queue struct: this is the interrupt container
+ * for the associated tx and rx ring.
+ */
+struct ix_rx_queue {
+ struct adapter *adapter;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; /* This queue's EIMS bit */
+ u32 eitr_setting;
+ struct resource *res;
+ void *tag;
+ int busy;
+ struct rx_ring rxr;
+ struct if_irq que_irq;
+ u64 irqs;
+};
+
+struct ix_tx_queue {
+ struct adapter *adapter;
+ u32 msix; /* This queue's MSIX vector */
+ struct tx_ring txr;
+};
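With iflib, each ring is embedded in its per-queue container (ix_tx_queue/ix_rx_queue) instead of living in a separate parallel array, so per-ring state is reached through the queue. A stripped-down sketch of iterating the new layout (mock types, illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stripped-down mock of the new layout: the ring lives inside the queue. */
struct mock_tx_ring { uint64_t tso_tx; };
struct mock_tx_queue { struct mock_tx_ring txr; };

static uint64_t
sum_tso_tx(const struct mock_tx_queue *queues, int nqueues)
{
	uint64_t total = 0;

	for (int i = 0; i < nqueues; i++)
		total += queues[i].txr.tso_tx;	/* embedded, not a parallel array */
	return (total);
}

int
main(void)
{
	struct mock_tx_queue q[2] = { { { 3 } }, { { 4 } } };

	printf("tso_tx total: %llu\n", (unsigned long long)sum_tso_tx(q, 2));
	return (0);
}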
+
#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
struct ixgbe_vf {
u_int pool;
u_int rar_index;
- u_int max_frame_size;
+ u_int maximum_frame_size;
uint32_t flags;
uint8_t ether_addr[ETHER_ADDR_LEN];
uint16_t mc_hash[IXGBE_MAX_VF_MC];
@@ -407,33 +373,32 @@
struct adapter {
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
+ if_ctx_t ctx;
+ if_softc_ctx_t shared;
+#define num_tx_queues shared->isc_ntxqsets
+#define num_rx_queues shared->isc_nrxqsets
+#define max_frame_size shared->isc_max_frame_size
+#define intr_type shared->isc_intr
device_t dev;
struct ifnet *ifp;
struct resource *pci_mem;
- struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSI-X
*/
+ struct if_irq irq;
void *tag;
struct resource *res;
- struct ifmedia media;
- struct callout timer;
- int link_rid;
+ struct ifmedia *media;
int if_flags;
-
- struct mtx core_mtx;
-
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
+ int msix;
u16 num_vlans;
- u16 num_queues;
/*
* Shadow VFTA table, this is needed because
@@ -447,7 +412,6 @@
int advertise; /* link speeds */
int enable_aim; /* adaptive interrupt moderation */
bool link_active;
- u16 max_frame_size;
u16 num_segs;
u32 link_speed;
bool link_up;
@@ -464,17 +428,15 @@
/* Support for pluggable optics */
bool sfp_probe;
- struct task link_task; /* Link tasklet */
- struct task mod_task; /* SFP tasklet */
- struct task msf_task; /* Multispeed Fiber */
- struct task mbx_task; /* VF -> PF mailbox interrupt */
+ struct grouptask mod_task; /* SFP tasklet */
+ struct grouptask msf_task; /* Multispeed Fiber */
+ struct grouptask mbx_task; /* VF -> PF mailbox interrupt */
/* Flow Director */
int fdir_reinit;
- struct task fdir_task;
+ struct grouptask fdir_task;
- struct task phy_task; /* PHY intr tasklet */
- struct taskqueue *tq;
+ struct grouptask phy_task; /* PHY intr tasklet */
/*
* Queues:
@@ -482,24 +444,9 @@
* and RX/TX pair or rings associated
* with it.
*/
- struct ix_queue *queues;
-
- /*
- * Transmit rings
- * Allocated at run time, an array of rings
- */
- struct tx_ring *tx_rings;
- u32 num_tx_desc;
- u32 tx_process_limit;
-
- /*
- * Receive rings
- * Allocated at run time, an array of rings
- */
- struct rx_ring *rx_rings;
- u64 active_queues;
- u32 num_rx_desc;
- u32 rx_process_limit;
+ struct ix_tx_queue *tx_queues;
+ struct ix_rx_queue *rx_queues;
+ u64 active_queues;
/* Multicast array memory */
struct ixgbe_mc_addr *mta;
@@ -513,13 +460,8 @@
/* Bypass */
struct ixgbe_bp_data bypass;
- /* Netmap */
- void (*init_locked)(struct adapter *);
- void (*stop_locked)(void *);
-
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long watchdog_events;
@@ -546,29 +488,12 @@
u32 feat_en;
};
-
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
#define IXGBE_ADVTXD_TSTAMP 0x00080000
-
-#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
-#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
-#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
-#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
-#define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
-#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
-#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
-#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
-#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
-#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
-#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
-
/* For backward compatibility */
#if !defined(PCIER_LINK_STA)
#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
@@ -625,35 +550,14 @@
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
- if (ALTQ_IS_ENABLED(&ifp->if_snd))
- return (1);
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (1);
#endif
- return (!buf_ring_empty(br));
+ return (!buf_ring_empty(br));
}
#endif
/*
- * Find the number of unrefreshed RX descriptors
- */
-static inline u16
-ixgbe_rx_unrefreshed(struct rx_ring *rxr)
-{
- if (rxr->next_to_check > rxr->next_to_refresh)
- return (rxr->next_to_check - rxr->next_to_refresh - 1);
- else
- return ((rxr->num_desc + rxr->next_to_check) -
- rxr->next_to_refresh - 1);
-}
-
-static inline int
-ixgbe_legacy_ring_empty(struct ifnet *ifp, struct buf_ring *dummy)
-{
- UNREFERENCED_1PARAMETER(dummy);
-
- return IFQ_DRV_IS_EMPTY(&ifp->if_snd);
-}
-
-/*
* This checks for a zero mac addr, something that will be likely
* unless the Admin on the Host has created one.
*/
@@ -670,25 +574,16 @@
}
/* Shared Prototypes */
-void ixgbe_legacy_start(struct ifnet *);
-int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *);
-int ixgbe_mq_start(struct ifnet *, struct mbuf *);
-int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
-void ixgbe_qflush(struct ifnet *);
-void ixgbe_deferred_mq_start(void *, int);
int ixgbe_allocate_queues(struct adapter *);
int ixgbe_setup_transmit_structures(struct adapter *);
void ixgbe_free_transmit_structures(struct adapter *);
int ixgbe_setup_receive_structures(struct adapter *);
void ixgbe_free_receive_structures(struct adapter *);
-void ixgbe_txeof(struct tx_ring *);
-bool ixgbe_rxeof(struct ix_queue *);
+int ixgbe_get_regs(SYSCTL_HANDLER_ARGS);
#include "ixgbe_bypass.h"
-#include "ixgbe_sriov.h"
#include "ixgbe_fdir.h"
#include "ixgbe_rss.h"
-#include "ixgbe_netmap.h"
#endif /* _IXGBE_H_ */
Index: sys/dev/ixgbe/ixgbe_common.c
===================================================================
--- sys/dev/ixgbe/ixgbe_common.c
+++ sys/dev/ixgbe/ixgbe_common.c
@@ -222,6 +222,7 @@
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
"Device %x does not support flow control autoneg",
hw->device_id);
+
return supported;
}
@@ -2000,7 +2001,7 @@
usec_delay(5);
ixgbe_standby_eeprom(hw);
- };
+ }
/*
* On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -2086,7 +2087,7 @@
* EEPROM
*/
mask = mask >> 1;
- };
+ }
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
@@ -3517,7 +3518,6 @@
if (index > 3)
return IXGBE_ERR_PARAM;
-
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val != IXGBE_SUCCESS)
goto out;
@@ -3714,7 +3714,7 @@
* @vmdq: VMDq pool to assign
*
* Puts an ethernet address into a receive address register, or
- * finds the rar that it is aleady in; adds to the pool list
+ * finds the rar that it is already in; adds to the pool list
**/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
@@ -4125,7 +4125,7 @@
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
}
return IXGBE_SUCCESS;
Index: sys/dev/ixgbe/ixgbe_fdir.h
===================================================================
--- sys/dev/ixgbe/ixgbe_fdir.h
+++ sys/dev/ixgbe/ixgbe_fdir.h
@@ -52,7 +52,7 @@
#endif
-void ixgbe_reinit_fdir(void *, int);
+void ixgbe_reinit_fdir(void *);
void ixgbe_atr(struct tx_ring *, struct mbuf *);
#endif /* _IXGBE_FDIR_H_ */
Index: sys/dev/ixgbe/ixgbe_netmap.h
===================================================================
--- sys/dev/ixgbe/ixgbe_netmap.h
+++ sys/dev/ixgbe/ixgbe_netmap.h
@@ -1,59 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2001-2017, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#ifndef _IXGBE_NETMAP_H_
-#define _IXGBE_NETMAP_H_
-
-#ifdef DEV_NETMAP
-
-#include <net/netmap.h>
-#include <sys/selinfo.h>
-#include <dev/netmap/netmap_kern.h>
-
-extern int ix_crcstrip;
-
-/*
- * ixgbe_netmap.c contains functions for netmap
- * support that extend the standard driver. See additional
- * comments in ixgbe_netmap.c.
- */
-void ixgbe_netmap_attach(struct adapter *adapter);
-
-#else
-#define ixgbe_netmap_attach(a)
-#define netmap_detach(a)
-#endif /* DEV_NETMAP */
-
-#endif /* _IXGBE_NETMAP_H_ */
Index: sys/dev/ixgbe/ixgbe_netmap.c
===================================================================
--- sys/dev/ixgbe/ixgbe_netmap.c
+++ sys/dev/ixgbe/ixgbe_netmap.c
@@ -1,521 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2001-2017, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-/*
- * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * $FreeBSD$
- *
- * netmap support for: ixgbe
- *
- * This file is meant to be a reference on how to implement
- * netmap support for a network driver.
- * This file contains code but only static or inline functions used
- * by a single driver. To avoid replication of code we just #include
- * it near the beginning of the standard driver.
- */
-
-#ifdef DEV_NETMAP
-/*
- * Some drivers may need the following headers. Others
- * already include them by default
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
- */
-#include "ixgbe.h"
-
-/*
- * device-specific sysctl variables:
- *
- * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
- * During regular operations the CRC is stripped, but on some
- * hardware reception of frames not multiple of 64 is slower,
- * so using crcstrip=0 helps in benchmarks.
- *
- * ix_rx_miss, ix_rx_miss_bufs:
- * count packets that might be missed due to lost interrupts.
- */
-SYSCTL_DECL(_dev_netmap);
-static int ix_rx_miss, ix_rx_miss_bufs;
-int ix_crcstrip;
-SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
- CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
-SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
- CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
-SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
- CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
-
-
-static void
-set_crcstrip(struct ixgbe_hw *hw, int onoff)
-{
- /* crc stripping is set in two places:
- * IXGBE_HLREG0 (modified on init_locked and hw reset)
- * IXGBE_RDRXCTL (set by the original driver in
- * ixgbe_setup_hw_rsc() called in init_locked.
- * We disable the setting when netmap is compiled in).
- * We update the values here, but also in ixgbe.c because
- * init_locked sometimes is called outside our control.
- */
- uint32_t hl, rxc;
-
- hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (netmap_verbose)
- D("%s read HLREG 0x%x rxc 0x%x",
- onoff ? "enter" : "exit", hl, rxc);
- /* hw requirements ... */
- rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
- rxc |= IXGBE_RDRXCTL_RSCACKC;
- if (onoff && !ix_crcstrip) {
- /* keep the crc. Fast rx */
- hl &= ~IXGBE_HLREG0_RXCRCSTRP;
- rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
- } else {
- /* reset default mode */
- hl |= IXGBE_HLREG0_RXCRCSTRP;
- rxc |= IXGBE_RDRXCTL_CRCSTRIP;
- }
- if (netmap_verbose)
- D("%s write HLREG 0x%x rxc 0x%x",
- onoff ? "enter" : "exit", hl, rxc);
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
-}
-
-
-/*
- * Register/unregister. We are already under netmap lock.
- * Only called on the first register or the last unregister.
- */
-static int
-ixgbe_netmap_reg(struct netmap_adapter *na, int onoff)
-{
- struct ifnet *ifp = na->ifp;
- struct adapter *adapter = ifp->if_softc;
-
- IXGBE_CORE_LOCK(adapter);
- adapter->stop_locked(adapter);
-
- set_crcstrip(&adapter->hw, onoff);
- /* enable or disable flags and callbacks in na and ifp */
- if (onoff) {
- nm_set_native_flags(na);
- } else {
- nm_clear_native_flags(na);
- }
- adapter->init_locked(adapter); /* also enables intr */
- set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
- IXGBE_CORE_UNLOCK(adapter);
- return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
-}
-
-
-/*
- * Reconcile kernel and user view of the transmit ring.
- *
- * All information is in the kring.
- * Userspace wants to send packets up to the one before kring->rhead,
- * kernel knows kring->nr_hwcur is the first unsent packet.
- *
- * Here we push packets out (as many as possible), and possibly
- * reclaim buffers from previously completed transmission.
- *
- * The caller (netmap) guarantees that there is only one instance
- * running at any time. Any interference with other driver
- * methods should be handled by the individual drivers.
- */
-static int
-ixgbe_netmap_txsync(struct netmap_kring *kring, int flags)
-{
- struct netmap_adapter *na = kring->na;
- struct ifnet *ifp = na->ifp;
- struct netmap_ring *ring = kring->ring;
- u_int nm_i; /* index into the netmap ring */
- u_int nic_i; /* index into the NIC ring */
- u_int n;
- u_int const lim = kring->nkr_num_slots - 1;
- u_int const head = kring->rhead;
- /*
- * interrupts on every tx packet are expensive so request
- * them every half ring, or where NS_REPORT is set
- */
- u_int report_frequency = kring->nkr_num_slots >> 1;
-
- /* device-specific */
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
- int reclaim_tx;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
-
- /*
- * First part: process new packets to send.
- * nm_i is the current index in the netmap ring,
- * nic_i is the corresponding index in the NIC ring.
- * The two numbers differ because upon a *_init() we reset
- * the NIC ring but leave the netmap ring unchanged.
- * For the transmit ring, we have
- *
- * nm_i = kring->nr_hwcur
- * nic_i = IXGBE_TDT (not tracked in the driver)
- * and
- * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
- *
- * In this driver kring->nkr_hwofs >= 0, but for other
- * drivers it might be negative as well.
- */
-
- /*
- * If we have packets to send (kring->nr_hwcur != kring->rhead)
- * iterate over the netmap ring, fetch length and update
- * the corresponding slot in the NIC ring. Some drivers also
- * need to update the buffer's physical address in the NIC slot
- * even NS_BUF_CHANGED is not set (PNMB computes the addresses).
- *
- * The netmap_reload_map() calls is especially expensive,
- * even when (as in this case) the tag is 0, so do only
- * when the buffer has actually changed.
- *
- * If possible do not set the report/intr bit on all slots,
- * but only a few times per ring or when NS_REPORT is set.
- *
- * Finally, on 10G and faster drivers, it might be useful
- * to prefetch the next slot and txr entry.
- */
-
- nm_i = kring->nr_hwcur;
- if (nm_i != head) { /* we have new packets to send */
- nic_i = netmap_idx_k2n(kring, nm_i);
-
- __builtin_prefetch(&ring->slot[nm_i]);
- __builtin_prefetch(&txr->tx_buffers[nic_i]);
-
- for (n = 0; nm_i != head; n++) {
- struct netmap_slot *slot = &ring->slot[nm_i];
- u_int len = slot->len;
- uint64_t paddr;
- void *addr = PNMB(na, slot, &paddr);
-
- /* device-specific */
- union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
- struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
- int flags = (slot->flags & NS_REPORT ||
- nic_i == 0 || nic_i == report_frequency) ?
- IXGBE_TXD_CMD_RS : 0;
-
- /* prefetch for next round */
- __builtin_prefetch(&ring->slot[nm_i + 1]);
- __builtin_prefetch(&txr->tx_buffers[nic_i + 1]);
-
- NM_CHECK_ADDR_LEN(na, addr, len);
-
- if (slot->flags & NS_BUF_CHANGED) {
- /* buffer has changed, reload map */
- netmap_reload_map(na, txr->txtag, txbuf->map, addr);
- }
- slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
-
- /* Fill the slot in the NIC ring. */
- /* Use legacy descriptor, they are faster? */
- curr->read.buffer_addr = htole64(paddr);
- curr->read.olinfo_status = 0;
- curr->read.cmd_type_len = htole32(len | flags |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);
-
- /* make sure changes to the buffer are synced */
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_PREWRITE);
-
- nm_i = nm_next(nm_i, lim);
- nic_i = nm_next(nic_i, lim);
- }
- kring->nr_hwcur = head;
-
- /* synchronize the NIC ring */
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* (re)start the tx unit up to slot nic_i (excluded) */
- IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i);
- }
-
- /*
- * Second part: reclaim buffers for completed transmissions.
- * Because this is expensive (we read a NIC register etc.)
- * we only do it in specific cases (see below).
- */
- if (flags & NAF_FORCE_RECLAIM) {
- reclaim_tx = 1; /* forced reclaim */
- } else if (!nm_kr_txempty(kring)) {
- reclaim_tx = 0; /* have buffers, no reclaim */
- } else {
- /*
- * No buffers available. Locate previous slot with
- * REPORT_STATUS set.
- * If the slot has DD set, we can reclaim space,
- * otherwise wait for the next interrupt.
- * This enables interrupt moderation on the tx
- * side though it might reduce throughput.
- */
- struct ixgbe_legacy_tx_desc *txd =
- (struct ixgbe_legacy_tx_desc *)txr->tx_base;
-
- nic_i = txr->next_to_clean + report_frequency;
- if (nic_i > lim)
- nic_i -= lim + 1;
- // round to the closest with dd set
- nic_i = (nic_i < kring->nkr_num_slots / 4 ||
- nic_i >= kring->nkr_num_slots*3/4) ?
- 0 : report_frequency;
- reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD; // XXX cpu_to_le32 ?
- }
- if (reclaim_tx) {
- /*
- * Record completed transmissions.
- * We (re)use the driver's txr->next_to_clean to keep
- * track of the most recently completed transmission.
- *
- * The datasheet discourages the use of TDH to find
- * out the number of sent packets, but we only set
- * REPORT_STATUS in a few slots so TDH is the only
- * good way.
- */
- nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(kring->ring_id));
- if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
- D("TDH wrap %d", nic_i);
- nic_i -= kring->nkr_num_slots;
- }
- if (nic_i != txr->next_to_clean) {
- /* some tx completed, increment avail */
- txr->next_to_clean = nic_i;
- kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
- }
- }
-
- return 0;
-}
-
-
-/*
- * Reconcile kernel and user view of the receive ring.
- * Same as for the txsync, this routine must be efficient.
- * The caller guarantees a single invocations, but races against
- * the rest of the driver should be handled here.
- *
- * On call, kring->rhead is the first packet that userspace wants
- * to keep, and kring->rcur is the wakeup point.
- * The kernel has previously reported packets up to kring->rtail.
- *
- * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
- * of whether or not we received an interrupt.
- */
-static int
-ixgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
-{
- struct netmap_adapter *na = kring->na;
- struct ifnet *ifp = na->ifp;
- struct netmap_ring *ring = kring->ring;
- u_int nm_i; /* index into the netmap ring */
- u_int nic_i; /* index into the NIC ring */
- u_int n;
- u_int const lim = kring->nkr_num_slots - 1;
- u_int const head = kring->rhead;
- int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
-
- /* device-specific */
- struct adapter *adapter = ifp->if_softc;
- struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];
-
- if (head > lim)
- return netmap_ring_reinit(kring);
-
- /* XXX check sync modes */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- /*
- * First part: import newly received packets.
- *
- * nm_i is the index of the next free slot in the netmap ring,
- * nic_i is the index of the next received packet in the NIC ring,
- * and they may differ in case if_init() has been called while
- * in netmap mode. For the receive ring we have
- *
- * nic_i = rxr->next_to_check;
- * nm_i = kring->nr_hwtail (previous)
- * and
- * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
- *
- * rxr->next_to_check is set to 0 on a ring reinit
- */
- if (netmap_no_pendintr || force_update) {
- int crclen = (ix_crcstrip) ? 0 : 4;
- uint16_t slot_flags = kring->nkr_slot_flags;
-
- nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
- nm_i = netmap_idx_n2k(kring, nic_i);
-
- for (n = 0; ; n++) {
- union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
- uint32_t staterr = le32toh(curr->wb.upper.status_error);
-
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
- ring->slot[nm_i].flags = slot_flags;
- bus_dmamap_sync(rxr->ptag,
- rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
- nm_i = nm_next(nm_i, lim);
- nic_i = nm_next(nic_i, lim);
- }
- if (n) { /* update the state variables */
- if (netmap_no_pendintr && !force_update) {
- /* diagnostics */
- ix_rx_miss ++;
- ix_rx_miss_bufs += n;
- }
- rxr->next_to_check = nic_i;
- kring->nr_hwtail = nm_i;
- }
- kring->nr_kflags &= ~NKR_PENDINTR;
- }
-
- /*
- * Second part: skip past packets that userspace has released.
- * (kring->nr_hwcur to kring->rhead excluded),
- * and make the buffers available for reception.
- * As usual nm_i is the index in the netmap ring,
- * nic_i is the index in the NIC ring, and
- * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
- */
- nm_i = kring->nr_hwcur;
- if (nm_i != head) {
- nic_i = netmap_idx_k2n(kring, nm_i);
- for (n = 0; nm_i != head; n++) {
- struct netmap_slot *slot = &ring->slot[nm_i];
- uint64_t paddr;
- void *addr = PNMB(na, slot, &paddr);
-
- union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
- struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i];
-
- if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
- goto ring_reset;
-
- if (slot->flags & NS_BUF_CHANGED) {
- /* buffer has changed, reload map */
- netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
- slot->flags &= ~NS_BUF_CHANGED;
- }
- curr->wb.upper.status_error = 0;
- curr->read.pkt_addr = htole64(paddr);
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- nm_i = nm_next(nm_i, lim);
- nic_i = nm_next(nic_i, lim);
- }
- kring->nr_hwcur = head;
-
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * IMPORTANT: we must leave one free slot in the ring,
- * so move nic_i back by one unit
- */
- nic_i = nm_prev(nic_i, lim);
- IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i);
- }
-
- return 0;
-
-ring_reset:
- return netmap_ring_reinit(kring);
-}
-
-
-/*
- * The attach routine, called near the end of ixgbe_attach(),
- * fills the parameters for netmap_attach() and calls it.
- * It cannot fail, in the worst case (such as no memory)
- * netmap mode will be disabled and the driver will only
- * operate in standard mode.
- */
-void
-ixgbe_netmap_attach(struct adapter *adapter)
-{
- struct netmap_adapter na;
-
- bzero(&na, sizeof(na));
-
- na.ifp = adapter->ifp;
- na.na_flags = NAF_BDG_MAYSLEEP;
- na.num_tx_desc = adapter->num_tx_desc;
- na.num_rx_desc = adapter->num_rx_desc;
- na.nm_txsync = ixgbe_netmap_txsync;
- na.nm_rxsync = ixgbe_netmap_rxsync;
- na.nm_register = ixgbe_netmap_reg;
- na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
- netmap_attach(&na);
-}
-
-#endif /* DEV_NETMAP */
-
-/* end of file */
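
The rxsync logic removed above leans on netmap's ring-index invariant, nm_i == (nic_i + kring->nkr_hwofs) % ring_size, and on stepping the tail back one slot so the NIC can never fill the ring completely. Below is a minimal userspace sketch of that arithmetic, assuming a 512-slot ring; nm_next()/nm_prev() mirror the netmap helpers of the same names, while idx_n2k() and the values in main() are purely illustrative, not the in-tree implementation.

/*
 * Minimal userspace sketch of the ring-index arithmetic the deleted
 * rxsync relies on.  nm_next()/nm_prev() mirror the netmap helpers of
 * the same names; idx_n2k() follows the netmap_idx_n2k() convention of
 * folding the offset back into [0, ring_size).  The 512-slot ring and
 * the offsets in main() are hypothetical demo values.
 */
#include <stdio.h>

static int
nm_next(int i, int lim)		/* lim == ring_size - 1 */
{
	return ((i == lim) ? 0 : i + 1);
}

static int
nm_prev(int i, int lim)
{
	return ((i == 0) ? lim : i - 1);
}

/* nm_i == (nic_i + nkr_hwofs) % ring_size, from the comments above */
static int
idx_n2k(int nic_i, int hwofs, int ring_size)
{
	int idx = nic_i + hwofs;

	if (idx < 0)
		return (idx + ring_size);
	if (idx >= ring_size)
		return (idx - ring_size);
	return (idx);
}

int
main(void)
{
	int lim = 511;		/* 512-slot ring, chosen for the demo */

	printf("%d\n", nm_next(511, lim));	/* wraps around to 0 */
	printf("%d\n", nm_prev(0, lim));	/* tail steps back to 511 */
	printf("%d\n", idx_n2k(2, -5, 512));	/* negative hwofs folds to 509 */
	return (0);
}

Stepping nic_i back with nm_prev() before writing the tail register is what the "leave one free slot" comment above refers to: a completely full ring would otherwise be indistinguishable from an empty one.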
Index: sys/dev/ixgbe/ixgbe_osdep.h
===================================================================
--- sys/dev/ixgbe/ixgbe_osdep.h
+++ sys/dev/ixgbe/ixgbe_osdep.h
@@ -1,31 +1,31 @@
/******************************************************************************
- Copyright (c) 2001-2017, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
+
+ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
+
+ 1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Index: sys/dev/ixgbe/ixgbe_osdep.c
===================================================================
--- sys/dev/ixgbe/ixgbe_osdep.c
+++ sys/dev/ixgbe/ixgbe_osdep.c
@@ -1,31 +1,31 @@
/******************************************************************************
- Copyright (c) 2001-2017, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
+
+ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
+
+ 1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Index: sys/dev/ixgbe/ixgbe_phy.c
===================================================================
--- sys/dev/ixgbe/ixgbe_phy.c
+++ sys/dev/ixgbe/ixgbe_phy.c
@@ -1490,21 +1490,18 @@
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type =
- ixgbe_phy_sfp_passive_unknown;
- else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
- hw->phy.type =
- ixgbe_phy_sfp_active_unknown;
- else
- hw->phy.type = ixgbe_phy_sfp_unknown;
+ hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
- IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_active_unknown;
status = IXGBE_SUCCESS;
goto out;
}
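
The ixgbe_phy.c hunk above moves the passive/active DA-cable refinement out of the unknown-vendor default case and into the "allow any DA cable vendor" path, so the default case now reports a plain unknown SFP and the refinement applies to any DA cable. A standalone sketch of the resulting precedence follows; the mask values are assumed to match IXGBE_SFF_DA_PASSIVE_CABLE/IXGBE_SFF_DA_ACTIVE_CABLE, and the enum and function names are illustrative:

/*
 * Sketch of the reordered DA-cable classification.  The two mask
 * values are assumptions standing in for IXGBE_SFF_DA_PASSIVE_CABLE
 * and IXGBE_SFF_DA_ACTIVE_CABLE; everything else is illustrative.
 */
#include <stdio.h>

#define SFF_DA_PASSIVE_CABLE	0x04
#define SFF_DA_ACTIVE_CABLE	0x08

enum sfp_type { SFP_UNKNOWN, SFP_PASSIVE_UNKNOWN, SFP_ACTIVE_UNKNOWN };

static enum sfp_type
classify_da(unsigned char cable_tech)
{
	/* the default case now always starts from plain unknown */
	enum sfp_type type = SFP_UNKNOWN;

	/* refinement happens only in the any-DA-vendor path */
	if (cable_tech & SFF_DA_PASSIVE_CABLE)
		type = SFP_PASSIVE_UNKNOWN;
	else if (cable_tech & SFF_DA_ACTIVE_CABLE)
		type = SFP_ACTIVE_UNKNOWN;
	return (type);
}

int
main(void)
{
	printf("%d %d %d\n", classify_da(0x00),
	    classify_da(SFF_DA_PASSIVE_CABLE),
	    classify_da(SFF_DA_ACTIVE_CABLE));	/* prints: 0 1 2 */
	return (0);
}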
Index: sys/dev/ixgbe/ixgbe_sriov.h
===================================================================
--- sys/dev/ixgbe/ixgbe_sriov.h
+++ sys/dev/ixgbe/ixgbe_sriov.h
@@ -41,6 +41,7 @@
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
+#include <net/iflib.h>
#include "ixgbe_mbx.h"
#define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
@@ -57,7 +58,7 @@
#define IXGBE_VF_GET_QUEUES_RESP_LEN 5
-#define IXGBE_API_VER_1_0 0
+#define IXGBE_API_VER_1_0 0
#define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
#define IXGBE_API_VER_1_1 2
#define IXGBE_API_VER_UNKNOWN UINT16_MAX
@@ -66,15 +67,17 @@
#define IXGBE_32_VM 32
#define IXGBE_64_VM 64
-int ixgbe_add_vf(device_t, u16, const nvlist_t *);
-int ixgbe_init_iov(device_t, u16, const nvlist_t *);
-void ixgbe_uninit_iov(device_t);
+void ixgbe_if_init(if_ctx_t ctx);
+int ixgbe_if_iov_vf_add(if_ctx_t, u16, const nvlist_t *);
+int ixgbe_if_iov_init(if_ctx_t, u16, const nvlist_t *);
+void ixgbe_if_iov_uninit(if_ctx_t);
void ixgbe_initialize_iov(struct adapter *);
void ixgbe_recalculate_max_frame(struct adapter *);
void ixgbe_ping_all_vfs(struct adapter *);
int ixgbe_pci_iov_detach(device_t);
void ixgbe_define_iov_schemas(device_t, int *);
void ixgbe_align_all_queue_indices(struct adapter *);
+int ixgbe_vf_que_index(int, int, int);
u32 ixgbe_get_mtqc(int);
u32 ixgbe_get_mrqc(int);
@@ -91,12 +94,12 @@
#define ixgbe_pci_iov_detach(_a) 0
#define ixgbe_define_iov_schemas(_a,_b)
#define ixgbe_align_all_queue_indices(_a)
+#define ixgbe_vf_que_index(_a, _b, _c) (_c)
#define ixgbe_get_mtqc(_a) IXGBE_MTQC_64Q_1PB
#define ixgbe_get_mrqc(_a) 0
#endif /* PCI_IOV */
-void ixgbe_handle_mbx(void *, int);
-int ixgbe_vf_que_index(int, int, int);
+void ixgbe_handle_mbx(void *);
#endif
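
The header changes above swap device_t for if_ctx_t in the SR-IOV entry points, reflecting the iflib conversion: these functions are no longer invoked directly but through iflib's kobj method table. The fragment below is a hedged sketch of how such a table is typically populated, assuming the ifdi_iov_* method names declared in ifdi_if.m; the actual table lives in if_ix.c and is not part of this hunk.

/*
 * Illustrative iflib method-table fragment wiring the renamed
 * SR-IOV entry points.  The real table in if_ix.c carries many
 * more ifdi_* methods than shown here; this is a sketch only.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <net/iflib.h>
#include "ifdi_if.h"

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_iov_init,   ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
	/* ... remaining ifdi_* methods elided ... */
	DEVMETHOD_END
};

Note also that ixgbe_handle_mbx() loses its second argument: under iflib the mailbox handler is driven from the admin path rather than a taskqueue_task, so the old (void *, int) task signature no longer applies.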
Index: sys/dev/ixgbe/ixgbe_type.h
===================================================================
--- sys/dev/ixgbe/ixgbe_type.h
+++ sys/dev/ixgbe/ixgbe_type.h
@@ -1562,7 +1562,7 @@
#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */
#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
-#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */
#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */
Index: sys/dev/ixgbe/ixgbe_vf.h
===================================================================
--- sys/dev/ixgbe/ixgbe_vf.h
+++ sys/dev/ixgbe/ixgbe_vf.h
@@ -1,31 +1,31 @@
/******************************************************************************
- Copyright (c) 2001-2017, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
+
+ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
+
+ 1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Index: sys/modules/ix/Makefile
===================================================================
--- sys/modules/ix/Makefile
+++ sys/modules/ix/Makefile
@@ -3,10 +3,9 @@
.PATH: ${SRCTOP}/sys/dev/ixgbe
KMOD = if_ix
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ix.c if_bypass.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c
-SRCS += ixgbe_netmap.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
Index: sys/modules/ixv/Makefile
===================================================================
--- sys/modules/ixv/Makefile
+++ sys/modules/ixv/Makefile
@@ -3,9 +3,9 @@
.PATH: ${SRCTOP}/sys/dev/ixgbe
KMOD = if_ixv
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
-SRCS += if_ixv.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c ixgbe_netmap.c
+SRCS += if_ixv.c if_fdir.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
