D5214.id13241.diff

Index: sys/dev/ixl/if_ixl.c
===================================================================
--- sys/dev/ixl/if_ixl.c
+++ sys/dev/ixl/if_ixl.c
@@ -45,6 +45,8 @@
#include <net/rss_config.h>
#endif
+#include "ifdi_if.h"
+
/*********************************************************************
* Driver version
*********************************************************************/
@@ -55,71 +57,48 @@
*
* Used by probe to select devices to load on
* Last field stores an index into ixl_strings
- * Last entry must be all 0s
+ * Last entry must be PVID_END
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
*********************************************************************/
-static ixl_vendor_info_t ixl_vendor_info_array[] =
-{
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
+static pci_vendor_info_t ixl_vendor_info_array[] =
+{
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, "Intel(R) Ethernet Connection XL710 Driver"),
#ifdef X722_SUPPORT
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection XL710 Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection XL710 Driver"),
#endif
/* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixl_strings[] = {
- "Intel(R) Ethernet Connection XL710 Driver"
+ PVID_END
};
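
With this change device identification moves into iflib's generic probe. A minimal sketch of the table walk iflib is assumed to perform over ixl_vendor_info_array (the pvi_* field names are illustrative, not copied from iflib):

    static int
    example_probe(device_t dev, const pci_vendor_info_t *ent)
    {
            for (; ent->pvi_vendor_id != 0; ent++) {
                    if (pci_get_vendor(dev) == ent->pvi_vendor_id &&
                        pci_get_device(dev) == ent->pvi_device_id) {
                            device_set_desc(dev, ent->pvi_name);
                            return (BUS_PROBE_DEFAULT);
                    }
            }
            return (ENXIO);  /* hit PVID_END: not one of ours */
    }
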
/*********************************************************************
* Function prototypes
*********************************************************************/
-static int ixl_probe(device_t);
-static int ixl_attach(device_t);
-static int ixl_detach(device_t);
-static int ixl_shutdown(device_t);
+static void *ixl_register(device_t);
static int ixl_get_hw_capabilities(struct ixl_pf *);
-static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
-static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixl_init(void *);
-static void ixl_init_locked(struct ixl_pf *);
-static void ixl_stop(struct ixl_pf *);
-static void ixl_media_status(struct ifnet *, struct ifmediareq *);
-static int ixl_media_change(struct ifnet *);
static void ixl_update_link_status(struct ixl_pf *);
static int ixl_allocate_pci_resources(struct ixl_pf *);
static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
-static int ixl_setup_stations(struct ixl_pf *);
static int ixl_switch_config(struct ixl_pf *);
static int ixl_initialize_vsi(struct ixl_vsi *);
-static int ixl_assign_vsi_msix(struct ixl_pf *);
-static int ixl_assign_vsi_legacy(struct ixl_pf *);
-static int ixl_init_msix(struct ixl_pf *);
static void ixl_configure_msix(struct ixl_pf *);
static void ixl_configure_itr(struct ixl_pf *);
static void ixl_configure_legacy(struct ixl_pf *);
static void ixl_free_pci_resources(struct ixl_pf *);
-static void ixl_local_timer(void *);
static int ixl_setup_interface(device_t, struct ixl_vsi *);
static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void ixl_config_rss(struct ixl_vsi *);
@@ -130,21 +109,13 @@
static int ixl_enable_rings(struct ixl_vsi *);
static int ixl_disable_rings(struct ixl_vsi *);
static void ixl_enable_intr(struct ixl_vsi *);
-static void ixl_disable_intr(struct ixl_vsi *);
-static void ixl_disable_rings_intr(struct ixl_vsi *);
static void ixl_enable_adminq(struct i40e_hw *);
static void ixl_disable_adminq(struct i40e_hw *);
-static void ixl_enable_queue(struct i40e_hw *, int);
-static void ixl_disable_queue(struct i40e_hw *, int);
static void ixl_enable_legacy(struct i40e_hw *);
static void ixl_disable_legacy(struct i40e_hw *);
-static void ixl_set_promisc(struct ixl_vsi *);
-static void ixl_add_multi(struct ixl_vsi *);
static void ixl_del_multi(struct ixl_vsi *);
-static void ixl_register_vlan(void *, struct ifnet *, u16);
-static void ixl_unregister_vlan(void *, struct ifnet *, u16);
static void ixl_setup_vlan_filters(struct ixl_vsi *);
static void ixl_init_filters(struct ixl_vsi *);
@@ -166,14 +137,11 @@
#endif
/* The MSI/X Interrupt handlers */
-static void ixl_intr(void *);
-static void ixl_msix_que(void *);
-static void ixl_msix_adminq(void *);
+int ixl_intr(void *);
+static int ixl_msix_que(void *);
+static int ixl_msix_adminq(void *);
static void ixl_handle_mdd_event(struct ixl_pf *);
-/* Deferred interrupt tasklets */
-static void ixl_do_adminq(void *, int);
-
/* Sysctl handlers */
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
@@ -208,32 +176,62 @@
#ifdef PCI_IOV
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
-static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
-static void ixl_iov_uninit(device_t dev);
-static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
+static int ixl_if_iov_init(if_ctx_t, uint16_t num_vfs, const nvlist_t*);
+static void ixl_if_iov_uninit(if_ctx_t);
+static int ixl_if_vf_add(if_ctx_t, uint16_t vfnum, const nvlist_t*);
static void ixl_handle_vf_msg(struct ixl_pf *,
struct i40e_arq_event_info *);
-static void ixl_handle_vflr(void *arg, int pending);
+static void ixl_if_handle_vflr(if_ctx_t ctx);
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif
+static int ixl_if_attach_pre(if_ctx_t);
+static int ixl_if_attach_post(if_ctx_t);
+static int ixl_if_msix_intr_assign(if_ctx_t, int);
+
+static int ixl_if_detach(if_ctx_t);
+
+static void ixl_if_init(if_ctx_t ctx);
+static void ixl_if_stop(if_ctx_t ctx);
+
+static void ixl_if_intr_enable(if_ctx_t ctx);
+static void ixl_if_intr_disable(if_ctx_t ctx);
+static void ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
+static void ixl_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid);
+
+static void ixl_if_multi_set(if_ctx_t);
+static void ixl_if_update_admin_status(if_ctx_t);
+static int ixl_if_mtu_set(if_ctx_t, uint32_t);
+
+static void ixl_if_media_status(if_ctx_t, struct ifmediareq *);
+
+static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+
+static void ixl_if_timer(if_ctx_t, uint16_t);
+static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
+
/*********************************************************************
* FreeBSD Device Interface Entry Points
*********************************************************************/
static device_method_t ixl_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ixl_probe),
- DEVMETHOD(device_attach, ixl_attach),
- DEVMETHOD(device_detach, ixl_detach),
- DEVMETHOD(device_shutdown, ixl_shutdown),
+ DEVMETHOD(device_register, ixl_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_suspend),
#ifdef PCI_IOV
- DEVMETHOD(pci_iov_init, ixl_iov_init),
- DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
- DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
+ DEVMETHOD(pci_iov_init, iflib_device_iov_init),
+ DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
{0, 0}
};
@@ -247,14 +245,43 @@
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
-#ifdef DEV_NETMAP
-MODULE_DEPEND(ixl, netmap, 1, 1, 1);
-#endif /* DEV_NETMAP */
+MODULE_DEPEND(ixl, iflib, 1, 1, 1);
+
+static device_method_t ixl_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
+ DEVMETHOD(ifdi_detach, ixl_if_detach),
+ DEVMETHOD(ifdi_init, ixl_if_init),
+ DEVMETHOD(ifdi_stop, ixl_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_disable, ixl_if_intr_disable),
+ DEVMETHOD(ifdi_intr_enable, ixl_if_intr_enable),
+ DEVMETHOD(ifdi_queue_intr_enable, ixl_if_queue_intr_enable),
+ DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
+ DEVMETHOD(ifdi_queues_alloc, ixl_if_queues_alloc),
+ DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
+ DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
+ DEVMETHOD(ifdi_media_status, ixl_if_media_status),
+ DEVMETHOD(ifdi_media_change, ixl_if_media_change),
+ DEVMETHOD(ifdi_timer, ixl_if_timer),
+ DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
+ DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
+ DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
+#ifdef PCI_IOV
+ DEVMETHOD(ifdi_vflr_handle, ixl_if_handle_vflr),
+ DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
+ DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
+ DEVMETHOD(ifdi_iov_vf_add, ixl_if_vf_add),
+#endif
+ DEVMETHOD_END
+};
+
+static driver_t ixl_if_driver = {
+ "ixl_if", ixl_if_methods, sizeof(struct ixl_pf),
+};
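
"ixl_if" is not attached through a bus; iflib instantiates this kobj class itself, and the sizeof(struct ixl_pf) here is what sizes the softc. Inside any ifdi_* callback the driver state is therefore recovered with:

    struct ixl_pf *pf = iflib_get_softc(ctx);  /* valid in every ifdi method */
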
-/*
-** Global reset mutex
-*/
-static struct mtx ixl_reset_mtx;
/*
** TUNEABLE PARAMETERS:
@@ -324,11 +351,6 @@
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
-#ifdef DEV_NETMAP
-#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
-#include <dev/netmap/if_ixl_netmap.h>
-#endif /* DEV_NETMAP */
-
static char *ixl_fc_string[6] = {
"None",
"Rx",
@@ -338,68 +360,54 @@
"Default"
};
-static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
+extern struct if_txrx ixl_txrx;
+
+static struct if_shared_ctx ixl_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
+ .isc_tx_maxsize = IXL_TSO_SIZE,
+
+ .isc_tx_maxsegsize = PAGE_SIZE*4,
+
+ .isc_rx_maxsize = PAGE_SIZE*4,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = PAGE_SIZE*4,
+ .isc_ntxd = DEFAULT_RING,
+ .isc_nrxd = DEFAULT_RING,
+ .isc_nfl = 1,
+ .isc_qsizes[0] = roundup2((DEFAULT_RING * sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN),
+ .isc_qsizes[1] = roundup2(DEFAULT_RING *
+ sizeof(union i40e_rx_desc), DBA_ALIGN),
+ .isc_nqs = 2,
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ixl_vendor_info_array,
+ .isc_driver_version = ixl_driver_version,
+ .isc_txrx = &ixl_txrx,
+ .isc_driver = &ixl_if_driver,
+};
+
+if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
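
Worked example of the isc_qsizes arithmetic, assuming DEFAULT_RING = 1024, DBA_ALIGN = 128, a 16-byte struct i40e_tx_desc and a 32-byte union i40e_rx_desc (values taken from the i40e definitions, not restated in this diff):

    /*
     * isc_qsizes[0] = roundup2(1024 * 16 + 4, 128) = roundup2(16388, 128) = 16512
     * isc_qsizes[1] = roundup2(1024 * 32, 128)     = 32768
     * The extra u32 on the TX ring is presumably the head-writeback word.
     */
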
+MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-/*********************************************************************
- * Device identification routine
- *
- * ixl_probe determines if the driver should be loaded on
- * the hardware based on PCI vendor/device id of the device.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-ixl_probe(device_t dev)
+static void *
+ixl_register(device_t dev)
{
- ixl_vendor_info_t *ent;
-
- u16 pci_vendor_id, pci_device_id;
- u16 pci_subvendor_id, pci_subdevice_id;
- char device_name[256];
- static bool lock_init = FALSE;
-
- INIT_DEBUGOUT("ixl_probe: begin");
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixl_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
+ ixl_sctx->isc_ntxd = ixl_ringsz;
+ ixl_sctx->isc_nrxd = ixl_ringsz;
+ ixl_sctx->isc_qsizes[0] = roundup2((ixl_ringsz * sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ ixl_sctx->isc_qsizes[1] = roundup2(ixl_ringsz *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(device_name, "%s, Version - %s",
- ixl_strings[ent->index],
- ixl_driver_version);
- device_set_desc_copy(dev, device_name);
- /* One shot mutex init */
- if (lock_init == FALSE) {
- lock_init = TRUE;
- mtx_init(&ixl_reset_mtx,
- "ixl_reset",
- "IXL RESET Lock", MTX_DEF);
- }
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
+ return (ixl_sctx);
}
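
ixl_register() is the only driver hook that runs before iflib consumes the shared ctx, so it is where the ring-size tunable is folded into isc_ntxd/isc_nrxd and the queue sizes. Assuming the tunable keeps the hw.ixl.ringsz name used by the existing driver, it would be set as:

    # /boot/loader.conf
    hw.ixl.ringsz="2048"
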
+
/*********************************************************************
* Device initialization routine
*
@@ -411,37 +419,41 @@
*********************************************************************/
static int
-ixl_attach(device_t dev)
+ixl_if_attach_pre(if_ctx_t ctx)
{
+ device_t dev;
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
- u16 bus;
int error = 0;
-#ifdef PCI_IOV
- nvlist_t *pf_schema, *vf_schema;
- int iov_error;
-#endif
INIT_DEBUGOUT("ixl_attach: begin");
- /* Allocate, clear, and link in our primary soft structure */
- pf = device_get_softc(dev);
- pf->dev = pf->osdep.dev = dev;
+ dev = iflib_get_dev(ctx);
+ pf = iflib_get_softc(ctx);
hw = &pf->hw;
+ vsi = &pf->vsi;
+ vsi->back = pf;
+ vsi->hw = &pf->hw;
+ vsi->id = 0;
+ vsi->num_vlans = 0;
+ vsi->ctx = ctx;
+ vsi->media = iflib_get_media(ctx);
+ vsi->shared = iflib_get_softc_ctx(ctx);
+ pf->dev = iflib_get_dev(ctx);
+
+ /*
+ * These are the same across all current ixl models
+ */
+ vsi->shared->isc_tx_nsegments = IXL_MAX_TX_SEGS;
+ vsi->shared->isc_msix_bar = PCIR_BAR(IXL_BAR);
+
/*
** Note this assumes we have a single embedded VSI,
** this could be enhanced later to allocate multiple
*/
vsi = &pf->vsi;
- vsi->dev = pf->dev;
-
- /* Core Lock Init*/
- IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
-
- /* Set up the timer callout */
- callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Set up sysctls */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
@@ -540,6 +552,8 @@
hw->bus.func = pci_get_function(dev);
pf->vc_debug_lvl = 1;
+ hw->back = &pf->osdep;
+ pf->osdep.dev = dev;
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
@@ -627,46 +641,73 @@
goto err_mac_hmc;
}
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
- i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+ iflib_set_mac(ctx, hw->mac.addr);
- /* Set up VSI and queues */
- if (ixl_setup_stations(pf) != 0) {
- device_printf(dev, "setup stations failed!\n");
- error = ENOMEM;
- goto err_mac_hmc;
- }
+ i40e_get_port_mac_addr(hw, hw->mac.port_addr);
/* Initialize mac filter list for VSI */
SLIST_INIT(&vsi->ftl);
+ device_printf(dev, "%s success!\n", __FUNCTION__);
+ return (0);
- /* Set up interrupt routing here */
- if (pf->msix > 1)
- error = ixl_assign_vsi_msix(pf);
- else
- error = ixl_assign_vsi_legacy(pf);
- if (error)
- goto err_late;
+err_mac_hmc:
+ i40e_shutdown_lan_hmc(hw);
+err_get_cap:
+ i40e_shutdown_adminq(hw);
+err_out:
+ ixl_free_pci_resources(pf);
+ ixl_free_mac_filters(vsi);
+ return (error);
+}
+
+static int
+ixl_if_attach_post(if_ctx_t ctx)
+{
+ device_t dev;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ int error = 0;
+ u16 bus;
+#ifdef PCI_IOV
+ nvlist_t *pf_schema, *vf_schema;
+ int iov_error;
+#endif
+
+ INIT_DEBUGOUT("ixl_attach: begin");
+
+ dev = iflib_get_dev(ctx);
+ vsi = iflib_get_softc(ctx);
+ vsi->ifp = iflib_get_ifp(ctx);
+ pf = (struct ixl_pf *)vsi;
+ hw = &pf->hw;
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4)) {
i40e_msec_delay(75);
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error)
+ if (error) {
device_printf(dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
+ goto err_mac_hmc;
+ }
}
/* Determine link state */
i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
i40e_get_link_status(hw, &pf->link_up);
- /* Setup OS specific network interface */
if (ixl_setup_interface(dev, vsi) != 0) {
device_printf(dev, "interface setup failed!\n");
error = EIO;
- goto err_late;
}
+ if (error) {
+ device_printf(dev, "Interface setup failed: %d\n", error);
+ goto err_mac_hmc;
+ } else
+ device_printf(dev, "%s success!\n", __FUNCTION__);
+
error = ixl_switch_config(pf);
if (error) {
device_printf(dev, "Initial switch config failed: %d\n", error);
@@ -688,12 +729,6 @@
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
- /* Register for VLAN events */
- vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
- vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
-
#ifdef PCI_IOV
/* SR-IOV is only supported when MSI-X is in use. */
if (pf->msix > 1) {
@@ -714,24 +749,14 @@
iov_error);
}
#endif
-
-#ifdef DEV_NETMAP
- ixl_netmap_attach(vsi);
-#endif /* DEV_NETMAP */
INIT_DEBUGOUT("ixl_attach: end");
+ device_printf(dev, "%s success!\n", __FUNCTION__);
return (0);
-
-err_late:
- if (vsi->ifp != NULL)
- if_free(vsi->ifp);
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
-err_get_cap:
i40e_shutdown_adminq(hw);
-err_out:
ixl_free_pci_resources(pf);
- ixl_free_vsi(vsi);
- IXL_PF_LOCK_DESTROY(pf);
+ ixl_free_mac_filters(vsi);
return (error);
}
@@ -746,12 +771,11 @@
*********************************************************************/
static int
-ixl_detach(device_t dev)
+ixl_if_detach(if_ctx_t ctx)
{
- struct ixl_pf *pf = device_get_softc(dev);
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
i40e_status status;
#ifdef PCI_IOV
int error;
@@ -759,82 +783,31 @@
INIT_DEBUGOUT("ixl_detach: begin");
- /* Make sure VLANS are not using driver */
- if (vsi->ifp->if_vlantrunk != NULL) {
- device_printf(dev,"Vlan in use, detach first\n");
- return (EBUSY);
- }
-
#ifdef PCI_IOV
- error = pci_iov_detach(dev);
+ error = pci_iov_detach(iflib_get_dev(ctx));
if (error != 0) {
- device_printf(dev, "SR-IOV in use; detach first.\n");
+ device_printf(iflib_get_dev(ctx), "SR-IOV in use; detach first.\n");
return (error);
}
#endif
- ether_ifdetach(vsi->ifp);
- if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_stop(pf);
- IXL_PF_UNLOCK(pf);
- }
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_drain(que->tq, &que->task);
- taskqueue_drain(que->tq, &que->tx_task);
- taskqueue_free(que->tq);
- }
- }
-
/* Shutdown LAN HMC */
status = i40e_shutdown_lan_hmc(hw);
if (status)
- device_printf(dev,
+ device_printf(iflib_get_dev(ctx),
"Shutdown LAN HMC failed with code %d\n", status);
/* Shutdown admin queue */
status = i40e_shutdown_adminq(hw);
if (status)
- device_printf(dev,
+ device_printf(iflib_get_dev(ctx),
"Shutdown Admin queue failed with code %d\n", status);
- /* Unregister VLAN events */
- if (vsi->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
- if (vsi->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
-
- callout_drain(&pf->timer);
-#ifdef DEV_NETMAP
- netmap_detach(vsi->ifp);
-#endif /* DEV_NETMAP */
ixl_free_pci_resources(pf);
- bus_generic_detach(dev);
- if_free(vsi->ifp);
- ixl_free_vsi(vsi);
- IXL_PF_LOCK_DESTROY(pf);
- return (0);
-}
-
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
-
-static int
-ixl_shutdown(device_t dev)
-{
- struct ixl_pf *pf = device_get_softc(dev);
- IXL_PF_LOCK(pf);
- ixl_stop(pf);
- IXL_PF_UNLOCK(pf);
+ ixl_free_mac_filters(vsi);
return (0);
}
-
/*********************************************************************
*
* Get the hardware capabilities
@@ -846,7 +819,7 @@
{
struct i40e_aqc_list_capabilities_element_resp *buf;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(((struct ixl_vsi *)pf)->ctx);
int error, len;
u16 needed;
bool again = TRUE;
@@ -854,7 +827,7 @@
len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
- malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate cap memory\n");
return (ENOMEM);
}
@@ -862,7 +835,7 @@
/* This populates the hw struct */
error = i40e_aq_discover_capabilities(hw, buf, len,
&needed, i40e_aqc_opc_list_func_capabilities, NULL);
- free(buf, M_DEVBUF);
+ free(buf, M_IXL);
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
(again == TRUE)) {
/* retry once with a larger buffer */
@@ -893,226 +866,27 @@
return (error);
}
-static void
-ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
-{
- device_t dev = vsi->dev;
-
- /* Enable/disable TXCSUM/TSO4 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- ifp->if_capenable |= IFCAP_TXCSUM;
- /* enable TXCSUM, restore TSO if previously enabled */
- if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable |= IFCAP_TSO4;
- }
- }
- else if (mask & IFCAP_TSO4) {
- ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- device_printf(dev,
- "TSO4 requires txcsum, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM)
- ifp->if_capenable &= ~IFCAP_TXCSUM;
- else if (mask & IFCAP_TSO4)
- ifp->if_capenable |= IFCAP_TSO4;
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && (ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
- device_printf(dev,
- "TSO4 requires txcsum, disabling both...\n");
- } else if (mask & IFCAP_TSO4)
- ifp->if_capenable &= ~IFCAP_TSO4;
- }
-
- /* Enable/disable TXCSUM_IPV6/TSO6 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
- if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable |= IFCAP_TSO6;
- }
- } else if (mask & IFCAP_TSO6) {
- ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- device_printf(dev,
- "TSO6 requires txcsum6, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6)
- ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
- else if (mask & IFCAP_TSO6)
- ifp->if_capenable |= IFCAP_TSO6;
- } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && (ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- device_printf(dev,
- "TSO6 requires txcsum6, disabling both...\n");
- } else if (mask & IFCAP_TSO6)
- ifp->if_capenable &= ~IFCAP_TSO6;
- }
-}
-
/*********************************************************************
- * Ioctl entry point
+ * Ioctl MTU entry point
*
- * ixl_ioctl is called when the user wants to configure the
- * interface.
*
- * return 0 on success, positive on failure
+ * return 0 on success, EINVAL on failure
**********************************************************************/
-
static int
-ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct ifreq *ifr = (struct ifreq *) data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixl_init(pf);
-#ifdef INET
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
-#endif
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXL_MAX_FRAME -
- ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
- error = EINVAL;
- } else {
- IXL_PF_LOCK(pf);
- ifp->if_mtu = ifr->ifr_mtu;
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXL_PF_LOCK(pf);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if ((ifp->if_flags ^ pf->if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- ixl_set_promisc(vsi);
- }
- } else
- ixl_init_locked(pf);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixl_stop(pf);
- pf->if_flags = ifp->if_flags;
- IXL_PF_UNLOCK(pf);
- break;
- case SIOCADDMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_add_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_del_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
-#ifdef IFM_ETH_XTYPE
- case SIOCGIFXMEDIA:
-#endif
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
-
- ixl_cap_txcsum_tso(vsi, ifp, mask);
-
- if (mask & IFCAP_RXCSUM)
- ifp->if_capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- VLAN_CAPABILITIES(ifp);
-
- break;
- }
-
- default:
- IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
+ if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
+ ETHER_VLAN_ENCAP_LEN)
+ return (EINVAL);
- return (error);
+ vsi->max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ ETHER_VLAN_ENCAP_LEN;
+ return (0);
}
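
For reference, the frame-size arithmetic above, assuming IXL_MAX_FRAME is 9728 (0x2600) as in the existing ixl headers:

    /*
     * largest accepted MTU = 9728 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4)
     *                             - ETHER_VLAN_ENCAP_LEN (4) = 9706
     * mtu = 1500  ->  vsi->max_frame_size = 1522
     */
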
-
/*********************************************************************
* Init entry point
*
@@ -1125,22 +899,21 @@
**********************************************************************/
static void
-ixl_init_locked(struct ixl_pf *pf)
+ixl_if_init(if_ctx_t ctx)
{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ device_t dev = iflib_get_dev(ctx);
+
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
struct i40e_filter_control_settings filter;
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;
- mtx_assert(&pf->pf_mtx, MA_OWNED);
INIT_DEBUGOUT("ixl_init: begin");
- ixl_stop(pf);
/* Get the latest mac address... User might use a LAA */
- bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+ bcopy(IF_LLADDR(iflib_get_ifp(ctx)), tmpaddr,
I40E_ETH_LENGTH_OF_ADDRESS);
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
@@ -1159,15 +932,6 @@
}
}
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
-
/* Set up the device filtering */
bzero(&filter, sizeof(filter));
filter.enable_ethtype = TRUE;
@@ -1195,9 +959,6 @@
/* Setup vlan's if needed */
ixl_setup_vlan_filters(vsi);
- /* Start the local timer */
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
-
/* Set up MSI/X routing and the ITR settings */
if (ixl_enable_msix) {
ixl_configure_msix(pf);
@@ -1215,88 +976,27 @@
int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
TRUE, 0, NULL);
if (aq_error)
- device_printf(vsi->dev,
+ device_printf(dev,
"aq_set_mac_config in init error, code %d\n",
aq_error);
-
- /* And now turn on interrupts */
- ixl_enable_intr(vsi);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- return;
-}
-
-static void
-ixl_init(void *arg)
-{
- struct ixl_pf *pf = arg;
-
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- return;
-}
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-static void
-ixl_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- IXL_TX_LOCK(txr);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
- /* Reenable this interrupt - hmmm */
- ixl_enable_queue(hw, que->me);
- return;
}
-
/*********************************************************************
*
* Legacy Interrupt Service routine
*
**********************************************************************/
-void
+int
ixl_intr(void *arg)
{
struct ixl_pf *pf = arg;
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
- struct ifnet *ifp = vsi->ifp;
- struct tx_ring *txr = &que->txr;
u32 reg, icr0, mask;
- bool more_tx, more_rx;
++que->irqs;
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
icr0 = rd32(hw, I40E_PFINT_ICR0);
reg = rd32(hw, I40E_PFINT_DYN_CTL0);
@@ -1307,22 +1007,14 @@
#ifdef PCI_IOV
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ iflib_iov_intr_deferred(vsi->ctx);
#endif
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- taskqueue_enqueue(pf->tq, &pf->adminq);
- return;
+ iflib_admin_intr_deferred(vsi->ctx);
+ return (FILTER_HANDLED);
}
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
/* re-enable other interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, mask);
@@ -1335,10 +1027,7 @@
reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
wr32(hw, I40E_QINT_TQCTL(0), reg);
-
- ixl_enable_legacy(hw);
-
- return;
+ return (FILTER_SCHEDULE_THREAD);
}
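
The handler now follows the intr_filter contract instead of re-enabling the interrupt itself: FILTER_HANDLED means all work was done in the filter, FILTER_SCHEDULE_THREAD asks iflib to run the deferred queue task. A minimal sketch of that contract (the softc type and helpers are hypothetical):

    static int
    example_filter(void *arg)
    {
            struct example_softc *sc = arg;

            if (!example_intr_is_ours(sc))          /* hypothetical helper */
                    return (FILTER_STRAY);
            example_ack_intr(sc);                   /* hypothetical helper */
            return (FILTER_SCHEDULE_THREAD);        /* defer to the ithread */
    }
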
@@ -1347,44 +1036,16 @@
* MSIX VSI Interrupt Service routine
*
**********************************************************************/
-void
+int
ixl_msix_que(void *arg)
{
struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Protect against spurious interrupts */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
ixl_set_queue_rx_itr(que);
ixl_set_queue_tx_itr(que);
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixl_enable_queue(hw, que->me);
-
- return;
-}
+ return (FILTER_SCHEDULE_THREAD);
+}
/*********************************************************************
@@ -1392,7 +1053,7 @@
* MSIX Admin Queue Interrupt Service routine
*
**********************************************************************/
-static void
+int
ixl_msix_adminq(void *arg)
{
struct ixl_pf *pf = arg;
@@ -1416,7 +1077,7 @@
#ifdef PCI_IOV
if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ iflib_iov_intr_deferred(pf->vsi.ctx);
}
#endif
@@ -1424,8 +1085,62 @@
reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- taskqueue_enqueue(pf->tq, &pf->adminq);
- return;
+ iflib_admin_intr_deferred(pf->vsi.ctx);
+ return (FILTER_HANDLED);
+}
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+int
+ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ int err, rid, vector = 0;
+
+ /* Admin Que is vector 0 */
+ rid = vector + 1;
+
+ err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
+ ixl_msix_adminq, pf, 0, "aq");
+ if (err) {
+ iflib_irq_free(ctx, &vsi->irq);
+ device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler");
+ return (err);
+ }
+ pf->admvec = vector;
+ ++vector;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");
+
+ /* Now set up the stations */
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ char buf[16];
+ rid = vector + 1;
+
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX,
+ ixl_msix_que, que, que->me, buf);
+ if (err) {
+ device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err);
+ vsi->num_queues = i + 1;
+ goto fail;
+ }
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, que, que->me, buf);
+ que->msix = vector;
+ }
+
+ return (0);
+fail:
+ iflib_irq_free(ctx, &vsi->irq);
+ que = vsi->queues;
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ iflib_irq_free(ctx, &que->que_irq);
+ return (err);
}
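
The resulting vector layout, spelled out since rids are 1-based while vectors are 0-based:

    /*
     * vector 0, rid 1:      admin queue    -> ixl_msix_adminq
     * vector i, rid i + 1:  RX queue i - 1 -> ixl_msix_que (i >= 1),
     *     with the matching TX softirq registered on the same rid
     */
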
/*********************************************************************
@@ -1437,14 +1152,13 @@
*
**********************************************************************/
static void
-ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+ixl_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = &pf->hw;
INIT_DEBUGOUT("ixl_media_status: begin");
- IXL_PF_LOCK(pf);
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
@@ -1454,7 +1168,6 @@
ifmr->ifm_active = IFM_ETHER;
if (!pf->link_up) {
- IXL_PF_UNLOCK(pf);
return;
}
@@ -1554,33 +1267,6 @@
if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
ifmr->ifm_active |= IFM_ETH_RXPAUSE;
- IXL_PF_UNLOCK(pf);
-
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- * media/mediopt option with ifconfig.
- *
- **********************************************************************/
-static int
-ixl_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DEBUGOUT("ixl_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- if_printf(ifp, "Media change is currently not supported.\n");
-
- return (ENODEV);
}
@@ -1592,9 +1278,10 @@
** implementation this is only available for TCP connections
*/
void
-ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
+ixl_atr(struct ixl_queue *que, int hflags, int etype)
{
struct ixl_vsi *vsi = que->vsi;
+ if_shared_ctx_t sctx = ixl_sctx;
struct tx_ring *txr = &que->txr;
struct i40e_filter_program_desc *FDIR;
u32 ptype, dtype;
@@ -1608,7 +1295,7 @@
** or at the selected sample rate
*/
txr->atr_count++;
- if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
+ if (((hflags & (TH_FIN | TH_SYN)) == 0) &&
(txr->atr_count < txr->atr_rate))
return;
txr->atr_count = 0;
@@ -1616,7 +1303,7 @@
/* Get a descriptor to use */
idx = txr->next_avail;
FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
- if (++idx == que->num_desc)
+ if (++idx == sctx->isc_ntxd)
idx = 0;
txr->avail--;
txr->next_avail = idx;
@@ -1638,7 +1325,7 @@
** We use the TCP TH_FIN as a trigger to remove
** the filter, otherwise its an update.
*/
- dtype |= (th->th_flags & TH_FIN) ?
+ dtype |= (hflags & TH_FIN) ?
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1652,44 +1339,31 @@
FDIR->qindex_flex_ptype_vsi = htole32(ptype);
FDIR->dtype_cmd_cntindex = htole32(dtype);
- return;
}
#endif
-
-static void
-ixl_set_promisc(struct ixl_vsi *vsi)
+static int
+ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
- struct ifnet *ifp = vsi->ifp;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct i40e_hw *hw = vsi->hw;
- int err, mcnt = 0;
+ int err;
bool uni = FALSE, multi = FALSE;
- if (ifp->if_flags & IFF_ALLMULTI)
+ if (flags & IFF_ALLMULTI ||
+ if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
multi = TRUE;
- else { /* Need to count the multicast addresses */
- struct ifmultiaddr *ifma;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
- }
- if_maddr_runlock(ifp);
- }
-
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
+ if (flags & IFF_PROMISC)
uni = TRUE;
err = i40e_aq_set_vsi_unicast_promiscuous(hw,
vsi->seid, uni, NULL);
+ if (err)
+ return (err);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, multi, NULL);
- return;
+ return (err);
}
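
The resulting behavior, derived from the code above:

    /*
     * IFF_PROMISC                        -> unicast promiscuous on
     * IFF_ALLMULTI or a full MC table    -> multicast promiscuous on
     * neither                            -> both promiscuous modes off
     */
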
/*********************************************************************
@@ -1699,92 +1373,64 @@
*
*********************************************************************/
static void
-ixl_add_multi(struct ixl_vsi *vsi)
+ixl_del_multi(struct ixl_vsi *vsi)
{
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int mcnt = 0, flags;
+ struct ixl_mac_filter *f;
+ int mcnt = 0;
- IOCTL_DEBUGOUT("ixl_add_multi: begin");
+ IOCTL_DEBUGOUT("ixl_del_multi: begin");
- if_maddr_rlock(ifp);
- /*
- ** First just get a count, to decide if we
- ** we simply use multicast promiscuous.
- */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if ((f->flags & (IXL_FILTER_USED|IXL_FILTER_MC)) == (IXL_FILTER_USED|IXL_FILTER_MC)){
+ f->flags |= IXL_FILTER_DEL;
mcnt++;
}
- if_maddr_runlock(ifp);
-
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- /* delete existing MC filters */
- ixl_del_hw_filters(vsi, mcnt);
- i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, TRUE, NULL);
- return;
}
+ if (mcnt > 0)
+ ixl_del_hw_filters(vsi, mcnt);
+}
+
+static int
+ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
+{
+ struct ixl_vsi *vsi = arg;
- mcnt = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
+ return (0);
ixl_add_mc_filter(vsi,
(u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
- mcnt++;
- }
- if_maddr_runlock(ifp);
- if (mcnt > 0) {
- flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
- ixl_add_hw_filters(vsi, flags, mcnt);
- }
-
- IOCTL_DEBUGOUT("ixl_add_multi: end");
- return;
+ return (1);
}
static void
-ixl_del_multi(struct ixl_vsi *vsi)
+ixl_if_multi_set(if_ctx_t ctx)
{
- struct ifnet *ifp = vsi->ifp;
- struct ifmultiaddr *ifma;
- struct ixl_mac_filter *f;
- int mcnt = 0;
- bool match = FALSE;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ int mcnt = 0, flags;
- IOCTL_DEBUGOUT("ixl_del_multi: begin");
+ IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
- /* Search for removed multicast addresses */
- if_maddr_rlock(ifp);
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
- match = FALSE;
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
- if (cmp_etheraddr(f->macaddr, mc_addr)) {
- match = TRUE;
- break;
- }
- }
- if (match == FALSE) {
- f->flags |= IXL_FILTER_DEL;
- mcnt++;
- }
+ mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
+ /* delete existing MC filters */
+ ixl_del_multi(vsi);
+
+ if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
+ i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, TRUE, NULL);
+ return;
}
+ /* (re-)install filters for all mcast addresses */
+ mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
+
+ if (mcnt > 0) {
+ flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+ ixl_add_hw_filters(vsi, flags, mcnt);
}
- if_maddr_runlock(ifp);
- if (mcnt > 0)
- ixl_del_hw_filters(vsi, mcnt);
+ IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}
-
/*********************************************************************
* Timer routine
*
@@ -1794,68 +1440,32 @@
**********************************************************************/
static void
-ixl_local_timer(void *arg)
+ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
- struct ixl_pf *pf = arg;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- int hung = 0;
+ struct ixl_queue *que = &vsi->queues[qid];
u32 mask;
- mtx_assert(&pf->pf_mtx, MA_OWNED);
-
- /* Fire off the adminq task */
- taskqueue_enqueue(pf->tq, &pf->adminq);
-
- /* Update stats */
- ixl_update_stats_counters(pf);
-
/*
** Check status of the queues
*/
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
- for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
if (que->busy)
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- /*
- ** Each time txeof runs without cleaning, but there
- ** are uncleaned descriptors it increments busy. If
- ** we get to 5 we declare it hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- vsi->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
-#ifdef IXL_DEBUG
- device_printf(dev,"Warning queue %d "
- "appears to be hung!\n", i);
-#endif
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
- }
- }
- /* Only reinit if all queues show hung */
- if (hung == vsi->num_queues)
- goto hung;
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ if (qid != 0)
return;
-hung:
- device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
- ixl_init_locked(pf);
+ /* Fire off the adminq task */
+ iflib_admin_intr_deferred(ctx);
+
+ /* Update stats */
+ ixl_update_stats_counters(pf);
}
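
The per-queue early return is deliberate, assuming iflib invokes ifdi_timer once per TX queue per tick:

    /*
     * Only the qid == 0 call kicks the admin queue and updates the
     * statistics, so that work runs exactly once per timer interval.
     */
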
/*
@@ -1868,8 +1478,7 @@
{
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
if (pf->link_up){
if (vsi->link_active == FALSE) {
@@ -1887,24 +1496,20 @@
** partition is not at least 10GB
*/
if (hw->func_caps.npar_enable &&
- (hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_1GB ||
- hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_100MB))
- device_printf(dev, "The partition detected"
- "link speed that is less than 10Gbps\n");
- if_link_state_change(ifp, LINK_STATE_UP);
+ (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+ hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+ device_printf(dev, "The partition detected link"
+ "speed that is less than 10Gbps\n");
+ iflib_link_state_change(vsi->ctx, LINK_STATE_UP);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
if (bootverbose)
device_printf(dev,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
+ iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN);
vsi->link_active = FALSE;
}
}
-
- return;
}
/*********************************************************************
@@ -1915,304 +1520,14 @@
**********************************************************************/
static void
-ixl_stop(struct ixl_pf *pf)
+ixl_if_stop(if_ctx_t ctx)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- mtx_assert(&pf->pf_mtx, MA_OWNED);
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
- INIT_DEBUGOUT("ixl_stop: begin\n");
- if (pf->num_vfs == 0)
- ixl_disable_intr(vsi);
- else
- ixl_disable_rings_intr(vsi);
+ INIT_DEBUGOUT("ixl_if_stop: begin\n");
ixl_disable_rings(vsi);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- /* Stop the local timer */
- callout_stop(&pf->timer);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI
- *
- **********************************************************************/
-static int
-ixl_assign_vsi_legacy(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- int error, rid = 0;
-
- if (pf->msix == 1)
- rid = 1;
- pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (pf->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: vsi legacy/msi interrupt\n");
- return (ENXIO);
- }
-
- /* Set the handler function */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_intr, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "Failed to register legacy/msi handler");
- return (error);
- }
- bus_describe_intr(dev, pf->res, pf->tag, "irq0");
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(dev));
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-
-#ifdef PCI_IOV
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
-
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
- device_get_nameunit(dev));
-
- return (0);
-}
-
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI
- *
- **********************************************************************/
-static int
-ixl_assign_vsi_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 0;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
-
- /* Admin Que is vector 0*/
- rid = vector + 1;
- pf->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!pf->res) {
- device_printf(dev,"Unable to allocate"
- " bus resource: Adminq interrupt [%d]\n", rid);
- return (ENXIO);
- }
- /* Set the adminq vector and handler */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_adminq, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "Failed to register Admin que handler");
- return (error);
- }
- bus_describe_intr(dev, pf->res, pf->tag, "aq");
- pf->admvec = vector;
- /* Tasklet for Admin Queue */
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-
-#ifdef PCI_IOV
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
-
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
- device_get_nameunit(pf->dev));
- ++vector;
-
- /* Now set up the stations */
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register que handler");
- return (error);
- }
- bus_describe_intr(dev, que->res, que->tag, "q%d", i);
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- bus_bind_intr(dev, que->res, cpu_id);
- que->msix = vector;
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
-#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s que", device_get_nameunit(dev));
-#endif
- }
-
- return (0);
-}
-
-
-/*
- * Allocate MSI/X vectors
- */
-static int
-ixl_init_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, want, vectors, queues, available;
-
- /* Override by tuneable */
- if (ixl_enable_msix == 0)
- goto msi;
-
- /*
- ** When used in a virtualized environment
- ** PCI BUSMASTER capability may not be set
- ** so explicity set it here and rewrite
- ** the ENABLE in the MSIX control register
- ** at this point to cause the host to
- ** successfully initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- /* First try MSI/X */
- rid = PCIR_BAR(IXL_BAR);
- pf->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!pf->msix_mem) {
- /* May not be enabled */
- device_printf(pf->dev,
- "Unable to map MSIX table \n");
- goto msi;
- }
-
- available = pci_msix_count(dev);
- if (available == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, pf->msix_mem);
- pf->msix_mem = NULL;
- goto msi;
- }
-
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
-
- /* Override with hardcoded value if sane */
- if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
- queues = ixl_max_queues;
-
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
-
- /*
- ** Want one vector (RX/TX pair) per queue
- ** plus an additional for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(pf->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- return (0); /* Will go to Legacy setup */
- }
-
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(pf->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- pf->msix = vectors;
- pf->vsi.num_queues = queues;
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
-#endif
- return (vectors);
- }
-msi:
- vectors = pci_msi_count(dev);
- pf->vsi.num_queues = 1;
- pf->msix = 1;
- ixl_max_queues = 1;
- ixl_enable_msix = 0;
- if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
- device_printf(pf->dev,"Using an MSI interrupt\n");
- else {
- pf->msix = 0;
- device_printf(pf->dev,"Using a Legacy interrupt\n");
- }
- return (vectors);
}
-
/*
* Plumb MSI/X vectors
*/
@@ -2279,11 +1594,9 @@
struct i40e_hw *hw = &pf->hw;
u32 reg;
-
wr32(hw, I40E_PFINT_ITR0(0), 0);
wr32(hw, I40E_PFINT_ITR0(1), 0);
-
/* Setup "other" causes */
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
@@ -2367,7 +1680,7 @@
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
int rid;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
rid = PCIR_BAR(0);
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
@@ -2388,12 +1701,6 @@
pf->hw.back = &pf->osdep;
- /*
- ** Now setup MSI or MSI/X, should
- ** return us the number of supported
- ** vectors. (Will be 1 for MSI)
- */
- pf->msix = ixl_init_msix(pf);
return (0);
}
@@ -2402,7 +1709,7 @@
{
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
int rid, memrid;
memrid = PCIR_BAR(IXL_BAR);
@@ -2411,19 +1718,14 @@
if ((!ixl_enable_msix) || (que == NULL))
goto early;
+
/*
** Release all msix VSI resources:
*/
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
+ iflib_irq_free(vsi->ctx, &vsi->irq);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ iflib_irq_free(vsi->ctx, &que->que_irq);
early:
/* Clean the AdminQ interrupt last */
if (pf->admvec) /* we are doing MSIX */
@@ -2438,18 +1740,10 @@
if (pf->res != NULL)
bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
- if (pf->msix)
- pci_release_msi(dev);
-
- if (pf->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, pf->msix_mem);
-
if (pf->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), pf->pci_mem);
- return;
}
static void
@@ -2457,79 +1751,79 @@
{
/* Display supported media types */
if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
phy_type & (1 << I40E_PHY_TYPE_XFI) ||
phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
#ifndef IFM_ETH_XTYPE
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
#else
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
|| phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
#endif
}
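
The function above is one long chain of bit tests mapping I40E_PHY_TYPE_* capability bits onto IFM_* media words; only the pointer-vs-reference change to vsi->media is new. The same pattern, table-driven for brevity (the mapping table here is a hypothetical, abbreviated sketch, not the driver's):

/* Abbreviated, hypothetical table showing the phy-type-to-media
 * mapping pattern used by ixl_add_ifmedia() above. */
static const struct {
	int	phy;	/* I40E_PHY_TYPE_* bit position */
	int	media;	/* IFM_* subtype */
} phy_media[] = {
	{ I40E_PHY_TYPE_1000BASE_T,  IFM_1000_T },
	{ I40E_PHY_TYPE_10GBASE_SR,  IFM_10G_SR },
	{ I40E_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
};

static void
add_media_from_phy(struct ifmedia *media, u32 phy_type)
{
	for (int i = 0; i < nitems(phy_media); i++)
		if (phy_type & (1 << phy_media[i].phy))
			ifmedia_add(media, IFM_ETHER | phy_media[i].media,
			    0, NULL);
}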
@@ -2541,60 +1835,28 @@
static int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
- struct ifnet *ifp;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
+ if_ctx_t ctx = vsi->ctx;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct i40e_aq_get_phy_abilities_resp abilities;
enum i40e_status_code aq_error = 0;
+ uint64_t cap;
INIT_DEBUGOUT("ixl_setup_interface: begin");
-
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = IF_Gbps(40);
- ifp->if_init = ixl_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixl_ioctl;
-
-#if __FreeBSD_version >= 1100036
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
-
- ifp->if_transmit = ixl_mq_start;
-
- ifp->if_qflush = ixl_qflush;
-
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
-
+	/* Initialize interface capabilities */
+
+ cap = IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_LRO | IFCAP_JUMBO_MTU; /* IFCAP_TSO | */
+ cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
+ if_setcapabilitiesbit(ifp, cap, 0);
+ if_setcapenable(ifp, if_getcapabilities(ifp));
+	if_setbaudrate(ifp, IF_Gbps(40));
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
/*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
- ifp->if_capabilities |= IFCAP_TSO;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_LRO;
-
- /* VLAN capabilties */
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU
- | IFCAP_VLAN_HWCSUM;
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
** Don't turn this on by default, if vlans are
** created on another pseudo device (eg. lagg)
** then vlan events are not passed thru, breaking
@@ -2604,13 +1866,6 @@
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
- ixl_media_status);
-
aq_error = i40e_aq_get_phy_capabilities(hw,
FALSE, TRUE, &abilities, NULL);
/* May need delay to detect fiber correctly */
@@ -2632,10 +1887,8 @@
ixl_add_ifmedia(vsi, abilities.phy_type);
/* Use autoselect media by default */
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
-
- ether_ifattach(ifp, hw->mac.addr);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
return (0);
}
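
Note what the iflib version of ixl_setup_interface() no longer does: allocate the ifnet, wire up if_init/if_ioctl/if_transmit, or call ether_ifattach; iflib owns all of that now, and the driver only sets capabilities and the baudrate on the ifp it is handed. The max_frame_size arithmetic is plain L2 framing overhead, spelled out as a sketch:

/* Worked example of the max_frame_size arithmetic above:
 * MTU + Ethernet header + CRC + one VLAN tag. */
static u32
l2_frame_size(u32 mtu)
{
	/* 1500 + 14 + 4 + 4 = 1522 for the default MTU */
	return (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
}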
@@ -2662,7 +1915,7 @@
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
- device_printf(pf->dev, "Link failed because "
+ device_printf(iflib_get_dev(pf->vsi.ctx), "Link failed because "
"an unqualified module was detected\n");
return;
@@ -2680,7 +1933,7 @@
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = vsi->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
struct i40e_aqc_get_switch_config_resp *sw_config;
u8 aq_buf[I40E_AQ_LARGE_BUF];
int ret;
@@ -2725,17 +1978,20 @@
static int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
- struct ixl_pf *pf = vsi->back;
+ if_shared_ctx_t sctx = ixl_sctx;
struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
struct i40e_hw *hw = vsi->hw;
struct i40e_vsi_context ctxt;
int err = 0;
+ struct ifnet *ifp = iflib_get_ifp(vsi->ctx);
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = vsi->seid;
+#ifdef notyet
if (pf->veb_seid != 0)
ctxt.uplink_seid = pf->veb_seid;
+#endif
ctxt.pf_num = hw->pf_id;
err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
if (err) {
@@ -2764,7 +2020,7 @@
/* Set VLAN receive stripping mode */
ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
- if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
else
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
@@ -2796,17 +2052,18 @@
/* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
+ size = sctx->isc_ntxd * sizeof(struct i40e_tx_desc);
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
tctx.new_context = 1;
- tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
+
+ tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
+ tctx.qlen = sctx->isc_ntxd;
tctx.fc_ena = 0;
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
/* Enable HEAD writeback */
tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.head_wb_addr = txr->tx_paddr +
+ (sctx->isc_ntxd * sizeof(struct i40e_tx_desc));
tctx.rdylist_act = 0;
err = i40e_clear_lan_tx_queue_context(hw, i);
if (err) {
@@ -2846,8 +2103,8 @@
rctx.dtype = 0;
rctx.dsize = 1; /* do 32byte descriptors */
rctx.hsplit_0 = 0; /* no HDR split initially */
- rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
+ rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
+ rctx.qlen = sctx->isc_nrxd;
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
rctx.tphdata_ena = 0;
@@ -2868,68 +2125,10 @@
err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
if (err) {
device_printf(dev, "Unable to set RX context %d\n", i);
- break;
- }
- err = ixl_init_rx_ring(que);
- if (err) {
- device_printf(dev, "Fail in init_rx_ring %d\n", i);
- break;
- }
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
-#ifdef DEV_NETMAP
- /* preserve queue */
- if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
- struct netmap_adapter *na = NA(vsi->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
- int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
- } else
-#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
- }
- return (err);
-}
-
-
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
-void
-ixl_free_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct ixl_queue *que = vsi->queues;
-
- /* Free station queues */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
+ break;
}
- free(vsi->queues, M_DEVBUF);
-
- /* Free VSI filter list */
- ixl_free_mac_filters(vsi);
+ }
+ return (err);
}
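
The head-writeback setup above explains the ring sizing used throughout this conversion: the writeback word is placed immediately past the last TX descriptor, which is why each TX queue allocation is roundup2(ntxd * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN), exactly the isc_qsizes[0] expression in the shared-context initializers later in the diff. A sketch of the layout math:

/* TX ring DMA layout implied by the HMC context setup above:
 *
 *   tx_paddr                               tx_paddr + ntxd * 16
 *   | desc 0 | desc 1 | ... | desc ntxd-1 | head-writeback u32 |
 *
 * so the writeback address is just past the descriptor ring. */
u64 head_wb_addr = txr->tx_paddr +
    sctx->isc_ntxd * sizeof(struct i40e_tx_desc);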
static void
@@ -2940,11 +2139,10 @@
while (!SLIST_EMPTY(&vsi->ftl)) {
f = SLIST_FIRST(&vsi->ftl);
SLIST_REMOVE_HEAD(&vsi->ftl, next);
- free(f, M_DEVBUF);
+ free(f, M_IXL);
}
}
-
/*********************************************************************
*
* Allocate memory for the VSI (virtual station interface) and their
@@ -2952,127 +2150,6 @@
* called only once at attach.
*
**********************************************************************/
-static int
-ixl_setup_stations(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi;
- struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
- int error = I40E_SUCCESS;
-
- vsi = &pf->vsi;
- vsi->back = (void *)pf;
- vsi->hw = &pf->hw;
- vsi->id = 0;
- vsi->num_vlans = 0;
- vsi->back = pf;
-
- /* Get memory for the station queues */
- if (!(vsi->queues =
- (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
- vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
- }
-
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- que->num_desc = ixl_ringsz;
- que->me = i;
- que->vsi = vsi;
- /* mark the queue as active */
- vsi->active_queues |= (u64)1 << que->me;
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL(que->me);
-
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(&pf->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(4096, M_DEVBUF,
- M_WAITOK, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&pf->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
- /* Allocate receive soft structs for the ring*/
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- }
-
-early:
- return (error);
-}
/*
** Provide an update to the queue RX
@@ -3144,7 +2221,6 @@
}
rxr->bytes = 0;
rxr->packets = 0;
- return;
}
@@ -3218,7 +2294,6 @@
}
txr->bytes = 0;
txr->packets = 0;
- return;
}
#define QUEUE_NAME_LEN 32
@@ -3231,7 +2306,7 @@
struct sysctl_oid_list *child;
struct sysctl_oid_list *vsi_list;
- tree = device_get_sysctl_tree(pf->dev);
+ tree = device_get_sysctl_tree(iflib_get_dev(vsi->ctx));
child = SYSCTL_CHILDREN(tree);
vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
CTLFLAG_RD, NULL, "VSI Number");
@@ -3243,8 +2318,8 @@
static void
ixl_add_hw_stats(struct ixl_pf *pf)
{
- device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
struct ixl_queue *queues = vsi->queues;
struct i40e_hw_port_stats *pf_stats = &pf->stats;
@@ -3410,8 +2485,6 @@
entry++;
}
}
-
-
/*
** ixl_config_rss - setup RSS
** - note this is done for the single vsi
@@ -3504,57 +2577,6 @@
ixl_flush(hw);
}
-
-/*
-** This routine is run via an vlan config EVENT,
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA, init will
-** repopulate the real table.
-*/
-static void
-ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- ++vsi->num_vlans;
- ixl_add_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine is run via an vlan
-** unconfig EVENT, remove our entry
-** in the soft vfta.
-*/
-static void
-ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- --vsi->num_vlans;
- ixl_del_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
/*
** This routine updates vlan filters, called by init
** it scans the filter table and then updates the hw
@@ -3588,7 +2610,6 @@
flags = IXL_FILTER_VLAN;
flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
ixl_add_hw_filters(vsi, flags, cnt);
- return;
}
/*
@@ -3624,8 +2645,6 @@
f->vlan = IXL_VLAN_ANY;
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
| IXL_FILTER_MC);
-
- return;
}
static void
@@ -3636,20 +2655,54 @@
}
/*
+** This routine is run via a VLAN config EVENT;
+** it enables us to use the HW filter table, since
+** we can get the VLAN ID. It just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+void
+ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ ++vsi->num_vlans;
+ ixl_add_filter(vsi, hw->mac.addr, vtag);
+}
+
+/*
+** This routine is run via a VLAN
+** unconfig EVENT; it removes our entry
+** from the soft VFTA.
+*/
+void
+ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ --vsi->num_vlans;
+ ixl_del_filter(vsi, hw->mac.addr, vtag);
+}
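+
Both callbacks share the same guard: VID 0 and anything past the 12-bit VLAN ID space are rejected before the soft VFTA is touched. Note also what the conversion buys here: iflib delivers these per-VLAN events directly, so the EVENTHANDLER registration, the old "ifp->if_softc != arg" ownership check, and the PF lock all disappear. The guard, named as a hypothetical helper:

/* Hypothetical helper naming the guard both callbacks apply:
 * only VIDs 1..4095 fit the 12-bit 802.1Q VID field. */
static inline bool
ixl_vlan_id_valid(u16 vtag)
{
	return (vtag != 0 && vtag <= 4095);
}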
+
+/*
** This routine adds macvlan filters
*/
-static void
+void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f, *tmp;
- struct ixl_pf *pf;
- device_t dev;
DEBUGOUT("ixl_add_filter: begin");
- pf = vsi->back;
- dev = pf->dev;
-
/* Does one already exist */
f = ixl_find_filter(vsi, macaddr, vlan);
if (f != NULL)
@@ -3669,7 +2722,7 @@
f = ixl_get_filter(vsi);
if (f == NULL) {
- device_printf(dev, "WARNING: no filter available!!\n");
+ device_printf(iflib_get_dev(vsi->ctx), "WARNING: no filter available!!\n");
return;
}
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
@@ -3684,7 +2737,7 @@
return;
}
-static void
+void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
@@ -3739,18 +2792,12 @@
{
struct i40e_aqc_add_macvlan_element_data *a, *b;
struct ixl_mac_filter *f;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = iflib_get_dev(vsi->ctx);
int err, j = 0;
- pf = vsi->back;
- dev = pf->dev;
- hw = &pf->hw;
- IXL_PF_LOCK_ASSERT(pf);
-
a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_IXL, M_NOWAIT | M_ZERO);
if (a == NULL) {
device_printf(dev, "add_hw_filters failed to get memory\n");
return;
@@ -3787,7 +2834,7 @@
else
vsi->hw_filters_add += j;
}
- free(a, M_DEVBUF);
+ free(a, M_IXL);
return;
}
@@ -3800,20 +2847,15 @@
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
struct i40e_aqc_remove_macvlan_element_data *d, *e;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = iflib_get_dev(vsi->ctx);
struct ixl_mac_filter *f, *f_temp;
int err, j = 0;
DEBUGOUT("ixl_del_hw_filters: begin\n");
- pf = vsi->back;
- hw = &pf->hw;
- dev = pf->dev;
-
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_IXL, M_NOWAIT | M_ZERO);
if (d == NULL) {
printf("del hw filter failed to get memory\n");
return;
@@ -3827,7 +2869,7 @@
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
/* delete entry from vsi list */
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
- free(f, M_DEVBUF);
+ free(f, M_IXL);
j++;
}
if (j == cnt)
@@ -3849,7 +2891,7 @@
} else
vsi->hw_filters_del += j;
}
- free(d, M_DEVBUF);
+ free(d, M_IXL);
DEBUGOUT("ixl_del_hw_filters: end\n");
return;
@@ -3880,7 +2922,7 @@
i40e_msec_delay(10);
}
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "TX queue %d disabled!\n",
+ device_printf(iflib_get_dev(vsi->ctx), "TX queue %d disabled!\n",
index);
error = ETIMEDOUT;
}
@@ -3897,7 +2939,7 @@
i40e_msec_delay(10);
}
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "RX queue %d disabled!\n",
+ device_printf(iflib_get_dev(vsi->ctx), "RX queue %d disabled!\n",
index);
error = ETIMEDOUT;
}
@@ -3906,6 +2948,59 @@
return (error);
}
+static void
+ixl_if_intr_enable(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (ixl_enable_msix) {
+ ixl_enable_adminq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_if_queue_intr_enable(vsi->ctx, que->me);
+ } else
+ ixl_enable_legacy(hw);
+}
+
+static void
+ixl_if_intr_disable(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (ixl_enable_msix) {
+ ixl_disable_adminq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_if_queue_intr_disable(ctx, que->me);
+ } else
+ ixl_disable_legacy(hw);
+}
+
+static void
+ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(vsi->hw, I40E_PFINT_DYN_CTLN(qid), reg);
+}
+
+static void
+ixl_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTLN(qid), reg);
+}
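+
These two ifdi methods are the per-queue interrupt contract iflib expects: enable must set INTENA and clear the pending-bit array (CLEARPBA) so a cause latched while the queue was masked does not re-fire immediately, while disable just writes the ITR index back with INTENA clear. A hedged sketch of how the fast path uses them (handler name is hypothetical):

/* Hypothetical queue filter routine: it masks nothing itself;
 * iflib runs the deferred RX/TX task and then re-enables the
 * queue through ifdi_queue_intr_enable once the ring is drained. */
static int
example_msix_que(void *arg)
{
	struct ixl_queue *que = arg;

	++que->irqs;
	return (FILTER_SCHEDULE_THREAD);
}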
+
static int
ixl_disable_rings(struct ixl_vsi *vsi)
{
@@ -3932,7 +3027,7 @@
i40e_msec_delay(10);
}
if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "TX queue %d still enabled!\n",
+ device_printf(iflib_get_dev(vsi->ctx), "TX queue %d still enabled!\n",
index);
error = ETIMEDOUT;
}
@@ -3948,7 +3043,7 @@
i40e_msec_delay(10);
}
if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "RX queue %d still enabled!\n",
+ device_printf(iflib_get_dev(vsi->ctx), "RX queue %d still enabled!\n",
index);
error = ETIMEDOUT;
}
@@ -3966,7 +3061,7 @@
static void ixl_handle_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
bool mdd_detected = false;
bool pf_mdd_detected = false;
u32 reg;
@@ -4038,33 +3133,12 @@
if (ixl_enable_msix) {
ixl_enable_adminq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_enable_queue(hw, que->me);
+ ixl_if_queue_intr_enable(vsi->ctx, que->me);
} else
ixl_enable_legacy(hw);
}
static void
-ixl_disable_rings_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_disable_queue(hw, que->me);
-}
-
-static void
-ixl_disable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
-
- if (ixl_enable_msix)
- ixl_disable_adminq(hw);
- else
- ixl_disable_legacy(hw);
-}
-
-static void
ixl_enable_adminq(struct i40e_hw *hw)
{
u32 reg;
@@ -4089,28 +3163,6 @@
}
static void
-ixl_enable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-}
-
-static void
-ixl_disable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-
- return;
-}
-
-static void
ixl_enable_legacy(struct i40e_hw *hw)
{
u32 reg;
@@ -4312,11 +3364,11 @@
** - do outside interrupt since it might sleep
*/
static void
-ixl_do_adminq(void *context, int pending)
+ixl_if_update_admin_status(if_ctx_t ctx)
{
- struct ixl_pf *pf = context;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
struct i40e_arq_event_info event;
i40e_status ret;
u32 reg, loop = 0;
@@ -4324,13 +3376,12 @@
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_IXL, M_NOWAIT | M_ZERO);
if (!event.msg_buf) {
printf("Unable to allocate adminq memory\n");
return;
}
- IXL_PF_LOCK(pf);
/* clean and process any events */
do {
ret = i40e_clean_arq_element(hw, &event, &result);
@@ -4361,18 +3412,17 @@
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- free(event.msg_buf, M_DEVBUF);
+ free(event.msg_buf, M_IXL);
/*
* If there are still messages to process, reschedule ourselves.
* Otherwise, re-enable our interrupt and go to sleep.
*/
if (result > 0)
- taskqueue_enqueue(pf->tq, &pf->adminq);
+ iflib_admin_intr_deferred(ctx);
else
ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
}
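
The locking delta here is the point of the conversion: the old ixl_do_adminq ran from the driver's own taskqueue under IXL_PF_LOCK, while ixl_if_update_admin_status runs from iflib's admin task, which provides the serialization; the lock disappears and rescheduling becomes iflib_admin_intr_deferred(). A sketch of the interrupt side of that contract (handler name is hypothetical):

/* Hypothetical admin-queue filter: never drains the AQ in
 * interrupt context, just asks iflib to run the driver's
 * ifdi_update_admin_status method from its taskqueue. */
static int
example_msix_adminq(void *arg)
{
	if_ctx_t ctx = arg;

	iflib_admin_intr_deferred(ctx);
	return (FILTER_HANDLED);
}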
#ifdef IXL_DEBUG_SYSCTL
@@ -4408,7 +3458,6 @@
printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
- printf("RX next check = %x\n", rxr->next_check);
printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
printf("TX desc avail = %x\n", txr->avail);
@@ -4451,6 +3500,7 @@
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
+
struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
@@ -4506,22 +3556,18 @@
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf;
- struct ifnet *ifp;
struct i40e_eth_stats *es;
u64 tx_discards;
struct i40e_hw_port_stats *nsd;
pf = vsi->back;
- ifp = vsi->ifp;
es = &vsi->eth_stats;
nsd = &pf->stats;
ixl_update_eth_stats(vsi);
tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
- for (int i = 0; i < vsi->num_queues; i++)
- tx_discards += vsi->queues[i].txr.br->br_drops;
/* Update ifnet stats */
IXL_SET_IPACKETS(vsi, es->rx_unicast +
@@ -4636,7 +3682,7 @@
*/
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
int error = 0;
enum i40e_status_code aq_error = 0;
u8 fc_aq_err = 0;
@@ -4724,7 +3770,10 @@
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+	if_ctx_t ctx = pf->vsi.ctx;
+ device_t dev = iflib_get_dev(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
enum i40e_status_code aq_error = 0;
@@ -4772,11 +3821,7 @@
** This seems a bit heavy handed, but we
** need to get a reinit on some devices
*/
- IXL_PF_LOCK(pf);
- ixl_stop(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-
+ ifp->if_init(ifp->if_softc);
return (0);
}
@@ -4795,7 +3840,7 @@
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
int requested_ls = 0;
int error = 0;
@@ -5032,7 +4077,7 @@
}
buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
- buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
+ buf = buf_i = malloc(buf_len, M_IXL, M_NOWAIT);
sprintf(buf_i++, "\n");
SLIST_FOREACH(f, &vsi->ftl, next) {
@@ -5050,7 +4095,7 @@
error = sysctl_handle_string(oidp, buf, strlen(buf), req);
if (error)
printf("sysctl error: %d\n", error);
- free(buf, M_DEVBUF);
+ free(buf, M_IXL);
return error;
}
@@ -5070,14 +4115,14 @@
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
struct sbuf *buf;
int error = 0;
u8 num_entries;
struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
- buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+ buf = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
@@ -5118,6 +4163,7 @@
sbuf_cat(buf, "\n");
}
+ sbuf_trim(buf);
error = sbuf_finish(buf);
sbuf_delete(buf);
@@ -5167,7 +4213,7 @@
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
struct sbuf *buf;
struct sbuf *nmbuf;
int error = 0;
@@ -5177,7 +4223,7 @@
struct i40e_aqc_get_switch_config_resp *sw_config;
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
- buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+ buf = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
return (ENOMEM);
@@ -5296,7 +4342,7 @@
code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
if (code != I40E_SUCCESS) {
- device_printf(pf->dev, "Failed to disable BW limit: %d\n",
+ device_printf(iflib_get_dev(pf->vsi.ctx), "Failed to disable BW limit: %d\n",
ixl_adminq_err_to_errno(hw->aq.asq_last_status));
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
}
@@ -5494,7 +4540,7 @@
error = ixl_flush_pcie(pf, vf);
if (error != 0)
- device_printf(pf->dev,
+ device_printf(iflib_get_dev(pf->vsi.ctx),
"Timed out waiting for PCIe activity to stop on VF-%d\n",
vf->vf_num);
@@ -5507,7 +4553,7 @@
}
if (i == IXL_VF_RESET_TIMEOUT)
- device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
+ device_printf(iflib_get_dev(pf->vsi.ctx), "VF %d failed to reset\n", vf->vf_num);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
@@ -6380,7 +5426,7 @@
opcode = le32toh(event->desc.cookie_high);
if (vf_num >= pf->num_vfs) {
- device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
+ device_printf(iflib_get_dev(pf->vsi.ctx), "Got msg from illegal VF: %d\n", vf_num);
return;
}
@@ -6444,7 +5490,7 @@
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
static void
-ixl_handle_vflr(void *arg, int pending)
+ixl_if_handle_vflr(if_ctx_t ctx)
{
struct ixl_pf *pf;
struct i40e_hw *hw;
@@ -6452,10 +5498,9 @@
uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
int i;
- pf = arg;
+ pf = iflib_get_softc(ctx);
hw = &pf->hw;
- IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
global_vf_num = hw->func_caps.vf_base_id + i;
@@ -6474,8 +5519,6 @@
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
-
- IXL_PF_UNLOCK(pf);
}
static int
@@ -6533,19 +5576,20 @@
}
static int
-ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
+ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
{
+ device_t dev;
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
int i, error;
- pf = device_get_softc(dev);
+ dev = iflib_get_dev(ctx);
+ pf = iflib_get_softc(ctx);
hw = &pf->hw;
pf_vsi = &pf->vsi;
- IXL_PF_LOCK(pf);
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
M_ZERO);
@@ -6570,32 +5614,31 @@
ixl_enable_adminq(hw);
pf->num_vfs = num_vfs;
- IXL_PF_UNLOCK(pf);
return (0);
fail:
free(pf->vfs, M_IXL);
pf->vfs = NULL;
- IXL_PF_UNLOCK(pf);
return (error);
}
static void
-ixl_iov_uninit(device_t dev)
+ixl_if_iov_uninit(if_ctx_t ctx)
{
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
struct ifnet *ifp;
struct ixl_vf *vfs;
+ device_t dev;
int i, num_vfs;
- pf = device_get_softc(dev);
+ dev = iflib_get_dev(ctx);
+ pf = iflib_get_softc(ctx);
hw = &pf->hw;
vsi = &pf->vsi;
ifp = vsi->ifp;
- IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
@@ -6606,19 +5649,14 @@
pf->veb_seid = 0;
}
-#if __FreeBSD_version > 1100022
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
-#else
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
-#endif
- ixl_disable_intr(vsi);
+ ixl_if_intr_disable(ctx);
vfs = pf->vfs;
num_vfs = pf->num_vfs;
pf->vfs = NULL;
pf->num_vfs = 0;
- IXL_PF_UNLOCK(pf);
-	/* Do this after the unlock as sysctl_ctx_free might sleep. */
+	/* Defer this, as sysctl_ctx_free might sleep. */
for (i = 0; i < num_vfs; i++)
@@ -6627,19 +5665,20 @@
}
static int
-ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
+ixl_if_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
char sysctl_name[QUEUE_NAME_LEN];
struct ixl_pf *pf;
struct ixl_vf *vf;
+ device_t dev;
const void *mac;
size_t size;
int error;
- pf = device_get_softc(dev);
+ dev = iflib_get_dev(ctx);
+ pf = iflib_get_softc(ctx);
vf = &pf->vfs[vfnum];
- IXL_PF_LOCK(pf);
vf->vf_num = vfnum;
vf->vsi.back = pf;
@@ -6673,7 +5712,6 @@
ixl_reset_vf(pf, vf);
out:
- IXL_PF_UNLOCK(pf);
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
Index: sys/dev/ixl/if_ixl_common.c
===================================================================
--- /dev/null
+++ sys/dev/ixl/if_ixl_common.c
@@ -0,0 +1,96 @@
+#ifndef IXL_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+#endif
+
+#include "ixl.h"
+#include "ixl_pf.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+#include "ifdi_if.h"
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ * media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+int
+ixl_if_media_change(if_ctx_t ctx)
+{
+ struct ifmedia *ifm = iflib_get_media(ctx);
+
+ INIT_DEBUGOUT("ixl_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if_printf(iflib_get_ifp(ctx), "Media change is currently not supported.\n");
+ return (ENODEV);
+}
+
+int
+ixl_if_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_queue *que;
+ struct ixl_tx_buf *bufs;
+ if_shared_ctx_t sctx;
+ int i;
+
+ MPASS(vsi->num_queues > 0);
+ MPASS(nqs == 2);
+ /* Allocate queue structure memory */
+ sctx = iflib_get_sctx(ctx);
+ if (!(vsi->queues =
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_IXL, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
+ return (ENOMEM);
+ }
+ if ((bufs = malloc(sizeof(*bufs)*sctx->isc_ntxd*vsi->num_queues, M_IXL, M_WAITOK|M_ZERO)) == NULL) {
+ free(vsi->queues, M_IXL);
+ device_printf(iflib_get_dev(ctx), "failed to allocate sw bufs\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = vsi->queues; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ que->me = i;
+ que->vsi = vsi;
+
+ /* get the virtual and physical address of the hardware queues */
+ txr->tail = I40E_QTX_TAIL(que->me);
+ txr->tx_base = (struct i40e_tx_desc *)vaddrs[i*2];
+ txr->tx_paddr = paddrs[i*2];
+ txr->tx_buffers = bufs + i*sctx->isc_ntxd;
+ rxr->tail = I40E_QRX_TAIL(que->me);
+ rxr->rx_base = (union i40e_rx_desc *)vaddrs[i*2 + 1];
+ rxr->rx_paddr = paddrs[i*2 + 1];
+ txr->que = rxr->que = que;
+ }
+
+ device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", vsi->num_queues);
+ return (0);
+}
+
+void
+ixl_if_queues_free(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_queue *que;
+
+ if ((que = vsi->queues) == NULL)
+ return;
+ free(que->txr.tx_buffers, M_IXL);
+ free(que, M_IXL);
+}
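+
The indexing in ixl_if_queues_alloc() above encodes the isc_nqs == 2 layout declared in the shared context: iflib allocates nqs rings per queue pair and hands them over interleaved, TX ring first. An illustrative walk of the arrays (function and variable names are not the driver's):

/* Interleaved ring layout consumed by ixl_if_queues_alloc():
 * with isc_nqs == 2, ring 0 of each pair is TX, ring 1 is RX. */
static void
show_ring_layout(caddr_t *vaddrs, uint64_t *paddrs, int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		caddr_t tx_va = vaddrs[i * 2];		/* TX descriptors */
		caddr_t rx_va = vaddrs[i * 2 + 1];	/* RX descriptors */
		uint64_t tx_pa = paddrs[i * 2];		/* for tctx.base */
		uint64_t rx_pa = paddrs[i * 2 + 1];	/* for rctx.base */

		(void)tx_va; (void)rx_va; (void)tx_pa; (void)rx_pa;
	}
}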
Index: sys/dev/ixl/if_ixlv.c
===================================================================
--- sys/dev/ixl/if_ixlv.c
+++ sys/dev/ixl/if_ixlv.c
@@ -60,59 +60,55 @@
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
*********************************************************************/
-static ixl_vendor_info_t ixlv_vendor_info_array[] =
+static struct pci_vendor_info ixlv_vendor_info_array[] =
{
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Connection XL710 VF Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, "Intel(R) Ethernet Connection XL710 VF HV Driver"),
/* required last entry */
- {0, 0, 0, 0, 0}
+ PVID_END
};
/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710 VF Driver"
-};
-
-
-/*********************************************************************
* Function prototypes
*********************************************************************/
-static int ixlv_probe(device_t);
-static int ixlv_attach(device_t);
-static int ixlv_detach(device_t);
-static int ixlv_shutdown(device_t);
-static void ixlv_init_locked(struct ixlv_sc *);
+static void *ixlv_register(device_t);
+static int ixlv_if_attach_pre(if_ctx_t);
+static int ixlv_if_attach_post(if_ctx_t);
+static int ixlv_if_detach(if_ctx_t);
+static int ixlv_if_shutdown(if_ctx_t);
+static void ixlv_if_init(if_ctx_t);
+static void ixlv_if_stop(if_ctx_t);
+static void ixlv_if_intr_enable(if_ctx_t ctx);
+static void ixlv_if_intr_disable(if_ctx_t ctx);
+static void ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
+static void ixlv_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid);
+static void ixlv_if_media_status(if_ctx_t, struct ifmediareq *);
+static void ixlv_if_timer(if_ctx_t ctx, uint16_t qid);
+static int ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix);
+static void ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static void ixlv_if_multi_set(if_ctx_t ctx);
+static void ixlv_if_update_admin_status(if_ctx_t ctx);
+static int ixlv_if_promisc_set(if_ctx_t ctx, int flags);
+static int ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+
static int ixlv_allocate_pci_resources(struct ixlv_sc *);
static void ixlv_free_pci_resources(struct ixlv_sc *);
-static int ixlv_assign_msix(struct ixlv_sc *);
-static int ixlv_init_msix(struct ixlv_sc *);
-static int ixlv_init_taskqueue(struct ixlv_sc *);
-static int ixlv_setup_queues(struct ixlv_sc *);
static void ixlv_config_rss(struct ixlv_sc *);
-static void ixlv_stop(struct ixlv_sc *);
-static void ixlv_add_multi(struct ixl_vsi *);
-static void ixlv_del_multi(struct ixl_vsi *);
-static void ixlv_free_queues(struct ixl_vsi *);
-static int ixlv_setup_interface(device_t, struct ixlv_sc *);
+static int ixlv_setup_interface(if_ctx_t ctx);
-static int ixlv_media_change(struct ifnet *);
-static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
-
-static void ixlv_local_timer(void *);
+static void ixlv_init_internal(if_ctx_t ctx);
+static void ixlv_init_multi(struct ixl_vsi *vsi);
+static void ixlv_del_multi(struct ixl_vsi *vsi);
static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void ixlv_init_filters(struct ixlv_sc *);
static void ixlv_free_filters(struct ixlv_sc *);
-static void ixlv_msix_que(void *);
-static void ixlv_msix_adminq(void *);
-static void ixlv_do_adminq(void *, int);
+static int ixlv_msix_que(void *);
+static int ixlv_msix_adminq(void *);
static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
-static void ixlv_handle_que(void *, int);
static int ixlv_reset(struct ixlv_sc *);
static int ixlv_reset_complete(struct i40e_hw *);
static void ixlv_set_queue_rx_itr(struct ixl_queue *);
@@ -122,20 +118,16 @@
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
-static void ixlv_enable_queue_irq(struct i40e_hw *, int);
-static void ixlv_disable_queue_irq(struct i40e_hw *, int);
static void ixlv_setup_vlan_filters(struct ixlv_sc *);
-static void ixlv_register_vlan(void *, struct ifnet *, u16);
-static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
static void ixlv_init_hw(struct ixlv_sc *);
static int ixlv_setup_vc(struct ixlv_sc *);
static int ixlv_vf_config(struct ixlv_sc *);
-
+#if 0
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
-
+#endif
static void ixlv_add_sysctls(struct ixlv_sc *);
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -146,11 +138,12 @@
static device_method_t ixlv_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ixlv_probe),
- DEVMETHOD(device_attach, ixlv_attach),
- DEVMETHOD(device_detach, ixlv_detach),
- DEVMETHOD(device_shutdown, ixlv_shutdown),
- {0, 0}
+ DEVMETHOD(device_register, ixlv_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD_END
};
static driver_t ixlv_driver = {
@@ -160,9 +153,42 @@
devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
+MODULE_DEPEND(ixlv, iflib, 1, 1, 1);
MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
+static device_method_t ixlv_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ixlv_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ixlv_if_attach_post),
+ DEVMETHOD(ifdi_detach, ixlv_if_detach),
+	DEVMETHOD(ifdi_shutdown, ixlv_if_shutdown),
+ DEVMETHOD(ifdi_init, ixlv_if_init),
+ DEVMETHOD(ifdi_stop, ixlv_if_stop),
+ DEVMETHOD(ifdi_intr_disable, ixlv_if_intr_disable),
+ DEVMETHOD(ifdi_intr_enable, ixlv_if_intr_enable),
+ DEVMETHOD(ifdi_queue_intr_enable, ixlv_if_queue_intr_enable),
+ DEVMETHOD(ifdi_multi_set, ixlv_if_multi_set),
+ DEVMETHOD(ifdi_update_admin_status, ixlv_if_update_admin_status),
+ DEVMETHOD(ifdi_mtu_set, ixlv_if_mtu_set),
+ DEVMETHOD(ifdi_media_status, ixlv_if_media_status),
+ DEVMETHOD(ifdi_timer, ixlv_if_timer),
+ DEVMETHOD(ifdi_promisc_set, ixlv_if_promisc_set),
+ DEVMETHOD(ifdi_msix_intr_assign, ixlv_if_msix_intr_assign),
+ DEVMETHOD(ifdi_vlan_register, ixlv_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, ixlv_if_vlan_unregister),
+
+ DEVMETHOD(ifdi_media_change, ixl_if_media_change),
+ DEVMETHOD(ifdi_queues_alloc, ixl_if_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
+ DEVMETHOD_END
+};
+
+static driver_t ixlv_if_driver = {
+ "ixlv_if", ixlv_if_methods, sizeof(struct ixlv_sc),
+};
+
/*
** TUNEABLE PARAMETERS:
*/
@@ -221,54 +247,48 @@
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixlv_tx_itr, 0, "TX Interrupt Rate");
+extern struct if_txrx ixl_txrx;
+
+static struct if_shared_ctx ixlv_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
+ .isc_tx_maxsize = IXL_TSO_SIZE,
+
+ .isc_tx_maxsegsize = PAGE_SIZE*4,
+
+ .isc_rx_maxsize = PAGE_SIZE*4,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = PAGE_SIZE*4,
+ .isc_ntxd = DEFAULT_RING,
+ .isc_nrxd = DEFAULT_RING,
+ .isc_nfl = 1,
+ .isc_qsizes[0] = roundup2((DEFAULT_RING * sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN),
+ .isc_qsizes[1] = roundup2(DEFAULT_RING *
+ sizeof(union i40e_rx_desc), DBA_ALIGN),
+ .isc_nqs = 2,
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ixlv_vendor_info_array,
+ .isc_driver_version = ixlv_driver_version,
+ .isc_txrx = &ixl_txrx,
+ .isc_driver = &ixlv_if_driver,
+};
-/*********************************************************************
- * Device identification routine
- *
- * ixlv_probe determines if the driver should be loaded on
- * the hardware based on PCI vendor/device id of the device.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
+if_shared_ctx_t ixlv_sctx = &ixlv_sctx_init;
+MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
-static int
-ixlv_probe(device_t dev)
+static void *
+ixlv_register(device_t dev)
{
- ixl_vendor_info_t *ent;
-
- u16 pci_vendor_id, pci_device_id;
- u16 pci_subvendor_id, pci_subdevice_id;
- char device_name[256];
-
- INIT_DEBUGOUT("ixlv_probe: begin");
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixlv_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
+ ixlv_sctx->isc_ntxd = ixlv_ringsz;
+ ixlv_sctx->isc_nrxd = ixlv_ringsz;
+ ixlv_sctx->isc_qsizes[0] = roundup2((ixlv_ringsz * sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ ixlv_sctx->isc_qsizes[1] = roundup2(ixlv_ringsz *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(device_name, "%s, Version - %s",
- ixlv_strings[ent->index],
- ixlv_driver_version);
- device_set_desc_copy(dev, device_name);
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
+ return (ixlv_sctx);
}
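
This is the whole iflib registration story in miniature: probe, attach, detach, and shutdown become generic iflib stubs in the DEVMETHOD table, and the driver's single bus-facing entry point is device_register, which patches tunables into its if_shared_ctx and returns it. The pattern for a hypothetical minimal driver:

/* Hypothetical minimal device_register for an iflib driver:
 * apply tunables to the shared context, then hand it to iflib. */
static void *
example_register(device_t dev)
{
	example_sctx->isc_ntxd = example_ringsz;
	example_sctx->isc_nrxd = example_ringsz;
	return (example_sctx);		/* iflib drives the rest */
}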
/*********************************************************************
@@ -281,22 +301,24 @@
* return 0 on success, positive on failure
*********************************************************************/
+/* XXX We fail without MSIX support */
static int
-ixlv_attach(device_t dev)
+ixlv_if_attach_pre(if_ctx_t ctx)
{
struct ixlv_sc *sc;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
int error = 0;
+ device_t dev;
-	INIT_DBG_DEV(dev, "begin");
	/* Allocate, clear, and link in our primary soft structure */
-	sc = device_get_softc(dev);
+	dev = iflib_get_dev(ctx);
+	sc = iflib_get_softc(ctx);
+	INIT_DBG_DEV(dev, "begin");
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
vsi = &sc->vsi;
- vsi->dev = dev;
/* Initialize hw struct */
ixlv_init_hw(sc);
@@ -304,13 +326,6 @@
/* Allocate filter lists */
ixlv_init_filters(sc);
- /* Core Lock Init*/
- mtx_init(&sc->mtx, device_get_nameunit(dev),
- "IXL SC Lock", MTX_DEF);
-
- /* Set up the timer callout */
- callout_init_mtx(&sc->timer, &sc->mtx, 0);
-
/* Do PCI setup - map BAR0, etc */
if (ixlv_allocate_pci_resources(sc)) {
device_printf(dev, "%s: Allocation of PCI resources failed\n",
@@ -399,17 +414,38 @@
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
+ /* ATTACH_PRE end */
- /* This allocates the memory and early settings */
- if (ixlv_setup_queues(sc) != 0) {
- device_printf(dev, "%s: setup queues failed!\n",
- __func__);
- error = EIO;
- goto out;
- }
+ return (error);
+
+err_res_buf:
+ free(sc->vf_res, M_DEVBUF);
+err_aq:
+ i40e_shutdown_adminq(hw);
+err_pci_res:
+ ixlv_free_pci_resources(sc);
+err_early:
+ ixlv_free_filters(sc);
+ INIT_DBG_DEV(dev, "end: error %d", error);
+ return (error);
+}
+
+static int
+ixlv_if_attach_post(if_ctx_t ctx)
+{
+ struct ixlv_sc *sc;
+ int error = 0;
+ device_t dev;
+ struct i40e_hw *hw;
+
+	/* Retrieve our device and soft context handles */
+	dev = iflib_get_dev(ctx);
+	sc = iflib_get_softc(ctx);
+	hw = &sc->hw;
+	INIT_DBG_DEV(dev, "begin");
/* Setup the stack interface */
- if (ixlv_setup_interface(dev, sc) != 0) {
+ if (ixlv_setup_interface(ctx) != 0) {
device_printf(dev, "%s: setup interface failed!\n",
__func__);
error = EIO;
@@ -418,22 +454,10 @@
INIT_DBG_DEV(dev, "Queue memory and interface setup");
- /* Do queue interrupt setup */
- ixlv_assign_msix(sc);
-
- /* Start AdminQ taskqueue */
- ixlv_init_taskqueue(sc);
-
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
ixlv_add_sysctls(sc);
- /* Register for VLAN events */
- vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
- vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
-
/* We want AQ enabled early */
ixlv_enable_adminq_irq(hw);
@@ -443,23 +467,13 @@
ixl_vc_init_mgr(sc, &sc->vc_mgr);
INIT_DBG_DEV(dev, "end");
- return (error);
-
+ return (0);
out:
- ixlv_free_queues(vsi);
-err_res_buf:
- free(sc->vf_res, M_DEVBUF);
-err_aq:
- i40e_shutdown_adminq(hw);
-err_pci_res:
- ixlv_free_pci_resources(sc);
-err_early:
- mtx_destroy(&sc->mtx);
- ixlv_free_filters(sc);
- INIT_DBG_DEV(dev, "end: error %d", error);
+ ixlv_if_detach(ctx);
return (error);
}
+
/*********************************************************************
* Device removal routine
*
@@ -471,47 +485,19 @@
*********************************************************************/
static int
-ixlv_detach(device_t dev)
+ixlv_if_detach(if_ctx_t ctx)
{
- struct ixlv_sc *sc = device_get_softc(dev);
- struct ixl_vsi *vsi = &sc->vsi;
-
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+#ifdef IXL_DEBUG
+ device_t dev = iflib_get_dev(ctx);
+#endif
INIT_DBG_DEV(dev, "begin");
- /* Make sure VLANS are not using driver */
- if (vsi->ifp->if_vlantrunk != NULL) {
- if_printf(vsi->ifp, "Vlan in use, detach first\n");
- INIT_DBG_DEV(dev, "end");
- return (EBUSY);
- }
-
- /* Stop driver */
- ether_ifdetach(vsi->ifp);
- if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_stop(sc);
- mtx_unlock(&sc->mtx);
- }
-
- /* Unregister VLAN events */
- if (vsi->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
- if (vsi->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
-
- /* Drain VC mgr */
- callout_drain(&sc->vc_mgr.callout);
-
i40e_shutdown_adminq(&sc->hw);
- taskqueue_free(sc->tq);
- if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
- ixlv_free_queues(vsi);
- mtx_destroy(&sc->mtx);
ixlv_free_filters(sc);
- bus_generic_detach(dev);
INIT_DBG_DEV(dev, "end");
return (0);
}
@@ -523,20 +509,20 @@
**********************************************************************/
static int
-ixlv_shutdown(device_t dev)
+ixlv_if_shutdown(if_ctx_t ctx)
{
- struct ixlv_sc *sc = device_get_softc(dev);
-
+#ifdef IXL_DEBUG
+ device_t dev = iflib_get_dev(ctx);
+#endif
INIT_DBG_DEV(dev, "begin");
- mtx_lock(&sc->mtx);
- ixlv_stop(sc);
- mtx_unlock(&sc->mtx);
+ ixlv_if_stop(ctx);
INIT_DBG_DEV(dev, "end");
return (0);
}
+#ifdef notyet
/*
* Configure TXCSUM(IPV6) and TSO(4/6)
* - the hardware handles these together so we
@@ -612,6 +598,8 @@
}
}
+#endif
+
/*********************************************************************
* Ioctl entry point
*
@@ -620,7 +608,7 @@
*
* return 0 on success, positive on failure
**********************************************************************/
-
+#if 0
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
@@ -664,58 +652,32 @@
#endif
case SIOCSIFMTU:
IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
- mtx_lock(&sc->mtx);
- if (ifr->ifr_mtu > IXL_MAX_FRAME -
- ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
- error = EINVAL;
- IOCTL_DBG_IF(ifp, "mtu too large");
- } else {
- IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
- // ERJ: Interestingly enough, these types don't match
- ifp->if_mtu = (u_long)ifr->ifr_mtu;
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
- ixlv_init_locked(sc);
- }
- mtx_unlock(&sc->mtx);
+
break;
- case SIOCSIFFLAGS:
- IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
- mtx_lock(&sc->mtx);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- ixlv_init_locked(sc);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_stop(sc);
- sc->if_flags = ifp->if_flags;
- mtx_unlock(&sc->mtx);
+
break;
case SIOCADDMULTI:
+#if 0
IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_disable_intr(vsi);
+
ixlv_add_multi(vsi);
- ixlv_enable_intr(vsi);
- mtx_unlock(&sc->mtx);
+
}
break;
+#endif
case SIOCDELMULTI:
IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
if (sc->init_state == IXLV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_disable_intr(vsi);
+
ixlv_del_multi(vsi);
- ixlv_enable_intr(vsi);
- mtx_unlock(&sc->mtx);
+
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+ error = ifmedia_ioctl(ifp, ifr, sc->media, command);
break;
case SIOCSIFCAP:
{
@@ -752,6 +714,7 @@
return (error);
}
+#endif
/*
** To do a reinit on the VF is unfortunately more complicated
@@ -774,7 +737,7 @@
INIT_DBG_IF(ifp, "begin");
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_stop(sc);
+ ixlv_if_stop(sc->vsi.ctx);
error = ixlv_reset(sc);
@@ -834,8 +797,9 @@
}
static void
-ixlv_init_locked(struct ixlv_sc *sc)
+ixlv_init_internal(if_ctx_t ctx)
{
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
@@ -844,8 +808,6 @@
INIT_DBG_IF(ifp, "begin");
- IXLV_CORE_LOCK_ASSERT(sc);
-
/* Do a reinit first if an init has already been done */
if ((sc->init_state == IXLV_RUNNING) ||
(sc->init_state == IXLV_RESET_REQUIRED) ||
@@ -887,17 +849,11 @@
/* Setup vlan's if needed */
ixlv_setup_vlan_filters(sc);
+ ixlv_init_multi(&sc->vsi);
+
/* Prepare the queues for operation */
for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct rx_ring *rxr = &que->rxr;
-
ixl_init_tx_ring(que);
-
- if (vsi->max_frame_size <= MCLBYTES)
- rxr->mbuf_sz = MCLBYTES;
- else
- rxr->mbuf_sz = MJUMPAGESIZE;
- ixl_init_rx_ring(que);
}
/* Configure queues */
@@ -915,38 +871,31 @@
ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
- /* Start the local timer */
- callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
-
sc->init_state = IXLV_RUNNING;
init_done:
INIT_DBG_IF(ifp, "end");
- return;
}
-/*
-** Init entry point for the stack
-*/
-void
-ixlv_init(void *arg)
+static void
+ixlv_if_init(if_ctx_t ctx)
{
- struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
- struct ixlv_sc *sc = vsi->back;
- int retries = 0;
+ struct ifnet *ifp;
+ int retries;
- mtx_lock(&sc->mtx);
- ixlv_init_locked(sc);
- mtx_unlock(&sc->mtx);
+ ixlv_init_internal(ctx);
- /* Wait for init_locked to finish */
- while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ifp = iflib_get_ifp(ctx);
+ retries = 0;
+ /* Wait for init to finish */
+ while (!(ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < 100) {
i40e_msec_delay(10);
}
if (retries >= IXLV_AQ_MAX_ERR)
- if_printf(vsi->ifp,
+ if_printf(ifp,
"Init failed to complete in alloted time!\n");
+
}
/*
@@ -1136,134 +1085,12 @@
return (ret_error);
}
-/*
- * Allocate MSI/X vectors, setup the AQ vector early
- */
-static int
-ixlv_init_msix(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- int rid, want, vectors, queues, available;
-
- rid = PCIR_BAR(IXL_BAR);
- sc->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!sc->msix_mem) {
- /* May not be enabled */
- device_printf(sc->dev,
- "Unable to map MSIX table \n");
- goto fail;
- }
-
- available = pci_msix_count(dev);
- if (available == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, sc->msix_mem);
- sc->msix_mem = NULL;
- goto fail;
- }
-
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
-
- /* Override with hardcoded value if sane */
- if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
- queues = ixlv_max_queues;
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
+#if 0
/* Enforce the VF max value */
if (queues > IXLV_MAX_QUEUES)
queues = IXLV_MAX_QUEUES;
-
- /*
- ** Want one vector (RX/TX pair) per queue
- ** plus an additional for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(sc->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- goto fail;
- }
-
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
#endif
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(sc->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- sc->msix = vectors;
- sc->vsi.num_queues = queues;
- }
-
- /*
- ** Explicitly set the guest PCI BUSMASTER capability
- ** and we must rewrite the ENABLE in the MSIX control
- ** register again at this point to cause the host to
- ** successfully initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- /* Next we need to setup the vector for the Admin Queue */
- rid = 1; // zero vector + 1
- sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (sc->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: AQ interrupt \n");
- goto fail;
- }
- if (bus_setup_intr(dev, sc->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixlv_msix_adminq, sc, &sc->tag)) {
- sc->res = NULL;
- device_printf(dev, "Failed to register AQ handler");
- goto fail;
- }
- bus_describe_intr(dev, sc->res, sc->tag, "adminq");
-
- return (vectors);
-
-fail:
- /* The VF driver MUST use MSIX */
- return (0);
-}
-
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
@@ -1279,10 +1106,8 @@
return (ENXIO);
}
- sc->osdep.mem_bus_space_tag =
- rman_get_bustag(sc->pci_mem);
- sc->osdep.mem_bus_space_handle =
- rman_get_bushandle(sc->pci_mem);
+ sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
+ sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
@@ -1292,145 +1117,18 @@
/* Disable adminq interrupts */
ixlv_disable_adminq_irq(&sc->hw);
- /*
- ** Now setup MSI/X, it will return
- ** us the number of supported vectors
- */
- sc->msix = ixlv_init_msix(sc);
-
- /* We fail without MSIX support */
- if (sc->msix == 0)
- return (ENXIO);
-
return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
- /* We may get here before stations are setup */
- if (que == NULL)
- goto early;
-
- /*
- ** Release all msix queue resources:
- */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- int rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
-
-early:
- /* Clean the AdminQ interrupt */
- if (sc->tag != NULL) {
- bus_teardown_intr(dev, sc->res, sc->tag);
- sc->tag = NULL;
- }
- if (sc->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
-
- pci_release_msi(dev);
-
- if (sc->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(IXL_BAR), sc->msix_mem);
-
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), sc->pci_mem);
- return;
-}
-
-/*
- * Create taskqueue and tasklet for Admin Queue interrupts.
- */
-static int
-ixlv_init_taskqueue(struct ixlv_sc *sc)
-{
- int error = 0;
-
- TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
-
- sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &sc->tq);
- taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
- device_get_nameunit(sc->dev));
-
- return (error);
-}
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI queues
- *
- **********************************************************************/
-static int
-ixlv_assign_msix(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 1;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
-
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixlv_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register que handler");
- return (error);
- }
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- bus_bind_intr(dev, que->res, cpu_id);
- que->msix = vector;
- vsi->que_mask |= (u64)(1 << que->msix);
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixlv_handle_que, que);
- que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
-#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s que", device_get_nameunit(dev));
-#endif
-
- }
-
- return (0);
}
/*
@@ -1492,48 +1190,77 @@
return (EBUSY);
}
-
/*********************************************************************
*
- * Setup networking device structure and register an interface.
+ * Setup MSIX Interrupt resources and handlers for the VSI
*
**********************************************************************/
static int
-ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
+ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct ifnet *ifp;
- struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
struct ixl_queue *que = vsi->queues;
+ int err, rid, vector = 0;
- INIT_DBG_DEV(dev, "begin");
+	/* Admin Queue is vector 0 */
+ rid = vector + 1;
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "%s: could not allocate ifnet"
- " structure!\n", __func__);
- return (-1);
+ err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
+ ixlv_msix_adminq, pf, 0, "aq");
+ if (err) {
+ iflib_irq_free(ctx, &vsi->irq);
+ device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler");
+ return (err);
}
+ ++vector;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ /* Now set up the stations */
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ char buf[16];
+ rid = vector + 1;
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = 4000000000; // ??
- ifp->if_init = ixlv_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixlv_ioctl;
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX,
+ ixlv_msix_que, que, que->me, buf);
+ if (err) {
+ device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err);
+ vsi->num_queues = i + 1;
+ goto fail;
+ }
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, que, que->me, buf);
+ que->msix = vector;
+ }
-#if __FreeBSD_version >= 1100000
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
+ return (0);
+fail:
+ iflib_irq_free(ctx, &vsi->irq);
+ que = vsi->queues;
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ iflib_irq_free(ctx, &que->que_irq);
+ return (err);
+}
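The vector/RID bookkeeping above follows the iflib convention that the bus resource ID is the MSI-X vector number plus one: the admin queue takes vector 0 (rid 1), and RX queue i takes vector i+1 (rid i+2), with the paired TX queue attached to the same vector as a softirq. A minimal sketch of the mapping (helper name hypothetical):

	/* vector 0 -> admin queue; vector i+1 -> RX/TX queue pair i */
	static inline int
	ixlv_vector_to_rid(int vector)
	{
		return (vector + 1);	/* bus RIDs are 1-based */
	}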
- ifp->if_transmit = ixl_mq_start;
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixlv_setup_interface(if_ctx_t ctx)
+{
+ struct ifnet *ifp;
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
- ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+	INIT_DBG_DEV(iflib_get_dev(ctx), "begin");
- ether_ifattach(ifp, sc->hw.mac.addr);
+ ifp = vsi->ifp = iflib_get_ifp(ctx);
+ if_setbaudrate(ifp, 4000000000);
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
@@ -1565,150 +1292,16 @@
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
- ixlv_media_status);
-
// JFV Add media types later?
- ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
INIT_DBG_DEV(dev, "end");
return (0);
}
/*
-** Allocate and setup the interface queues
-*/
-static int
-ixlv_setup_queues(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi;
- struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
- int error = I40E_SUCCESS;
-
- vsi = &sc->vsi;
- vsi->back = (void *)sc;
- vsi->hw = &sc->hw;
- vsi->num_vlans = 0;
-
- /* Get memory for the station queues */
- if (!(vsi->queues =
- (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
- vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
- }
-
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- que->num_desc = ixlv_ringsz;
- que->me = i;
- que->vsi = vsi;
- /* mark the queue as active */
- vsi->active_queues |= (u64)1 << que->me;
-
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL1(que->me);
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /*
- ** Create the TX descriptor ring, the extra int is
- ** added as the location for HEAD WB.
- */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(&sc->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
- M_WAITOK, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL1(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&sc->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
- /* Allocate receive soft structs for the ring*/
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- }
- free(vsi->queues, M_DEVBUF);
-
-early:
- return (error);
-}
-
-/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
@@ -1716,16 +1309,13 @@
** repopulate the real table.
*/
static void
-ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
- struct ixl_vsi *vsi = arg;
- struct ixlv_sc *sc = vsi->back;
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
struct ixlv_vlan_filter *v;
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
@@ -1735,16 +1325,15 @@
return;
}
- mtx_lock(&sc->mtx);
++vsi->num_vlans;
+	/* XXX: either check this allocation for failure or use M_WAITOK */
v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
+
SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
v->vlan = vtag;
v->flags = IXL_FILTER_ADD;
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
- mtx_unlock(&sc->mtx);
- return;
}
/*
@@ -1753,20 +1342,15 @@
** in the soft vfta.
*/
static void
-ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
- struct ixl_vsi *vsi = arg;
- struct ixlv_sc *sc = vsi->back;
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
struct ixlv_vlan_filter *v;
int i = 0;
-
- if (ifp->if_softc != arg)
- return;
-
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
- mtx_lock(&sc->mtx);
SLIST_FOREACH(v, sc->vlan_filters, next) {
if (v->vlan == vtag) {
v->flags = IXL_FILTER_DEL;
@@ -1777,8 +1361,6 @@
if (i)
ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
- mtx_unlock(&sc->mtx);
- return;
}
/*
@@ -1821,7 +1403,7 @@
/*
** Admin Queue interrupt handler
*/
-static void
+static int
ixlv_msix_adminq(void *arg)
{
struct ixlv_sc *sc = arg;
@@ -1834,32 +1416,34 @@
reg = rd32(hw, I40E_VFINT_DYN_CTL01);
reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, reg);
-
- /* schedule task */
- taskqueue_enqueue(sc->tq, &sc->aq_irq);
- return;
+ iflib_admin_intr_deferred(sc->vsi.ctx);
+ return (FILTER_HANDLED);
}
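Under iflib the admin-queue ISR becomes an interrupt filter: it acknowledges the cause in hard-interrupt context, hands the real work to iflib's admin task via iflib_admin_intr_deferred(), and returns FILTER_HANDLED so no threaded handler runs. A generic sketch of that pattern (softc and names hypothetical):

	struct example_softc {
		if_ctx_t ctx;
	};

	static int
	example_adminq_filter(void *arg)
	{
		struct example_softc *sc = arg;

		/* ... acknowledge/clear the hardware interrupt cause ... */
		iflib_admin_intr_deferred(sc->ctx);	/* defer the slow path */
		return (FILTER_HANDLED);		/* no threaded follow-up */
	}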
void
-ixlv_enable_intr(struct ixl_vsi *vsi)
+ixlv_if_intr_enable(if_ctx_t ctx)
{
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
ixlv_enable_adminq_irq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
- ixlv_enable_queue_irq(hw, que->me);
+ ixlv_if_queue_intr_enable(ctx, que->me);
}
void
-ixlv_disable_intr(struct ixl_vsi *vsi)
+ixlv_if_intr_disable(if_ctx_t ctx)
{
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
ixlv_disable_adminq_irq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
- ixlv_disable_queue_irq(hw, que->me);
+ ixlv_if_queue_intr_disable(ctx, que->me);
}
@@ -1886,8 +1470,11 @@
}
static void
-ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
+ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t id)
{
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct i40e_hw *hw = vsi->hw;
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
@@ -1896,11 +1483,14 @@
}
static void
-ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
+ixlv_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid)
{
- wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct i40e_hw *hw = vsi->hw;
+
+ wr32(hw, I40E_VFINT_DYN_CTLN1(qid), 0);
rd32(hw, I40E_VFGEN_RSTAT);
- return;
}
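The rd32() of I40E_VFGEN_RSTAT after zeroing DYN_CTLN1 is a posted-write flush: PCIe memory writes are posted, so reading any device register forces the preceding write to reach the hardware before the function returns. The idiom in isolation (a sketch, not new driver code):

	static void
	example_mask_vector(struct i40e_hw *hw, uint16_t qid)
	{
		wr32(hw, I40E_VFINT_DYN_CTLN1(qid), 0);	/* mask the queue vector */
		(void)rd32(hw, I40E_VFGEN_RSTAT);	/* flush the posted write */
	}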
@@ -2051,83 +1641,19 @@
return;
}
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-static void
-ixlv_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- mtx_lock(&txr->mtx);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- mtx_unlock(&txr->mtx);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
- /* Reenable this interrupt - hmmm */
- ixlv_enable_queue_irq(hw, que->me);
- return;
-}
-
-
/*********************************************************************
*
* MSIX Queue Interrupt Service routine
*
**********************************************************************/
-static void
+static int
ixlv_msix_que(void *arg)
{
struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Spurious interrupts are ignored */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- mtx_lock(&txr->mtx);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- mtx_unlock(&txr->mtx);
ixlv_set_queue_rx_itr(que);
ixlv_set_queue_tx_itr(que);
-
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixlv_enable_queue_irq(hw, que->me);
-
- return;
+ return (FILTER_SCHEDULE_THREAD);
}
@@ -2140,14 +1666,11 @@
*
**********************************************************************/
static void
-ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixlv_sc *sc = vsi->back;
-
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
INIT_DBG_IF(ifp, "begin");
- mtx_lock(&sc->mtx);
ixlv_update_link_status(sc);
@@ -2155,7 +1678,6 @@
ifmr->ifm_active = IFM_ETHER;
if (!sc->link_up) {
- mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end: link not up");
return;
}
@@ -2163,35 +1685,9 @@
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware is always full-duplex */
ifmr->ifm_active |= IFM_FDX;
- mtx_unlock(&sc->mtx);
- INIT_DBG_IF(ifp, "end");
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- * media/mediopt option with ifconfig.
- *
- **********************************************************************/
-static int
-ixlv_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DBG_IF(ifp, "begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
INIT_DBG_IF(ifp, "end");
- return (0);
}
-
/*********************************************************************
* Multicast Initialization
*
@@ -2224,51 +1720,37 @@
IOCTL_DBG_IF(vsi->ifp, "end");
}
+static int
+ixlv_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
+{
+ struct ixlv_sc *sc = arg;
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ return (0);
+ if (!ixlv_add_mac_filter(sc,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ IXL_FILTER_MC))
+ return (1);
+
+ return (0);
+}
static void
-ixlv_add_multi(struct ixl_vsi *vsi)
+ixlv_if_multi_set(if_ctx_t ctx)
{
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct ixlv_sc *sc = vsi->back;
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
int mcnt = 0;
IOCTL_DBG_IF(ifp, "begin");
- if_maddr_rlock(ifp);
- /*
- ** Get a count, to decide if we
- ** simply use multicast promiscuous.
- */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- mcnt++;
- }
- if_maddr_runlock(ifp);
+ mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
+ /* XXX */
+ ixlv_del_multi(&sc->vsi);
- // TODO: Remove -- cannot set promiscuous mode in a VF
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- /* delete all multicast filters */
- ixlv_init_multi(vsi);
- sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
- IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
- sc);
- IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
+ IOCTL_DEBUGOUT("%s: end: too many filters - no promiscuous mode in VF", __func__);
return;
}
-
- mcnt = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (!ixlv_add_mac_filter(sc,
- (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- IXL_FILTER_MC))
- mcnt++;
- }
- if_maddr_runlock(ifp);
+ mcnt = if_multi_apply(iflib_get_ifp(ctx), ixlv_mc_filter_apply, sc);
/*
** Notify AQ task that sw filters need to be
** added to hw list
@@ -2281,6 +1763,29 @@
IOCTL_DBG_IF(ifp, "end");
}
+static int
+ixlv_if_promisc_set(if_ctx_t ctx, int flags)
+{
+ if (flags & (IFF_ALLMULTI|IFF_PROMISC))
+ return (EINVAL);
+ return (0);
+}
+
+static int
+ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct ixlv_sc *sc;
+
+ sc = iflib_get_softc(ctx);
+ if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
+ return (EINVAL);
+ else
+ sc->vsi.max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+ return (0);
+}
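The MTU check is the inverse of the frame-size arithmetic: a frame carries the MTU plus a 14-byte Ethernet header, a 4-byte CRC, and a 4-byte VLAN tag, so the standard 1500-byte MTU yields 1500 + 14 + 4 + 4 = 1522 bytes. As a sketch:

	static uint32_t
	example_max_frame(uint32_t mtu)
	{
		/* 1500 -> 1500 + 14 + 4 + 4 = 1522 */
		return (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	}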
+
+
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
@@ -2341,21 +1846,27 @@
**********************************************************************/
static void
-ixlv_local_timer(void *arg)
+ixlv_if_timer(if_ctx_t ctx, uint16_t qid)
{
- struct ixlv_sc *sc = arg;
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = sc->dev;
- int hung = 0;
+ struct ixl_queue *que = &vsi->queues[qid];
u32 mask, val;
- IXLV_CORE_LOCK_ASSERT(sc);
-
/* If Reset is in progress just bail */
if (sc->init_state == IXLV_RESET_PENDING)
return;
+ mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
+
+ /* Any queues with outstanding work get a sw irq */
+ /* should be set by encap */
+ if (que->busy)
+ wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
+
+ if (qid != 0)
+ return;
/* Check for when PF triggers a VF reset */
val = rd32(hw, I40E_VFGEN_RSTAT) &
@@ -2369,51 +1880,10 @@
ixlv_request_stats(sc);
+#if 0
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
-
- /*
- ** Check status on the queues for a hang
- */
- mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++,que++) {
- /* Any queues with outstanding work get a sw irq */
- if (que->busy)
- wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
- /*
- ** Each time txeof runs without cleaning, but there
- ** are uncleaned descriptors it increments busy. If
- ** we get to 5 we declare it hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- vsi->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
- device_printf(dev,"Warning queue %d "
- "appears to be hung!\n", i);
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
- }
- }
- /* Only reset when all queues show hung */
- if (hung == vsi->num_queues)
- goto hung;
- callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
- return;
-
-hung:
- device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
- sc->init_state = IXLV_RESET_REQUIRED;
- ixlv_init_locked(sc);
+#endif
}
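The mask written above sets both INTENA and SWINT_TRIG in the queue's DYN_CTLN1 register, which makes the device fire that queue's MSI-X vector even though no descriptors completed, so a queue with outstanding work gets its handler run again. In isolation (a sketch):

	static void
	example_sw_kick(struct i40e_hw *hw, uint16_t qid)
	{
		u32 mask = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		    I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK;

		wr32(hw, I40E_VFINT_DYN_CTLN1(qid), mask);	/* force a SW interrupt */
	}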
/*
@@ -2421,11 +1891,12 @@
** the real check of the hardware only happens with
** a link interrupt.
*/
-void
-ixlv_update_link_status(struct ixlv_sc *sc)
+static void
+ixlv_if_update_admin_status(if_ctx_t ctx)
{
+ struct ixlv_sc *sc = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct ixl_vsi *vsi = &sc->vsi;
- struct ifnet *ifp = vsi->ifp;
if (sc->link_up){
if (vsi->link_active == FALSE) {
@@ -2455,69 +1926,26 @@
**********************************************************************/
static void
-ixlv_stop(struct ixlv_sc *sc)
+ixlv_if_stop(if_ctx_t ctx)
{
struct ifnet *ifp;
+ struct ixlv_sc *sc;
int start;
- ifp = sc->vsi.ifp;
+ ifp = iflib_get_ifp(ctx);
+ sc = iflib_get_softc(ctx);
INIT_DBG_IF(ifp, "begin");
- IXLV_CORE_LOCK_ASSERT(sc);
-
ixl_vc_flush(&sc->vc_mgr);
- ixlv_disable_queues(sc);
start = ticks;
while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
((ticks - start) < hz/10))
ixlv_do_adminq_locked(sc);
- /* Stop the local timer */
- callout_stop(&sc->timer);
-
INIT_DBG_IF(ifp, "end");
}
-
-/*********************************************************************
- *
- * Free all station queue structs.
- *
- **********************************************************************/
-static void
-ixlv_free_queues(struct ixl_vsi *vsi)
-{
- struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
- }
- free(vsi->queues, M_DEVBUF);
-}
-
-
/*
** ixlv_config_rss - setup RSS
**
@@ -2698,21 +2126,6 @@
return (0);
}
-/*
-** Tasklet handler for MSIX Adminq interrupts
-** - done outside interrupt context since it might sleep
-*/
-static void
-ixlv_do_adminq(void *context, int pending)
-{
- struct ixlv_sc *sc = context;
-
- mtx_lock(&sc->mtx);
- ixlv_do_adminq_locked(sc);
- mtx_unlock(&sc->mtx);
- return;
-}
-
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
@@ -2724,8 +2137,6 @@
u32 reg, oldreg;
i40e_status ret;
- IXLV_CORE_LOCK_ASSERT(sc);
-
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = sc->aq_buffer;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
@@ -2969,4 +2380,3 @@
return error;
return (0);
}
-
Index: sys/dev/ixl/ixl.h
===================================================================
--- sys/dev/ixl/ixl.h
+++ sys/dev/ixl/ixl.h
@@ -39,8 +39,6 @@
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/buf_ring.h>
-#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
@@ -60,6 +58,7 @@
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
+#include <net/iflib.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
@@ -85,7 +84,6 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
-#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
@@ -96,12 +94,16 @@
#include <dev/pci/pci_iov.h>
#endif
+#include "ifdi_if.h"
#include "i40e_type.h"
#include "i40e_prototype.h"
+MALLOC_DECLARE(M_IXL);
+
#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
#include <sys/sbuf.h>
+
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
@@ -304,16 +306,6 @@
#define IXL_END_OF_INTR_LNKLST 0x7FF
-#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
-#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
-#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
-#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx)
-#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
-
-#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
-#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
-#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
-
#if __FreeBSD_version >= 1100036
#define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count)
#define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count)
@@ -362,17 +354,6 @@
struct ixl_tx_buf {
u32 eop_index;
- struct mbuf *m_head;
- bus_dmamap_t map;
- bus_dma_tag_t tag;
-};
-
-struct ixl_rx_buf {
- struct mbuf *m_head;
- struct mbuf *m_pack;
- struct mbuf *fmp;
- bus_dmamap_t hmap;
- bus_dmamap_t pmap;
};
/*
@@ -392,23 +373,20 @@
*/
struct tx_ring {
struct ixl_queue *que;
- struct mtx mtx;
u32 tail;
- struct i40e_tx_desc *base;
- struct i40e_dma_mem dma;
+ struct i40e_tx_desc *tx_base;
+ uint64_t tx_paddr;
u16 next_avail;
u16 next_to_clean;
u16 atr_rate;
u16 atr_count;
u16 itr;
u16 latency;
- struct ixl_tx_buf *buffers;
+ struct ixl_tx_buf *tx_buffers;
volatile u16 avail;
u32 cmd;
bus_dma_tag_t tx_tag;
bus_dma_tag_t tso_tag;
- char mtx_name[16];
- struct buf_ring *br;
/* Used for Dynamic ITR calculation */
u32 packets;
@@ -426,19 +404,12 @@
*/
struct rx_ring {
struct ixl_queue *que;
- struct mtx mtx;
- union i40e_rx_desc *base;
- struct i40e_dma_mem dma;
- struct lro_ctrl lro;
- bool lro_enabled;
- bool hdr_split;
+ union i40e_rx_desc *rx_base;
+ uint64_t rx_paddr;
bool discard;
- u16 next_refresh;
- u16 next_check;
u16 itr;
u16 latency;
- char mtx_name[16];
- struct ixl_rx_buf *buffers;
+
u32 mbuf_sz;
u32 tail;
bus_dma_tag_t htag;
@@ -467,13 +438,11 @@
u32 eims; /* This queue's EIMS bit */
struct resource *res;
void *tag;
- int num_desc; /* both tx and rx */
int busy;
struct tx_ring txr;
struct rx_ring rxr;
- struct task task;
- struct task tx_task;
- struct taskqueue *tq;
+
+ struct if_irq que_irq;
/* Queue stats */
u64 irqs;
@@ -486,32 +455,40 @@
u64 dropped_pkts;
};
+#define DOWNCAST(sctx) ((struct ixl_vsi *)(sctx))
/*
** Virtual Station interface:
** there would be one of these per traffic class/type
** for now just one, and its embedded in the pf
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
+
struct ixl_vsi {
- void *back;
+ if_ctx_t ctx;
+ if_softc_ctx_t shared;
+
struct ifnet *ifp;
- struct device *dev;
+ struct ifmedia *media;
+
+#define num_queues shared->isc_nqsets
+#define max_frame_size shared->isc_max_frame_size
+
+ void *back;
struct i40e_hw *hw;
- struct ifmedia media;
u64 que_mask;
int id;
u16 vsi_num;
u16 msix_base; /* station base MSIX vector */
u16 first_queue;
- u16 num_queues;
u16 rx_itr_setting;
u16 tx_itr_setting;
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;
+ u32 link_speed;
+ struct if_irq irq;
u16 uplink_seid;
u16 downlink_seid;
- u16 max_frame_size;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
@@ -519,8 +496,6 @@
struct i40e_aqc_vsi_properties_data info;
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
u16 num_vlans;
/* Per-VSI stats from hardware */
@@ -551,21 +526,6 @@
};
/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-ixl_rx_unrefreshed(struct ixl_queue *que)
-{
- struct rx_ring *rxr = &que->rxr;
-
- if (rxr->next_check > rxr->next_refresh)
- return (rxr->next_check - rxr->next_refresh - 1);
- else
- return ((que->num_desc + rxr->next_check) -
- rxr->next_refresh - 1);
-}
-
-/*
** Find the next available unused filter
*/
static inline struct ixl_mac_filter *
@@ -575,7 +535,7 @@
/* create a new empty filter */
f = malloc(sizeof(struct ixl_mac_filter),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_IXL, M_NOWAIT | M_ZERO);
if (f)
SLIST_INSERT_HEAD(&vsi->ftl, f, next);
@@ -588,14 +548,8 @@
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
- bool cmp = FALSE;
- if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
- (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
- (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
- cmp = TRUE;
-
- return (cmp);
+ return (bcmp(ea1, ea2, 6) == 0);
}
/*
@@ -632,19 +586,8 @@
/*********************************************************************
* TXRX Function prototypes
*********************************************************************/
-int ixl_allocate_tx_data(struct ixl_queue *);
-int ixl_allocate_rx_data(struct ixl_queue *);
void ixl_init_tx_ring(struct ixl_queue *);
-int ixl_init_rx_ring(struct ixl_queue *);
-bool ixl_rxeof(struct ixl_queue *, int);
-bool ixl_txeof(struct ixl_queue *);
-int ixl_mq_start(struct ifnet *, struct mbuf *);
-int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
-void ixl_deferred_mq_start(void *, int);
-void ixl_qflush(struct ifnet *);
-void ixl_free_vsi(struct ixl_vsi *);
-void ixl_free_que_tx(struct ixl_queue *);
-void ixl_free_que_rx(struct ixl_queue *);
+
#ifdef IXL_FDIR
void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
#endif
@@ -652,4 +595,12 @@
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
+/*********************************************************************
+ * common Function prototypes
+ *********************************************************************/
+
+int ixl_if_media_change(if_ctx_t);
+int ixl_if_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int);
+void ixl_if_queues_free(if_ctx_t ctx);
+
#endif /* _IXL_H_ */
Index: sys/dev/ixl/ixl_pf.h
===================================================================
--- sys/dev/ixl/ixl_pf.h
+++ sys/dev/ixl/ixl_pf.h
@@ -54,12 +54,22 @@
/* Physical controller structure */
struct ixl_pf {
+ /*
+ ** VSI - Stations:
+ ** These are the traffic class holders, and
+ ** will have a stack interface and queues
+ ** associated with them.
+ ** NOTE: for now using just one, so embed it.
+	** also, to make it interchangeable, place it _first_
+ */
+ struct ixl_vsi vsi;
+
+
struct i40e_hw hw;
struct i40e_osdep osdep;
- struct device *dev;
+ device_t dev;
struct resource *pci_mem;
- struct resource *msix_mem;
/*
* Interrupt resources: this set is
@@ -73,28 +83,14 @@
int msix;
int if_flags;
- struct mtx pf_mtx;
-
u32 qbase;
u32 admvec;
- struct task adminq;
- struct taskqueue *tq;
bool link_up;
u32 link_speed;
int advertised_speed;
int fc; /* local flow ctrl setting */
- /*
- ** Network interfaces
- ** These are the traffic class holders, and
- ** will have a stack interface and queues
- ** associated with them.
- ** NOTE: The PF has only a single interface,
- ** so it is embedded in the PF struct.
- */
- struct ixl_vsi vsi;
-
/* Misc stats maintained by the driver */
u64 watchdog_events;
u64 admin_irq;
@@ -129,11 +125,4 @@
#define i40e_send_vf_nack(pf, vf, op, st) \
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
-#define IXL_PF_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
-#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
-#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
-#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
-#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
-
#endif /* _IXL_PF_H_ */
Index: sys/dev/ixl/ixl_txrx.c
===================================================================
--- sys/dev/ixl/ixl_txrx.c
+++ sys/dev/ixl/ixl_txrx.c
@@ -51,148 +51,35 @@
#endif
/* Local Prototypes */
-static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
-static void ixl_refresh_mbufs(struct ixl_queue *, int);
-static int ixl_xmit(struct ixl_queue *, struct mbuf **);
-static int ixl_tx_setup_offload(struct ixl_queue *,
- struct mbuf *, u32 *, u32 *);
-static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
+static void ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);
-static __inline void ixl_rx_discard(struct rx_ring *, int);
-static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u8);
+static int ixl_isc_txd_encap(void *arg, if_pkt_info_t pi);
+static void ixl_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
+static int ixl_isc_txd_credits_update(void *arg, uint16_t qid, uint32_t cidx);
-#ifdef DEV_NETMAP
-#include <dev/netmap/if_ixl_netmap.h>
-#endif /* DEV_NETMAP */
+static void ixl_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count);
+static void ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
+static int ixl_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx);
+static int ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
-/*
-** Multiqueue Transmit driver
-*/
-int
-ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_queue *que;
- struct tx_ring *txr;
- int err, i;
-#ifdef RSS
- u32 bucket_id;
-#endif
+extern int ixl_intr(void *arg);
- /*
- ** Which queue to use:
- **
- ** When doing RSS, map it to the same outbound
- ** queue as the incoming flow would be mapped to.
- ** If everything is setup correctly, it should be
- ** the same bucket that the current CPU we're on is.
- */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
-#ifdef RSS
- if (rss_hash2bucket(m->m_pkthdr.flowid,
- M_HASHTYPE_GET(m), &bucket_id) == 0) {
- i = bucket_id % vsi->num_queues;
- } else
-#endif
- i = m->m_pkthdr.flowid % vsi->num_queues;
- } else
- i = curcpu % vsi->num_queues;
- /*
- ** This may not be perfect, but until something
- ** better comes along it will keep from scheduling
- ** on stalled queues.
- */
- if (((1 << i) & vsi->active_queues) == 0)
- i = ffsl(vsi->active_queues);
+struct if_txrx ixl_txrx = {
+ ixl_isc_txd_encap,
+ ixl_isc_txd_flush,
+ ixl_isc_txd_credits_update,
+ ixl_isc_rxd_available,
+ ixl_isc_rxd_pkt_get,
+ ixl_isc_rxd_refill,
+ ixl_isc_rxd_flush,
+ ixl_intr
+};
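The ops table relies on positional initialization, which breaks silently if struct if_txrx is ever reordered. A designated-initializer form would be sturdier; the field names below are an assumption about the struct's layout, inferred only from the positional order above:

	struct if_txrx ixl_txrx_alt = {
		.ift_txd_encap = ixl_isc_txd_encap,
		.ift_txd_flush = ixl_isc_txd_flush,
		.ift_txd_credits_update = ixl_isc_txd_credits_update,
		.ift_rxd_available = ixl_isc_rxd_available,
		.ift_rxd_pkt_get = ixl_isc_rxd_pkt_get,
		.ift_rxd_refill = ixl_isc_rxd_refill,
		.ift_rxd_flush = ixl_isc_rxd_flush,
		.ift_legacy_intr = ixl_intr,
	};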
- que = &vsi->queues[i];
- txr = &que->txr;
-
- err = drbr_enqueue(ifp, txr->br, m);
- if (err)
- return (err);
- if (IXL_TX_TRYLOCK(txr)) {
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- } else
- taskqueue_enqueue(que->tq, &que->tx_task);
-
- return (0);
-}
-
-int
-ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
-{
- struct ixl_queue *que = txr->que;
- struct ixl_vsi *vsi = que->vsi;
- struct mbuf *next;
- int err = 0;
-
-
- if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
- vsi->link_active == 0)
- return (ENETDOWN);
-
- /* Process the transmit queue */
- while ((next = drbr_peek(ifp, txr->br)) != NULL) {
- if ((err = ixl_xmit(que, &next)) != 0) {
- if (next == NULL)
- drbr_advance(ifp, txr->br);
- else
- drbr_putback(ifp, txr->br, next);
- break;
- }
- drbr_advance(ifp, txr->br);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- }
-
- if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
- ixl_txeof(que);
-
- return (err);
-}
-
-/*
- * Called from a taskqueue to drain queued transmit packets.
- */
-void
-ixl_deferred_mq_start(void *arg, int pending)
-{
- struct ixl_queue *que = arg;
- struct tx_ring *txr = &que->txr;
- struct ixl_vsi *vsi = que->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- IXL_TX_LOCK(txr);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
-}
-
-/*
-** Flush all queue ring buffers
-*/
-void
-ixl_qflush(struct ifnet *ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
+extern if_shared_ctx_t ixl_sctx;
- for (int i = 0; i < vsi->num_queues; i++) {
- struct ixl_queue *que = &vsi->queues[i];
- struct tx_ring *txr = &que->txr;
- struct mbuf *m;
- IXL_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXL_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
+#ifdef notyet
/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than 8
@@ -219,6 +106,113 @@
return (ret);
}
+#endif
+
+
+/*********************************************************************
+ *
+ * Setup descriptor for hw offloads
+ *
+ **********************************************************************/
+
+static void
+ixl_tx_setup_offload(struct ixl_queue *que,
+ if_pkt_info_t pi, u32 *cmd, u32 *off)
+{
+
+
+ switch (pi->ipi_etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ /* The IP checksum must be recalculated with TSO */
+ if (pi->ipi_csum_flags & CSUM_TSO)
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ *off |= (pi->ipi_ehdrlen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ *off |= (pi->ipi_ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *off |= (pi->ipi_tcp_hlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+#ifdef IXL_FDIR
+ ixl_atr(que, pi->ipi_tcp_hflags, pi->ipi_etype);
+#endif
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *off |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ break;
+
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *off |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ /* Fall Thru */
+ default:
+ break;
+ }
+
+}
+
+/**********************************************************************
+ *
+ * Setup context for hardware segmentation offload (TSO)
+ *
+ **********************************************************************/
+static int
+ixl_tso_setup(struct ixl_queue *que, if_pkt_info_t pi)
+{
+ struct tx_ring *txr = &que->txr;
+ struct i40e_tx_context_desc *TXD;
+ struct ixl_tx_buf *buf;
+ u32 cmd, mss, type, tsolen;
+ int idx;
+ u64 type_cmd_tso_mss;
+
+ idx = pi->ipi_pidx;
+ buf = &txr->tx_buffers[idx];
+ TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx];
+ tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
+
+ type = I40E_TX_DESC_DTYPE_CONTEXT;
+ cmd = I40E_TX_CTX_DESC_TSO;
+ mss = pi->ipi_tso_segsz;
+
+ type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
+ ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
+
+ TXD->tunneling_params = htole32(0);
+ buf->eop_index = -1;
+
+ if (++idx == ixl_sctx->isc_ntxd)
+ idx = 0;
+ return (idx);
+}
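A worked example of the quadword packing above, with illustrative values: a 65550-byte TSO chain with 14-byte Ethernet, 20-byte IP, and 20-byte TCP headers gives tsolen = 65550 - 54 = 65496; with mss = 1460 the context descriptor's first quadword is just four shifted fields OR'd together:

	static u64
	example_tso_qw1(u32 tsolen, u32 mss)
	{
		u64 qw;

		qw  = (u64)I40E_TX_DESC_DTYPE_CONTEXT << I40E_TXD_CTX_QW1_DTYPE_SHIFT;
		qw |= (u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT;
		qw |= (u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT;
		qw |= (u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT;
		return (qw);		/* caller stores htole64(qw) */
	}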
+
/*********************************************************************
@@ -231,37 +225,35 @@
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
static int
-ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
+ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
+ struct ixl_vsi *vsi = arg;
+ struct ixl_queue *que = &vsi->queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *txd = NULL;
- struct mbuf *m_head, *m;
- int i, j, error, nsegs, maxsegs;
+ int i, j;
int first, last = 0;
- u16 vtag = 0;
- u32 cmd, off;
- bus_dmamap_t map;
- bus_dma_tag_t tag;
- bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
+ u32 cmd, off;
cmd = off = 0;
- m_head = *m_headp;
/*
* Important to capture the first descriptor
* used because it will contain the index of
* the one we tell the hardware to report back
*/
- first = txr->next_avail;
- buf = &txr->buffers[first];
- map = buf->map;
- tag = txr->tx_tag;
- maxsegs = IXL_MAX_TX_SEGS;
+ first = pi->ipi_pidx;
+ buf = &txr->tx_buffers[first];
+ if (pi->ipi_flags & IPI_TX_INTR)
+ cmd |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+
+#ifdef notyet
+ /* add this check to iflib - shouldn't actually happen in practice */
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
/* Use larger mapping for TSO */
tag = txr->tso_tag;
@@ -276,77 +268,27 @@
*m_headp = m;
}
}
-
- /*
- * Map the packet for DMA.
- */
- error = bus_dmamap_load_mbuf_sg(tag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == EFBIG) {
- struct mbuf *m;
-
- m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
- if (m == NULL) {
- que->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
-
- /* Try it again */
- error = bus_dmamap_load_mbuf_sg(tag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == ENOMEM) {
- que->tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- que->tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- } else if (error == ENOMEM) {
- que->tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- que->tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->avail - 2) {
- txr->no_desc++;
- error = ENOBUFS;
- goto xmit_fail;
- }
- m_head = *m_headp;
+#endif
/* Set up the TSO/CSUM offload */
- if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
- error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
- if (error)
- goto xmit_fail;
+ if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
+ /* Set up the TSO context descriptor if required */
+ if (pi->ipi_csum_flags & CSUM_TSO)
+ first = ixl_tso_setup(que, pi);
+ ixl_tx_setup_offload(que, pi, &cmd, &off);
}
- cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Grab the VLAN tag */
- if (m_head->m_flags & M_VLANTAG) {
+ if (pi->ipi_mflags & M_VLANTAG)
cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- vtag = htole16(m_head->m_pkthdr.ether_vtag);
- }
- i = txr->next_avail;
+ cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ i = first;
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
- buf = &txr->buffers[i];
- buf->tag = tag; /* Keep track of the type tag */
- txd = &txr->base[i];
+ buf = &txr->tx_buffers[i];
+ txd = &txr->tx_base[i];
seglen = segs[j].ds_len;
txd->buffer_addr = htole64(segs[j].ds_addr);
@@ -355,473 +297,73 @@
| ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
| ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
| ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
- | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));
+ | ((u64)htole16(pi->ipi_vtag) << I40E_TXD_QW1_L2TAG1_SHIFT));
last = i; /* descriptor that will get completion IRQ */
- if (++i == que->num_desc)
+ if (++i == ixl_sctx->isc_ntxd)
i = 0;
-
- buf->m_head = NULL;
- buf->eop_index = -1;
- }
- /* Set the last descriptor for report */
- txd->cmd_type_offset_bsz |=
- htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
- txr->avail -= nsegs;
- txr->next_avail = i;
-
- buf->m_head = m_head;
- /* Swap the dma map between the first and last descriptor */
- txr->buffers[first].map = buf->map;
- buf->map = map;
- bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the index of the descriptor that will be marked done */
- buf = &txr->buffers[first];
- buf->eop_index = last;
-
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- wr32(hw, txr->tail, i);
-
- ixl_flush(hw);
- /* Mark outstanding work */
- if (que->busy == 0)
- que->busy = 1;
- return (0);
-
-xmit_fail:
- bus_dmamap_unload(tag, buf->map);
- return (error);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- *
- **********************************************************************/
-int
-ixl_allocate_tx_data(struct ixl_queue *que)
-{
- struct tx_ring *txr = &que->txr;
- struct ixl_vsi *vsi = que->vsi;
- device_t dev = vsi->dev;
- struct ixl_tx_buf *buf;
- int error = 0;
-
- /*
- * Setup DMA descriptor areas.
- */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXL_TSO_SIZE, /* maxsize */
- IXL_MAX_TX_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->tx_tag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- /* Make a special tag for TSO */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXL_TSO_SIZE, /* maxsize */
- IXL_MAX_TSO_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->tso_tag))) {
- device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
- goto fail;
- }
-
- if (!(txr->buffers =
- (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
- que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer default dma maps */
- buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
- buf->tag = txr->tx_tag;
- error = bus_dmamap_create(buf->tag, 0, &buf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-fail:
- return (error);
-}
-
-
-/*********************************************************************
- *
- * (Re)Initialize a queue transmit ring.
- * - called by init, it clears the descriptor ring,
- * and frees any stale mbufs
- *
- **********************************************************************/
-void
-ixl_init_tx_ring(struct ixl_queue *que)
-{
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(que->vsi->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
-
- /* Clear the old ring contents */
- IXL_TX_LOCK(txr);
-
-#ifdef DEV_NETMAP
- /*
- * (under lock): if in netmap mode, do some consistency
- * checks and set slot to entry 0 of the netmap ring.
- */
- slot = netmap_reset(na, NR_TX, que->me, 0);
-#endif /* DEV_NETMAP */
-
- bzero((void *)txr->base,
- (sizeof(struct i40e_tx_desc)) * que->num_desc);
-
- /* Reset indices */
- txr->next_avail = 0;
- txr->next_to_clean = 0;
-
-#ifdef IXL_FDIR
- /* Initialize flow director */
- txr->atr_rate = ixl_atr_rate;
- txr->atr_count = 0;
-#endif
-
- /* Free any existing tx mbufs. */
- buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
- if (buf->m_head != NULL) {
- bus_dmamap_sync(buf->tag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag, buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- }
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, set the map for the packet buffer.
- * NOTE: Some drivers (not this one) also need to set
- * the physical buffer address in the NIC ring.
- * netmap_idx_n2k() maps a nic index, i, into the corresponding
- * netmap slot index, si
- */
- if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
- netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
- }
-#endif /* DEV_NETMAP */
- /* Clear the EOP index */
- buf->eop_index = -1;
- }
-
- /* Set number of descriptors available */
- txr->avail = que->num_desc;
-
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXL_TX_UNLOCK(txr);
-}
-
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-void
-ixl_free_que_tx(struct ixl_queue *que)
-{
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
-
- for (int i = 0; i < que->num_desc; i++) {
- buf = &txr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(buf->tag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- if (buf->map != NULL) {
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
- }
- } else if (buf->map != NULL) {
- bus_dmamap_unload(buf->tag,
- buf->map);
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
- }
- }
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
- if (txr->buffers != NULL) {
- free(txr->buffers, M_DEVBUF);
- txr->buffers = NULL;
- }
- if (txr->tx_tag != NULL) {
- bus_dma_tag_destroy(txr->tx_tag);
- txr->tx_tag = NULL;
- }
- if (txr->tso_tag != NULL) {
- bus_dma_tag_destroy(txr->tso_tag);
- txr->tso_tag = NULL;
- }
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
- return;
-}
-
-/*********************************************************************
- *
- * Setup descriptor for hw offloads
- *
- **********************************************************************/
-
-static int
-ixl_tx_setup_offload(struct ixl_queue *que,
- struct mbuf *mp, u32 *cmd, u32 *off)
-{
- struct ether_vlan_header *eh;
-#ifdef INET
- struct ip *ip = NULL;
-#endif
- struct tcphdr *th = NULL;
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
- int elen, ip_hlen = 0, tcp_hlen;
- u16 etype;
- u8 ipproto = 0;
- bool tso = FALSE;
-
-
- /* Set up the TSO context descriptor if required */
- if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
- tso = ixl_tso_setup(que, mp);
- if (tso)
- ++que->tso;
- else
- return (ENXIO);
- }
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- elen = ETHER_HDR_LEN;
- }
-
- switch (etype) {
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + elen);
- ip_hlen = ip->ip_hl << 2;
- ipproto = ip->ip_p;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- /* The IP checksum must be recalculated with TSO */
- if (tso)
- *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- else
- *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
- break;
-#endif
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + elen);
- ip_hlen = sizeof(struct ip6_hdr);
- ipproto = ip6->ip6_nxt;
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
- *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
- break;
-#endif
- default:
- break;
- }
-
- *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-
- switch (ipproto) {
- case IPPROTO_TCP:
- tcp_hlen = th->th_off << 2;
- if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
- *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *off |= (tcp_hlen >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
- }
-#ifdef IXL_FDIR
- ixl_atr(que, th, etype);
-#endif
- break;
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
- *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
- *off |= (sizeof(struct udphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
- }
- break;
-
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
- *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
- *off |= (sizeof(struct sctphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
- }
- /* Fall Thru */
- default:
- break;
+
+ buf->eop_index = -1;
}
+ /* Set the last descriptor for report */
+ txd->cmd_type_offset_bsz |=
+ htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
+ pi->ipi_new_pidx = i;
+
+
+ /* Set the index of the descriptor that will be marked done */
+ buf = &txr->tx_buffers[first];
+ buf->eop_index = last;
+ ++txr->total_packets;
return (0);
}
+static void
+ixl_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
+{
+ struct ixl_vsi *vsi = arg;
+ struct tx_ring *txr = &vsi->queues[txqid].txr;
+ /*
+	 * Advance the Transmit Descriptor Tail (TDT); this tells the
+ * hardware that this frame is available to transmit.
+ */
+ wr32(vsi->hw, txr->tail, pidx);
+}
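/*
 * Editor's sketch (not part of the patch): the txd_flush callback above is
 * the classic tail-doorbell handoff -- software advances a producer index,
 * then writes it to a device register so hardware knows new descriptors are
 * ready.  A minimal self-contained model; the ring size and the register
 * stand-in are assumptions:
 */
#include <stdint.h>

#define MODEL_NTXD 1024u

struct model_txq {
	uint32_t pidx;			/* software producer index */
	volatile uint32_t tail;		/* stands in for the device tail reg */
};

static void
model_txd_flush(struct model_txq *q, uint32_t pidx)
{
	q->pidx = pidx % MODEL_NTXD;
	q->tail = q->pidx;	/* doorbell: HW may now fetch up to pidx */
}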
-/**********************************************************************
+/*********************************************************************
*
- * Setup context for hardware segmentation offload (TSO)
+ * (Re)Initialize a queue transmit ring.
+ * - called by init, it clears the descriptor ring
+ * and resets the EOP indices; mbufs are now managed by iflib
*
**********************************************************************/
-static bool
-ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
+void
+ixl_init_tx_ring(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
- struct i40e_tx_context_desc *TXD;
struct ixl_tx_buf *buf;
- u32 cmd, mss, type, tsolen;
- u16 etype;
- int idx, elen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
-#ifdef INET
- struct ip *ip;
-#endif
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
-#if defined(INET6) || defined(INET)
- struct tcphdr *th;
-#endif
- u64 type_cmd_tso_mss;
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- etype = eh->evl_proto;
- } else {
- elen = ETHER_HDR_LEN;
- etype = eh->evl_encap_proto;
- }
+ /* Clear the old ring contents */
+ bzero((void *)txr->tx_base,
+ (sizeof(struct i40e_tx_desc)) * ixl_sctx->isc_ntxd);
- switch (ntohs(etype)) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + elen);
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return (ENXIO);
- ip_hlen = sizeof(struct ip6_hdr);
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
- th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
- tcp_hlen = th->th_off << 2;
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + elen);
- if (ip->ip_p != IPPROTO_TCP)
- return (ENXIO);
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- tcp_hlen = th->th_off << 2;
- break;
+#ifdef IXL_FDIR
+ /* Initialize flow director */
+ txr->atr_rate = ixl_atr_rate;
+ txr->atr_count = 0;
#endif
- default:
- printf("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(etype));
- return FALSE;
- }
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
- return FALSE;
-
- idx = txr->next_avail;
- buf = &txr->buffers[idx];
- TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
- tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
-
- type = I40E_TX_DESC_DTYPE_CONTEXT;
- cmd = I40E_TX_CTX_DESC_TSO;
- mss = mp->m_pkthdr.tso_segsz;
- type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
- ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
- ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
- ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
- TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
+ buf = txr->tx_buffers;
+ for (int i = 0; i < ixl_sctx->isc_ntxd; i++, buf++) {
- TXD->tunneling_params = htole32(0);
- buf->m_head = NULL;
+ /* Clear the EOP index */
buf->eop_index = -1;
-
- if (++idx == que->num_desc)
- idx = 0;
-
- txr->avail--;
- txr->next_avail = idx;
-
- return TRUE;
+ }
}
+
/*
** ixl_get_tx_head - Retrieve the value from the
** location the HW records its HEAD index
@@ -830,7 +372,8 @@
ixl_get_tx_head(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
- void *head = &txr->base[que->num_desc];
+ void *head = &txr->tx_base[ixl_sctx->isc_ntxd];
+
return LE32_TO_CPU(*(volatile __le32 *)head);
}
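/*
 * Editor's sketch (not part of the patch): with head write-back the device
 * DMAs its consumed HEAD index into host memory one slot past the last
 * descriptor, so cleanup polls memory instead of a register.  A model,
 * assuming a little-endian host and the 16-byte i40e descriptor layout:
 */
#include <stdint.h>

struct model_txd { uint64_t buffer_addr, cmd_type_offset_bsz; };

static uint32_t
model_get_tx_head(const struct model_txd *base, uint32_t ntxd)
{
	/* the write-back slot lives at base[ntxd] */
	return (*(const volatile uint32_t *)&base[ntxd]);
}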
@@ -841,37 +384,24 @@
* tx_buffer is put back on the free queue.
*
**********************************************************************/
-bool
-ixl_txeof(struct ixl_queue *que)
+static int
+ixl_isc_txd_credits_update(void *arg, uint16_t qid, uint32_t cidx)
{
+ struct ixl_vsi *vsi = arg;
+ struct ixl_queue *que = &vsi->queues[qid];
struct tx_ring *txr = &que->txr;
u32 first, last, head, done, processed;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *tx_desc, *eop_desc;
-
- mtx_assert(&txr->mtx, MA_OWNED);
-
-#ifdef DEV_NETMAP
- // XXX todo: implement moderation
- if (netmap_tx_irq(que->vsi->ifp, que->me))
- return FALSE;
-#endif /* DEF_NETMAP */
-
- /* These are not the descriptors you seek, move along :) */
- if (txr->avail == que->num_desc) {
- que->busy = 0;
- return FALSE;
- }
-
processed = 0;
- first = txr->next_to_clean;
- buf = &txr->buffers[first];
- tx_desc = (struct i40e_tx_desc *)&txr->base[first];
+ first = cidx;
+ buf = &txr->tx_buffers[first];
+ tx_desc = (struct i40e_tx_desc *)&txr->tx_base[first];
last = buf->eop_index;
if (last == -1)
- return FALSE;
- eop_desc = (struct i40e_tx_desc *)&txr->base[last];
+ return (0);
+ eop_desc = (struct i40e_tx_desc *)&txr->tx_base[last];
/* Get the Head WB value */
head = ixl_get_tx_head(que);
@@ -882,11 +412,9 @@
** I do this so the comparison in the
** inner while loop below can be simple
*/
- if (++last == que->num_desc) last = 0;
+ if (++last == ixl_sctx->isc_ntxd) last = 0;
done = last;
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_POSTREAD);
/*
** The HEAD index of the ring is written in a
** defined location, this rather than a done bit
@@ -894,74 +422,28 @@
** 'cleaned'.
*/
while (first != head) {
- /* We clean the range of the packet */
while (first != done) {
- ++txr->avail;
++processed;
- if (buf->m_head) {
- txr->bytes += /* for ITR adjustment */
- buf->m_head->m_pkthdr.len;
- txr->tx_bytes += /* for TX stats */
- buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(buf->tag,
- buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- buf->map = NULL;
- }
buf->eop_index = -1;
-
- if (++first == que->num_desc)
+ if (++first == ixl_sctx->isc_ntxd)
first = 0;
- buf = &txr->buffers[first];
- tx_desc = &txr->base[first];
+ buf = &txr->tx_buffers[first];
+ tx_desc = &txr->tx_base[first];
}
++txr->packets;
/* See if there is more work now */
last = buf->eop_index;
- if (last != -1) {
- eop_desc = &txr->base[last];
+ if (last == -1)
+ break;
+ eop_desc = &txr->tx_base[last];
/* Get next done point */
- if (++last == que->num_desc) last = 0;
+ if (++last == ixl_sctx->isc_ntxd) last = 0;
done = last;
- } else
- break;
- }
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- txr->next_to_clean = first;
-
-
- /*
- ** Hang detection, we know there's
- ** work outstanding or the first return
- ** would have been taken, so indicate an
- ** unsuccessful pass, in local_timer if
- ** the value is too great the queue will
- ** be considered hung. If anything has been
- ** cleaned then reset the state.
- */
- if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
- ++que->busy;
-
- if (processed)
- que->busy = 1; /* Note this turns off HUNG */
- /*
- * If there are no pending descriptors, clear the timeout.
- */
- if (txr->avail == que->num_desc) {
- que->busy = 0;
- return FALSE;
}
-
- return TRUE;
+ return (processed);
}
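/*
 * Editor's sketch (not part of the patch): the credits-update walk above,
 * reduced to a self-contained model.  Starting at the consumer index it
 * cleans whole packets by following each packet's recorded EOP index until
 * the hardware HEAD is reached; the ring size and eop array are assumed.
 */
#include <stdint.h>

#define MODEL_NTXD 1024u

static int
model_txd_credits_update(int16_t eop[MODEL_NTXD], uint32_t cidx, uint32_t head)
{
	uint32_t first = cidx, done;
	int processed = 0;

	if (eop[first] == -1)
		return (0);			/* nothing in flight */
	done = ((uint32_t)eop[first] + 1) % MODEL_NTXD;

	while (first != head) {
		while (first != done) {		/* clean one whole packet */
			processed++;
			eop[first] = -1;
			first = (first + 1) % MODEL_NTXD;
		}
		if (eop[first] == -1)		/* no further packet queued */
			break;
		done = ((uint32_t)eop[first] + 1) % MODEL_NTXD;
	}
	return (processed);
}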
/*********************************************************************
@@ -974,465 +456,54 @@
*
**********************************************************************/
static void
-ixl_refresh_mbufs(struct ixl_queue *que, int limit)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct rx_ring *rxr = &que->rxr;
- bus_dma_segment_t hseg[1];
- bus_dma_segment_t pseg[1];
- struct ixl_rx_buf *buf;
- struct mbuf *mh, *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_refresh;
- /* Control the loop with one beyond */
- if (++j == que->num_desc)
- j = 0;
-
- while (j != limit) {
- buf = &rxr->buffers[i];
- if (rxr->hdr_split == FALSE)
- goto no_split;
-
- if (buf->m_head == NULL) {
- mh = m_gethdr(M_NOWAIT, MT_DATA);
- if (mh == NULL)
- goto update;
- } else
- mh = buf->m_head;
-
- mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: hdr dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- buf->m_head = NULL;
- goto update;
- }
- buf->m_head = mh;
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
-
-no_split:
- if (buf->m_pack == NULL) {
- mp = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, rxr->mbuf_sz);
- if (mp == NULL)
- goto update;
- } else
- mp = buf->m_pack;
-
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: payload dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- buf->m_pack = NULL;
- goto update;
- }
- buf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
- /* Used only when doing header split */
- rxr->base[i].read.hdr_addr = 0;
-
- refreshed = TRUE;
- /* Next is precalculated */
- i = j;
- rxr->next_refresh = i;
- if (++j == que->num_desc)
- j = 0;
- }
-update:
- if (refreshed) /* Update hardware tail index */
- wr32(vsi->hw, rxr->tail, rxr->next_refresh);
- return;
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per descriptor, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've defined.
- *
- **********************************************************************/
-int
-ixl_allocate_rx_data(struct ixl_queue *que)
-{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_vsi *vsi = que->vsi;
- device_t dev = vsi->dev;
- struct ixl_rx_buf *buf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
- if (!(rxr->buffers =
- (struct ixl_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- return (error);
- }
-
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MSIZE, /* maxsize */
- 1, /* nsegments */
- MSIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->htag))) {
- device_printf(dev, "Unable to create RX DMA htag\n");
- return (error);
- }
-
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 1, /* nsegments */
- MJUM16BYTES, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA ptag\n");
- return (error);
- }
-
- for (i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- error = bus_dmamap_create(rxr->htag,
- BUS_DMA_NOWAIT, &buf->hmap);
- if (error) {
- device_printf(dev, "Unable to create RX head map\n");
- break;
- }
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &buf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX pkt map\n");
- break;
- }
- }
-
- return (error);
-}
-
-
-/*********************************************************************
- *
- * (Re)Initialize the queue receive ring and its buffers.
- *
- **********************************************************************/
-int
-ixl_init_rx_ring(struct ixl_queue *que)
-{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_vsi *vsi = que->vsi;
-#if defined(INET6) || defined(INET)
- struct ifnet *ifp = vsi->ifp;
- struct lro_ctrl *lro = &rxr->lro;
-#endif
- struct ixl_rx_buf *buf;
- bus_dma_segment_t pseg[1], hseg[1];
- int rsize, nsegs, error = 0;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(que->vsi->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
-
- IXL_RX_LOCK(rxr);
-#ifdef DEV_NETMAP
- /* same as in ixl_init_tx_ring() */
- slot = netmap_reset(na, NR_RX, que->me, 0);
-#endif /* DEV_NETMAP */
- /* Clear the ring contents */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->base, rsize);
- /* Cleanup any existing buffers */
- for (int i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, buf->hmap);
- buf->m_head->m_flags |= M_PKTHDR;
- m_freem(buf->m_head);
- }
- if (buf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, buf->pmap);
- buf->m_pack->m_flags |= M_PKTHDR;
- m_freem(buf->m_pack);
- }
- buf->m_head = NULL;
- buf->m_pack = NULL;
- }
-
- /* header split is off */
- rxr->hdr_split = FALSE;
-
- /* Now replenish the mbufs */
- for (int j = 0; j != que->num_desc; ++j) {
- struct mbuf *mh, *mp;
-
- buf = &rxr->buffers[j];
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, fill the map and set the buffer
- * address in the NIC ring, considering the offset
- * between the netmap and NIC rings (see comment in
- * ixgbe_setup_transmit_ring() ). No need to allocate
- * an mbuf, so end the block with a continue;
- */
- if (slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
- uint64_t paddr;
- void *addr;
-
- addr = PNMB(na, slot + sj, &paddr);
- netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
- /* Update descriptor and the cached value */
- rxr->base[j].read.pkt_addr = htole64(paddr);
- rxr->base[j].read.hdr_addr = 0;
- continue;
- }
-#endif /* DEV_NETMAP */
- /*
- ** Don't allocate mbufs if not
- ** doing header split, its wasteful
- */
- if (rxr->hdr_split == FALSE)
- goto skip_head;
-
- /* First the header */
- buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
- if (buf->m_head == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- m_adj(buf->m_head, ETHER_ALIGN);
- mh = buf->m_head;
- mh->m_len = mh->m_pkthdr.len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- buf->hmap, buf->m_head, hseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) /* Nothing elegant to do here */
- goto fail;
- bus_dmamap_sync(rxr->htag,
- buf->hmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
- /* Now the payload cluster */
- buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, rxr->mbuf_sz);
- if (buf->m_pack == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = buf->m_pack;
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- buf->pmap, mp, pseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- buf->pmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
- rxr->base[j].read.hdr_addr = 0;
- }
+ixl_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count)
-
- /* Setup our descriptor indices */
- rxr->next_check = 0;
- rxr->next_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->split = 0;
- rxr->bytes = 0;
- rxr->discard = FALSE;
-
- wr32(vsi->hw, rxr->tail, que->num_desc - 1);
- ixl_flush(vsi->hw);
-
-#if defined(INET6) || defined(INET)
- /*
- ** Now set up the LRO interface:
- */
- if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
- goto fail;
- }
- INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
- rxr->lro_enabled = TRUE;
- lro->ifp = vsi->ifp;
- }
-#endif
-
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
-fail:
- IXL_RX_UNLOCK(rxr);
- return (error);
-}
-
-
-/*********************************************************************
- *
- * Free station receive ring data structures
- *
- **********************************************************************/
-void
-ixl_free_que_rx(struct ixl_queue *que)
{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_rx_buf *buf;
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
-
- /* Cleanup any existing buffers */
- if (rxr->buffers != NULL) {
- for (int i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, buf->hmap);
- buf->m_head->m_flags |= M_PKTHDR;
- m_freem(buf->m_head);
- }
- if (buf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, buf->pmap);
- buf->m_pack->m_flags |= M_PKTHDR;
- m_freem(buf->m_pack);
- }
- buf->m_head = NULL;
- buf->m_pack = NULL;
- if (buf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, buf->hmap);
- buf->hmap = NULL;
- }
- if (buf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, buf->pmap);
- buf->pmap = NULL;
- }
- }
- if (rxr->buffers != NULL) {
- free(rxr->buffers, M_DEVBUF);
- rxr->buffers = NULL;
- }
- }
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->queues[rxqid].rxr;
+ int i;
+ uint32_t next_pidx;
- if (rxr->htag != NULL) {
- bus_dma_tag_destroy(rxr->htag);
- rxr->htag = NULL;
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == ixl_sctx->isc_nrxd)
+ next_pidx = 0;
}
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
- return;
}
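/*
 * Editor's sketch (not part of the patch): rxd_refill as a self-contained
 * model -- write a batch of buffer physical addresses into consecutive
 * descriptors starting at pidx, wrapping at the ring end.  The endian
 * conversion (htole64) is omitted here.
 */
#include <stdint.h>

#define MODEL_NRXD 1024u

static void
model_rxd_refill(uint64_t pkt_addr[MODEL_NRXD], uint32_t pidx,
    const uint64_t *paddrs, uint16_t count)
{
	uint32_t next = pidx;

	for (uint16_t i = 0; i < count; i++) {
		pkt_addr[next] = paddrs[i];
		if (++next == MODEL_NRXD)
			next = 0;
	}
}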
-static __inline void
-ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
+
+static void
+ixl_isc_rxd_flush(void * arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
{
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->queues[rxqid].rxr;
-#if defined(INET6) || defined(INET)
- /*
- * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
- * should be computed by hardware. Also it should not have VLAN tag in
- * ethernet header.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
- * Send to the stack if:
- ** - LRO not enabled, or
- ** - no LRO resources, or
- ** - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
-#endif
- IXL_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXL_RX_LOCK(rxr);
+ wr32(vsi->hw, rxr->tail, pidx);
}
-
-static __inline void
-ixl_rx_discard(struct rx_ring *rxr, int i)
+
+static int
+ixl_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx)
{
- struct ixl_rx_buf *rbuf;
-
- rbuf = &rxr->buffers[i];
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- }
-
- /*
- ** With advanced descriptors the writeback
- ** clobbers the buffer addrs, so its easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
- if (rbuf->m_head) {
- m_free(rbuf->m_head);
- rbuf->m_head = NULL;
- }
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->queues[rxqid].rxr;
+ union i40e_rx_desc *cur;
+ u64 qword;
+ uint32_t status;
+ int cnt, i;
- if (rbuf->m_pack) {
- m_free(rbuf->m_pack);
- rbuf->m_pack = NULL;
+ for (cnt = 0, i = idx; cnt < ixl_sctx->isc_nrxd;) {
+ cur = &rxr->rx_base[i];
+ qword = le64toh(cur->wb.qword1.status_error_len);
+ status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
+ break;
+ cnt++;
+ if (++i == ixl_sctx->isc_nrxd)
+ i = 0;
}
- return;
+ return (cnt);
}
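/*
 * Editor's sketch (not part of the patch): rxd_available as a model --
 * count descriptors whose Descriptor Done (DD) bit the hardware has set,
 * starting at idx.  DD really is bit 0 of the status field on this
 * hardware; the descriptor is reduced to its status word here.
 */
#include <stdint.h>

#define MODEL_NRXD 1024u
#define MODEL_DD   (1u << 0)	/* I40E_RX_DESC_STATUS_DD_SHIFT == 0 */

static int
model_rxd_available(const volatile uint64_t status[MODEL_NRXD], uint32_t idx)
{
	int cnt = 0;
	uint32_t i = idx;

	while (cnt < (int)MODEL_NRXD && (status[i] & MODEL_DD) != 0) {
		cnt++;
		if (++i == MODEL_NRXD)
			i = 0;
	}
	return (cnt);
}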
#ifdef RSS
@@ -1495,52 +566,27 @@
/*********************************************************************
*
- * This routine executes in interrupt context. It replenishes
- * the mbufs in the descriptor and sends data which has been
+ * This routine executes in ithread context. It sends data which has been
* dma'ed into host memory to upper layer.
*
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
+ * Returns 0 upon success, errno on failure
*********************************************************************/
-bool
-ixl_rxeof(struct ixl_queue *que, int count)
+
+static int
+ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct ixl_vsi *vsi = que->vsi;
+ struct ixl_vsi *vsi = arg;
+ struct ixl_queue *que = &vsi->queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
- struct ifnet *ifp = vsi->ifp;
-#if defined(INET6) || defined(INET)
- struct lro_ctrl *lro = &rxr->lro;
- struct lro_entry *queued;
-#endif
- int i, nextp, processed = 0;
union i40e_rx_desc *cur;
- struct ixl_rx_buf *rbuf, *nbuf;
-
-
- IXL_RX_LOCK(rxr);
-
-#ifdef DEV_NETMAP
- if (netmap_rx_irq(ifp, que->me, &count)) {
- IXL_RX_UNLOCK(rxr);
- return (FALSE);
- }
-#endif /* DEV_NETMAP */
-
- for (i = rxr->next_check; count != 0;) {
- struct mbuf *sendmp, *mh, *mp;
- u32 rsc, status, error;
+ u32 status, error;
u16 hlen, plen, vtag;
u64 qword;
u8 ptype;
bool eop;
- /* Sync the ring. */
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->base[i];
+ ri->iri_qidx = 0;
+ cur = &rxr->rx_base[ri->iri_cidx];
qword = le64toh(cur->wb.qword1.status_error_len);
status = (qword & I40E_RXD_QW1_STATUS_MASK)
>> I40E_RXD_QW1_STATUS_SHIFT;
@@ -1553,21 +599,18 @@
ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
>> I40E_RXD_QW1_PTYPE_SHIFT;
- if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
- ++rxr->not_done;
- break;
- }
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
+ /* we should never be called without a valid descriptor */
+ MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);
+
+ ri->iri_len = plen;
+ rxr->rx_bytes += plen;
+
+#ifdef notyet
+ /* XXX should be checked from avail */
+
+#endif
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
cur->wb.qword1.status_error_len = 0;
- rbuf = &rxr->buffers[i];
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
@@ -1581,169 +624,33 @@
*/
if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
rxr->discarded++;
- ixl_rx_discard(rxr, i);
- goto next_desc;
+ return (EBADMSG);
}
/* Prefetch the next buffer */
if (!eop) {
- nextp = i + 1;
- if (nextp == que->num_desc)
- nextp = 0;
- nbuf = &rxr->buffers[nextp];
- prefetch(nbuf);
- }
-
- /*
- ** The header mbuf is ONLY used when header
- ** split is enabled, otherwise we get normal
- ** behavior, ie, both header and payload
- ** are DMA'd into the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- if (rxr->hdr_split && (rbuf->fmp == NULL)) {
- if (hlen > IXL_RX_HDR)
- hlen = IXL_RX_HDR;
- mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_head = NULL;
- /*
- ** Check the payload length, this
- ** could be zero if its a small
- ** packet.
- */
- if (plen > 0) {
- mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
- mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_pack = NULL;
- rxr->split++;
- }
- /*
- ** Now create the forward
- ** chain so when complete
- ** we wont have to.
- */
- if (eop == 0) {
- /* stash the chain head */
- nbuf->fmp = mh;
- /* Make forward chain */
- if (plen)
- mp->m_next = nbuf->m_pack;
- else
- mh->m_next = nbuf->m_pack;
- } else {
- /* Singlet, prepare to send */
- sendmp = mh;
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
+ ri->iri_next_offset = 1;
} else {
- /*
- ** Either no header split, or a
- ** secondary piece of a fragmented
- ** split packet.
- */
- mp->m_len = plen;
- /*
- ** See if there is a stored head
- ** that determines what we are
- */
- sendmp = rbuf->fmp;
- rbuf->m_pack = rbuf->fmp = NULL;
-
- if (sendmp != NULL) /* secondary frag */
- sendmp->m_pkthdr.len += mp->m_len;
- else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->m_pack;
- }
- }
- ++processed;
- /* Sending this frame? */
- if (eop) {
- sendmp->m_pkthdr.rcvif = ifp;
- /* gather stats */
rxr->rx_packets++;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
/* capture data for dynamic ITR adjustment */
rxr->packets++;
- rxr->bytes += sendmp->m_pkthdr.len;
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixl_rx_checksum(sendmp, status, error, ptype);
+ if ((vsi->ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixl_rx_checksum(ri, status, error, ptype);
#ifdef RSS
- sendmp->m_pkthdr.flowid =
+ ri->iri_flowid =
le32toh(cur->wb.qword0.hi_dword.rss);
- M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
+ ri->iri_rsstype = ixl_ptype_to_hash(ptype);
#else
- sendmp->m_pkthdr.flowid = que->msix;
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
+ ri->iri_flowid = que->msix;
+ ri->iri_rsstype = M_HASHTYPE_OPAQUE;
#endif
+ if (vtag) {
+ ri->iri_vtag = vtag;
+ ri->iri_flags |= M_VLANTAG;
}
-next_desc:
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == que->num_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL) {
- rxr->next_check = i;
- ixl_rx_input(rxr, ifp, sendmp, ptype);
- i = rxr->next_check;
- }
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixl_refresh_mbufs(que, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixl_rx_unrefreshed(que))
- ixl_refresh_mbufs(que, i);
-
- rxr->next_check = i;
-
-#if defined(INET6) || defined(INET)
- /*
- * Flush any outstanding LRO work
- */
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
+ ri->iri_next_offset = 0;
}
-#endif
-
- IXL_RX_UNLOCK(rxr);
- return (FALSE);
+ return (0);
}
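/*
 * Editor's sketch (not part of the patch): iflib consumes these callbacks
 * through an if_txrx ops table.  The field names below follow the modern
 * iflib layout and may differ in the revision this patch targets;
 * ixl_isc_txd_encap is assumed (only its tail is visible above).
 */
struct if_txrx ixl_txrx = {
	.ift_txd_encap = ixl_isc_txd_encap,
	.ift_txd_flush = ixl_isc_txd_flush,
	.ift_txd_credits_update = ixl_isc_txd_credits_update,
	.ift_rxd_available = ixl_isc_rxd_available,
	.ift_rxd_pkt_get = ixl_isc_rxd_pkt_get,
	.ift_rxd_refill = ixl_isc_rxd_refill,
	.ift_rxd_flush = ixl_isc_rxd_flush,
};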
@@ -1755,7 +662,7 @@
*
*********************************************************************/
static void
-ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
+ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
{
struct i40e_rx_ptype_decoded decoded;
@@ -1764,7 +671,7 @@
/* Errors? */
if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
(1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
- mp->m_pkthdr.csum_flags = 0;
+ ri->iri_csum_flags = 0;
return;
}
@@ -1773,19 +680,19 @@
decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
if (status &
(1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
- mp->m_pkthdr.csum_flags = 0;
+ ri->iri_csum_flags = 0;
return;
}
/* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ ri->iri_csum_flags = CSUM_IP_CHECKED;
+ ri->iri_csum_flags |= CSUM_IP_VALID;
if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
- mp->m_pkthdr.csum_flags |=
+ ri->iri_csum_flags |=
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
- mp->m_pkthdr.csum_data |= htons(0xffff);
+ ri->iri_csum_data |= htons(0xffff);
}
return;
}
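/*
 * Editor's sketch (not part of the patch): the mapping above, reduced to a
 * self-contained model.  The flag values are illustrative stand-ins for
 * the CSUM_* bits; the decision structure mirrors ixl_rx_checksum().
 */
#include <stdint.h>

enum {
	F_IP_CHECKED = 0x1,	/* stands in for CSUM_IP_CHECKED */
	F_IP_VALID   = 0x2,	/* CSUM_IP_VALID */
	F_DATA_VALID = 0x4,	/* CSUM_DATA_VALID */
	F_PSEUDO_HDR = 0x8	/* CSUM_PSEUDO_HDR */
};

static uint32_t
model_rx_checksum(int ip_err, int l4_err, int l3l4p)
{
	uint32_t flags;

	if (ip_err || l4_err)
		return (0);		/* any HW error: claim nothing */
	flags = F_IP_CHECKED | F_IP_VALID;
	if (l3l4p)			/* L3/L4 parsed: L4 csum verified */
		flags |= F_DATA_VALID | F_PSEUDO_HDR;
	return (flags);
}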
Index: sys/dev/ixl/ixlv.h
===================================================================
--- sys/dev/ixl/ixlv.h
+++ sys/dev/ixl/ixlv.h
@@ -104,7 +104,6 @@
struct device *dev;
struct resource *pci_mem;
- struct resource *msix_mem;
enum ixlv_state_t init_state;
@@ -114,8 +113,7 @@
void *tag;
struct resource *res; /* For the AQ */
- struct ifmedia media;
- struct callout timer;
+ struct ifmedia *media;
int msix;
int pf_version;
int if_flags;
@@ -123,15 +121,12 @@
bool link_up;
u32 link_speed;
- struct mtx mtx;
-
u32 qbase;
u32 admvec;
- struct timeout_task timeout;
+#ifdef notyet
struct task aq_irq;
struct task aq_sched;
- struct taskqueue *tq;
-
+#endif
struct ixl_vsi vsi;
/* Filter lists */
@@ -166,7 +161,6 @@
u8 aq_buffer[IXL_AQ_BUF_SZ];
};
-#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
/*
** This checks for a zero mac addr, something that will be likely
** unless the Admin on the Host has created one.
Index: sys/dev/ixl/ixlvc.c
===================================================================
--- sys/dev/ixl/ixlvc.c
+++ sys/dev/ixl/ixlvc.c
@@ -372,6 +372,7 @@
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
struct rx_ring *rxr;
+ if_shared_ctx_t sctx;
int len, pairs;
struct i40e_virtchnl_vsi_queue_config_info *vqci;
@@ -386,6 +387,7 @@
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
+ sctx = iflib_get_sctx(sc->vsi.ctx);
vqci->vsi_id = sc->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -397,17 +399,17 @@
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
- vqpi->txq.ring_len = que->num_desc;
- vqpi->txq.dma_ring_addr = txr->dma.pa;
+ vqpi->txq.ring_len = sctx->isc_ntxd;
+ vqpi->txq.dma_ring_addr = txr->tx_paddr;
/* Enable Head writeback */
vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ vqpi->txq.dma_headwb_addr = txr->tx_paddr +
+ (sctx->isc_ntxd * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
- vqpi->rxq.ring_len = que->num_desc;
- vqpi->rxq.dma_ring_addr = rxr->dma.pa;
+ vqpi->rxq.ring_len = sctx->isc_nrxd;
+ vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi->rxq.splithdr_enabled = 0;
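/*
 * Editor's note (not part of the patch): the head write-back slot is
 * placed directly after the TX ring, so its DMA address is
 * ring_paddr + ntxd * sizeof(struct i40e_tx_desc).  A quick check of that
 * arithmetic, assuming the 16-byte i40e descriptor and a hypothetical base:
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t ring_paddr = 0x100000;	/* hypothetical base */
	const uint64_t ntxd = 1024;
	const uint64_t txd_size = 16;	/* sizeof(struct i40e_tx_desc) */

	assert(ring_paddr + ntxd * txd_size == 0x104000);
	return (0);
}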
@@ -806,9 +808,10 @@
uint64_t tx_discards;
tx_discards = es->tx_discards;
+#ifdef notyet
for (int i = 0; i < vsi->num_queues; i++)
tx_discards += sc->vsi.queues[i].txr.br->br_drops;
-
+#endif
/* Update ifnet stats */
IXL_SET_IPACKETS(vsi, es->rx_unicast +
es->rx_multicast +
@@ -1001,7 +1004,6 @@
mgr->sc = sc;
mgr->current = NULL;
TAILQ_INIT(&mgr->pending);
- callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}
static void
@@ -1036,7 +1038,6 @@
{
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}
@@ -1045,7 +1046,6 @@
{
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
ixl_vc_send_current(mgr);
}
@@ -1088,7 +1088,6 @@
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
if (mgr->current == cmd)
@@ -1111,7 +1110,6 @@
{
struct ixl_vc_cmd *cmd;
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
("ixlv: pending commands waiting but no command in progress"));
Index: sys/modules/ixl/Makefile
===================================================================
--- sys/modules/ixl/Makefile
+++ sys/modules/ixl/Makefile
@@ -3,13 +3,12 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixl
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h opt_bdg.h
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h opt_bdg.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
-SRCS += if_ixl.c ixl_txrx.c i40e_osdep.c
+SRCS += if_ixl.c if_ixl_common.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
-
CFLAGS += -DSMP -DIXL_DEBUG_SYSCTL
# Add Flow Director support
Index: sys/modules/ixlv/Makefile
===================================================================
--- sys/modules/ixlv/Makefile
+++ sys/modules/ixlv/Makefile
@@ -3,9 +3,9 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixlv
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h opt_bdg.h
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h opt_bdg.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
-SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
+SRCS += if_ixlv.c ixlvc.c if_ixl_common.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
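# Editor's note (not part of the patch): ifdi_if.h is generated from the
# iflib interface definition (ifdi_if.m), so listing it in SRCS lets the
# kernel build machinery produce it.  A typical module build and load,
# from a source tree with iflib present:
#   make -C sys/modules/ixl
#   kldload ./sys/modules/ixl/if_ixl.ko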
