D5214.id25255.diff
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -234,7 +234,7 @@
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
options IXL_IW # Enable iWARP Client Interface in ixl(4)
-device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
+#device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device ti # Alteon Networks Tigon I/II gigabit Ethernet
device txp # 3Com 3cR990 (``Typhoon'')
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -260,10 +260,10 @@
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_iw.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/if_ixlv.c optional ixlv pci \
- compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/ixlvc.c optional ixlv pci \
- compile-with "${NORMAL_C} -I$S/dev/ixl"
+#dev/ixl/if_ixlv.c optional ixlv pci \
+# compile-with "${NORMAL_C} -I$S/dev/ixl"
+#dev/ixl/ixlvc.c optional ixlv pci \
+# compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_osdep.c optional ixl pci | ixlv pci \
Index: sys/dev/ixl/i40e_osdep.c
===================================================================
--- sys/dev/ixl/i40e_osdep.c
+++ sys/dev/ixl/i40e_osdep.c
@@ -69,6 +69,12 @@
device_t dev = ((struct i40e_osdep *)hw->back)->dev;
int err;
+ // DEBUG: temporary guard against a NULL device pointer
+ if (dev == NULL) {
+ printf("dev is null!\n");
+ err = ENODEV;
+ goto fail_0;
+ }
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, bounds */
Index: sys/dev/ixl/if_ixl.c
===================================================================
--- sys/dev/ixl/if_ixl.c
+++ sys/dev/ixl/if_ixl.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,13 +32,14 @@
******************************************************************************/
/*$FreeBSD$*/
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
#include "ixl.h"
#include "ixl_pf.h"
-#ifdef IXL_IW
-#include "ixl_iw.h"
-#include "ixl_iw_int.h"
-#endif
+#include "ifdi_if.h"
#ifdef PCI_IOV
#include "ixl_pf_iov.h"
@@ -47,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.7.12-k";
+char ixl_driver_version[] = "1.6.6-iflib-k";
/*********************************************************************
* PCI Device ID Table
@@ -59,47 +60,62 @@
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
*********************************************************************/
-static ixl_vendor_info_t ixl_vendor_info_array[] =
+static pci_vendor_info_t ixl_vendor_info_array[] =
{
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0},
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
+ PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection 7 Series Driver"),
/* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixl_strings[] = {
- "Intel(R) Ethernet Connection XL710/X722 Driver"
+ PVID_END
};
-
/*********************************************************************
* Function prototypes
*********************************************************************/
-static int ixl_probe(device_t);
-static int ixl_attach(device_t);
-static int ixl_detach(device_t);
-static int ixl_shutdown(device_t);
-static int ixl_save_pf_tunables(struct ixl_pf *);
-static int ixl_attach_get_link_status(struct ixl_pf *);
+/*** IFLIB interface ***/
+static void *ixl_register(device_t dev);
+static int ixl_if_attach_pre(if_ctx_t ctx);
+static int ixl_if_attach_post(if_ctx_t ctx);
+static int ixl_if_detach(if_ctx_t ctx);
+static int ixl_if_shutdown(if_ctx_t ctx);
+static int ixl_if_suspend(if_ctx_t ctx);
+static int ixl_if_resume(if_ctx_t ctx);
+static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
+static void ixl_if_enable_intr(if_ctx_t ctx);
+static void ixl_if_disable_intr(if_ctx_t ctx);
+static int ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+static void ixl_if_queues_free(if_ctx_t ctx);
+static void ixl_if_update_admin_status(if_ctx_t ctx);
+static void ixl_if_multi_set(if_ctx_t ctx);
+static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
+static int ixl_if_media_change(if_ctx_t ctx);
+static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
+static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
+static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
+
+/*** Other ***/
+static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
+static int ixl_save_pf_tunables(struct ixl_pf *);
+static int ixl_attach_get_link_status(struct ixl_pf *);
+static int ixl_allocate_pci_resources(struct ixl_pf *);
+
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -107,16 +123,17 @@
static device_method_t ixl_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ixl_probe),
- DEVMETHOD(device_attach, ixl_attach),
- DEVMETHOD(device_detach, ixl_detach),
- DEVMETHOD(device_shutdown, ixl_shutdown),
+ DEVMETHOD(device_register, ixl_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
DEVMETHOD(pci_iov_init, ixl_iov_init),
DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
- {0, 0}
+ DEVMETHOD_END
};
static driver_t ixl_driver = {
@@ -126,17 +143,47 @@
devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
-MODULE_VERSION(ixl, 1);
-
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
-#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000
-MODULE_DEPEND(ixl, netmap, 1, 1, 1);
-#endif /* DEV_NETMAP */
+MODULE_DEPEND(ixl, iflib, 1, 1, 1);
+
+static device_method_t ixl_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
+ DEVMETHOD(ifdi_detach, ixl_if_detach),
+ DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
+ DEVMETHOD(ifdi_suspend, ixl_if_suspend),
+ DEVMETHOD(ifdi_resume, ixl_if_resume),
+ DEVMETHOD(ifdi_init, ixl_if_init),
+ DEVMETHOD(ifdi_stop, ixl_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
+ DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
+ DEVMETHOD(ifdi_queue_intr_enable, ixl_if_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
+ DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
+ // DEVMETHOD(ifdi_crcstrip_set, ixl_if_crcstrip_set),
+ DEVMETHOD(ifdi_media_status, ixl_if_media_status),
+ DEVMETHOD(ifdi_media_change, ixl_if_media_change),
+ DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
+ DEVMETHOD(ifdi_timer, ixl_if_timer),
+ DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
+ DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
+ DEVMETHOD_END
+};
-/*
+static driver_t ixl_if_driver = {
+ "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
+};
+
+/*****************************************************************************
** TUNEABLE PARAMETERS:
-*/
+*****************************************************************************/
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
"IXL driver parameters");
@@ -154,7 +201,7 @@
** Number of descriptors per ring:
** - TX and RX are the same size
*/
-static int ixl_ring_size = IXL_DEFAULT_RING;
+static int ixl_ring_size = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixl_ring_size, 0, "Descriptor Ring Size");
@@ -195,211 +242,173 @@
** - true/false for dynamic adjustment
** - default values for static ITR
*/
-static int ixl_dynamic_rx_itr = 1;
+static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
+#if 0
static int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
+#endif
static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
&ixl_rx_itr, 0, "RX Interrupt Rate");
+#if 0
static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixl_tx_itr, 0, "TX Interrupt Rate");
-
-#ifdef IXL_IW
-int ixl_enable_iwarp = 0;
-TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
#endif
-#ifdef DEV_NETMAP
-#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
-#include <dev/netmap/if_ixl_netmap.h>
-#endif /* DEV_NETMAP */
-
-/*********************************************************************
- * Device identification routine
- *
- * ixl_probe determines if the driver should be loaded on
- * the hardware based on PCI vendor/device id of the device.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-ixl_probe(device_t dev)
-{
- ixl_vendor_info_t *ent;
+extern struct if_txrx ixl_txrx;
+
+static struct if_shared_ctx ixl_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
+ .isc_tx_maxsize = IXL_TSO_SIZE,
+
+ .isc_tx_maxsegsize = PAGE_SIZE,
+
+ // TODO: Review the rx_maxsize and rx_maxsegsize params
+ // Where are they used in iflib?
+ .isc_rx_maxsize = 16384,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = 16384,
+ // TODO: What is isc_nfl for?
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ixl_vendor_info_array,
+ .isc_driver_version = ixl_driver_version,
+ .isc_driver = &ixl_if_driver,
+
+ .isc_nrxd_min = {IXL_MIN_RING},
+ .isc_ntxd_min = {IXL_MIN_RING},
+ .isc_nrxd_max = {IXL_MAX_RING},
+ .isc_ntxd_max = {IXL_MAX_RING},
+ .isc_nrxd_default = {DEFAULT_RING},
+ .isc_ntxd_default = {DEFAULT_RING},
+};
- u16 pci_vendor_id, pci_device_id;
- u16 pci_subvendor_id, pci_subdevice_id;
- char device_name[256];
+if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
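+
+/*
+ * iflib fetches this shared-context template through the device_register
+ * method (ixl_register() below) and drives probe from its vendor table.
+ */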
-#if 0
- INIT_DEBUGOUT("ixl_probe: begin");
-#endif
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
- return (ENXIO);
+/*** Functions ***/
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixl_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
-
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
-
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(device_name, "%s, Version - %s",
- ixl_strings[ent->index],
- ixl_driver_version);
- device_set_desc_copy(dev, device_name);
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
+static void *
+ixl_register(device_t dev)
+{
+ return (ixl_sctx);
}
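+
+/*
+ * Map BAR0 and record the PCI IDs and register-access handles in the
+ * osdep structure used by the shared i40e code.
+ */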
-static int
-ixl_attach_get_link_status(struct ixl_pf *pf)
+int
+ixl_allocate_pci_resources(struct ixl_pf *pf)
{
+ int rid;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error = 0;
-
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
- return error;
- }
- }
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
- /* Determine link state */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- return (0);
-}
+ /* Map BAR0 */
+ rid = PCIR_BAR(0);
+ pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
-/*
- * Sanity check and save off tunable values.
- */
-static int
-ixl_save_pf_tunables(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
-
- /* Save tunable information */
- pf->enable_msix = ixl_enable_msix;
- pf->max_queues = ixl_max_queues;
- pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
- pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
- pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
- pf->dbg_mask = ixl_core_debug_mask;
- pf->hw.debug_mask = ixl_shared_debug_mask;
+ if (!(pf->pci_mem)) {
+ device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
+ return (ENXIO);
+ }
- if (ixl_ring_size < IXL_MIN_RING
- || ixl_ring_size > IXL_MAX_RING
- || ixl_ring_size % IXL_RING_INCREMENT != 0) {
- device_printf(dev, "Invalid ring_size value of %d set!\n",
- ixl_ring_size);
- device_printf(dev, "ring_size must be between %d and %d, "
- "inclusive, and must be a multiple of %d\n",
- IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
- device_printf(dev, "Using default value of %d instead\n",
- IXL_DEFAULT_RING);
- pf->ringsz = IXL_DEFAULT_RING;
- } else
- pf->ringsz = ixl_ring_size;
-
- if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
- device_printf(dev, "Invalid tx_itr value of %d set!\n",
- ixl_tx_itr);
- device_printf(dev, "tx_itr must be between %d and %d, "
- "inclusive\n",
- 0, IXL_MAX_ITR);
- device_printf(dev, "Using default value of %d instead\n",
- IXL_ITR_4K);
- pf->tx_itr = IXL_ITR_4K;
- } else
- pf->tx_itr = ixl_tx_itr;
-
- if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
- device_printf(dev, "Invalid rx_itr value of %d set!\n",
- ixl_rx_itr);
- device_printf(dev, "rx_itr must be between %d and %d, "
- "inclusive\n",
- 0, IXL_MAX_ITR);
- device_printf(dev, "Using default value of %d instead\n",
- IXL_ITR_8K);
- pf->rx_itr = IXL_ITR_8K;
- } else
- pf->rx_itr = ixl_rx_itr;
+ /* Save off the PCI information */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+
+ /* Save off register access information */
+ pf->osdep.mem_bus_space_tag =
+ rman_get_bustag(pf->pci_mem);
+ pf->osdep.mem_bus_space_handle =
+ rman_get_bushandle(pf->pci_mem);
+ pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+ pf->osdep.flush_reg = I40E_GLGEN_STAT;
+ pf->osdep.dev = dev;
+
+ pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
+ pf->hw.back = &pf->osdep;
return (0);
}
-/*********************************************************************
- * Device initialization routine
- *
- * The attach entry point is called when the driver is being loaded.
- * This routine identifies the type of hardware, allocates all resources
- * and initializes the hardware.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
static int
-ixl_attach(device_t dev)
+ixl_if_attach_pre(if_ctx_t ctx)
{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
+ device_t dev;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ if_softc_ctx_t scctx;
enum i40e_status_code status;
- int error = 0;
+ int error = 0;
- INIT_DEBUGOUT("ixl_attach: begin");
+ INIT_DEBUGOUT("ixl_if_attach_pre: begin");
- /* Allocate, clear, and link in our primary soft structure */
- pf = device_get_softc(dev);
- pf->dev = pf->osdep.dev = dev;
- hw = &pf->hw;
+ dev = iflib_get_dev(ctx);
+ pf = iflib_get_softc(ctx);
+ hw = &pf->hw;
/*
** Note this assumes we have a single embedded VSI,
** this could be enhanced later to allocate multiple
*/
vsi = &pf->vsi;
- vsi->dev = pf->dev;
+ vsi->back = pf;
+ vsi->hw = &pf->hw;
+ vsi->id = 0;
+ vsi->num_vlans = 0;
+ vsi->ctx = ctx;
+ vsi->media = iflib_get_media(ctx);
+ vsi->shared = scctx = iflib_get_softc_ctx(ctx);
+ pf->dev = dev;
+
+ /*
+ * These are the same across all current ixl models
+ */
+ vsi->shared->isc_tx_nsegments = IXL_MAX_TX_SEGS;
+ vsi->shared->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
+
+ vsi->shared->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
+ vsi->shared->isc_tx_tso_size_max = IXL_TSO_SIZE;
+ vsi->shared->isc_tx_tso_segsize_max = PAGE_SIZE;
/* Save tunable values */
error = ixl_save_pf_tunables(pf);
if (error)
return (error);
- /* Core Lock Init*/
- IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
-
- /* Set up the timer callout */
- callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
+ /*
+ * TODO: Document which iflib softc-ctx fields must be set here
+ * in attach_pre() (and in iflib generally).
+ */
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
+ * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
+ /* XXX: No idea what this does */
+ scctx->isc_max_txqsets = scctx->isc_max_rxqsets = 32;
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
@@ -427,18 +436,13 @@
goto err_out;
}
- /*
- * Allocate interrupts and figure out number of queues to use
- * for PF interface
- */
- pf->msix = ixl_init_msix(pf);
-
- /* Set up the admin queue */
+ /* Set admin queue parameters */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
+ /* Set up the admin queue */
status = i40e_init_adminq(hw);
if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
@@ -474,7 +478,8 @@
/* Get capabilities from the device */
error = ixl_get_hw_capabilities(pf);
if (error) {
- device_printf(dev, "HW capabilities failure!\n");
+ device_printf(dev, "get_hw_capabilities failed: %d\n",
+ error);
goto err_get_cap;
}
@@ -486,7 +491,6 @@
i40e_stat_str(hw, status));
goto err_get_cap;
}
-
status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (status) {
device_printf(dev, "configure_lan_hmc failed: %s\n",
@@ -494,23 +498,6 @@
goto err_mac_hmc;
}
- /* Init queue allocation manager */
- error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
- if (error) {
- device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
- error);
- goto err_mac_hmc;
- }
- /* reserve a contiguous allocation for the PF's VSI */
- error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
- if (error) {
- device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
- error);
- goto err_mac_hmc;
- }
- device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
- pf->qtag.num_allocated, pf->qtag.num_active);
-
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4))
@@ -524,17 +511,49 @@
goto err_mac_hmc;
}
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
+ iflib_set_mac(ctx, hw->mac.addr);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
/* Initialize mac filter list for VSI */
SLIST_INIT(&vsi->ftl);
- /* Set up SW VSI and allocate queue memory and rings */
- if (ixl_setup_stations(pf)) {
- device_printf(dev, "setup stations failed!\n");
- error = ENOMEM;
- goto err_mac_hmc;
- }
+ /* Fill out more iflib parameters */
+ scctx->isc_txrx = &ixl_txrx;
+ vsi->shared->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
+ scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
+ scctx->isc_capenable = IXL_CAPS;
+
+ INIT_DEBUGOUT("ixl_if_attach_pre: end");
+ return (0);
+
+// TODO: Review what needs to be cleaned up when this fails
+err_mac_hmc:
+ i40e_shutdown_lan_hmc(hw);
+err_get_cap:
+ i40e_shutdown_adminq(hw);
+err_out:
+ ixl_free_pci_resources(pf);
+ ixl_free_mac_filters(vsi);
+ return (error);
+}
+
+static int
+ixl_if_attach_post(if_ctx_t ctx)
+{
+ device_t dev;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ int error = 0;
+ enum i40e_status_code status;
+
+ INIT_DEBUGOUT("ixl_if_attach_post: begin");
+
+ dev = iflib_get_dev(ctx);
+ vsi = iflib_get_softc(ctx);
+ vsi->ifp = iflib_get_ifp(ctx);
+ pf = (struct ixl_pf *)vsi;
+ hw = &pf->hw;
/* Setup OS network interface / ifnet */
if (ixl_setup_interface(dev, vsi)) {
@@ -551,10 +570,30 @@
error = ixl_switch_config(pf);
if (error) {
- device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
- error);
- goto err_late;
+ device_printf(dev, "Initial switch config failed: %d\n", error);
+ goto err_mac_hmc;
+ }
+
+ /* Init queue allocation manager */
+ /* XXX: This init can go in pre or post; allocation must be in post */
+ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
+ if (error) {
+ device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
+ error);
+ goto err_mac_hmc;
+ }
+ /* reserve a contiguous allocation for the PF's VSI */
+ /* TODO: Could be refined? */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
+ max(vsi->num_tx_queues, vsi->num_rx_queues), &pf->qtag);
+ if (error) {
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
+ goto err_mac_hmc;
}
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
+
/* Limit PHY interrupts to link, autoneg, and modules failure */
status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
@@ -566,221 +605,849 @@
goto err_late;
}
- /* Get the bus configuration and set the shared code's config */
- ixl_get_bus_info(pf);
+ /* Get the bus configuration and set the shared code */
+ ixl_get_bus_info(hw, dev);
- /*
- * In MSI-X mode, initialize the Admin Queue interrupt,
- * so userland tools can communicate with the adapter regardless of
- * the ifnet interface's status.
- */
- if (pf->msix > 1) {
- error = ixl_setup_adminq_msix(pf);
- if (error) {
- device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
- error);
- goto err_late;
- }
- error = ixl_setup_adminq_tq(pf);
- if (error) {
- device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
- error);
- goto err_late;
- }
+ // TODO: Don't call this in non-MSIX mode
+ /* Keep admin queue interrupts active while driver is loaded */
+ if (pf->enable_msix)
ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
-
- error = ixl_setup_queue_msix(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
- error);
- error = ixl_setup_queue_tqs(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
- error);
- } else {
- error = ixl_setup_legacy(pf);
-
- error = ixl_setup_adminq_tq(pf);
- if (error) {
- device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
- error);
- goto err_late;
- }
-
- error = ixl_setup_queue_tqs(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
- error);
- }
-
- if (error) {
- device_printf(dev, "interrupt setup error: %d\n", error);
- }
-
- /* Set initial advertised speed sysctl value */
- ixl_get_initial_advertised_speeds(pf);
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
-
ixl_pf_reset_stats(pf);
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
- /* Register for VLAN events */
- vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
- vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+ /* Set initial advertised speed sysctl value */
+ ixl_get_initial_advertised_speeds(pf);
#ifdef PCI_IOV
ixl_initialize_sriov(pf);
#endif
-
-#ifdef DEV_NETMAP
- ixl_netmap_attach(vsi);
-#endif /* DEV_NETMAP */
-
-#ifdef IXL_IW
- if (hw->func_caps.iwarp && ixl_enable_iwarp) {
- pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
- if (pf->iw_enabled) {
- error = ixl_iw_pf_attach(pf);
- if (error) {
- device_printf(dev,
- "interfacing to iwarp driver failed: %d\n",
- error);
- goto err_late;
- }
- } else
- device_printf(dev,
- "iwarp disabled on this device (no msix vectors)\n");
- } else {
- pf->iw_enabled = false;
- device_printf(dev, "The device is not iWARP enabled\n");
- }
-#endif
-
- INIT_DEBUGOUT("ixl_attach: end");
+ INIT_DEBUGOUT("ixl_if_attach_post: end");
return (0);
+// TODO: Review what needs to be cleaned up when this fails
err_late:
- if (vsi->ifp != NULL) {
- ether_ifdetach(vsi->ifp);
- if_free(vsi->ifp);
- }
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
-err_get_cap:
i40e_shutdown_adminq(hw);
-err_out:
ixl_free_pci_resources(pf);
- ixl_free_vsi(vsi);
- IXL_PF_LOCK_DESTROY(pf);
+ ixl_free_mac_filters(vsi);
return (error);
}
-/*********************************************************************
- * Device removal routine
- *
- * The detach entry point is called when the driver is being removed.
- * This routine stops the adapter and deallocates all the resources
- * that were allocated for driver operation.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
static int
-ixl_detach(device_t dev)
+ixl_if_detach(if_ctx_t ctx)
{
- struct ixl_pf *pf = device_get_softc(dev);
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- enum i40e_status_code status;
-#if defined(PCI_IOV) || defined(IXL_IW)
+ i40e_status status;
+#ifdef PCI_IOV
int error;
#endif
- INIT_DEBUGOUT("ixl_detach: begin");
-
- /* Make sure VLANS are not using driver */
- if (vsi->ifp->if_vlantrunk != NULL) {
- device_printf(dev, "Vlan in use, detach first\n");
- return (EBUSY);
- }
+ INIT_DEBUGOUT("ixl_if_detach: begin");
#ifdef PCI_IOV
- error = pci_iov_detach(dev);
+ error = pci_iov_detach(iflib_get_dev(ctx));
if (error != 0) {
- device_printf(dev, "SR-IOV in use; detach first.\n");
+ device_printf(iflib_get_dev(ctx), "SR-IOV in use; detach first.\n");
return (error);
}
#endif
- ether_ifdetach(vsi->ifp);
- if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixl_stop(pf);
-
/* Shutdown LAN HMC */
status = i40e_shutdown_lan_hmc(hw);
if (status)
- device_printf(dev,
+ device_printf(iflib_get_dev(ctx),
"Shutdown LAN HMC failed with code %d\n", status);
- /* Teardown LAN queue resources */
- ixl_teardown_queue_msix(vsi);
- ixl_free_queue_tqs(vsi);
/* Shutdown admin queue */
- ixl_disable_intr0(hw);
- ixl_teardown_adminq_msix(pf);
- ixl_free_adminq_tq(pf);
+ ixl_disable_adminq(hw);
+
+ /* Shutdown admin queue */
status = i40e_shutdown_adminq(hw);
if (status)
- device_printf(dev,
+ device_printf(iflib_get_dev(ctx),
"Shutdown Admin queue failed with code %d\n", status);
- /* Unregister VLAN events */
- if (vsi->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
- if (vsi->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
+ ixl_pf_qmgr_destroy(&pf->qmgr);
+ ixl_free_pci_resources(pf);
+ ixl_free_mac_filters(vsi);
+ return (0);
+}
+
+/* TODO: Do shutdown-specific stuff here */
+static int
+ixl_if_shutdown(if_ctx_t ctx)
+{
+ int error = 0;
- callout_drain(&pf->timer);
+ INIT_DEBUGOUT("ixl_if_shutdown: begin");
-#ifdef IXL_IW
- if (ixl_enable_iwarp && pf->iw_enabled) {
- error = ixl_iw_pf_detach(pf);
- if (error == EBUSY) {
- device_printf(dev, "iwarp in use; stop it first.\n");
- return (error);
+ /* TODO: Call ixl_if_stop()? */
+
+ /* TODO: Then setup low power mode */
+
+ return (error);
+}
+
+static int
+ixl_if_suspend(if_ctx_t ctx)
+{
+ int error = 0;
+
+ INIT_DEBUGOUT("ixl_if_suspend: begin");
+
+ /* TODO: Call ixl_if_stop()? */
+
+ /* TODO: Then setup low power mode */
+
+ return (error);
+}
+
+static int
+ixl_if_resume(if_ctx_t ctx)
+{
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+
+ INIT_DEBUGOUT("ixl_if_resume: begin");
+
+ /* Read & clear wake-up registers */
+
+ /* Required after D3->D0 transition */
+ if (ifp->if_flags & IFF_UP)
+ ixl_if_init(ctx);
+
+ return (0);
+}
+
+void
+ixl_if_init(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = iflib_get_dev(ctx);
+ struct i40e_filter_control_settings filter;
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ int ret;
+
+ INIT_DEBUGOUT("ixl_if_init: begin");
+
+ ixl_if_stop(ctx);
+
+ /* Get the latest mac address... User might use a LAA */
+ bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
+ (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+ bcopy(tmpaddr, hw->mac.addr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ ret = i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ hw->mac.addr, NULL);
+ if (ret) {
+ device_printf(dev, "LLA address"
+ "change failed!!\n");
+ return;
+ }
+ }
+
+ ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+
+ /* Set up the device filtering */
+ bzero(&filter, sizeof(filter));
+ filter.enable_ethtype = TRUE;
+ filter.enable_macvlan = TRUE;
+ filter.enable_fdir = FALSE;
+ filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ if (i40e_set_filter_control(hw, &filter))
+ device_printf(dev, "i40e_set_filter_control() failed\n");
+
+ /* Prepare the VSI: rings, hmc contexts, etc... */
+ if (ixl_initialize_vsi(vsi)) {
+ device_printf(dev, "initialize vsi failed!!\n");
+ return;
+ }
+
+ /* Set up RSS */
+ ixl_config_rss(pf);
+
+ /* Add protocol filters to list */
+ ixl_init_filters(vsi);
+
+ /* Setup vlan's if needed */
+ ixl_setup_vlan_filters(vsi);
+
+ /* Set up MSI/X routing and the ITR settings */
+ if (pf->enable_msix) {
+ ixl_configure_queue_intr_msix(pf);
+ ixl_configure_itr(pf);
+ } else
+ ixl_configure_legacy(pf);
+
+ ixl_enable_rings(vsi);
+
+ // i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+
+ ixl_reconfigure_filters(vsi);
+
+ /* And now turn on interrupts */
+ // TODO: Something is wrong here...
+ ixl_enable_adminq(hw);
+ ixl_enable_intr(vsi);
+}
+
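+/*
+ * Quiesce the hardware: mask interrupts (only ring interrupts when
+ * VFs are active) and disable the VSI's TX/RX rings.
+ */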
+void
+ixl_if_stop(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ INIT_DEBUGOUT("ixl_if_stop: begin\n");
+
+ if (pf->num_vfs == 0)
+ ixl_disable_intr(vsi);
+ else
+ ixl_disable_rings_intr(vsi);
+
+ ixl_disable_rings(vsi);
+}
+
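+/*
+ * MSI-X vector layout: vector 0 services the admin queue; one vector is
+ * then assigned to each RX queue. TX queues are handled by iflib
+ * softirqs rather than by hardware vectors of their own.
+ */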
+static int
+ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_tx_queue *tx_que = vsi->tx_queues;
+ int err, i, rid, vector = 0;
+ char buf[16];
+
+ /* Admin queue is vector 0 */
+ rid = vector + 1;
+
+ err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
+ ixl_msix_adminq, pf, 0, "aq");
+ if (err) {
+ iflib_irq_free(ctx, &vsi->irq);
+ device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler");
+ return (err);
+ }
+ pf->admvec = vector;
+ ++vector;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");
+
+ /* Now set up the stations */
+ for (i = 0; i < vsi->num_rx_queues; i++, vector++, que++) {
+ rid = vector + 1;
+
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX,
+ ixl_msix_que, que, que->rxr.me, buf);
+ if (err) {
+ device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err);
+ vsi->num_rx_queues = i + 1;
+ goto fail;
}
+ que->msix = vector;
+ }
+
+ for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ /* que has walked past the RX array; pair each TX queue with an RX vector instead */
+ rid = vsi->rx_queues[i % vsi->num_rx_queues].msix + 1;
+ iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
}
+
+ return (0);
+fail:
+ iflib_irq_free(ctx, &vsi->irq);
+ que = vsi->rx_queues;
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ iflib_irq_free(ctx, &que->que_irq);
+ return (err);
+}
+
+/* Enable all interrupts */
+static void
+ixl_if_enable_intr(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ ixl_enable_adminq(hw);
+ /* Enable queue interrupts */
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ /* TODO: Queue index parameter is probably wrong */
+ ixl_enable_queue(hw, que->rxr.me);
+}
+
+/* Disable all interrupts */
+static void
+ixl_if_disable_intr(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ ixl_disable_adminq(hw);
+ /* Disable queue interrupts */
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ /* TODO: Queue index parameter is probably wrong */
+ ixl_disable_queue(hw, que->rxr.me);
+}
+
+/* Enable queue interrupt */
+static int
+ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_rx_queue *que = &vsi->rx_queues[rxqid];
+
+ ixl_enable_queue(hw, que->rxr.me);
+
+ return (0);
+}
+
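+/*
+ * iflib has already allocated DMA memory for the descriptor rings; this
+ * callback only wires the supplied virtual/physical addresses into the
+ * per-queue driver state.
+ */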
+static int
+ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_tx_queue *que;
+ int i;
+
+ MPASS(vsi->num_tx_queues > 0);
+ MPASS(ntxqs == 1);
+ MPASS(vsi->num_tx_queues == ntxqsets);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->tx_queues =
+ (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ txr->me = i;
+ que->vsi = vsi;
+
+ /* get the virtual and physical address of the hardware queues */
+ txr->tail = I40E_QTX_TAIL(txr->me);
+ txr->tx_base = (struct i40e_tx_desc *)vaddrs[i];
+ txr->tx_paddr = paddrs[i];
+ txr->que = que;
+ }
+
+ // TODO: Do a config_gtask_init for admin queue here?
+ // iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod, "mod_task");
+
+ device_printf(iflib_get_dev(ctx), "%s: allocated for %d txqs\n", __func__, vsi->num_tx_queues);
+ return (0);
+}
+
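+/* RX-side counterpart of ixl_if_tx_queues_alloc */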
+static int
+ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_rx_queue *que;
+ // if_shared_ctx_t sctx;
+ int i;
+
+ MPASS(vsi->num_rx_queues > 0);
+ MPASS(nrxqs == 1);
+ MPASS(vsi->num_rx_queues == nrxqsets);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->rx_queues =
+ (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
+ nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ rxr->me = i;
+ que->vsi = vsi;
+
+ /* get the virtual and physical address of the hardware queues */
+ rxr->tail = I40E_QRX_TAIL(rxr->me);
+ rxr->rx_base = (union i40e_rx_desc *)vaddrs[i];
+ rxr->rx_paddr = paddrs[i];
+ rxr->que = que;
+ }
+
+ device_printf(iflib_get_dev(ctx), "%s: allocated for %d rxqs\n", __func__, vsi->num_rx_queues);
+ return (0);
+}
+
+static void
+ixl_if_queues_free(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+
+ if (vsi->tx_queues != NULL) {
+ free(vsi->tx_queues, M_IXL);
+ vsi->tx_queues = NULL;
+ }
+ if (vsi->rx_queues != NULL) {
+ free(vsi->rx_queues, M_IXL);
+ vsi->rx_queues = NULL;
+ }
+}
+
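+/*
+ * Drain the admin receive queue, dispatch events (link status, VF
+ * messages), then push the resulting link state up through iflib.
+ */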
+static void
+ixl_if_update_admin_status(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_arq_event_info event;
+ i40e_status ret;
+ u32 loop = 0;
+ u16 opcode, result;
+
+ /* TODO: Split up
+ * - Update admin queue stuff
+ * - Update link status
+ * - Enqueue aq task
+ * - Re-enable admin intr
+ */
+
+ // TODO: Does this belong here?
+ if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
+ /* Flag cleared at end of this function */
+ ixl_handle_empr_reset(pf);
+ return;
+ }
+
+ event.buf_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.buf_len,
+ M_IXL, M_NOWAIT | M_ZERO);
+ if (!event.msg_buf) {
+ printf("Unable to allocate adminq memory\n");
+ return;
+ }
+
+ /* clean and process any events */
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &result);
+ if (ret)
+ break;
+ opcode = LE16_TO_CPU(event.desc.opcode);
+ ixl_dbg(pf, IXL_DBG_AQ,
+ "%s: Admin Queue event: %#06x\n", __func__, opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ ixl_link_event(pf, &event);
+ // TODO: Replace with admin status event function call?
+ //ixl_update_link_status(pf);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+#ifdef PCI_IOV
+ ixl_handle_vf_msg(pf, &event);
#endif
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ break;
+ default:
+#ifdef IXL_DEBUG
+ printf("AdminQ unknown event %x\n", opcode);
+#endif
+ break;
+ }
+
+ } while (result && (loop++ < IXL_ADM_LIMIT));
+
+#if 0 // I'm pretty sure this is unnecessary
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+#endif
+ free(event.msg_buf, M_IXL);
+
+ /* XXX: This updates the link status */
+ if (pf->link_up) {
+ if (vsi->link_active == FALSE) {
+ vsi->link_active = TRUE;
+ /* should actually be negotiated value */
+ iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
+#if 0
+ ixgbe_ping_all_vfs(adapter);
+#endif
+
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ vsi->link_active = FALSE;
+ iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
+#if 0
+ ixgbe_ping_all_vfs(adapter);
+#endif
+ }
+ }
+
+ /* Re-enable link interrupts */
+ // IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
+
+ /*
+ * If there are still messages to process, reschedule ourselves.
+ * Otherwise, re-enable our interrupt and go to sleep.
+ */
+ if (result > 0)
+ iflib_admin_intr_deferred(ctx);
+ else
+ /* TODO: Link/adminq interrupt should be re-enabled in IFDI_LINK_INTR_ENABLE */
+ ixl_enable_intr(vsi);
+}
+
+static void
+ixl_if_multi_set(if_ctx_t ctx)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ int mcnt = 0, flags;
+
+ IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
+
+ mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
+ /* delete existing MC filters */
+ ixl_del_multi(vsi);
+
+ if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
+ i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, TRUE, NULL);
+ return;
+ }
+ /* (re-)install filters for all mcast addresses */
+ mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
+
+ if (mcnt > 0) {
+ flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+ ixl_add_hw_filters(vsi, flags, mcnt);
+ }
+
+ IOCTL_DEBUGOUT("ixl_if_multi_set: end");
+}
+
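+/*
+ * Reject an MTU that would exceed the hardware maximum frame size;
+ * otherwise record the new maximum frame size for iflib.
+ */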
+static int
+ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
+ ETHER_VLAN_ENCAP_LEN)
+ return (EINVAL);
+
+ vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ ETHER_VLAN_ENCAP_LEN;
-#ifdef DEV_NETMAP
- netmap_detach(vsi->ifp);
-#endif /* DEV_NETMAP */
- ixl_pf_qmgr_destroy(&pf->qmgr);
- ixl_free_pci_resources(pf);
- bus_generic_detach(dev);
- if_free(vsi->ifp);
- ixl_free_vsi(vsi);
- IXL_PF_LOCK_DESTROY(pf);
return (0);
}
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
+static void
+ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ INIT_DEBUGOUT("ixl_media_status: begin");
+
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!pf->link_up) {
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ /* Hardware is always full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+
+ switch (hw->phy.link_info.phy_type) {
+ /* 100 M */
+ case I40E_PHY_TYPE_100BASE_TX:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ /* 1 G */
+ case I40E_PHY_TYPE_1000BASE_T:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case I40E_PHY_TYPE_1000BASE_SX:
+ ifmr->ifm_active |= IFM_1000_SX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_LX:
+ ifmr->ifm_active |= IFM_1000_LX;
+ break;
+ /* 10 G */
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_SR:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_LR:
+ ifmr->ifm_active |= IFM_10G_LR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ ifmr->ifm_active |= IFM_10G_T;
+ break;
+ /* 40 G */
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ifmr->ifm_active |= IFM_40G_CR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ifmr->ifm_active |= IFM_40G_LR4;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ /* ERJ: What's the difference between these? */
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ifmr->ifm_active |= IFM_10G_CR1;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ifmr->ifm_active |= IFM_10G_KX4;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ifmr->ifm_active |= IFM_20G_KR2;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ifmr->ifm_active |= IFM_40G_KR4;
+ break;
+ case I40E_PHY_TYPE_XLPPI:
+ ifmr->ifm_active |= IFM_40G_XLPPI;
+ break;
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+ /* Report flow control status as well */
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+
+}
+
+static int
+ixl_if_media_change(if_ctx_t ctx)
+{
+ struct ifmedia *ifm = iflib_get_media(ctx);
+
+ INIT_DEBUGOUT("ixl_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if_printf(iflib_get_ifp(ctx), "Media change is currently not supported.\n");
+ return (ENODEV);
+}
+
+static int
+ixl_if_promisc_set(if_ctx_t ctx, int flags)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct i40e_hw *hw = vsi->hw;
+ int err;
+ bool uni = FALSE, multi = FALSE;
+
+ if (flags & IFF_ALLMULTI ||
+ if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
+ multi = TRUE;
+ if (flags & IFF_PROMISC)
+ uni = TRUE;
+
+ err = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vsi->seid, uni, NULL, false);
+ if (err)
+ return (err);
+ err = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, multi, NULL);
+ return (err);
+}
+
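+/*
+ * Per-queue watchdog: nudge a busy TX queue with a software interrupt,
+ * and once per tick (qid 0) kick the admin task and refresh stats.
+ */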
+static void
+ixl_if_timer(if_ctx_t ctx, uint16_t qid)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_tx_queue *que = &vsi->tx_queues[qid];
+ u32 mask;
+
+ /*
+ ** Check status of the queues
+ */
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+
+ /* If queue param has outstanding work, trigger sw irq */
+ // TODO: TX queues in iflib don't use HW interrupts; does this do anything?
+ if (que->busy)
+ wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask);
+
+ if (qid != 0)
+ return;
+
+ /* Fire off the adminq task */
+ iflib_admin_intr_deferred(ctx);
+
+ /* Update stats */
+ ixl_update_stats_counters(pf);
+}
+
+static void
+ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ ++vsi->num_vlans;
+ ixl_add_filter(vsi, hw->mac.addr, vtag);
+}
+
+static void
+ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ struct i40e_hw *hw = vsi->hw;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ --vsi->num_vlans;
+ ixl_del_filter(vsi, hw->mac.addr, vtag);
+}
+
+static uint64_t
+ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct ixl_vsi *vsi = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (vsi->ipackets);
+ case IFCOUNTER_IERRORS:
+ return (vsi->ierrors);
+ case IFCOUNTER_OPACKETS:
+ return (vsi->opackets);
+ case IFCOUNTER_OERRORS:
+ return (vsi->oerrors);
+ case IFCOUNTER_COLLISIONS:
+ /* Collisions are impossible in full-duplex 10G/40G Ethernet */
+ return (0);
+ case IFCOUNTER_IBYTES:
+ return (vsi->ibytes);
+ case IFCOUNTER_OBYTES:
+ return (vsi->obytes);
+ case IFCOUNTER_IMCASTS:
+ return (vsi->imcasts);
+ case IFCOUNTER_OMCASTS:
+ return (vsi->omcasts);
+ case IFCOUNTER_IQDROPS:
+ return (vsi->iqdrops);
+ case IFCOUNTER_OQDROPS:
+ return (vsi->oqdrops);
+ case IFCOUNTER_NOPROTO:
+ return (vsi->noproto);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
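+/* if_multi_apply() callback: add one link-level address to the VSI's MC filter list */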
+static int
+ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
+{
+ struct ixl_vsi *vsi = arg;
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ return (0);
+ ixl_add_mc_filter(vsi,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
+ return (1);
+}
static int
-ixl_shutdown(device_t dev)
+ixl_save_pf_tunables(struct ixl_pf *pf)
{
- struct ixl_pf *pf = device_get_softc(dev);
- ixl_stop(pf);
+ device_t dev = pf->dev;
+
+ /* Save tunable information */
+ pf->enable_msix = ixl_enable_msix;
+ pf->max_queues = ixl_max_queues;
+ pf->ringsz = ixl_ring_size;
+ pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
+ pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
+ //pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
+ //pf->tx_itr = ixl_tx_itr;
+ pf->rx_itr = ixl_rx_itr;
+ pf->dbg_mask = ixl_core_debug_mask;
+ pf->hw.debug_mask = ixl_shared_debug_mask;
+
+ if (ixl_ring_size < IXL_MIN_RING
+ || ixl_ring_size > IXL_MAX_RING
+ || ixl_ring_size % IXL_RING_INCREMENT != 0) {
+ device_printf(dev, "Invalid ring_size value of %d set!\n",
+ ixl_ring_size);
+ device_printf(dev, "ring_size must be between %d and %d, "
+ "inclusive, and must be a multiple of %d\n",
+ IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static int
+ixl_attach_get_link_status(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return error;
+ }
+ }
+
+ /* Determine link state */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
return (0);
}
Index: sys/dev/ixl/if_ixlv.c
===================================================================
--- sys/dev/ixl/if_ixlv.c
+++ sys/dev/ixl/if_ixlv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2001-2016, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,3073 +32,12 @@
******************************************************************************/
/*$FreeBSD$*/
-#include "ixl.h"
-#include "ixlv.h"
-
-/*********************************************************************
- * Driver version
- *********************************************************************/
-char ixlv_driver_version[] = "1.4.12-k";
-
-/*********************************************************************
- * PCI Device ID Table
- *
- * Used by probe to select devices to load on
- * Last field stores an index into ixlv_strings
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
- *********************************************************************/
-
-static ixl_vendor_info_t ixlv_vendor_info_array[] =
-{
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
- /* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710/X722 VF Driver"
-};
-
-
-/*********************************************************************
- * Function prototypes
- *********************************************************************/
-static int ixlv_probe(device_t);
-static int ixlv_attach(device_t);
-static int ixlv_detach(device_t);
-static int ixlv_shutdown(device_t);
-static void ixlv_init_locked(struct ixlv_sc *);
-static int ixlv_allocate_pci_resources(struct ixlv_sc *);
-static void ixlv_free_pci_resources(struct ixlv_sc *);
-static int ixlv_assign_msix(struct ixlv_sc *);
-static int ixlv_init_msix(struct ixlv_sc *);
-static int ixlv_init_taskqueue(struct ixlv_sc *);
-static int ixlv_setup_queues(struct ixlv_sc *);
-static void ixlv_config_rss(struct ixlv_sc *);
-static void ixlv_stop(struct ixlv_sc *);
-static void ixlv_add_multi(struct ixl_vsi *);
-static void ixlv_del_multi(struct ixl_vsi *);
-static void ixlv_free_queues(struct ixl_vsi *);
-static int ixlv_setup_interface(device_t, struct ixlv_sc *);
-static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
-
-static int ixlv_media_change(struct ifnet *);
-static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
-
-static void ixlv_local_timer(void *);
-
-static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
-static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
-static void ixlv_init_filters(struct ixlv_sc *);
-static void ixlv_free_filters(struct ixlv_sc *);
-
-static void ixlv_msix_que(void *);
-static void ixlv_msix_adminq(void *);
-static void ixlv_do_adminq(void *, int);
-static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
-static void ixlv_handle_que(void *, int);
-static int ixlv_reset(struct ixlv_sc *);
-static int ixlv_reset_complete(struct i40e_hw *);
-static void ixlv_set_queue_rx_itr(struct ixl_queue *);
-static void ixlv_set_queue_tx_itr(struct ixl_queue *);
-static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
- enum i40e_status_code);
-static void ixlv_configure_itr(struct ixlv_sc *);
-
-static void ixlv_enable_adminq_irq(struct i40e_hw *);
-static void ixlv_disable_adminq_irq(struct i40e_hw *);
-static void ixlv_enable_queue_irq(struct i40e_hw *, int);
-static void ixlv_disable_queue_irq(struct i40e_hw *, int);
-
-static void ixlv_setup_vlan_filters(struct ixlv_sc *);
-static void ixlv_register_vlan(void *, struct ifnet *, u16);
-static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
-
-static void ixlv_init_hw(struct ixlv_sc *);
-static int ixlv_setup_vc(struct ixlv_sc *);
-static int ixlv_vf_config(struct ixlv_sc *);
-
-static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
- struct ifnet *, int);
-
-static void ixlv_add_sysctls(struct ixlv_sc *);
-#ifdef IXL_DEBUG
-static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
-static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
-#endif
-
-/*********************************************************************
- * FreeBSD Device Interface Entry Points
- *********************************************************************/
-
-static device_method_t ixlv_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, ixlv_probe),
- DEVMETHOD(device_attach, ixlv_attach),
- DEVMETHOD(device_detach, ixlv_detach),
- DEVMETHOD(device_shutdown, ixlv_shutdown),
- {0, 0}
-};
-
-static driver_t ixlv_driver = {
- "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
-};
-
-devclass_t ixlv_devclass;
-DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
-
-MODULE_DEPEND(ixlv, pci, 1, 1, 1);
-MODULE_DEPEND(ixlv, ether, 1, 1, 1);
-
-/*
-** TUNEABLE PARAMETERS:
-*/
-
-static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
- "IXLV driver parameters");
-
-/*
-** Number of descriptors per ring:
-** - TX and RX are the same size
-*/
-static int ixlv_ringsz = IXL_DEFAULT_RING;
-TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixlv_ringsz, 0, "Descriptor Ring Size");
-
-/* Set to zero to auto calculate */
-int ixlv_max_queues = 0;
-TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
- &ixlv_max_queues, 0, "Number of Queues");
-
-/*
-** Number of entries in Tx queue buf_ring.
-** Increasing this will reduce the number of
-** errors when transmitting fragmented UDP
-** packets.
-*/
-static int ixlv_txbrsz = DEFAULT_TXBRSZ;
-TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
- &ixlv_txbrsz, 0, "TX Buf Ring Size");
-
-/*
-** Controls for Interrupt Throttling
-** - true/false for dynamic adjustment
-** - default values for static ITR
-*/
-int ixlv_dynamic_rx_itr = 0;
-TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
- &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
-
-int ixlv_dynamic_tx_itr = 0;
-TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
- &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
-
-int ixlv_rx_itr = IXL_ITR_8K;
-TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
- &ixlv_rx_itr, 0, "RX Interrupt Rate");
-
-int ixlv_tx_itr = IXL_ITR_4K;
-TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
- &ixlv_tx_itr, 0, "TX Interrupt Rate");
-
-/*********************************************************************
- * Device identification routine
- *
- * ixlv_probe determines if the driver should be loaded on
- *  the hardware based on the PCI vendor/device ID of the device.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-ixlv_probe(device_t dev)
-{
- ixl_vendor_info_t *ent;
-
- u16 pci_vendor_id, pci_device_id;
- u16 pci_subvendor_id, pci_subdevice_id;
- char device_name[256];
-
-#if 0
- INIT_DEBUGOUT("ixlv_probe: begin");
+#ifndef KLD_MODULE
+#include "opt_iflib.h"
#endif
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixlv_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
-
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
-
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
-			snprintf(device_name, sizeof(device_name), "%s, Version - %s",
- ixlv_strings[ent->index],
- ixlv_driver_version);
- device_set_desc_copy(dev, device_name);
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
-}
-
-/*********************************************************************
- * Device initialization routine
- *
- * The attach entry point is called when the driver is being loaded.
- * This routine identifies the type of hardware, allocates all resources
- * and initializes the hardware.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixlv_attach(device_t dev)
-{
- struct ixlv_sc *sc;
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
- int error = 0;
-
- INIT_DBG_DEV(dev, "begin");
-
- /* Allocate, clear, and link in our primary soft structure */
- sc = device_get_softc(dev);
- sc->dev = sc->osdep.dev = dev;
- hw = &sc->hw;
- vsi = &sc->vsi;
- vsi->dev = dev;
-
- /* Initialize hw struct */
- ixlv_init_hw(sc);
-
- /* Allocate filter lists */
- ixlv_init_filters(sc);
-
- /* Core Lock Init */
- mtx_init(&sc->mtx, device_get_nameunit(dev),
- "IXL SC Lock", MTX_DEF);
-
- /* Set up the timer callout */
- callout_init_mtx(&sc->timer, &sc->mtx, 0);
-
- /* Do PCI setup - map BAR0, etc */
- if (ixlv_allocate_pci_resources(sc)) {
- device_printf(dev, "%s: Allocation of PCI resources failed\n",
- __func__);
- error = ENXIO;
- goto err_early;
- }
-
- INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
-
- error = i40e_set_mac_type(hw);
- if (error) {
- device_printf(dev, "%s: set_mac_type failed: %d\n",
- __func__, error);
- goto err_pci_res;
- }
-
- error = ixlv_reset_complete(hw);
- if (error) {
- device_printf(dev, "%s: Device is still being reset\n",
- __func__);
- goto err_pci_res;
- }
-
- INIT_DBG_DEV(dev, "VF Device is ready for configuration");
-
- error = ixlv_setup_vc(sc);
- if (error) {
- device_printf(dev, "%s: Error setting up PF comms, %d\n",
- __func__, error);
- goto err_pci_res;
- }
-
- INIT_DBG_DEV(dev, "PF API version verified");
-
- /* Need API version before sending reset message */
- error = ixlv_reset(sc);
- if (error) {
- device_printf(dev, "VF reset failed; reload the driver\n");
- goto err_aq;
- }
-
- INIT_DBG_DEV(dev, "VF reset complete");
-
- /* Ask for VF config from PF */
- error = ixlv_vf_config(sc);
- if (error) {
- device_printf(dev, "Error getting configuration from PF: %d\n",
- error);
- goto err_aq;
- }
-
- device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
- sc->vf_res->num_vsis,
- sc->vf_res->num_queue_pairs,
- sc->vf_res->max_vectors,
- sc->vf_res->rss_key_size,
- sc->vf_res->rss_lut_size);
-#ifdef IXL_DEBUG
- device_printf(dev, "Offload flags: 0x%b\n",
- sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
-#endif
-
-	/* Got the VF config message back from the PF; now we can parse it */
- for (int i = 0; i < sc->vf_res->num_vsis; i++) {
- if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- sc->vsi_res = &sc->vf_res->vsi_res[i];
- }
- if (!sc->vsi_res) {
- device_printf(dev, "%s: no LAN VSI found\n", __func__);
- error = EIO;
- goto err_res_buf;
- }
-
- INIT_DBG_DEV(dev, "Resource Acquisition complete");
-
-	/* If no MAC address was assigned, just make a random one */
- if (!ixlv_check_ether_addr(hw->mac.addr)) {
- u8 addr[ETHER_ADDR_LEN];
- arc4rand(&addr, sizeof(addr), 0);
-		addr[0] &= 0xFE;	/* clear the multicast bit */
-		addr[0] |= 0x02;	/* set the locally-administered bit */
- bcopy(addr, hw->mac.addr, sizeof(addr));
- }
-
- /* Now that the number of queues for this VF is known, set up interrupts */
- sc->msix = ixlv_init_msix(sc);
- /* We fail without MSIX support */
- if (sc->msix == 0) {
- error = ENXIO;
- goto err_res_buf;
- }
-
- vsi->id = sc->vsi_res->vsi_id;
- vsi->back = (void *)sc;
- sc->link_up = TRUE;
-
- /* This allocates the memory and early settings */
- if (ixlv_setup_queues(sc) != 0) {
- device_printf(dev, "%s: setup queues failed!\n",
- __func__);
- error = EIO;
- goto out;
- }
-
-	/* Set up the stack interface */
- if (ixlv_setup_interface(dev, sc) != 0) {
- device_printf(dev, "%s: setup interface failed!\n",
- __func__);
- error = EIO;
- goto out;
- }
-
- INIT_DBG_DEV(dev, "Queue memory and interface setup");
-
- /* Do queue interrupt setup */
- if (ixlv_assign_msix(sc) != 0) {
- device_printf(dev, "%s: allocating queue interrupts failed!\n",
- __func__);
- error = ENXIO;
- goto out;
- }
-
- /* Start AdminQ taskqueue */
- ixlv_init_taskqueue(sc);
-
- /* Initialize stats */
- bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
- ixlv_add_sysctls(sc);
-
- /* Register for VLAN events */
- vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
- vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
-
- /* We want AQ enabled early */
- ixlv_enable_adminq_irq(hw);
-
- /* Set things up to run init */
- sc->init_state = IXLV_INIT_READY;
-
- ixl_vc_init_mgr(sc, &sc->vc_mgr);
-
- INIT_DBG_DEV(dev, "end");
- return (error);
-
-out:
- ixlv_free_queues(vsi);
-err_res_buf:
- free(sc->vf_res, M_DEVBUF);
-err_aq:
- i40e_shutdown_adminq(hw);
-err_pci_res:
- ixlv_free_pci_resources(sc);
-err_early:
- mtx_destroy(&sc->mtx);
- ixlv_free_filters(sc);
- INIT_DBG_DEV(dev, "end: error %d", error);
- return (error);
-}
-
-/*********************************************************************
- * Device removal routine
- *
- * The detach entry point is called when the driver is being removed.
- * This routine stops the adapter and deallocates all the resources
- * that were allocated for driver operation.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixlv_detach(device_t dev)
-{
- struct ixlv_sc *sc = device_get_softc(dev);
- struct ixl_vsi *vsi = &sc->vsi;
- struct i40e_hw *hw = &sc->hw;
- enum i40e_status_code status;
-
- INIT_DBG_DEV(dev, "begin");
-
-	/* Make sure VLANs are not using the driver */
- if (vsi->ifp->if_vlantrunk != NULL) {
- if_printf(vsi->ifp, "Vlan in use, detach first\n");
- return (EBUSY);
- }
-
- /* Stop driver */
- ether_ifdetach(vsi->ifp);
- if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_stop(sc);
- mtx_unlock(&sc->mtx);
- }
-
- /* Unregister VLAN events */
- if (vsi->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
- if (vsi->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
-
- /* Drain VC mgr */
- callout_drain(&sc->vc_mgr.callout);
-
- ixlv_disable_adminq_irq(hw);
- ixlv_teardown_adminq_msix(sc);
- /* Drain admin queue taskqueue */
- taskqueue_free(sc->tq);
- status = i40e_shutdown_adminq(&sc->hw);
- if (status != I40E_SUCCESS) {
- device_printf(dev,
- "i40e_shutdown_adminq() failed with status %s\n",
- i40e_stat_str(hw, status));
- }
-
- if_free(vsi->ifp);
- free(sc->vf_res, M_DEVBUF);
- ixlv_free_pci_resources(sc);
- ixlv_free_queues(vsi);
- ixlv_free_filters(sc);
-
- bus_generic_detach(dev);
- mtx_destroy(&sc->mtx);
- INIT_DBG_DEV(dev, "end");
- return (0);
-}
-
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
-
-static int
-ixlv_shutdown(device_t dev)
-{
- struct ixlv_sc *sc = device_get_softc(dev);
-
- INIT_DBG_DEV(dev, "begin");
-
- mtx_lock(&sc->mtx);
- ixlv_stop(sc);
- mtx_unlock(&sc->mtx);
-
- INIT_DBG_DEV(dev, "end");
- return (0);
-}
-
-/*
- * Configure TXCSUM(IPV6) and TSO(4/6)
- *	- the hardware handles these together, so toggling
- *	  one may require adjusting the other
- */
-static void
-ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
-{
- /* Enable/disable TXCSUM/TSO4 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- ifp->if_capenable |= IFCAP_TXCSUM;
- /* enable TXCSUM, restore TSO if previously enabled */
- if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable |= IFCAP_TSO4;
- }
- }
- else if (mask & IFCAP_TSO4) {
- ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- if_printf(ifp,
- "TSO4 requires txcsum, enabling both...\n");
- }
-	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM)
- ifp->if_capenable &= ~IFCAP_TXCSUM;
- else if (mask & IFCAP_TSO4)
- ifp->if_capenable |= IFCAP_TSO4;
-	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
- && (ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
- if_printf(ifp,
- "TSO4 requires txcsum, disabling both...\n");
- } else if (mask & IFCAP_TSO4)
- ifp->if_capenable &= ~IFCAP_TSO4;
- }
-
- /* Enable/disable TXCSUM_IPV6/TSO6 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
- if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable |= IFCAP_TSO6;
- }
- } else if (mask & IFCAP_TSO6) {
- ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- if_printf(ifp,
- "TSO6 requires txcsum6, enabling both...\n");
- }
-	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6)
- ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
- else if (mask & IFCAP_TSO6)
- ifp->if_capenable |= IFCAP_TSO6;
- } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && (ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- if_printf(ifp,
- "TSO6 requires txcsum6, disabling both...\n");
- } else if (mask & IFCAP_TSO6)
- ifp->if_capenable &= ~IFCAP_TSO6;
- }
-}
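
The coupling enforced above is easier to see stated directly; a compact restatement of the invariant (a summary, not driver code):

/*
 * Invariant maintained by ixlv_cap_txcsum_tso():
 *   IFCAP_TSO4 on  => IFCAP_TXCSUM on
 *   IFCAP_TSO6 on  => IFCAP_TXCSUM_IPV6 on
 * The IXL_FLAGS_KEEP_TSO4/6 flags remember a TSO capability that was
 * forced off along with its checksum offload, so that re-enabling the
 * checksum offload restores TSO as well.
 */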
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixlv_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixlv_sc *sc = vsi->back;
- struct ifreq *ifr = (struct ifreq *)data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixlv_init(vsi);
-#ifdef INET
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
-#endif
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
- mtx_lock(&sc->mtx);
- if (ifr->ifr_mtu > IXL_MAX_FRAME -
- ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
- error = EINVAL;
- IOCTL_DBG_IF(ifp, "mtu too large");
- } else {
- IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
- // ERJ: Interestingly enough, these types don't match
- ifp->if_mtu = (u_long)ifr->ifr_mtu;
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_init_locked(sc);
- }
- mtx_unlock(&sc->mtx);
- break;
- case SIOCSIFFLAGS:
- IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
- mtx_lock(&sc->mtx);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- ixlv_init_locked(sc);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_stop(sc);
- sc->if_flags = ifp->if_flags;
- mtx_unlock(&sc->mtx);
- break;
- case SIOCADDMULTI:
- IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_disable_intr(vsi);
- ixlv_add_multi(vsi);
- ixlv_enable_intr(vsi);
- mtx_unlock(&sc->mtx);
- }
- break;
- case SIOCDELMULTI:
- IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
- if (sc->init_state == IXLV_RUNNING) {
- mtx_lock(&sc->mtx);
- ixlv_disable_intr(vsi);
- ixlv_del_multi(vsi);
- ixlv_enable_intr(vsi);
- mtx_unlock(&sc->mtx);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
-
- ixlv_cap_txcsum_tso(vsi, ifp, mask);
-
- if (mask & IFCAP_RXCSUM)
- ifp->if_capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ixlv_init(vsi);
- }
- VLAN_CAPABILITIES(ifp);
-
- break;
- }
-
- default:
- IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-/*
-** Reinitializing the VF is unfortunately more complicated than
-** reinitializing a physical device: the PF must more or less
-** completely recreate our memory, so many things that traditional
-** drivers do only once at attach must now be redone at each
-** reinitialization. This function does that 'prelude' so we can
-** then call the normal locked init code.
-*/
-int
-ixlv_reinit_locked(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ifnet *ifp = vsi->ifp;
- struct ixlv_mac_filter *mf, *mf_temp;
- struct ixlv_vlan_filter *vf;
- int error = 0;
-
- INIT_DBG_IF(ifp, "begin");
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_stop(sc);
-
- error = ixlv_reset(sc);
-
- INIT_DBG_IF(ifp, "VF was reset");
-
- /* set the state in case we went thru RESET */
- sc->init_state = IXLV_RUNNING;
-
- /*
- ** Resetting the VF drops all filters from hardware;
- ** we need to mark them to be re-added in init.
- */
- SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
- if (mf->flags & IXL_FILTER_DEL) {
- SLIST_REMOVE(sc->mac_filters, mf,
- ixlv_mac_filter, next);
- free(mf, M_DEVBUF);
- } else
- mf->flags |= IXL_FILTER_ADD;
- }
- if (vsi->num_vlans != 0)
- SLIST_FOREACH(vf, sc->vlan_filters, next)
- vf->flags = IXL_FILTER_ADD;
- else { /* clean any stale filters */
- while (!SLIST_EMPTY(sc->vlan_filters)) {
- vf = SLIST_FIRST(sc->vlan_filters);
- SLIST_REMOVE_HEAD(sc->vlan_filters, next);
- free(vf, M_DEVBUF);
- }
- }
-
- ixlv_enable_adminq_irq(hw);
- ixl_vc_flush(&sc->vc_mgr);
-
- INIT_DBG_IF(ifp, "end");
- return (error);
-}
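
After a VF reset the hardware filter table is empty, so the soft lists above are re-marked rather than rebuilt; a compact restatement (a summary, not driver code):

/*
 * Filter re-marking in ixlv_reinit_locked(), after the hw table is wiped:
 *   MAC filter marked DEL -> freed (the reset already removed it)
 *   any other MAC filter  -> marked ADD, re-pushed to the PF by init
 *   VLAN filters          -> marked ADD if any VLANs exist, else freed
 */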
-
-static void
-ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
- enum i40e_status_code code)
-{
- struct ixlv_sc *sc;
-
- sc = arg;
-
- /*
- * Ignore "Adapter Stopped" message as that happens if an ifconfig down
- * happens while a command is in progress, so we don't print an error
- * in that case.
- */
- if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
- if_printf(sc->vsi.ifp,
- "Error %s waiting for PF to complete operation %d\n",
- i40e_stat_str(&sc->hw, code), cmd->request);
- }
-}
-
-static void
-ixlv_init_locked(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- struct ifnet *ifp = vsi->ifp;
- int error = 0;
-
- INIT_DBG_IF(ifp, "begin");
-
- IXLV_CORE_LOCK_ASSERT(sc);
-
- /* Do a reinit first if an init has already been done */
- if ((sc->init_state == IXLV_RUNNING) ||
- (sc->init_state == IXLV_RESET_REQUIRED) ||
- (sc->init_state == IXLV_RESET_PENDING))
- error = ixlv_reinit_locked(sc);
- /* Don't bother with init if we failed reinit */
- if (error)
- goto init_done;
-
- /* Remove existing MAC filter if new MAC addr is set */
- if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
- error = ixlv_del_mac_filter(sc, hw->mac.addr);
- if (error == 0)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
- IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
- sc);
- }
-
- /* Check for an LAA mac address... */
- bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
-
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
-
- /* Add mac filter for this VF to PF */
- if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
- error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
- if (!error || error == EEXIST)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
- IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
- sc);
- }
-
-	/* Set up VLANs if needed */
- ixlv_setup_vlan_filters(sc);
-
- /* Prepare the queues for operation */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct rx_ring *rxr = &que->rxr;
-
- ixl_init_tx_ring(que);
-
- if (vsi->max_frame_size <= MCLBYTES)
- rxr->mbuf_sz = MCLBYTES;
- else
- rxr->mbuf_sz = MJUMPAGESIZE;
- ixl_init_rx_ring(que);
- }
-
- /* Set initial ITR values */
- ixlv_configure_itr(sc);
-
- /* Configure queues */
- ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
- IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
-
- /* Set up RSS */
- ixlv_config_rss(sc);
-
- /* Map vectors */
- ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
- IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
-
- /* Enable queues */
- ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
- IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
-
- /* Start the local timer */
- callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
-
- sc->init_state = IXLV_RUNNING;
-
-init_done:
- INIT_DBG_IF(ifp, "end");
- return;
-}
-
-/*
-** Init entry point for the stack
-*/
-void
-ixlv_init(void *arg)
-{
- struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
- struct ixlv_sc *sc = vsi->back;
- int retries = 0;
-
- /* Prevent init from running again while waiting for AQ calls
- * made in init_locked() to complete. */
- mtx_lock(&sc->mtx);
- if (sc->init_in_progress) {
- mtx_unlock(&sc->mtx);
- return;
- } else
- sc->init_in_progress = true;
-
- ixlv_init_locked(sc);
- mtx_unlock(&sc->mtx);
-
- /* Wait for init_locked to finish */
- while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
- && ++retries < IXLV_MAX_INIT_WAIT) {
- i40e_msec_pause(25);
- }
- if (retries >= IXLV_MAX_INIT_WAIT) {
- if_printf(vsi->ifp,
- "Init failed to complete in allotted time!\n");
- }
-
- mtx_lock(&sc->mtx);
- sc->init_in_progress = false;
- mtx_unlock(&sc->mtx);
-}
-
-/*
- * ixlv_attach() helper function; gathers information about
- * the (virtual) hardware for use elsewhere in the driver.
- */
-static void
-ixlv_init_hw(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
-
- /* Save off the information about this board */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- hw->bus.device = pci_get_slot(dev);
- hw->bus.func = pci_get_function(dev);
-}
-
-/*
- * ixlv_attach() helper function; initializes the admin queue
- * and attempts to establish contact with the PF by retrying
- * the initial "API version" message several times, until the
- * PF responds or the retry limit is reached.
- */
-static int
-ixlv_setup_vc(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- int error = 0, ret_error = 0, asq_retries = 0;
-	bool send_api_ver_retried = false;
-
-	/* Need to set these AQ parameters before initializing AQ */
- hw->aq.num_arq_entries = IXL_AQ_LEN;
- hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
- hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
-
- for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
- /* Initialize admin queue */
- error = i40e_init_adminq(hw);
- if (error) {
- device_printf(dev, "%s: init_adminq failed: %d\n",
- __func__, error);
- ret_error = 1;
- continue;
- }
-
- INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
- " send_api_ver attempt %d", i+1);
-
-retry_send:
- /* Send VF's API version */
- error = ixlv_send_api_ver(sc);
- if (error) {
- i40e_shutdown_adminq(hw);
- ret_error = 2;
- device_printf(dev, "%s: unable to send api"
- " version to PF on attempt %d, error %d\n",
- __func__, i+1, error);
- }
-
- asq_retries = 0;
- while (!i40e_asq_done(hw)) {
- if (++asq_retries > IXLV_AQ_MAX_ERR) {
- i40e_shutdown_adminq(hw);
- device_printf(dev, "Admin Queue timeout "
- "(waiting for send_api_ver), %d more tries...\n",
- IXLV_AQ_MAX_ERR - (i + 1));
- ret_error = 3;
- break;
- }
- i40e_msec_pause(10);
- }
- if (asq_retries > IXLV_AQ_MAX_ERR)
- continue;
-
- INIT_DBG_DEV(dev, "Sent API version message to PF");
-
- /* Verify that the VF accepts the PF's API version */
- error = ixlv_verify_api_ver(sc);
- if (error == ETIMEDOUT) {
- if (!send_api_ver_retried) {
- /* Resend message, one more time */
-				send_api_ver_retried = true;
- device_printf(dev,
- "%s: Timeout while verifying API version on first"
- " try!\n", __func__);
- goto retry_send;
- } else {
- device_printf(dev,
- "%s: Timeout while verifying API version on second"
- " try!\n", __func__);
- ret_error = 4;
- break;
- }
- }
- if (error) {
- device_printf(dev,
- "%s: Unable to verify API version,"
- " error %s\n", __func__, i40e_stat_str(hw, error));
- ret_error = 5;
- }
- break;
- }
-
- if (ret_error >= 4)
- i40e_shutdown_adminq(hw);
- return (ret_error);
-}
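
The send-then-poll pattern above (send an AQ message, then spin on i40e_asq_done() with short pauses up to a bounded retry count) recurs throughout VF bring-up; a minimal sketch of it as a standalone helper, with hypothetical names:

/* Hypothetical helper; illustrates the bounded ASQ poll used above. */
static int
example_poll_asq_done(struct i40e_hw *hw, int max_tries)
{
	for (int tries = 0; tries < max_tries; tries++) {
		if (i40e_asq_done(hw))
			return (0);	/* PF has consumed our message */
		i40e_msec_pause(10);	/* wait 10ms between checks */
	}
	return (ETIMEDOUT);		/* caller decides whether to retry */
}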
-
-/*
- * ixlv_attach() helper function; asks the PF for this VF's
- * configuration, and saves the information if it receives it.
- */
-static int
-ixlv_vf_config(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- int bufsz, error = 0, ret_error = 0;
- int asq_retries, retried = 0;
-
-retry_config:
- error = ixlv_send_vf_config_msg(sc);
- if (error) {
- device_printf(dev,
- "%s: Unable to send VF config request, attempt %d,"
- " error %d\n", __func__, retried + 1, error);
- ret_error = 2;
- }
-
- asq_retries = 0;
- while (!i40e_asq_done(hw)) {
- if (++asq_retries > IXLV_AQ_MAX_ERR) {
- device_printf(dev, "%s: Admin Queue timeout "
- "(waiting for send_vf_config_msg), attempt %d\n",
- __func__, retried + 1);
- ret_error = 3;
- goto fail;
- }
- i40e_msec_pause(10);
- }
-
- INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
- retried + 1);
-
- if (!sc->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
- sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
- if (!sc->vf_res) {
- device_printf(dev,
- "%s: Unable to allocate memory for VF configuration"
- " message from PF on attempt %d\n", __func__, retried + 1);
- ret_error = 1;
- goto fail;
- }
- }
-
- /* Check for VF config response */
- error = ixlv_get_vf_config(sc);
- if (error == ETIMEDOUT) {
- /* The 1st time we timeout, send the configuration message again */
- if (!retried) {
- retried++;
- goto retry_config;
- }
- device_printf(dev,
- "%s: ixlv_get_vf_config() timed out waiting for a response\n",
- __func__);
- }
- if (error) {
- device_printf(dev,
- "%s: Unable to get VF configuration from PF after %d tries!\n",
- __func__, retried + 1);
- ret_error = 4;
- }
- goto done;
-
-fail:
- free(sc->vf_res, M_DEVBUF);
-done:
- return (ret_error);
-}
-
-/*
- * Allocate MSI/X vectors, setup the AQ vector early
- */
-static int
-ixlv_init_msix(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- int rid, want, vectors, queues, available;
- int auto_max_queues;
-
- rid = PCIR_BAR(IXL_MSIX_BAR);
- sc->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!sc->msix_mem) {
- /* May not be enabled */
- device_printf(sc->dev,
- "Unable to map MSIX table\n");
- goto fail;
- }
-
- available = pci_msix_count(dev);
- if (available == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, sc->msix_mem);
- sc->msix_mem = NULL;
- goto fail;
- }
-
- /* Clamp queues to number of CPUs and # of MSI-X vectors available */
- auto_max_queues = min(mp_ncpus, available - 1);
- /* Clamp queues to # assigned to VF by PF */
- auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
-
- /* Override with tunable value if tunable is less than autoconfig count */
- if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
- queues = ixlv_max_queues;
- /* Use autoconfig amount if that's lower */
- else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
- device_printf(dev, "ixlv_max_queues (%d) is too large, using "
- "autoconfig amount (%d)...\n",
- ixlv_max_queues, auto_max_queues);
- queues = auto_max_queues;
- }
- /* Limit maximum auto-configured queues to 8 if no user value is set */
- else
- queues = min(auto_max_queues, 8);
-
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
-
- /*
- ** Want one vector (RX/TX pair) per queue
-	** plus an additional one for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(sc->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- goto fail;
- }
-
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
-#endif
-
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(sc->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- sc->msix = vectors;
- sc->vsi.num_queues = queues;
- }
-
- /* Next we need to setup the vector for the Admin Queue */
- rid = 1; /* zero vector + 1 */
- sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (sc->res == NULL) {
- device_printf(dev, "Unable to allocate"
-		    " bus resource: AQ interrupt\n");
- goto fail;
- }
- if (bus_setup_intr(dev, sc->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixlv_msix_adminq, sc, &sc->tag)) {
- sc->res = NULL;
- device_printf(dev, "Failed to register AQ handler");
- goto fail;
- }
- bus_describe_intr(dev, sc->res, sc->tag, "adminq");
-
- return (vectors);
-
-fail:
- /* The VF driver MUST use MSIX */
- return (0);
-}
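
The queue count chosen above is the result of successive clamps; a minimal sketch of that arithmetic, with hypothetical names (min() as in sys/param.h):

/* Hypothetical helper; mirrors the clamping in ixlv_init_msix(). */
static int
example_pick_queue_count(int ncpus, int msix_avail, int vf_qpairs, int tunable)
{
	/* One MSI-X vector is reserved for the admin queue. */
	int automax = min(ncpus, msix_avail - 1);

	/* Never exceed the queue pairs the PF assigned to this VF. */
	automax = min(automax, vf_qpairs);

	if (tunable != 0 && tunable <= automax)
		return (tunable);	/* tunable may lower the count */
	if (tunable != 0)
		return (automax);	/* too-large tunable falls back */
	return (min(automax, 8));	/* default cap when no tunable set */
}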
-
-static int
-ixlv_allocate_pci_resources(struct ixlv_sc *sc)
-{
- int rid;
- device_t dev = sc->dev;
-
- rid = PCIR_BAR(0);
- sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(sc->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
- return (ENXIO);
- }
-
- sc->osdep.mem_bus_space_tag =
- rman_get_bustag(sc->pci_mem);
- sc->osdep.mem_bus_space_handle =
- rman_get_bushandle(sc->pci_mem);
- sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
- sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
- sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
-
- sc->hw.back = &sc->osdep;
-
- /*
-	** Explicitly set the guest PCI BUSMASTER capability, and
-	** rewrite the ENABLE bit in the MSIX control register at
-	** this point, so that the host successfully initializes us.
-	**
-	** This must be done before accessing any registers.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- /* Disable adminq interrupts (just in case) */
- ixlv_disable_adminq_irq(&sc->hw);
-
- return (0);
-}
-
-static void
-ixlv_free_pci_resources(struct ixlv_sc *sc)
-{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = sc->dev;
-
- /* We may get here before stations are setup */
- if (que == NULL)
- goto early;
-
- /*
- ** Release all msix queue resources:
- */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- int rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- que->res = NULL;
- }
- }
-
-early:
- pci_release_msi(dev);
-
- if (sc->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
-
- if (sc->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), sc->pci_mem);
-}
-
-/*
- * Create taskqueue and task for Admin Queue interrupts.
- */
-static int
-ixlv_init_taskqueue(struct ixlv_sc *sc)
-{
- int error = 0;
-
- TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
-
- sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &sc->tq);
- taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
- device_get_nameunit(sc->dev));
-
- return (error);
-}
-
-/*********************************************************************
- *
- *  Set up MSIX interrupt resources and handlers for the VSI queues
- *
- **********************************************************************/
-static int
-ixlv_assign_msix(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 1;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
-
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixlv_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register que handler");
- return (error);
- }
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- bus_bind_intr(dev, que->res, cpu_id);
- que->msix = vector;
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixlv_handle_que, que);
- que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
+#ifdef IFLIB
+#include <dev/ixl/iflib_if_ixlv.c>
#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s que", device_get_nameunit(dev));
+#include <dev/ixl/legacy_if_ixlv.c>
#endif
-
- }
-
- return (0);
-}
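
Note the numbering convention above: MSI-X vector 0 (bus rid 1) was claimed for the admin queue in ixlv_init_msix(), so queue i is bound to vector i + 1 and bus rid i + 2. A compact restatement (a summary, not driver code):

/*
 * MSI-X layout after ixlv_init_msix() and ixlv_assign_msix():
 *   vector 0,   rid 1   -> admin queue interrupt
 *   vector i+1, rid i+2 -> queue i RX/TX pair interrupt
 */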
-
-/*
-** Requests a VF reset from the PF.
-**
-** Requires the VF's Admin Queue to be initialized.
-*/
-static int
-ixlv_reset(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- int error = 0;
-
- /* Ask the PF to reset us if we are initiating */
- if (sc->init_state != IXLV_RESET_PENDING)
- ixlv_request_reset(sc);
-
- i40e_msec_pause(100);
- error = ixlv_reset_complete(hw);
- if (error) {
- device_printf(dev, "%s: VF reset failed\n",
- __func__);
- return (error);
- }
-
- error = i40e_shutdown_adminq(hw);
- if (error) {
- device_printf(dev, "%s: shutdown_adminq failed: %d\n",
- __func__, error);
- return (error);
- }
-
- error = i40e_init_adminq(hw);
- if (error) {
- device_printf(dev, "%s: init_adminq failed: %d\n",
- __func__, error);
- return(error);
- }
-
- return (0);
-}
-
-static int
-ixlv_reset_complete(struct i40e_hw *hw)
-{
- u32 reg;
-
- /* Wait up to ~10 seconds */
- for (int i = 0; i < 100; i++) {
- reg = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-
- if ((reg == I40E_VFR_VFACTIVE) ||
- (reg == I40E_VFR_COMPLETED))
- return (0);
- i40e_msec_pause(100);
- }
-
- return (EBUSY);
-}
-
-
-/*********************************************************************
- *
- *  Set up the networking device structure and register an interface.
- *
- **********************************************************************/
-static int
-ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
-{
- struct ifnet *ifp;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
-
- INIT_DBG_DEV(dev, "begin");
-
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "%s: could not allocate ifnet"
- " structure!\n", __func__);
- return (-1);
- }
-
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = IF_Gbps(40);
- ifp->if_init = ixlv_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixlv_ioctl;
-
-#if __FreeBSD_version >= 1100000
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
-
- ifp->if_transmit = ixl_mq_start;
-
- ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
-
- ether_ifattach(ifp, sc->hw.mac.addr);
-
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
- ifp->if_capabilities |= IFCAP_TSO;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
-
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU
- | IFCAP_VLAN_HWCSUM
- | IFCAP_LRO;
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
-	** Don't turn this on by default: if vlans are
-	** created on another pseudo device (e.g. lagg),
-	** vlan events are not passed through, breaking
- ** operation, but with HW FILTER off it works. If
- ** using vlans directly on the ixl driver you can
- ** enable this and get full hardware tag filtering.
- */
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
- ixlv_media_status);
-
- // JFV Add media types later?
-
- ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
-
- INIT_DBG_DEV(dev, "end");
- return (0);
-}
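
For reference, the default maximum frame size computed above works out to 1522 bytes with the standard constants from net/ethernet.h:

/*
 * max_frame_size with the default MTU:
 *   1500 (ETHERMTU) + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
 *   + 4 (ETHER_VLAN_ENCAP_LEN) = 1522 bytes
 */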
-
-/*
-** Allocate and set up the interface queues
-*/
-static int
-ixlv_setup_queues(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi;
- struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
- int error = I40E_SUCCESS;
-
- vsi = &sc->vsi;
- vsi->back = (void *)sc;
- vsi->hw = &sc->hw;
- vsi->num_vlans = 0;
-
- /* Get memory for the station queues */
- if (!(vsi->queues =
- (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
- vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
- }
-
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- que->num_desc = ixlv_ringsz;
- que->me = i;
- que->vsi = vsi;
-
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL1(que->me);
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /*
-		** Create the TX descriptor ring; the extra u32 is
-		** added as the location for the head writeback value.
- */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(&sc->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
- M_WAITOK, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL1(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&sc->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
- /* Allocate receive soft structs for the ring */
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- }
- free(vsi->queues, M_DEVBUF);
-
-early:
- return (error);
-}
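
For concreteness, the ring sizing above with num_desc = 1024 (a hypothetical value; the descriptor sizes and alignment below are assumptions, see i40e_type.h):

/*
 * Example sizing with num_desc = 1024, assuming 16-byte TX and
 * 32-byte RX descriptors and DBA_ALIGN = 128:
 *   tsize = roundup2(1024 * 16 + sizeof(u32), 128) = 16512
 *           (the extra u32 holds the TX head writeback value)
 *   rsize = roundup2(1024 * 32, 128) = 32768
 */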
-
-/*
-** This routine is run via a vlan config EVENT;
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA; init will
-** repopulate the real table.
-*/
-static void
-ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = arg;
- struct ixlv_sc *sc = vsi->back;
- struct ixlv_vlan_filter *v;
-
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- /* Sanity check - make sure it doesn't already exist */
- SLIST_FOREACH(v, sc->vlan_filters, next) {
- if (v->vlan == vtag)
- return;
- }
-
- mtx_lock(&sc->mtx);
- ++vsi->num_vlans;
- v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
- SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
- v->vlan = vtag;
- v->flags = IXL_FILTER_ADD;
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
- IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
- mtx_unlock(&sc->mtx);
- return;
-}
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT; it removes our
-** entry from the soft VFTA.
-*/
-static void
-ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = arg;
- struct ixlv_sc *sc = vsi->back;
- struct ixlv_vlan_filter *v;
- int i = 0;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- mtx_lock(&sc->mtx);
- SLIST_FOREACH(v, sc->vlan_filters, next) {
- if (v->vlan == vtag) {
- v->flags = IXL_FILTER_DEL;
- ++i;
- --vsi->num_vlans;
- }
- }
- if (i)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
- IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
- mtx_unlock(&sc->mtx);
- return;
-}
-
-/*
-** Get a new filter and add it to the mac filter list.
-*/
-static struct ixlv_mac_filter *
-ixlv_get_mac_filter(struct ixlv_sc *sc)
-{
- struct ixlv_mac_filter *f;
-
- f = malloc(sizeof(struct ixlv_mac_filter),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (f)
- SLIST_INSERT_HEAD(sc->mac_filters, f, next);
-
- return (f);
-}
-
-/*
-** Find the filter with matching MAC address
-*/
-static struct ixlv_mac_filter *
-ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
-{
- struct ixlv_mac_filter *f;
- bool match = FALSE;
-
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if (cmp_etheraddr(f->macaddr, macaddr)) {
- match = TRUE;
- break;
- }
- }
-
- if (!match)
- f = NULL;
- return (f);
-}
-
-static int
-ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- int error = 0;
-
- if (sc->tag != NULL) {
-		error = bus_teardown_intr(dev, sc->res, sc->tag);
- if (error) {
- device_printf(dev, "bus_teardown_intr() for"
- " interrupt 0 failed\n");
- // return (ENXIO);
- }
- sc->tag = NULL;
- }
- if (sc->res != NULL) {
-		error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
- if (error) {
- device_printf(dev, "bus_release_resource() for"
- " interrupt 0 failed\n");
- // return (ENXIO);
- }
- sc->res = NULL;
- }
-
-	return (0);
-}
-
-/*
-** Admin Queue interrupt handler
-*/
-static void
-ixlv_msix_adminq(void *arg)
-{
- struct ixlv_sc *sc = arg;
- struct i40e_hw *hw = &sc->hw;
- u32 reg, mask;
-
- reg = rd32(hw, I40E_VFINT_ICR01);
- mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- reg = rd32(hw, I40E_VFINT_DYN_CTL01);
- reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, reg);
-
- /* schedule task */
- taskqueue_enqueue(sc->tq, &sc->aq_irq);
- return;
-}
-
-void
-ixlv_enable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- ixlv_enable_adminq_irq(hw);
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixlv_enable_queue_irq(hw, que->me);
-}
-
-void
-ixlv_disable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- ixlv_disable_adminq_irq(hw);
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixlv_disable_queue_irq(hw, que->me);
-}
-
-
-static void
-ixlv_disable_adminq_irq(struct i40e_hw *hw)
-{
- wr32(hw, I40E_VFINT_DYN_CTL01, 0);
- wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
- /* flush */
- rd32(hw, I40E_VFGEN_RSTAT);
- return;
-}
-
-static void
-ixlv_enable_adminq_irq(struct i40e_hw *hw)
-{
- wr32(hw, I40E_VFINT_DYN_CTL01,
- I40E_VFINT_DYN_CTL01_INTENA_MASK |
- I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
- wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
- /* flush */
- rd32(hw, I40E_VFGEN_RSTAT);
- return;
-}
-
-static void
-ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
-}
-
-static void
-ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
-{
- wr32(hw, I40E_VFINT_DYN_CTLN1(id),
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- rd32(hw, I40E_VFGEN_RSTAT);
- return;
-}
-
-/*
- * Get initial ITR values from tunable values.
- */
-static void
-ixlv_configure_itr(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
-
- vsi->rx_itr_setting = ixlv_rx_itr;
- vsi->tx_itr_setting = ixlv_tx_itr;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
- rxr->latency = IXL_AVE_LATENCY;
-
- wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
- txr->latency = IXL_AVE_LATENCY;
- }
-}
-
-/*
-** Provide an update to the queue RX
-** interrupt moderation value.
-*/
-static void
-ixlv_set_queue_rx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct rx_ring *rxr = &que->rxr;
- u16 rx_itr;
- u16 rx_latency = 0;
- int rx_bytes;
-
-
- /* Idle, do nothing */
- if (rxr->bytes == 0)
- return;
-
- if (ixlv_dynamic_rx_itr) {
- rx_bytes = rxr->bytes/rxr->itr;
- rx_itr = rxr->itr;
-
- /* Adjust latency range */
- switch (rxr->latency) {
- case IXL_LOW_LATENCY:
- if (rx_bytes > 10) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (rx_bytes > 20) {
- rx_latency = IXL_BULK_LATENCY;
- rx_itr = IXL_ITR_8K;
- } else if (rx_bytes <= 10) {
- rx_latency = IXL_LOW_LATENCY;
- rx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (rx_bytes <= 20) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- rxr->latency = rx_latency;
-
- if (rx_itr != rxr->itr) {
- /* do an exponential smoothing */
- rx_itr = (10 * rx_itr * rxr->itr) /
- ((9 * rx_itr) + rxr->itr);
- rxr->itr = min(rx_itr, IXL_MAX_ITR);
- wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
- que->me), rxr->itr);
- }
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->rx_itr_setting = ixlv_rx_itr;
- /* Update the hardware if needed */
- if (rxr->itr != vsi->rx_itr_setting) {
- rxr->itr = vsi->rx_itr_setting;
- wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
- que->me), rxr->itr);
- }
- }
- rxr->bytes = 0;
- rxr->packets = 0;
- return;
-}
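
The smoothing above is a harmonic mean weighted 9:1 toward the old value, so the moderation rate drifts toward its target rather than jumping; a worked example with hypothetical values:

/*
 * Worked example (hypothetical values): old rxr->itr = 100,
 * new target rx_itr = 62:
 *   smoothed = (10 * 62 * 100) / (9 * 62 + 100) = 62000 / 658 ~= 94
 * i.e. a harmonic mean weighted 9:1 toward the old value, so the
 * moderation rate converges gradually instead of oscillating.
 */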
-
-
-/*
-** Provide an update to the queue TX
-** interrupt moderation value.
-*/
-static void
-ixlv_set_queue_tx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- u16 tx_itr;
- u16 tx_latency = 0;
- int tx_bytes;
-
-
- /* Idle, do nothing */
- if (txr->bytes == 0)
- return;
-
- if (ixlv_dynamic_tx_itr) {
- tx_bytes = txr->bytes/txr->itr;
- tx_itr = txr->itr;
-
- switch (txr->latency) {
- case IXL_LOW_LATENCY:
- if (tx_bytes > 10) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (tx_bytes > 20) {
- tx_latency = IXL_BULK_LATENCY;
- tx_itr = IXL_ITR_8K;
- } else if (tx_bytes <= 10) {
- tx_latency = IXL_LOW_LATENCY;
- tx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (tx_bytes <= 20) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- txr->latency = tx_latency;
-
- if (tx_itr != txr->itr) {
- /* do an exponential smoothing */
- tx_itr = (10 * tx_itr * txr->itr) /
- ((9 * tx_itr) + txr->itr);
- txr->itr = min(tx_itr, IXL_MAX_ITR);
- wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
- que->me), txr->itr);
- }
-
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->tx_itr_setting = ixlv_tx_itr;
- /* Update the hardware if needed */
- if (txr->itr != vsi->tx_itr_setting) {
- txr->itr = vsi->tx_itr_setting;
- wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
- que->me), txr->itr);
- }
- }
- txr->bytes = 0;
- txr->packets = 0;
- return;
-}
-
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-static void
-ixlv_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- mtx_lock(&txr->mtx);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- mtx_unlock(&txr->mtx);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
-	/* Re-enable this interrupt */
- ixlv_enable_queue_irq(hw, que->me);
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX Queue Interrupt Service routine
- *
- **********************************************************************/
-static void
-ixlv_msix_que(void *arg)
-{
- struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Spurious interrupts are ignored */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- mtx_lock(&txr->mtx);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- mtx_unlock(&txr->mtx);
-
- ixlv_set_queue_rx_itr(que);
- ixlv_set_queue_tx_itr(que);
-
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixlv_enable_queue_irq(hw, que->me);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixlv_sc *sc = vsi->back;
-
- INIT_DBG_IF(ifp, "begin");
-
- mtx_lock(&sc->mtx);
-
- ixlv_update_link_status(sc);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!sc->link_up) {
- mtx_unlock(&sc->mtx);
- INIT_DBG_IF(ifp, "end: link not up");
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
- /* Hardware is always full-duplex */
- ifmr->ifm_active |= IFM_FDX;
- mtx_unlock(&sc->mtx);
- INIT_DBG_IF(ifp, "end");
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- *  media/mediaopt options with ifconfig.
- *
- **********************************************************************/
-static int
-ixlv_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DBG_IF(ifp, "begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- INIT_DBG_IF(ifp, "end");
- return (0);
-}
-
-
-/*********************************************************************
- * Multicast Initialization
- *
- *  This routine is called by init to reset multicast filtering to a fresh state.
- *
- **********************************************************************/
-
-static void
-ixlv_init_multi(struct ixl_vsi *vsi)
-{
- struct ixlv_mac_filter *f;
- struct ixlv_sc *sc = vsi->back;
- int mcnt = 0;
-
- IOCTL_DBG_IF(vsi->ifp, "begin");
-
- /* First clear any multicast filters */
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if ((f->flags & IXL_FILTER_USED)
- && (f->flags & IXL_FILTER_MC)) {
- f->flags |= IXL_FILTER_DEL;
- mcnt++;
- }
- }
- if (mcnt > 0)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
- IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
- sc);
-
- IOCTL_DBG_IF(vsi->ifp, "end");
-}
-
-static void
-ixlv_add_multi(struct ixl_vsi *vsi)
-{
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct ixlv_sc *sc = vsi->back;
- int mcnt = 0;
-
- IOCTL_DBG_IF(ifp, "begin");
-
- if_maddr_rlock(ifp);
- /*
- ** Get a count, to decide if we
- ** simply use multicast promiscuous.
- */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- mcnt++;
- }
- if_maddr_runlock(ifp);
-
- /* TODO: Remove -- cannot set promiscuous mode in a VF */
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- /* delete all multicast filters */
- ixlv_init_multi(vsi);
- sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
- IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
- sc);
- IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
- return;
- }
-
- mcnt = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (!ixlv_add_mac_filter(sc,
- (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- IXL_FILTER_MC))
- mcnt++;
- }
- if_maddr_runlock(ifp);
- /*
- ** Notify AQ task that sw filters need to be
- ** added to hw list
- */
- if (mcnt > 0)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
- IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
- sc);
-
- IOCTL_DBG_IF(ifp, "end");
-}
-
-static void
-ixlv_del_multi(struct ixl_vsi *vsi)
-{
- struct ixlv_mac_filter *f;
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct ixlv_sc *sc = vsi->back;
- int mcnt = 0;
- bool match = FALSE;
-
- IOCTL_DBG_IF(ifp, "begin");
-
- /* Search for removed multicast addresses */
- if_maddr_rlock(ifp);
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if ((f->flags & IXL_FILTER_USED)
- && (f->flags & IXL_FILTER_MC)) {
- /* check if mac address in filter is in sc's list */
- match = FALSE;
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- u8 *mc_addr =
- (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
- if (cmp_etheraddr(f->macaddr, mc_addr)) {
- match = TRUE;
- break;
- }
- }
- /* if this filter is not in the sc's list, remove it */
- if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
- f->flags |= IXL_FILTER_DEL;
- mcnt++;
- IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
- MAC_FORMAT_ARGS(f->macaddr));
- }
- else if (match == FALSE)
- IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
- MAC_FORMAT_ARGS(f->macaddr));
- }
- }
- if_maddr_runlock(ifp);
-
- if (mcnt > 0)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
- IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
- sc);
-
- IOCTL_DBG_IF(ifp, "end");
-}
-
-/*********************************************************************
- * Timer routine
- *
- *  This routine checks for link status, updates statistics,
- * and runs the watchdog check.
- *
- **********************************************************************/
-
-static void
-ixlv_local_timer(void *arg)
-{
- struct ixlv_sc *sc = arg;
- struct i40e_hw *hw = &sc->hw;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = sc->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask, val;
- s32 timer, new_timer;
-
- IXLV_CORE_LOCK_ASSERT(sc);
-
- /* If Reset is in progress just bail */
- if (sc->init_state == IXLV_RESET_PENDING)
- return;
-
- /* Check for when PF triggers a VF reset */
- val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-
- if (val != I40E_VFR_VFACTIVE
- && val != I40E_VFR_COMPLETED) {
- DDPRINTF(dev, "reset in progress! (%d)", val);
- return;
- }
-
- ixlv_request_stats(sc);
-
- /* clean and process any events */
- taskqueue_enqueue(sc->tq, &sc->aq_irq);
-
- /*
- ** Check status on the queues for a hang
- */
- mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, something in the TX path has updated
- * the watchdog, which means the TX path is still working and
- * the watchdog doesn't need to count down.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
-
- callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
- return;
-
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- sc->init_state = IXLV_RESET_REQUIRED;
- sc->watchdog_events++;
- ixlv_stop(sc);
- ixlv_init_locked(sc);
-}
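
The countdown above relies on atomic compare-and-set so that a concurrent TX-path refresh of the watchdog wins over the timer's decrement. A minimal userland sketch of the same race-safe decrement, using C11 atomics and illustrative values:

#include <stdatomic.h>
#include <stdio.h>

int
main(void)
{
	_Atomic int watchdog = 500;	/* e.g. 5 ticks of slack at hz = 100 */
	int hz = 100;
	int timer, new_timer;

	timer = atomic_load(&watchdog);
	if (timer > 0) {
		new_timer = timer - hz;
		if (new_timer <= 0) {
			atomic_store(&watchdog, -1);	/* mark queue hung */
			printf("queue appears hung\n");
		} else if (!atomic_compare_exchange_strong(&watchdog,
		    &timer, new_timer)) {
			/* CAS lost: the TX path refreshed the timer */
			printf("watchdog refreshed; skipping decrement\n");
		}
	}
	printf("watchdog = %d\n", atomic_load(&watchdog));
	return (0);
}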
-
-/*
-** Note: this routine updates the OS on the link state;
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-void
-ixlv_update_link_status(struct ixlv_sc *sc)
-{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- if (sc->link_up){
- if (vsi->link_active == FALSE) {
- if (bootverbose)
- if_printf(ifp,"Link is Up, %d Gbps\n",
- (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
- vsi->link_active = TRUE;
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (vsi->link_active == TRUE) {
- if (bootverbose)
- if_printf(ifp,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- vsi->link_active = FALSE;
- }
- }
-
- return;
-}
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixlv_stop(struct ixlv_sc *sc)
-{
- struct ifnet *ifp;
- int start;
-
- ifp = sc->vsi.ifp;
- INIT_DBG_IF(ifp, "begin");
-
- IXLV_CORE_LOCK_ASSERT(sc);
-
- ixl_vc_flush(&sc->vc_mgr);
- ixlv_disable_queues(sc);
-
- start = ticks;
- while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
- ((ticks - start) < hz/10))
- ixlv_do_adminq_locked(sc);
-
- /* Stop the local timer */
- callout_stop(&sc->timer);
-
- INIT_DBG_IF(ifp, "end");
-}
-
-
-/*********************************************************************
- *
- * Free all station queue structs.
- *
- **********************************************************************/
-static void
-ixlv_free_queues(struct ixl_vsi *vsi)
-{
- struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
- }
- free(vsi->queues, M_DEVBUF);
-}
-
-static void
-ixlv_config_rss_reg(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- struct ixl_vsi *vsi = &sc->vsi;
- u32 lut = 0;
- u64 set_hena = 0, hena;
- int i, j, que_id;
- u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
-#ifdef RSS
- u32 rss_hash_config;
-#endif
-
- /* Don't set up RSS if using a single queue */
- if (vsi->num_queues == 1) {
- wr32(hw, I40E_VFQF_HENA(0), 0);
- wr32(hw, I40E_VFQF_HENA(1), 0);
- ixl_flush(hw);
- return;
- }
-
-#ifdef RSS
- /* Fetch the configured RSS key */
- rss_getkey((uint8_t *) &rss_seed);
-#else
- ixl_get_default_rss_key(rss_seed);
-#endif
-
- /* Fill out hash function seed */
- for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
- wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
-
- /* Enable PCTYPES for RSS: */
-#ifdef RSS
- rss_hash_config = rss_gethashconfig();
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
-#else
- set_hena = IXL_DEFAULT_RSS_HENA_XL710;
-#endif
- hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
- hena |= set_hena;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
- if (j == vsi->num_queues)
- j = 0;
-#ifdef RSS
- /*
- * Fetch the RSS bucket id for the given indirection entry.
- * Cap it at the number of configured buckets (which is
- * num_queues).
- */
- que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % vsi->num_queues;
-#else
- que_id = j;
-#endif
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3) {
- wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
- DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
- }
- }
- ixl_flush(hw);
-}
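
The loop above packs four 8-bit queue indices into each 32-bit HLUT register, sliding the accumulator one byte per entry and flushing on every fourth index. A standalone sketch of that packing, with assumed constants standing in for IXL_RSS_VSI_LUT_SIZE and IXL_RSS_VF_LUT_ENTRY_MASK:

#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE	64	/* assumed VF LUT size */
#define ENTRY_MASK	0x0f	/* illustrative entry mask */

int
main(void)
{
	uint32_t lut = 0;
	int num_queues = 4, que_id;

	for (int i = 0, j = 0; i < LUT_SIZE; i++, j++) {
		if (j == num_queues)
			j = 0;
		que_id = j;	/* round-robin; RSS would remap here */
		/* Slide the 4-entry window left one byte, append the entry */
		lut = (lut << 8) | (que_id & ENTRY_MASK);
		/* Every fourth entry, one register's worth is complete */
		if ((i & 3) == 3)
			printf("HLUT(%2d) = %#010x\n", i >> 2, lut);
	}
	return (0);
}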
-
-static void
-ixlv_config_rss_pf(struct ixlv_sc *sc)
-{
- ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
- IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
-
- ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
- IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
-
- ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
- IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
-}
-
-/*
-** ixlv_config_rss - setup RSS
-**
-** RSS keys and table are cleared on VF reset.
-*/
-static void
-ixlv_config_rss(struct ixlv_sc *sc)
-{
- if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
- DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
- ixlv_config_rss_reg(sc);
- } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
- ixlv_config_rss_pf(sc);
- } else
- device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
-}
-
-/*
-** This routine refreshes vlan filters, called by init;
-** it scans the filter table and then updates the AQ.
-*/
-static void
-ixlv_setup_vlan_filters(struct ixlv_sc *sc)
-{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixlv_vlan_filter *f;
- int cnt = 0;
-
- if (vsi->num_vlans == 0)
- return;
- /*
- ** Scan the filter table for vlan entries,
- ** and if found call for the AQ update.
- */
- SLIST_FOREACH(f, sc->vlan_filters, next)
- if (f->flags & IXL_FILTER_ADD)
- cnt++;
- if (cnt > 0)
- ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
- IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
-}
-
-
-/*
-** This routine adds new MAC filters to the sc's list;
-** these are later added to hardware by sending a virtual
-** channel message.
-*/
-static int
-ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
-{
- struct ixlv_mac_filter *f;
-
- /* Does one already exist? */
- f = ixlv_find_mac_filter(sc, macaddr);
- if (f != NULL) {
- IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
- MAC_FORMAT_ARGS(macaddr));
- return (EEXIST);
- }
-
- /* If not, get a new empty filter */
- f = ixlv_get_mac_filter(sc);
- if (f == NULL) {
- if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
- __func__);
- return (ENOMEM);
- }
-
- IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
- MAC_FORMAT_ARGS(macaddr));
-
- bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
- f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- f->flags |= flags;
- return (0);
-}
-
-/*
-** Marks a MAC filter for deletion.
-*/
-static int
-ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
-{
- struct ixlv_mac_filter *f;
-
- f = ixlv_find_mac_filter(sc, macaddr);
- if (f == NULL)
- return (ENOENT);
-
- f->flags |= IXL_FILTER_DEL;
- return (0);
-}
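
Both helpers only mark filters in the software list; the pending IXL_FILTER_ADD/IXL_FILTER_DEL flags are pushed to hardware later by a queued virtual-channel command. A toy model of that flag lifecycle, with illustrative flag names and values:

#include <stdio.h>

#define F_USED	0x01	/* slot holds a valid MAC */
#define F_ADD	0x02	/* pending add to hardware */
#define F_DEL	0x04	/* pending delete from hardware */

int
main(void)
{
	unsigned flags = 0;

	flags |= (F_ADD | F_USED);	/* add path marks the filter */
	printf("after add mark: %#x\n", flags);

	flags &= ~F_ADD;		/* AQ task flushed the add */
	printf("after flush:    %#x\n", flags);

	flags |= F_DEL;			/* delete path marks it again */
	printf("after del mark: %#x\n", flags);
	return (0);
}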
-
-/*
-** Tasklet handler for MSIX Adminq interrupts
-** - done outside interrupt context since it might sleep
-*/
-static void
-ixlv_do_adminq(void *context, int pending)
-{
- struct ixlv_sc *sc = context;
-
- mtx_lock(&sc->mtx);
- ixlv_do_adminq_locked(sc);
- mtx_unlock(&sc->mtx);
- return;
-}
-
-static void
-ixlv_do_adminq_locked(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
- device_t dev = sc->dev;
- u16 result = 0;
- u32 reg, oldreg;
- i40e_status ret;
- bool aq_error = false;
-
- IXLV_CORE_LOCK_ASSERT(sc);
-
- event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = sc->aq_buffer;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
-
- do {
- ret = i40e_clean_arq_element(hw, &event, &result);
- if (ret)
- break;
- ixlv_vc_completion(sc, v_msg->v_opcode,
- v_msg->v_retval, event.msg_buf, event.msg_len);
- if (result != 0)
- bzero(event.msg_buf, IXL_AQ_BUF_SZ);
- } while (result);
-
- /* check for Admin queue errors */
- oldreg = reg = rd32(hw, hw->aq.arq.len);
- if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
- device_printf(dev, "ARQ VF Error detected\n");
- reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
- aq_error = true;
- }
- if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
- device_printf(dev, "ARQ Overflow Error detected\n");
- reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
- aq_error = true;
- }
- if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
- device_printf(dev, "ARQ Critical Error detected\n");
- reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
- aq_error = true;
- }
- if (oldreg != reg)
- wr32(hw, hw->aq.arq.len, reg);
-
- oldreg = reg = rd32(hw, hw->aq.asq.len);
- if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
- device_printf(dev, "ASQ VF Error detected\n");
- reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
- aq_error = true;
- }
- if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
- device_printf(dev, "ASQ Overflow Error detected\n");
- reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
- aq_error = true;
- }
- if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
- device_printf(dev, "ASQ Critical Error detected\n");
- reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
- aq_error = true;
- }
- if (oldreg != reg)
- wr32(hw, hw->aq.asq.len, reg);
-
- if (aq_error) {
- /* Need to reset adapter */
- device_printf(dev, "WARNING: Resetting!\n");
- sc->init_state = IXLV_RESET_REQUIRED;
- ixlv_stop(sc);
- ixlv_init_locked(sc);
- }
- ixlv_enable_adminq_irq(hw);
-}
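
The register handling above follows a read-test-clear pattern: read the queue-length register once, clear every asserted error bit in the local copy, and write back only if anything changed, so a clean register never takes a write. A hardware-free sketch of the pattern, with invented mask values standing in for the I40E_VF_ARQLEN1_*/ATQLEN1_* bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_VFE		(1u << 28)	/* illustrative error masks */
#define ERR_OVFL	(1u << 29)
#define ERR_CRIT	(1u << 30)

static bool
clear_aq_errors(uint32_t *regp)
{
	uint32_t oldreg, reg;
	bool aq_error = false;

	oldreg = reg = *regp;		/* single "register" read */
	if (reg & ERR_VFE) {
		printf("VF error detected\n");
		reg &= ~ERR_VFE;
		aq_error = true;
	}
	if (reg & ERR_OVFL) {
		printf("overflow detected\n");
		reg &= ~ERR_OVFL;
		aq_error = true;
	}
	if (reg & ERR_CRIT) {
		printf("critical error detected\n");
		reg &= ~ERR_CRIT;
		aq_error = true;
	}
	if (oldreg != reg)		/* write back only on change */
		*regp = reg;
	return (aq_error);
}

int
main(void)
{
	uint32_t reg = 0x40 | ERR_OVFL;	/* pretend register value */

	if (clear_aq_errors(&reg))
		printf("reset required; reg now %#010x\n", reg);
	return (0);
}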
-
-static void
-ixlv_add_sysctls(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi = &sc->vsi;
- struct i40e_eth_stats *es = &vsi->eth_stats;
-
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
-
- struct sysctl_oid *vsi_node, *queue_node;
- struct sysctl_oid_list *vsi_list, *queue_list;
-
-#define QUEUE_NAME_LEN 32
- char queue_namebuf[QUEUE_NAME_LEN];
-
- struct ixl_queue *queues = vsi->queues;
- struct tx_ring *txr;
- struct rx_ring *rxr;
-
- /* Driver statistics sysctls */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
- CTLFLAG_RD, &sc->watchdog_events,
- "Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
- CTLFLAG_RD, &sc->admin_irq,
- "Admin Queue IRQ Handled");
-
- /* VSI statistics sysctls */
- vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
- CTLFLAG_RD, NULL, "VSI-specific statistics");
- vsi_list = SYSCTL_CHILDREN(vsi_node);
-
- struct ixl_sysctl_info ctls[] =
- {
- {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
- {&es->rx_unicast, "ucast_pkts_rcvd",
- "Unicast Packets Received"},
- {&es->rx_multicast, "mcast_pkts_rcvd",
- "Multicast Packets Received"},
- {&es->rx_broadcast, "bcast_pkts_rcvd",
- "Broadcast Packets Received"},
- {&es->rx_discards, "rx_discards", "Discarded RX packets"},
- {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
- {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
- {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
- {&es->tx_multicast, "mcast_pkts_txd",
- "Multicast Packets Transmitted"},
- {&es->tx_broadcast, "bcast_pkts_txd",
- "Broadcast Packets Transmitted"},
- {&es->tx_errors, "tx_errors", "TX packet errors"},
- // end
- {0,0,0}
- };
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-
- /* Queue sysctls */
- for (int q = 0; q < vsi->num_queues; q++) {
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- txr = &(queues[q].txr);
- rxr = &(queues[q].rxr);
-
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
- "m_defrag() failed");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
- CTLFLAG_RD, &(queues[q].dropped_pkts),
- "Driver dropped packets");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(queues[q].irqs),
- "irqs on this queue");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &(queues[q].tso),
- "TSO");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
- CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &(txr->no_desc),
- "Queue No Descriptor Available");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &(txr->total_packets),
- "Queue Packets Transmitted");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->tx_bytes),
- "Queue Bytes Transmitted");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &(rxr->rx_packets),
- "Queue Packets Received");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(rxr->rx_bytes),
- "Queue Bytes Received");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
- CTLFLAG_RD, &(rxr->itr), 0,
- "Queue Rx ITR Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
- CTLFLAG_RD, &(txr->itr), 0,
- "Queue Tx ITR Interval");
-
-#ifdef IXL_DEBUG
- /* Examine queue state */
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixlv_sysctl_qtx_tail_handler, "IU",
- "Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixlv_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
- CTLFLAG_RD, &(txr->watchdog_timer), 0,
- "Ticks before watchdog event is triggered");
-#endif
- }
-}
-
-static void
-ixlv_init_filters(struct ixlv_sc *sc)
-{
- sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- SLIST_INIT(sc->mac_filters);
- sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- SLIST_INIT(sc->vlan_filters);
- return;
-}
-
-static void
-ixlv_free_filters(struct ixlv_sc *sc)
-{
- struct ixlv_mac_filter *f;
- struct ixlv_vlan_filter *v;
-
- while (!SLIST_EMPTY(sc->mac_filters)) {
- f = SLIST_FIRST(sc->mac_filters);
- SLIST_REMOVE_HEAD(sc->mac_filters, next);
- free(f, M_DEVBUF);
- }
- while (!SLIST_EMPTY(sc->vlan_filters)) {
- v = SLIST_FIRST(sc->vlan_filters);
- SLIST_REMOVE_HEAD(sc->vlan_filters, next);
- free(v, M_DEVBUF);
- }
- return;
-}
-
-#ifdef IXL_DEBUG
-/**
- * ixlv_sysctl_qtx_tail_handler
- * Retrieves I40E_QTX_TAIL1 value from hardware
- * for a sysctl.
- */
-static int
-ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->txr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-
-/**
- * ixlv_sysctl_qrx_tail_handler
- * Retrieves I40E_QRX_TAIL1 value from hardware
- * for a sysctl.
- */
-static int
-ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->rxr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-#endif
-
Index: sys/dev/ixl/iflib_ixlv.h
===================================================================
--- sys/dev/ixl/iflib_ixlv.h
+++ sys/dev/ixl/iflib_ixlv.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,8 +38,7 @@
#include "ixlv_vc_mgr.h"
-#define IXLV_AQ_MAX_ERR 30
-#define IXLV_MAX_INIT_WAIT 120
+#define IXLV_AQ_MAX_ERR 200
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
@@ -144,10 +143,10 @@
u32 qbase;
u32 admvec;
struct timeout_task timeout;
+#ifdef notyet
struct task aq_irq;
struct task aq_sched;
- struct taskqueue *tq;
-
+#endif
struct ixl_vsi vsi;
/* Filter lists */
@@ -186,7 +185,6 @@
u8 aq_buffer[IXL_AQ_BUF_SZ];
};
-#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
/*
** This checks for a zero mac addr, something that will be likely
** unless the Admin on the Host has created one.
Index: sys/dev/ixl/iflib_ixlvc.c
===================================================================
--- sys/dev/ixl/iflib_ixlvc.c
+++ sys/dev/ixl/iflib_ixlvc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -178,11 +178,8 @@
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
- device_printf(dev, "Unable to send opcode %s to PF, "
- "status %s, aq error %s\n",
- ixl_vc_opcode_str(op),
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ device_printf(dev, "Unable to send opcode %d to PF, "
+ "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
return err;
}
@@ -874,7 +871,7 @@
{
struct i40e_virtchnl_rss_hena hena;
- hena.hena = IXL_DEFAULT_RSS_HENA_X722;
+ hena.hena = IXL_DEFAULT_RSS_HENA;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
@@ -975,8 +972,8 @@
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
- "%s: AQ returned error %s to our request %s!\n",
- __func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
+ "%s: AQ returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
}
#ifdef IXL_DEBUG
@@ -1058,8 +1055,8 @@
default:
#ifdef IXL_DEBUG
device_printf(dev,
- "%s: Received unexpected message %s from PF.\n",
- __func__, ixl_vc_opcode_str(v_opcode));
+ "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
#endif
break;
}
Index: sys/dev/ixl/ixl.h
===================================================================
--- sys/dev/ixl/ixl.h
+++ sys/dev/ixl/ixl.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2016, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,14 +32,8 @@
******************************************************************************/
/*$FreeBSD$*/
-
-#ifndef _IXL_H_
-#define _IXL_H_
-
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-#include "opt_ixl.h"
+#ifndef _IFLIB_IXL_H_
+#define _IFLIB_IXL_H_
#include <sys/param.h>
#include <sys/systm.h>
@@ -52,7 +46,6 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
-#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -61,6 +54,7 @@
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
+#include <net/iflib.h>
#include <net/bpf.h>
#include <net/if_types.h>
@@ -102,80 +96,10 @@
#include <netinet/in_rss.h>
#endif
+#include "ifdi_if.h"
#include "i40e_type.h"
#include "i40e_prototype.h"
-
-#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_FORMAT_ARGS(mac_addr) \
- (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
- (mac_addr)[4], (mac_addr)[5]
-#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
-
-#ifdef IXL_DEBUG
-
-#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
-#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
-#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
-
-/* Defines for printing generic debug information */
-#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
-#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
-#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
-
-/* Defines for printing specific debug information */
-#define DEBUG_INIT 1
-#define DEBUG_IOCTL 1
-#define DEBUG_HW 1
-
-#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
-#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
-#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
-
-#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
-#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
- if_printf(ifp, S "\n", ##__VA_ARGS__)
-#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
-
-#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
-
-#else /* no IXL_DEBUG */
-#define DEBUG_INIT 0
-#define DEBUG_IOCTL 0
-#define DEBUG_HW 0
-
-#define DPRINTF(...)
-#define DDPRINTF(...)
-#define IDPRINTF(...)
-
-#define INIT_DEBUGOUT(...)
-#define INIT_DBG_DEV(...)
-#define INIT_DBG_IF(...)
-#define IOCTL_DEBUGOUT(...)
-#define IOCTL_DBG_IF2(...)
-#define IOCTL_DBG_IF(...)
-#define HW_DEBUGOUT(...)
-#endif /* IXL_DEBUG */
-
-enum ixl_dbg_mask {
- IXL_DBG_INFO = 0x00000001,
- IXL_DBG_EN_DIS = 0x00000002,
- IXL_DBG_AQ = 0x00000004,
- IXL_DBG_NVMUPD = 0x00000008,
-
- IXL_DBG_IOCTL_KNOWN = 0x00000010,
- IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
- IXL_DBG_IOCTL_ALL = 0x00000030,
-
- I40E_DEBUG_RSS = 0x00000100,
-
- IXL_DBG_IOV = 0x00001000,
- IXL_DBG_IOV_VC = 0x00002000,
-
- IXL_DBG_SWITCH_INFO = 0x00010000,
- IXL_DBG_I2C = 0x00020000,
-
- IXL_DBG_ALL = 0xFFFFFFFF
-};
+#include "ixl_debug.h"
/* Tunables */
@@ -187,7 +111,7 @@
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
-#define IXL_DEFAULT_RING 1024
+#define DEFAULT_RING 1024
#define IXL_MAX_RING 8160
#define IXL_MIN_RING 32
#define IXL_RING_INCREMENT 32
@@ -221,7 +145,10 @@
#define IXL_MSIX_BAR 3
#define IXL_ADM_LIMIT 2
-#define IXL_TSO_SIZE 65535
+// TODO: Find out which TSO_SIZE to use
+//#define IXL_TSO_SIZE 65535
+#define IXL_TSO_SIZE ((255*1024)-1)
+#define IXL_TX_BUF_SZ ((u32) 1514)
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
#define IXL_RX_LIMIT 512
@@ -234,7 +161,6 @@
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
-#define IXL_MIN_TSO_MSS 64
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
@@ -256,15 +182,13 @@
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
/*
- * Interrupt Moderation parameters
- * Multiply ITR values by 2 for real ITR value
+ * Interrupt Moderation parameters
*/
-#define IXL_MAX_ITR 0x0FF0
+#define IXL_MAX_ITR 0x07FF
#define IXL_ITR_100K 0x0005
#define IXL_ITR_20K 0x0019
#define IXL_ITR_8K 0x003E
#define IXL_ITR_4K 0x007A
-#define IXL_ITR_1K 0x01F4
#define IXL_ITR_DYNAMIC 0x8000
#define IXL_LOW_LATENCY 0
#define IXL_AVE_LATENCY 1
@@ -317,7 +241,7 @@
#define IXL_END_OF_INTR_LNKLST 0x7FF
-#define IXL_DEFAULT_RSS_HENA_BASE (\
+#define IXL_DEFAULT_RSS_HENA (\
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
@@ -330,17 +254,13 @@
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-#define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE
-
-#define IXL_DEFAULT_RSS_HENA_X722 (\
- IXL_DEFAULT_RSS_HENA_BASE | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
+#define IXL_CAPS \
+ (IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
+ IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | \
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | \
+ IFCAP_VLAN_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU | IFCAP_LRO)
+#if 0
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -350,6 +270,7 @@
#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
+#endif
/* Pre-11 counter(9) compatibility */
#if __FreeBSD_version >= 1100036
@@ -397,22 +318,6 @@
unsigned int index;
} ixl_vendor_info_t;
-
-struct ixl_tx_buf {
- u32 eop_index;
- struct mbuf *m_head;
- bus_dmamap_t map;
- bus_dma_tag_t tag;
-};
-
-struct ixl_rx_buf {
- struct mbuf *m_head;
- struct mbuf *m_pack;
- struct mbuf *fmp;
- bus_dmamap_t hmap;
- bus_dmamap_t pmap;
-};
-
/*
** This struct has multiple uses, multicast
** addresses, vlans, and mac filters all use it.
@@ -428,25 +333,17 @@
* The Transmit ring control struct
*/
struct tx_ring {
- struct ixl_queue *que;
- struct mtx mtx;
+ struct ixl_tx_queue *que;
u32 tail;
- struct i40e_tx_desc *base;
- struct i40e_dma_mem dma;
+ struct i40e_tx_desc *tx_base;
u16 next_avail;
- u16 next_to_clean;
+ u64 tx_paddr;
+
u16 atr_rate;
u16 atr_count;
u32 itr;
u32 latency;
- struct ixl_tx_buf *buffers;
- volatile u16 avail;
- u32 cmd;
- bus_dma_tag_t tx_tag;
- bus_dma_tag_t tso_tag;
- char mtx_name[16];
- struct buf_ring *br;
- s32 watchdog_timer;
+ u32 me;
/* Used for Dynamic ITR calculation */
u32 packets;
@@ -454,97 +351,100 @@
/* Soft Stats */
u64 tx_bytes;
- u64 no_desc;
u64 total_packets;
};
-
/*
* The Receive ring control struct
*/
struct rx_ring {
- struct ixl_queue *que;
- struct mtx mtx;
- union i40e_rx_desc *base;
- struct i40e_dma_mem dma;
- struct lro_ctrl lro;
- bool lro_enabled;
- bool hdr_split;
+ struct ixl_rx_queue *que;
+ union i40e_rx_desc *rx_base;
+ uint64_t rx_paddr;
bool discard;
- u32 next_refresh;
- u32 next_check;
u32 itr;
u32 latency;
- char mtx_name[16];
- struct ixl_rx_buf *buffers;
u32 mbuf_sz;
u32 tail;
- bus_dma_tag_t htag;
- bus_dma_tag_t ptag;
+ u32 me;
/* Used for Dynamic ITR calculation */
u32 packets;
u32 bytes;
/* Soft stats */
+ // TODO: Remove since no header split
u64 split;
u64 rx_packets;
u64 rx_bytes;
+ // TODO: Change to discarded?
u64 desc_errs;
- u64 not_done;
};
/*
-** Driver queue struct: this is the interrupt container
-** for the associated tx and rx ring pair.
+** Driver queue structs
+// TODO: Add to this comment?
*/
-struct ixl_queue {
+struct ixl_tx_queue {
struct ixl_vsi *vsi;
- u32 me;
- u32 msix; /* This queue's MSIX vector */
- u32 eims; /* This queue's EIMS bit */
- struct resource *res;
- void *tag;
- int num_desc; /* both tx and rx */
+ // TODO: Maybe this needs to get removed
+ int busy;
struct tx_ring txr;
- struct rx_ring rxr;
- struct task task;
- struct task tx_task;
- struct taskqueue *tq;
-
- /* Queue stats */
+ /* Stats */
u64 irqs;
u64 tso;
- u64 mbuf_defrag_failed;
- u64 mbuf_hdr_failed;
- u64 mbuf_pkt_failed;
- u64 tx_dmamap_failed;
- u64 dropped_pkts;
- u64 mss_too_small;
};
+struct ixl_rx_queue {
+ struct ixl_vsi *vsi;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; // TODO: Change var name; there is no EIMS in fortville
+ struct rx_ring rxr;
+ struct if_irq que_irq; // TODO: Add comment
+ /* Stats */
+ u64 irqs;
+};
+
+#define DOWNCAST(sctx) ((struct ixl_vsi *)(sctx)) // TODO: Check if ixgbe has something similar
+
/*
** Virtual Station Interface
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
+
struct ixl_vsi {
- void *back;
+ if_ctx_t ctx;
+ if_softc_ctx_t shared;
+
struct ifnet *ifp;
- device_t dev;
+ struct ifmedia *media;
+
+// TODO: I don't like these defines
+#define num_rx_queues shared->isc_nrxqsets
+#define num_tx_queues shared->isc_ntxqsets
+// This conflicts with a shared code struct definition
+// #define max_frame_size shared->isc_max_frame_size
+
+ void *back;
struct i40e_hw *hw;
- struct ifmedia media;
- enum i40e_vsi_type type;
+ // TODO: Remove?
+ u64 que_mask;
int id;
- u16 num_queues;
+ u16 vsi_num;
+ // TODO: Replace
+ u16 msix_base; /* station base MSIX vector */
+ // TODO: Replace
+ u16 first_queue; /* first queue in PF queue space */
u32 rx_itr_setting;
u32 tx_itr_setting;
- u16 max_frame_size;
-
- struct ixl_queue *queues; /* head of queues */
-
- u16 vsi_num;
+ struct ixl_tx_queue *tx_queues; /* TX queue array */
+ struct ixl_rx_queue *rx_queues; /* RX queue array */
bool link_active;
u16 seid;
+ u32 link_speed;
+
+ struct if_irq irq; // TODO: Comment
+
u16 uplink_seid;
u16 downlink_seid;
@@ -555,10 +455,9 @@
/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
u16 num_vlans;
+ // TODO: Maybe these things should get their own struct
/* Per-VSI stats from hardware */
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
@@ -581,26 +480,12 @@
u64 hw_filters_add;
/* Misc. */
+ u64 active_queues;
u64 flags;
- struct sysctl_oid *vsi_node;
+ struct sysctl_oid *vsi_node; // TODO: Comment?
};
/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-ixl_rx_unrefreshed(struct ixl_queue *que)
-{
- struct rx_ring *rxr = &que->rxr;
-
- if (rxr->next_check > rxr->next_refresh)
- return (rxr->next_check - rxr->next_refresh - 1);
- else
- return ((que->num_desc + rxr->next_check) -
- rxr->next_refresh - 1);
-}
-
-/*
** Find the next available unused filter
*/
static inline struct ixl_mac_filter *
@@ -623,14 +508,7 @@
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
- bool cmp = FALSE;
-
- if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
- (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
- (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
- cmp = TRUE;
-
- return (cmp);
+ return (bcmp(ea1, ea2, ETHER_ADDR_LEN) == 0);
}
/*
@@ -664,30 +542,32 @@
char *description;
};
-static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
/*********************************************************************
* TXRX Function prototypes
*********************************************************************/
-int ixl_allocate_tx_data(struct ixl_queue *);
-int ixl_allocate_rx_data(struct ixl_queue *);
-void ixl_init_tx_ring(struct ixl_queue *);
-int ixl_init_rx_ring(struct ixl_queue *);
-bool ixl_rxeof(struct ixl_queue *, int);
-bool ixl_txeof(struct ixl_queue *);
-void ixl_free_que_tx(struct ixl_queue *);
-void ixl_free_que_rx(struct ixl_queue *);
-
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_qflush(struct ifnet *);
+/*********************************************************************
+ * Common Function prototypes
+ *********************************************************************/
+
+/*** IFLIB interface ***/
+//int ixl_if_media_change(if_ctx_t ctx);
+//static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+//static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+//void ixl_if_queues_free(if_ctx_t ctx);
+
/* Common function prototypes between PF/VF driver */
-#if __FreeBSD_version >= 1100000
-uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
+void ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que);
+void ixl_set_queue_rx_itr(struct ixl_rx_queue *que);
+#if 0
+#if __FreeBSD_version >= 1020000
+static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
+#endif
#endif
-void ixl_get_default_rss_key(u32 *);
-#endif /* _IXL_H_ */
+void ixl_get_default_rss_key(u32 *);
+#endif /* _IFLIB_IXL_H_ */
Index: sys/dev/ixl/ixl_debug.h
===================================================================
--- /dev/null
+++ sys/dev/ixl/ixl_debug.h
@@ -0,0 +1,109 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2016, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXL_DEBUG_H_
+#define _IXL_DEBUG_H_
+
+#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_FORMAT_ARGS(mac_addr) \
+ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
+ (mac_addr)[4], (mac_addr)[5]
+#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
+
+#ifdef IXL_DEBUG
+
+#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
+
+/* Defines for printing generic debug information */
+#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
+#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
+
+/* Defines for printing specific debug information */
+#define DEBUG_INIT 1
+#define DEBUG_IOCTL 1
+#define DEBUG_HW 1
+
+#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
+#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
+ if_printf(ifp, S "\n", ##__VA_ARGS__)
+#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
+
+#else /* no IXL_DEBUG */
+#define DEBUG_INIT 0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW 0
+
+#define DPRINTF(...)
+#define DDPRINTF(...)
+#define IDPRINTF(...)
+
+#define INIT_DEBUGOUT(...)
+#define INIT_DBG_DEV(...)
+#define INIT_DBG_IF(...)
+#define IOCTL_DEBUGOUT(...)
+#define IOCTL_DBG_IF2(...)
+#define IOCTL_DBG_IF(...)
+#define HW_DEBUGOUT(...)
+#endif /* IXL_DEBUG */
+
+enum ixl_dbg_mask {
+ IXL_DBG_INFO = 0x00000001,
+ IXL_DBG_EN_DIS = 0x00000002,
+ IXL_DBG_AQ = 0x00000004,
+ IXL_DBG_NVMUPD = 0x00000008,
+
+ IXL_DBG_IOCTL_KNOWN = 0x00000010,
+ IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
+ IXL_DBG_IOCTL_ALL = 0x00000030,
+
+ I40E_DEBUG_RSS = 0x00000100,
+
+ IXL_DBG_IOV = 0x00001000,
+ IXL_DBG_IOV_VC = 0x00002000,
+
+ IXL_DBG_SWITCH_INFO = 0x00010000,
+
+ IXL_DBG_ALL = 0xFFFFFFFF
+};
+
+#endif /* _IXL_DEBUG_H_ */
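
The masks above gate every debug message on a per-driver bitmask so categories can be toggled at runtime. A userland model of that gating, with abbreviated names and mask values, showing the va_list handling such a helper needs:

#include <stdarg.h>
#include <stdio.h>

enum dbg_mask { DBG_INFO = 0x1, DBG_AQ = 0x4, DBG_IOV = 0x1000 };

static unsigned dbg_mask = DBG_INFO | DBG_AQ;	/* assumed sysctl tunable */

static void
dbg(unsigned mask, const char *fmt, ...)
{
	va_list ap;

	if (!(mask & dbg_mask))
		return;		/* category disabled */
	va_start(ap, fmt);
	vprintf(fmt, ap);	/* a va_list requires the v* printf variant */
	va_end(ap);
}

int
main(void)
{
	dbg(DBG_AQ, "ARQ event, opcode %d\n", 17);	/* printed */
	dbg(DBG_IOV, "VF %d reset\n", 0);		/* suppressed */
	return (0);
}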
Index: sys/dev/ixl/ixl_pf.h
===================================================================
--- sys/dev/ixl/ixl_pf.h
+++ sys/dev/ixl/ixl_pf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,9 +32,8 @@
******************************************************************************/
/*$FreeBSD$*/
-
-#ifndef _IXL_PF_H_
-#define _IXL_PF_H_
+#ifndef _IFLIB_IXL_PF_H_
+#define _IFLIB_IXL_PF_H_
#include "ixl.h"
#include "ixl_pf_qmgr.h"
@@ -61,59 +60,34 @@
/* Physical controller structure */
struct ixl_pf {
+ // TODO: Do we want to keep this comment?
+ /* To make it interchangeable, put it first */
+ struct ixl_vsi vsi;
+
struct i40e_hw hw;
struct i40e_osdep osdep;
device_t dev;
- struct ixl_vsi vsi;
struct resource *pci_mem;
- struct resource *msix_mem;
-
- /*
- * Interrupt resources: this set is
- * either used for legacy, or for Link
- * when doing MSIX
- */
- void *tag;
- struct resource *res;
- struct callout timer;
int msix;
-#ifdef IXL_IW
- int iw_msix;
- bool iw_enabled;
-#endif
int if_flags;
+
+ u32 qbase;
+ u32 admvec;
int state;
- bool init_in_progress;
- u8 supported_speeds;
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;
- /* Tunable values */
- bool enable_msix;
- int max_queues;
- int ringsz;
- bool enable_tx_fc_filter;
- int dynamic_rx_itr;
- int dynamic_tx_itr;
- int tx_itr;
- int rx_itr;
-
- struct mtx pf_mtx;
-
- u32 qbase;
- u32 admvec;
- struct task adminq;
- struct taskqueue *tq;
-
bool link_up;
u32 link_speed;
int advertised_speed;
int fc; /* link flow ctrl setting */
+
+ /* Debug levels */
enum ixl_dbg_mask dbg_mask;
- bool has_i2c;
+ int vc_debug_lvl;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -129,7 +103,16 @@
int num_vfs;
uint16_t veb_seid;
struct task vflr_task;
- int vc_debug_lvl;
+
+ /* Tunable values */
+ bool enable_msix;
+ int max_queues;
+ int ringsz;
+ bool enable_tx_fc_filter;
+ int dynamic_rx_itr;
+ int dynamic_tx_itr;
+ int tx_itr;
+ int rx_itr;
};
/*
@@ -152,10 +135,8 @@
"\t 0x2 - advertise 1G\n" \
"\t 0x4 - advertise 10G\n" \
"\t 0x8 - advertise 20G\n" \
-"\t0x10 - advertise 25G\n" \
-"\t0x20 - advertise 40G\n\n" \
-"Set to 0 to disable link.\n" \
-"Use \"sysctl -x\" to view flags properly."
+"\t0x10 - advertise 40G\n\n" \
+"Set to 0 to disable link."
#define IXL_SYSCTL_HELP_FC \
"\nSet flow control mode using the values below.\n" \
@@ -180,42 +161,22 @@
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*** Functions / Macros ***/
-/* Adjust the level here to 10 or over to print stats messages */
-#define I40E_VC_DEBUG(p, level, ...) \
- do { \
- if (level < 10) \
- ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \
+#define I40E_VC_DEBUG(pf, level, ...) \
+ do { \
+ if ((pf)->vc_debug_lvl >= (level)) \
+ device_printf((pf)->dev, __VA_ARGS__); \
} while (0)
#define i40e_send_vf_nack(pf, vf, op, st) \
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
-#define IXL_PF_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
-#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
-#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
-#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
-#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
-
-/* Debug printing */
-#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
-void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
-
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
-/* For netmap(4) compatibility */
-#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
-
-/*
- * PF-only function declarations
- */
-
+/* PF-only function declarations */
void ixl_set_busmaster(device_t);
-void ixl_set_msix_enable(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
-char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
void ixl_handle_que(void *context, int pending);
@@ -223,9 +184,17 @@
void ixl_local_timer(void *);
void ixl_register_vlan(void *, struct ifnet *, u16);
void ixl_unregister_vlan(void *, struct ifnet *, u16);
-void ixl_intr(void *);
-void ixl_msix_que(void *);
-void ixl_msix_adminq(void *);
+
+/* IFLIB interface shared */
+// TODO: Maybe make a public interface to these instead?
+void ixl_if_init(if_ctx_t ctx);
+void ixl_if_stop(if_ctx_t ctx);
+
+/* Interrupt handlers */
+int ixl_intr(void *);
+int ixl_msix_que(void *);
+int ixl_msix_adminq(void *);
+
void ixl_do_adminq(void *, int);
int ixl_res_alloc_cmp(const void *, const void *);
@@ -242,10 +211,13 @@
int ixl_media_change(struct ifnet *);
int ixl_ioctl(struct ifnet *, u_long, caddr_t);
+void ixl_enable_adminq(struct i40e_hw *);
+void ixl_get_bus_info(struct i40e_hw *, device_t);
+void ixl_disable_adminq(struct i40e_hw *);
void ixl_enable_queue(struct i40e_hw *, int);
void ixl_disable_queue(struct i40e_hw *, int);
-void ixl_enable_intr0(struct i40e_hw *);
-void ixl_disable_intr0(struct i40e_hw *);
+void ixl_enable_legacy(struct i40e_hw *);
+void ixl_disable_legacy(struct i40e_hw *);
void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
u64 *, u64 *);
@@ -255,9 +227,7 @@
void ixl_stop(struct ixl_pf *);
void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
int ixl_get_hw_capabilities(struct ixl_pf *);
-void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
-int ixl_allocate_pci_resources(struct ixl_pf *);
int ixl_setup_stations(struct ixl_pf *);
int ixl_switch_config(struct ixl_pf *);
void ixl_stop_locked(struct ixl_pf *);
@@ -273,7 +243,7 @@
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
-int ixl_setup_legacy(struct ixl_pf *);
+//int ixl_assign_vsi_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
@@ -288,16 +258,14 @@
void ixl_add_hw_stats(struct ixl_pf *);
void ixl_update_stats_counters(struct ixl_pf *);
void ixl_pf_reset_stats(struct ixl_pf *);
-void ixl_get_bus_info(struct ixl_pf *pf);
-int ixl_aq_get_link_status(struct ixl_pf *,
- struct i40e_aqc_get_link_status *);
+void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
-void ixl_set_queue_rx_itr(struct ixl_queue *);
-void ixl_set_queue_tx_itr(struct ixl_queue *);
+//void ixl_set_queue_rx_itr(struct ixl_rx_queue *);
+//void ixl_set_queue_tx_itr(struct ixl_queue *);
void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
@@ -314,9 +282,10 @@
int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
+void ixl_disable_intr(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
-void ixl_add_ifmedia(struct ixl_vsi *, u64);
+void ixl_add_ifmedia(struct ixl_vsi *, u32);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
@@ -337,13 +306,4 @@
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
-/*
- * I2C Function prototypes
- */
-int ixl_find_i2c_interface(struct ixl_pf *);
-s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
- u8 dev_addr, u8 *data);
-s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
- u8 dev_addr, u8 data);
-
-#endif /* _IXL_PF_H_ */
+#endif /* _IFLIB_IXL_PF_H */
Index: sys/dev/ixl/ixl_pf_iov.c
===================================================================
--- sys/dev/ixl/ixl_pf_iov.c
+++ sys/dev/ixl/ixl_pf_iov.c
@@ -157,7 +157,7 @@
htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
- /* ERJ: Only scattered allocation is supported for VFs right now */
+ /* XXX: Only scattered allocation is supported for VFs right now */
for (i = 0; i < vf->qtag.num_active; i++)
vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
@@ -172,8 +172,10 @@
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
vf->vsi.seid = vsi_ctx.seid;
vf->vsi.vsi_num = vsi_ctx.vsi_number;
- // vf->vsi.first_queue = vf->qtag.qidx[0];
- vf->vsi.num_queues = vf->qtag.num_active;
+ // TODO: How to deal with num tx queues / num rx queues split?
+ // I don't think just assigning this variable is going to work
+ vf->vsi.num_rx_queues = vf->qtag.num_active;
+ vf->vsi.num_tx_queues = vf->qtag.num_active;
code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
if (code != I40E_SUCCESS)
@@ -204,7 +206,7 @@
vf->vsi.hw_filters_add = 0;
vf->vsi.hw_filters_del = 0;
- ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ // ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
ixl_reconfigure_filters(&vf->vsi);
return (0);
@@ -253,7 +255,7 @@
/* Program index of each VF queue into PF queue space
* (This is only needed if QTABLE is enabled) */
- for (i = 0; i < vf->vsi.num_queues; i++) {
+ for (i = 0; i < vf->vsi.num_tx_queues; i++) {
qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
I40E_VPLAN_QTABLE_QINDEX_SHIFT;
@@ -266,7 +268,7 @@
/* Map queues allocated to VF to its VSI;
* This mapping matches the VF-wide mapping since the VF
* is only given a single VSI */
- for (i = 0; i < vf->vsi.num_queues; i++)
+ for (i = 0; i < vf->vsi.num_tx_queues; i++)
ixl_vf_map_vsi_queue(hw, vf, i,
ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
@@ -335,7 +337,8 @@
ixl_vf_unregister_intr(hw, vpint_reg);
}
- vf->vsi.num_queues = 0;
+ vf->vsi.num_tx_queues = 0;
+ vf->vsi.num_rx_queues = 0;
}
static int
@@ -533,13 +536,13 @@
I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
reply.num_vsis = 1;
- reply.num_queue_pairs = vf->vsi.num_queues;
+ reply.num_queue_pairs = vf->vsi.num_tx_queues;
reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
reply.rss_key_size = 52;
reply.rss_lut_size = 64;
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
+ reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
@@ -674,9 +677,9 @@
}
info = msg;
- if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
+ if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
- vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
+ vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
@@ -705,7 +708,7 @@
if (pair->txq.vsi_id != vf->vsi.vsi_num ||
pair->rxq.vsi_id != vf->vsi.vsi_num ||
pair->txq.queue_id != pair->rxq.queue_id ||
- pair->txq.queue_id >= vf->vsi.num_queues) {
+ pair->txq.queue_id >= vf->vsi.num_tx_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
@@ -854,7 +857,7 @@
if (vector->rxq_map != 0) {
largest_rxq = fls(vector->rxq_map) - 1;
- if (largest_rxq >= vf->vsi.num_queues) {
+ if (largest_rxq >= vf->vsi.num_rx_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
@@ -864,7 +867,7 @@
if (vector->txq_map != 0) {
largest_txq = fls(vector->txq_map) - 1;
- if (largest_txq >= vf->vsi.num_queues) {
+ if (largest_txq >= vf->vsi.num_tx_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
@@ -911,7 +914,7 @@
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
- if (i >= vf->vsi.num_queues) {
+ if (i >= vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@@ -936,7 +939,7 @@
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
- if (i >= vf->vsi.num_queues) {
+ if (i >= vf->vsi.num_rx_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@@ -990,7 +993,7 @@
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
- if (i >= vf->vsi.num_queues) {
+ if (i >= vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@@ -1016,7 +1019,7 @@
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
- if (i >= vf->vsi.num_queues) {
+ if (i >= vf->vsi.num_rx_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@@ -1058,6 +1061,8 @@
static bool
ixl_bcast_mac(const uint8_t *addr)
{
+ static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
return (cmp_etheraddr(addr, ixl_bcast_addr));
}
@@ -1600,7 +1605,7 @@
pf = arg;
hw = &pf->hw;
- IXL_PF_LOCK(pf);
+ /* TODO: May need to lock this */
for (i = 0; i < pf->num_vfs; i++) {
global_vf_num = hw->func_caps.vf_base_id + i;
@@ -1623,8 +1628,6 @@
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
-
- IXL_PF_UNLOCK(pf);
}
static int
@@ -1694,7 +1697,7 @@
hw = &pf->hw;
pf_vsi = &pf->vsi;
- IXL_PF_LOCK(pf);
+ // TODO: IXL_PF_LOCK() was here
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
M_ZERO);
@@ -1716,13 +1719,13 @@
}
pf->num_vfs = num_vfs;
- IXL_PF_UNLOCK(pf);
+ // TODO: IXL_PF_UNLOCK() was here
return (0);
fail:
free(pf->vfs, M_IXL);
pf->vfs = NULL;
- IXL_PF_UNLOCK(pf);
+ // TODO: IXL_PF_UNLOCK() was here
return (error);
}
@@ -1741,7 +1744,7 @@
vsi = &pf->vsi;
ifp = vsi->ifp;
- IXL_PF_LOCK(pf);
+ // TODO: IXL_PF_LOCK() was here
for (i = 0; i < pf->num_vfs; i++) {
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
@@ -1761,7 +1764,7 @@
pf->vfs = NULL;
pf->num_vfs = 0;
- IXL_PF_UNLOCK(pf);
+ // TODO: IXL_PF_UNLOCK() was here
/* Do this after the unlock as sysctl_ctx_free might sleep. */
for (i = 0; i < num_vfs; i++)
@@ -1814,7 +1817,7 @@
pf = device_get_softc(dev);
vf = &pf->vfs[vfnum];
- IXL_PF_LOCK(pf);
+ // TODO: IXL_PF_LOCK() was here
vf->vf_num = vfnum;
vf->vsi.back = pf;
@@ -1854,7 +1857,7 @@
ixl_reset_vf(pf, vf);
out:
- IXL_PF_UNLOCK(pf);
+ // TODO: IXL_PF_UNLOCK() was here
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
Index: sys/dev/ixl/ixl_pf_main.c
===================================================================
--- sys/dev/ixl/ixl_pf_main.c
+++ sys/dev/ixl/ixl_pf_main.c
@@ -33,27 +33,15 @@
/*$FreeBSD$*/
+#include "ixl.h"
#include "ixl_pf.h"
+#include "ifdi_if.h"
+#include <net/iflib.h>
#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif
-#ifdef IXL_IW
-#include "ixl_iw.h"
-#include "ixl_iw_int.h"
-#endif
-
-#ifdef DEV_NETMAP
-#include <net/netmap.h>
-#include <sys/selinfo.h>
-#include <dev/netmap/netmap_kern.h>
-#endif /* DEV_NETMAP */
-
-static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
-static u64 ixl_max_aq_speed_to_value(u8);
-static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
-
/* Sysctls */
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
@@ -70,37 +58,18 @@
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
-#ifdef IXL_DEBUG
-static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
-#endif
-
-#ifdef IXL_IW
-extern int ixl_enable_iwarp;
-#endif
void
-ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
+ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
va_list args;
if (!(mask & pf->dbg_mask))
return;
- /* Re-implement device_printf() */
- device_print_prettyname(pf->dev);
va_start(args, fmt);
- vprintf(fmt, args);
+ /* device_printf() cannot consume a va_list; print the prefix, then the args */
+ device_print_prettyname(pf->dev);
+ vprintf(fmt, args);
va_end(args);
}
@@ -140,17 +109,18 @@
sbuf_delete(sbuf);
}
+#if 0
static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
+ struct ixl_tx_queue *tx_que = vsi->tx_queues;
vsi->tx_itr_setting = pf->tx_itr;
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
+ for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
vsi->tx_itr_setting);
@@ -158,18 +128,19 @@
txr->latency = IXL_AVE_LATENCY;
}
}
+#endif
static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
vsi->rx_itr_setting = pf->rx_itr;
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct rx_ring *rxr = &que->rxr;
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
vsi->rx_itr_setting);
@@ -184,140 +155,10 @@
void
ixl_configure_itr(struct ixl_pf *pf)
{
- ixl_configure_tx_itr(pf);
+ // ixl_configure_tx_itr(pf);
ixl_configure_rx_itr(pf);
}
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways. It is used by the stack as
- * init entry point in network interface structure. It is also used
- * by the driver as a hw/sw initialization routine to get to a
- * consistent state.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-void
-ixl_init_locked(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
- struct i40e_filter_control_settings filter;
- u8 tmpaddr[ETHER_ADDR_LEN];
- int ret;
-
- INIT_DEBUGOUT("ixl_init_locked: begin");
- IXL_PF_LOCK_ASSERT(pf);
-
- ixl_stop_locked(pf);
-
- /*
- * If the aq is dead here, it probably means something outside of the driver
- * did something to the adapter, like a PF reset.
- * So rebuild the driver's state here if that occurs.
- */
- if (!i40e_check_asq_alive(&pf->hw)) {
- device_printf(dev, "Admin Queue is down; resetting...\n");
- ixl_teardown_hw_structs(pf);
- ixl_reset(pf);
- }
-
- /* Get the latest mac address... User might use a LAA */
- bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
- (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
- ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
- bcopy(tmpaddr, hw->mac.addr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- ret = i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_ONLY,
- hw->mac.addr, NULL);
- if (ret) {
- device_printf(dev, "LLA address"
- "change failed!!\n");
- return;
- }
- }
-
- ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
-
- /* Set up the device filtering */
- bzero(&filter, sizeof(filter));
- filter.enable_ethtype = TRUE;
- filter.enable_macvlan = TRUE;
- filter.enable_fdir = FALSE;
- filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
- if (i40e_set_filter_control(hw, &filter))
- device_printf(dev, "i40e_set_filter_control() failed\n");
-
- /* Prepare the VSI: rings, hmc contexts, etc... */
- if (ixl_initialize_vsi(vsi)) {
- device_printf(dev, "initialize vsi failed!!\n");
- return;
- }
-
- /* Set up RSS */
- ixl_config_rss(pf);
-
- /* Add protocol filters to list */
- ixl_init_filters(vsi);
-
- /* Setup vlan's if needed */
- ixl_setup_vlan_filters(vsi);
-
- /* Set up MSI/X routing and the ITR settings */
- if (pf->msix > 1) {
- ixl_configure_queue_intr_msix(pf);
- ixl_configure_itr(pf);
- } else
- ixl_configure_legacy(pf);
-
- ixl_enable_rings(vsi);
-
- i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
-
- ixl_reconfigure_filters(vsi);
-
- /* And now turn on interrupts */
- ixl_enable_intr(vsi);
-
- /* Get link info */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- /* Start the local timer */
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
-#ifdef IXL_IW
- if (ixl_enable_iwarp && pf->iw_enabled) {
- ret = ixl_iw_pf_init(pf);
- if (ret)
- device_printf(dev,
- "initialize iwarp failed, code %d\n", ret);
- }
-#endif
-
-}
-
-
/*********************************************************************
*
* Get the hardware capabilities
@@ -381,18 +222,13 @@
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
"MDIO shared");
- struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
- osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
- if (osdep->i2c_intfc_num != -1)
- pf->has_i2c = true;
-
return (error);
}
void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
- device_t dev = vsi->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
/* Enable/disable TXCSUM/TSO4 */
if (!(ifp->if_capenable & IFCAP_TXCSUM)
@@ -480,8 +316,16 @@
return;
}
- pf->advertised_speed =
- ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
+	if (abilities.link_speed & I40E_LINK_SPEED_40GB)
+		pf->advertised_speed |= 0x10;	/* bit 4: 40Gb */
+	if (abilities.link_speed & I40E_LINK_SPEED_20GB)
+		pf->advertised_speed |= 0x8;	/* bit 3: 20Gb */
+	if (abilities.link_speed & I40E_LINK_SPEED_10GB)
+		pf->advertised_speed |= 0x4;	/* bit 2: 10Gb */
+	if (abilities.link_speed & I40E_LINK_SPEED_1GB)
+		pf->advertised_speed |= 0x2;	/* bit 1: 1Gb */
+	if (abilities.link_speed & I40E_LINK_SPEED_100MB)
+		pf->advertised_speed |= 0x1;	/* bit 0: 100Mb */
}
int
@@ -608,131 +452,70 @@
return (error);
}
-/*
-** MSIX Interrupt Handlers and Tasklets
-*/
-void
-ixl_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- IXL_TX_LOCK(txr);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
- /* Reenable this interrupt - hmmm */
- ixl_enable_queue(hw, que->me);
- return;
-}
-
-
-/*********************************************************************
- *
- * Legacy Interrupt Service routine
- *
- **********************************************************************/
-void
+int
ixl_intr(void *arg)
{
struct ixl_pf *pf = arg;
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct ifnet *ifp = vsi->ifp;
- struct tx_ring *txr = &que->txr;
- u32 icr0;
- bool more_tx, more_rx;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+ u32 reg, icr0, mask;
- pf->admin_irq++;
+ device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
+ ++que->irqs;
icr0 = rd32(hw, I40E_PFINT_ICR0);
+ reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+	reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
#ifdef PCI_IOV
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ iflib_iov_intr_deferred(vsi->ctx);
#endif
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- taskqueue_enqueue(pf->tq, &pf->adminq);
+ iflib_admin_intr_deferred(vsi->ctx);
+ device_printf(iflib_get_dev(vsi->ctx), "%s: end (adminq)\n", __func__);
+ return (FILTER_HANDLED);
}
- if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
- ++que->irqs;
+ /* re-enable other interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, mask);
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+ /* And now the queues */
+ reg = rd32(hw, I40E_QINT_RQCTL(0));
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
- }
+ reg = rd32(hw, I40E_QINT_TQCTL(0));
+ reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
- ixl_enable_intr0(hw);
+ device_printf(iflib_get_dev(vsi->ctx), "%s: end (regular)\n", __func__);
+ return (FILTER_SCHEDULE_THREAD);
}
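
For reference, the FILTER_* returns above follow the interrupt-filter contract from sys/bus.h that iflib builds on: FILTER_HANDLED means all work was done (or deferred) in the filter, FILTER_SCHEDULE_THREAD asks for the queue task to run. A compilable sketch with a hypothetical softc (constants mirror sys/bus.h; nothing here is from the patch):

#include <stdio.h>

#define FILTER_HANDLED		0x02	/* values as in sys/bus.h */
#define FILTER_SCHEDULE_THREAD	0x04

struct softc {
	int admin_event;	/* would be read from a cause register */
};

/* Shape of an iflib interrupt filter: runs in interrupt context,
 * no sleeping, no heavy work. */
static int
example_intr(void *arg)
{
	struct softc *sc = arg;

	if (sc->admin_event)
		return (FILTER_HANDLED);	/* deferred, e.g. via iflib_admin_intr_deferred() */
	return (FILTER_SCHEDULE_THREAD);	/* run the queue task for RX work */
}

int
main(void)
{
	struct softc sc = { .admin_event = 0 };

	printf("filter returned %#x\n", example_intr(&sc));
	return (0);
}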
-
/*********************************************************************
*
* MSIX VSI Interrupt Service routine
*
**********************************************************************/
-void
+int
ixl_msix_que(void *arg)
{
- struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Protect against spurious interrupts */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
+ struct ixl_rx_queue *que = arg;
ixl_set_queue_rx_itr(que);
+#ifdef notyet
ixl_set_queue_tx_itr(que);
-
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixl_enable_queue(hw, que->me);
-
- return;
+#endif
+ return (FILTER_SCHEDULE_THREAD);
}
@@ -741,7 +524,7 @@
* MSIX Admin Queue Interrupt Service routine
*
**********************************************************************/
-void
+int
ixl_msix_adminq(void *arg)
{
struct ixl_pf *pf = arg;
@@ -750,6 +533,8 @@
u32 reg, mask, rstat_reg;
bool do_task = FALSE;
+ device_printf(dev, "%s: begin\n", __func__);
+
++pf->admin_irq;
reg = rd32(hw, I40E_PFINT_ICR0);
@@ -814,49 +599,16 @@
#ifdef PCI_IOV
if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ iflib_iov_intr_deferred(pf->vsi.ctx);
}
#endif
if (do_task)
- taskqueue_enqueue(pf->tq, &pf->adminq);
+ iflib_admin_intr_deferred(pf->vsi.ctx);
else
- ixl_enable_intr0(hw);
-}
-
-void
-ixl_set_promisc(struct ixl_vsi *vsi)
-{
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int err, mcnt = 0;
- bool uni = FALSE, multi = FALSE;
-
- if (ifp->if_flags & IFF_ALLMULTI)
- multi = TRUE;
- else { /* Need to count the multicast addresses */
- struct ifmultiaddr *ifma;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
- }
- if_maddr_runlock(ifp);
- }
+ ixl_enable_adminq(hw);
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
- uni = TRUE;
-
- err = i40e_aq_set_vsi_unicast_promiscuous(hw,
- vsi->seid, uni, NULL, TRUE);
- err = i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, multi, NULL);
- return;
+ return (FILTER_HANDLED);
}
/*********************************************************************
@@ -951,909 +703,205 @@
ixl_del_hw_filters(vsi, mcnt);
}
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status,updates statistics,
- * and runs the watchdog check.
- *
- * Only runs when the driver is configured UP and RUNNING.
- *
- **********************************************************************/
-
+/*
+ * Configure admin queue/misc interrupt cause registers in hardware.
+ */
void
-ixl_local_timer(void *arg)
+ixl_configure_intr0_msix(struct ixl_pf *pf)
{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask;
- s32 timer, new_timer;
-
- IXL_PF_LOCK_ASSERT(pf);
-
- /* Fire off the adminq task */
- taskqueue_enqueue(pf->tq, &pf->adminq);
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
- /* Update stats */
- ixl_update_stats_counters(pf);
+ /* First set up the adminq - vector 0 */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
- /* Check status of the queues */
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, that means something in the TX path has updated
- * the watchdog, so it means the TX path is still working and
- * the watchdog doesn't need to countdown.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
- return;
+ /*
+ * 0x7FF is the end of the queue list.
+ * This means we won't use MSI-X vector 0 for a queue interrupt
+ * in MSIX mode.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
+ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- pf->watchdog_events++;
- ixl_init_locked(pf);
-}
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
-void
-ixl_link_up_msg(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = pf->vsi.ifp;
-
- log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
- ifp->if_xname,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
- (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
- "Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
- "Clause 108 RS-FEC" : "None",
- (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
- (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
- hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
- ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
- ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
- ixl_fc_string[1] : ixl_fc_string[0]);
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
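
A quick check on the ITR write above, assuming only the 2-usec register granularity (the rate figure is the implied ceiling, not a measurement):

#include <stdio.h>

int
main(void)
{
	unsigned itr_reg = 0x3E;		/* value written to PFINT_ITR0 */
	unsigned interval_us = itr_reg * 2;	/* 2 usec units -> 124 us */

	printf("min interval: %u us (~%u interrupts/s max)\n",
	    interval_us, 1000000 / interval_us);	/* 124 us, ~8064/s */
	return (0);
}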
/*
-** Note: this routine updates the OS on the link state
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
+ * Configure queue interrupt cause registers in hardware.
+ *
+ * Only generate interrupts from RX queue causes.
+ */
void
-ixl_update_link_status(struct ixl_pf *pf)
+ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
- if (pf->link_up) {
- if (vsi->link_active == FALSE) {
- vsi->link_active = TRUE;
- ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
- if_link_state_change(ifp, LINK_STATE_UP);
- ixl_link_up_msg(pf);
- }
- } else { /* Link down */
- if (vsi->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev, "Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- vsi->link_active = FALSE;
- }
- }
+ for (int i = 0; i < vsi->num_rx_queues; i++, vector++) {
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
+ /* First queue type is RX / 0 */
+ wr32(hw, I40E_PFINT_LNKLSTN(i), i);
- return;
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (IXL_QUEUE_EOL << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_UNKNOWN << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(i), reg);
+ }
}
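
The LNKLSTN/RQCTL writes above chain interrupt causes into a per-vector linked list; because each list here holds a single RX queue, every entry terminates immediately with IXL_QUEUE_EOL. A stubbed model of the traversal the hardware performs (register model illustrative only, not driver code):

#include <stdio.h>

#define QUEUE_EOL 0x7FF		/* mirrors IXL_QUEUE_EOL */

int
main(void)
{
	/* nextq[i] models the NEXTQ_INDX field of QINT_RQCTL(i). */
	unsigned lnklstn = 0;			/* vector's first cause: queue 0 */
	unsigned nextq[] = { QUEUE_EOL };	/* one queue per vector */

	for (unsigned q = lnklstn; q != QUEUE_EOL; q = nextq[q])
		printf("vector services queue %u\n", q);
	return (0);
}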
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
+/*
+ * Configure for single interrupt vector operation
+ */
void
-ixl_stop_locked(struct ixl_pf *pf)
+ixl_configure_legacy(struct ixl_pf *pf)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
- INIT_DEBUGOUT("ixl_stop: begin\n");
+ wr32(hw, I40E_PFINT_ITR0(0), 0);
+ wr32(hw, I40E_PFINT_ITR0(1), 0);
- IXL_PF_LOCK_ASSERT(pf);
+ /* Setup "other" causes */
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+ | I40E_PFINT_ICR0_ENA_GRST_MASK
+ | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+ | I40E_PFINT_ICR0_ENA_GPIO_MASK
+ | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
+ | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+ | I40E_PFINT_ICR0_ENA_VFLR_MASK
+ | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+ ;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-#ifdef IXL_IW
- /* Stop iWARP device */
- if (ixl_enable_iwarp && pf->iw_enabled)
- ixl_iw_pf_stop(pf);
-#endif
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
- /* Stop the local timer */
- callout_stop(&pf->timer);
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
- ixl_disable_rings_intr(vsi);
- ixl_disable_rings(vsi);
+ /* Associate the queue pair to the vector and enable the q int */
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+ | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
+#if 0
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+ | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+#endif
}
void
-ixl_stop(struct ixl_pf *pf)
+ixl_free_pci_resources(struct ixl_pf *pf)
{
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- IXL_PF_UNLOCK(pf);
-}
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI
- *
- **********************************************************************/
-int
-ixl_setup_legacy(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int error, rid = 0;
-
- if (pf->msix == 1)
- rid = 1;
- pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (pf->res == NULL) {
- device_printf(dev, "bus_alloc_resource_any() for"
- " legacy/msi interrupt\n");
- return (ENXIO);
- }
-
- /* Set the handler function */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_intr, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "bus_setup_intr() for legacy/msi"
- " interrupt handler failed, error %d\n", error);
- return (ENXIO);
- }
- error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
- if (error) {
- /* non-fatal */
- device_printf(dev, "bus_describe_intr() for Admin Queue"
- " interrupt name failed, error %d\n", error);
- }
-
- return (0);
-}
-
-int
-ixl_setup_adminq_tq(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int error = 0;
-
- /* Tasklet for Admin Queue interrupts */
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-#ifdef PCI_IOV
- /* VFLR Tasklet */
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
- /* Create and start Admin Queue taskqueue */
- pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- if (!pf->tq) {
- device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
- return (ENOMEM);
- }
- error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
- device_get_nameunit(dev));
- if (error) {
- device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
- error);
- taskqueue_free(pf->tq);
- return (error);
- }
- return (0);
-}
-
-int
-ixl_setup_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
-#ifdef RSS
- int cpu_id = 0;
- cpuset_t cpu_mask;
-#endif
-
- /* Create queue tasks and start queue taskqueues */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
-#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s (que %d)", device_get_nameunit(dev), que->me);
-#endif
- }
-
- return (0);
-}
-
-void
-ixl_free_adminq_tq(struct ixl_pf *pf)
-{
- if (pf->tq) {
- taskqueue_free(pf->tq);
- pf->tq = NULL;
- }
-}
-
-void
-ixl_free_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_free(que->tq);
- que->tq = NULL;
- }
- }
-}
-
-int
-ixl_setup_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, error = 0;
-
- /* Admin IRQ rid is 1, vector is 0 */
- rid = 1;
- /* Get interrupt resource from bus */
- pf->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!pf->res) {
- device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
- " interrupt failed [rid=%d]\n", rid);
- return (ENXIO);
- }
- /* Then associate interrupt with handler */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_adminq, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "bus_setup_intr() for Admin Queue"
- " interrupt handler failed, error %d\n", error);
- return (ENXIO);
- }
- error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
- if (error) {
- /* non-fatal */
- device_printf(dev, "bus_describe_intr() for Admin Queue"
- " interrupt name failed, error %d\n", error);
- }
- pf->admvec = 0;
-
- return (0);
-}
-
-/*
- * Allocate interrupt resources from bus and associate an interrupt handler
- * to those for the VSI's queues.
- */
-int
-ixl_setup_queue_msix(struct ixl_vsi *vsi)
-{
- device_t dev = vsi->dev;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 1;
-
- /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (!que->res) {
- device_printf(dev, "bus_alloc_resource_any() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_que, que, &que->tag);
- if (error) {
- device_printf(dev, "bus_setup_intr() for Queue %d"
- " interrupt handler failed, error %d\n",
- que->me, error);
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- return (error);
- }
- error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
- if (error) {
- device_printf(dev, "bus_describe_intr() for Queue %d"
- " interrupt name failed, error %d\n",
- que->me, error);
- }
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- error = bus_bind_intr(dev, que->res, cpu_id);
- if (error) {
- device_printf(dev, "bus_bind_intr() for Queue %d"
- " to CPU %d failed, error %d\n",
- que->me, cpu_id, error);
- }
- que->msix = vector;
- }
-
- return (0);
-}
-
-/*
- * When used in a virtualized environment PCI BUSMASTER capability may not be set
- * so explicity set it here and rewrite the ENABLE in the MSIX control register
- * at this point to cause the host to successfully initialize us.
- */
-void
-ixl_set_busmaster(device_t dev)
-{
- u16 pci_cmd_word;
-
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
-}
-
-/*
- * rewrite the ENABLE in the MSIX control register
- * to cause the host to successfully initialize us.
- */
-void
-ixl_set_msix_enable(device_t dev)
-{
- int msix_ctrl, rid;
-
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
-}
-
-/*
- * Allocate MSI/X vectors from the OS.
- * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
- */
-int
-ixl_init_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
- int auto_max_queues;
- int rid, want, vectors, queues, available;
-#ifdef IXL_IW
- int iw_want, iw_vectors;
-
- pf->iw_msix = 0;
-#endif
-
- /* Override by tuneable */
- if (!pf->enable_msix)
- goto no_msix;
-
- /* Ensure proper operation in virtualized environment */
- ixl_set_busmaster(dev);
-
- /* First try MSI/X */
- rid = PCIR_BAR(IXL_MSIX_BAR);
- pf->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!pf->msix_mem) {
- /* May not be enabled */
- device_printf(pf->dev,
- "Unable to map MSIX table\n");
- goto no_msix;
- }
-
- available = pci_msix_count(dev);
- if (available < 2) {
- /* system has msix disabled (0), or only one vector (1) */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, pf->msix_mem);
- pf->msix_mem = NULL;
- goto no_msix;
- }
-
- /* Clamp max number of queues based on:
- * - # of MSI-X vectors available
- * - # of cpus available
- * - # of queues that can be assigned to the LAN VSI
- */
- auto_max_queues = min(mp_ncpus, available - 1);
- if (hw->mac.type == I40E_MAC_X722)
- auto_max_queues = min(auto_max_queues, 128);
- else
- auto_max_queues = min(auto_max_queues, 64);
-
- /* Override with tunable value if tunable is less than autoconfig count */
- if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
- queues = pf->max_queues;
- /* Use autoconfig amount if that's lower */
- else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
- device_printf(dev, "ixl_max_queues (%d) is too large, using "
- "autoconfig amount (%d)...\n",
- pf->max_queues, auto_max_queues);
- queues = auto_max_queues;
- }
- /* Limit maximum auto-configured queues to 8 if no user value is set */
- else
- queues = min(auto_max_queues, 8);
-
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
-
- /*
- ** Want one vector (RX/TX pair) per queue
- ** plus an additional for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(pf->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- pf->msix_mem = NULL;
- goto no_msix; /* Will go to Legacy setup */
- }
-
-#ifdef IXL_IW
- if (ixl_enable_iwarp) {
- /* iWARP wants additional vector for CQP */
- iw_want = mp_ncpus + 1;
- available -= vectors;
- if (available > 0) {
- iw_vectors = (available >= iw_want) ?
- iw_want : available;
- vectors += iw_vectors;
- } else
- iw_vectors = 0;
- }
-#endif
-
- ixl_set_msix_enable(dev);
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(pf->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- pf->msix = vectors;
-#ifdef IXL_IW
- if (ixl_enable_iwarp)
- pf->iw_msix = iw_vectors;
-#endif
-
- pf->vsi.num_queues = queues;
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
-#endif
- return (vectors);
- }
-no_msix:
- vectors = pci_msi_count(dev);
- pf->vsi.num_queues = 1;
- pf->max_queues = 1;
- if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
- device_printf(pf->dev, "Using an MSI interrupt\n");
- else {
- vectors = 0;
- device_printf(pf->dev, "Using a Legacy interrupt\n");
- }
- return (vectors);
-}
-
-/*
- * Configure admin queue/misc interrupt cause registers in hardware.
- */
-void
-ixl_configure_intr0_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- /* First set up the adminq - vector 0 */
- wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
- rd32(hw, I40E_PFINT_ICR0); /* read to clear */
-
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_GRST_MASK |
- I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
- I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
- I40E_PFINT_ICR0_ENA_VFLR_MASK |
- I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /*
- * 0x7FF is the end of the queue list.
- * This means we won't use MSI-X vector 0 for a queue interrupt
- * in MSIX mode.
- */
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
- /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
- wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
-
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
-
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
-}
-
-/*
- * Configure queue interrupt cause registers in hardware.
- */
-void
-ixl_configure_queue_intr_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
- u16 vector = 1;
-
- for (int i = 0; i < vsi->num_queues; i++, vector++) {
- wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
- /* First queue type is RX / 0 */
- wr32(hw, I40E_PFINT_LNKLSTN(i), i);
-
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(i), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(i), reg);
- }
-}
-
-/*
- * Configure for MSI single vector operation
- */
-void
-ixl_configure_legacy(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct rx_ring *rxr = &que->rxr;
- struct tx_ring *txr = &que->txr;
- u32 reg;
-
- /* Configure ITR */
- vsi->tx_itr_setting = pf->tx_itr;
- wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
-
- vsi->rx_itr_setting = pf->rx_itr;
- wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
-
- /* Setup "other" causes */
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
- | I40E_PFINT_ICR0_ENA_GRST_MASK
- | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_GPIO_MASK
- | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
- | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
- | I40E_PFINT_ICR0_ENA_VFLR_MASK
- | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
- ;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /* No ITR for non-queue interrupts */
- wr32(hw, I40E_PFINT_STAT_CTL0,
- IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
-
- /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
- wr32(hw, I40E_PFINT_LNKLST0, 0);
-
- /* Associate the queue pair to the vector and enable the q int */
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
- | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
- | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
- | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-}
-
-int
-ixl_allocate_pci_resources(struct ixl_pf *pf)
-{
- int rid;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
-
- /* Map BAR0 */
- rid = PCIR_BAR(0);
- pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(pf->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
- return (ENXIO);
- }
-
- /* Save off the PCI information */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- hw->bus.device = pci_get_slot(dev);
- hw->bus.func = pci_get_function(dev);
-
- /* Save off register access information */
- pf->osdep.mem_bus_space_tag =
- rman_get_bustag(pf->pci_mem);
- pf->osdep.mem_bus_space_handle =
- rman_get_bushandle(pf->pci_mem);
- pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
- pf->osdep.flush_reg = I40E_GLGEN_STAT;
- pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
-
- pf->hw.back = &pf->osdep;
-
- return (0);
-}
-
-/*
- * Teardown and release the admin queue/misc vector
- * interrupt.
- */
-int
-ixl_teardown_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, error = 0;
-
- if (pf->admvec) /* we are doing MSIX */
- rid = pf->admvec + 1;
- else
- (pf->msix != 0) ? (rid = 1):(rid = 0);
-
- if (pf->tag != NULL) {
- bus_teardown_intr(dev, pf->res, pf->tag);
- if (error) {
- device_printf(dev, "bus_teardown_intr() for"
- " interrupt 0 failed\n");
- // return (ENXIO);
- }
- pf->tag = NULL;
- }
- if (pf->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
- if (error) {
- device_printf(dev, "bus_release_resource() for"
- " interrupt 0 failed [rid=%d]\n", rid);
- // return (ENXIO);
- }
- pf->res = NULL;
- }
-
- return (0);
-}
-
-int
-ixl_teardown_queue_msix(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- int rid, error = 0;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
/* We may get here before stations are setup */
- if ((pf->msix < 2) || (que == NULL))
- return (0);
-
- /* Release all MSIX queue resources */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- error = bus_teardown_intr(dev, que->res, que->tag);
- if (error) {
- device_printf(dev, "bus_teardown_intr() for"
- " Queue %d interrupt failed\n",
- que->me);
- // return (ENXIO);
- }
- que->tag = NULL;
- }
- if (que->res != NULL) {
- error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- if (error) {
- device_printf(dev, "bus_release_resource() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- // return (ENXIO);
- }
- que->res = NULL;
- }
- }
-
- return (0);
-}
-
-void
-ixl_free_pci_resources(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int memrid;
+ if ((!pf->enable_msix) || (rx_que == NULL))
+ goto early;
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_teardown_adminq_msix(pf);
-
- if (pf->msix > 0)
- pci_release_msi(dev);
-
- memrid = PCIR_BAR(IXL_MSIX_BAR);
+	/*
+	** Release all MSI-X VSI resources:
+	*/
+ iflib_irq_free(vsi->ctx, &vsi->irq);
- if (pf->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, pf->msix_mem);
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(vsi->ctx, &rx_que->que_irq);
+early:
if (pf->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), pf->pci_mem);
-
- return;
}
void
-ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
+ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
{
/* Display supported media types */
- if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
- phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
- phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
- phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
- phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
- phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
- phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
- || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
-
- if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
- if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_XFI) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
+ phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
+ || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
}
/*********************************************************************
@@ -1864,63 +912,26 @@
int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct ifnet *ifp;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
+ if_ctx_t ctx = vsi->ctx;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
struct i40e_aq_get_phy_abilities_resp abilities;
enum i40e_status_code aq_error = 0;
+ uint64_t cap;
INIT_DEBUGOUT("ixl_setup_interface: begin");
-
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_init = ixl_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixl_ioctl;
-
-#if __FreeBSD_version >= 1100036
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
-
- ifp->if_transmit = ixl_mq_start;
-
- ifp->if_qflush = ixl_qflush;
-
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
-
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
-
- /* Set TSO limits */
- ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
- ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
- ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
- ifp->if_capabilities |= IFCAP_TSO;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_LRO;
-
- /* VLAN capabilties */
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU
- | IFCAP_VLAN_HWCSUM;
- ifp->if_capenable = ifp->if_capabilities;
+ /* initialize fast path functions */
+
+ cap = IXL_CAPS;
+ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
+ if_setcapabilitiesbit(ifp, cap, 0);
+ if_setcapenable(ifp, if_getcapabilities(ifp));
+ if_setbaudrate(ifp, IF_Gbps(40));
+ /* TODO: Remove VLAN_ENCAP_LEN? */
+ vsi->shared->isc_max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ //+ ETHER_VLAN_ENCAP_LEN;
/*
** Don't turn this on by default, if vlans are
@@ -1930,19 +941,13 @@
** using vlans directly on the ixl driver you can
** enable this and get full hardware tag filtering.
*/
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
- ixl_media_status);
+ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
aq_error = i40e_aq_get_phy_capabilities(hw,
FALSE, TRUE, &abilities, NULL);
/* May need delay to detect fiber correctly */
if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ /* TODO: Maybe just retry this in a task... */
i40e_msec_delay(200);
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
TRUE, &abilities, NULL);
@@ -1956,16 +961,12 @@
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
return (0);
}
- pf->supported_speeds = abilities.link_speed;
- ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
- ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ ixl_add_ifmedia(vsi, abilities.phy_type);
/* Use autoselect media by default */
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
-
- ether_ifattach(ifp, hw->mac.addr);
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
return (0);
}
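
For the isc_max_frame_size expression in ixl_setup_interface() above, the default-MTU case works out as follows (standard Ethernet constants; VLAN encapsulation deliberately omitted per the TODO):

#include <stdio.h>

#define ETHERMTU	1500
#define ETHER_HDR_LEN	14
#define ETHER_CRC_LEN	4

int
main(void)
{
	printf("isc_max_frame_size = %d\n",
	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN);	/* 1518 */
	return (0);
}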
@@ -1976,8 +977,8 @@
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
@@ -1993,7 +994,7 @@
"an unqualified module was detected!\n");
/* Update OS link info */
- ixl_update_link_status(pf);
+ // ixl_update_link_status(pf);
}
/*********************************************************************
@@ -2008,7 +1009,7 @@
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = vsi->dev;
+ device_t dev = iflib_get_dev(vsi->ctx);
struct i40e_aqc_get_switch_config_resp *sw_config;
u8 aq_buf[I40E_AQ_LARGE_BUF];
int ret;
@@ -2029,7 +1030,7 @@
sw_config->header.num_reported, sw_config->header.num_total);
for (int i = 0; i < sw_config->header.num_reported; i++) {
device_printf(dev,
- "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+ "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
sw_config->element[i].element_type,
sw_config->element[i].seid,
sw_config->element[i].uplink_seid,
@@ -2053,9 +1054,13 @@
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
+ //if_shared_ctx_t sctx = iflib_get_sctx(vsi->ctx);
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
struct ixl_pf *pf = vsi->back;
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
+ struct ixl_tx_queue *tx_que = vsi->tx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ //struct ifnet *ifp = iflib_get_ifp(vsi->ctx);
struct i40e_hw *hw = vsi->hw;
struct i40e_vsi_context ctxt;
int tc_queues;
@@ -2108,14 +1113,6 @@
else
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-#ifdef IXL_IW
- /* Set TCP Enable for iWARP capable VSI */
- if (ixl_enable_iwarp && pf->iw_enabled) {
- ctxt.info.valid_sections |=
- htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
- ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
- }
-#endif
/* Save VSI number and info for use later */
vsi->vsi_num = ctxt.vsi_number;
bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2134,26 +1131,24 @@
return (err);
}
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
+ for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
struct i40e_hmc_obj_txq tctx;
- struct i40e_hmc_obj_rxq rctx;
u32 txctl;
- u16 size;
+ //u16 size;
/* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
+ //size = sctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc);
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
tctx.new_context = 1;
- tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
+ tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
+ tctx.qlen = scctx->isc_ntxd[0];
tctx.fc_ena = 0;
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
/* Enable HEAD writeback */
tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.head_wb_addr = txr->tx_paddr +
+ (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
tctx.rdylist_act = 0;
err = i40e_clear_lan_tx_queue_context(hw, i);
if (err) {
@@ -2173,10 +1168,14 @@
ixl_flush(hw);
/* Do ring (re)init */
- ixl_init_tx_ring(que);
+ ixl_init_tx_ring(vsi, tx_que);
+ }
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
+ struct i40e_hmc_obj_rxq rctx;
/* Next setup the HMC RX Context */
- if (vsi->max_frame_size <= MCLBYTES)
+ if (scctx->isc_max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
@@ -2188,13 +1187,13 @@
rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
/* ignore header split for now */
rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
- rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
- vsi->max_frame_size : max_rxmax;
+ rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
+ scctx->isc_max_frame_size : max_rxmax;
rctx.dtype = 0;
rctx.dsize = 1; /* do 32byte descriptors */
rctx.hsplit_0 = 0; /* no HDR split initially */
- rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
+ rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
+ rctx.qlen = scctx->isc_nrxd[0];
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
rctx.tphdata_ena = 0;
@@ -2217,31 +1216,19 @@
device_printf(dev, "Unable to set RX context %d\n", i);
break;
}
+#if 0
err = ixl_init_rx_ring(que);
if (err) {
device_printf(dev, "Fail in init_rx_ring %d\n", i);
break;
}
-#ifdef DEV_NETMAP
- /* preserve queue */
- if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
- struct netmap_adapter *na = NA(vsi->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
- int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
- } else
-#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+#endif
+ // wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
}
return (err);
}
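
The TX HMC context above packs the head-writeback word directly after the descriptor ring in the same DMA allocation, with tctx.base expressed in the driver's 128-byte units (IXL_TX_CTX_BASE_UNITS). A sketch of that layout with example values (addresses hypothetical; descriptor size matches the 16-byte i40e_tx_desc):

#include <stdio.h>
#include <stdint.h>

struct tx_desc { uint64_t addr, cmd; };	/* 16 bytes, like i40e_tx_desc */

int
main(void)
{
	unsigned ntxd = 1024;		/* scctx->isc_ntxd[0] */
	uint64_t ring_paddr = 0x100000;	/* txr->tx_paddr (example only) */

	/* tctx.base is in 128-byte units; the writeback word follows the ring. */
	printf("tctx.base = %llu\n",
	    (unsigned long long)(ring_paddr / 128));
	printf("tctx.head_wb_addr = 0x%llx\n",
	    (unsigned long long)(ring_paddr + ntxd * sizeof(struct tx_desc)));
	return (0);
}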
-
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
+#if 0
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
@@ -2280,6 +1267,7 @@
/* Free VSI filter list */
ixl_free_mac_filters(vsi);
}
+#endif
void
ixl_free_mac_filters(struct ixl_vsi *vsi)
@@ -2293,106 +1281,7 @@
}
}
-/*
- * Fill out fields in queue struct and setup tx/rx memory and structs
- */
-static int
-ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
-{
- device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
- int error = 0;
- int rsize, tsize;
-
- que->num_desc = pf->ringsz;
- que->me = index;
- que->vsi = vsi;
-
- txr->que = que;
- txr->tail = I40E_QTX_TAIL(que->me);
-
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
- M_NOWAIT, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
- /* Allocate receive soft structs for the ring*/
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
-
- return (0);
-fail:
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (mtx_initialized(&rxr->mtx))
- mtx_destroy(&rxr->mtx);
- if (txr->br) {
- buf_ring_free(txr->br, M_DEVBUF);
- txr->br = NULL;
- }
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- if (mtx_initialized(&txr->mtx))
- mtx_destroy(&txr->mtx);
-
- return (error);
-}
-
+#if 0
/*********************************************************************
*
* Allocate memory for the VSI (virtual station interface) and their
@@ -2424,7 +1313,6 @@
return (error);
}
- /* Then setup each queue */
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
error = ixl_setup_queue(que, pf, i);
@@ -2434,13 +1322,14 @@
return (0);
}
+#endif
/*
** Provide a update to the queue RX
** interrupt moderation value.
*/
void
-ixl_set_queue_rx_itr(struct ixl_queue *que)
+ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
@@ -2489,9 +1378,9 @@
/* do an exponential smoothing */
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
	rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
+ rxr->me), rxr->itr);
}
} else { /* We may have have toggled to non-dynamic */
if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
@@ -2500,7 +1389,7 @@
if (rxr->itr != vsi->rx_itr_setting) {
rxr->itr = vsi->rx_itr_setting;
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
+ rxr->me), rxr->itr);
}
}
rxr->bytes = 0;
@@ -2508,7 +1397,7 @@
return;
}
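
Aside: the smoothing above reads more naturally in rate space. Taking reciprocals of new = (10 * measured * old) / (9 * measured + old) gives 1/new = 0.9 * (1/old) + 0.1 * (1/measured), i.e. an exponentially weighted moving average of the interrupt rate that steps 10% toward each new measurement. A numeric check (values arbitrary):

#include <stdio.h>

int
main(void)
{
	unsigned old_itr = 200, measured = 100;
	unsigned new_itr = (10 * measured * old_itr) /
	    ((9 * measured) + old_itr);

	printf("new itr = %u\n", new_itr);	/* 200000 / 1100 = 181 */
	return (0);
}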
-
+#if 0
/*
** Provide a update to the queue TX
** interrupt moderation value.
@@ -2563,7 +1452,7 @@
/* do an exponential smoothing */
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
	txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
que->me), txr->itr);
}
@@ -2582,6 +1471,7 @@
txr->packets = 0;
return;
}
+#endif
void
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
@@ -2606,7 +1496,7 @@
* Retrieves I40E_QTX_TAIL value from hardware
* for a sysctl.
*/
-static int
+int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
@@ -2628,7 +1518,7 @@
* Retrieves I40E_QRX_TAIL value from hardware
* for a sysctl.
*/
-static int
+int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
@@ -2675,7 +1565,8 @@
}
pf->tx_itr = requested_tx_itr;
- ixl_configure_tx_itr(pf);
+ // TODO: Do we take out this sysctl?
+ // ixl_configure_tx_itr(pf);
return (error);
}
@@ -2717,6 +1608,34 @@
void
ixl_add_hw_stats(struct ixl_pf *pf)
{
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct sysctl_oid_list *vsi_list;
+
+ /* Driver statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &pf->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &pf->admin_irq,
+ "Admin Queue IRQ Handled");
+
+ ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
+ vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
+
+ /* MAC stats */
+ ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+#if 0
+void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *queues = vsi->queues;
@@ -2767,9 +1686,6 @@
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
- CTLFLAG_RD, &(queues[q].mss_too_small),
- "TSO sends with an MSS less than 64");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
@@ -2820,6 +1736,7 @@
/* MAC stats */
ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
+#endif
void
ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
@@ -2978,10 +1895,7 @@
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
- if (hw->mac.type == I40E_MAC_X722)
- set_hena = IXL_DEFAULT_RSS_HENA_X722;
- else
- set_hena = IXL_DEFAULT_RSS_HENA_XL710;
+ set_hena = IXL_DEFAULT_RSS_HENA;
#endif
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -2995,8 +1909,8 @@
ixl_set_rss_hlut(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
int i, que_id;
int lut_entry_width;
u32 lut = 0;
@@ -3017,9 +1931,9 @@
* num_queues.)
*/
que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % vsi->num_queues;
+ que_id = que_id % vsi->num_rx_queues;
#else
- que_id = i % vsi->num_queues;
+ que_id = i % vsi->num_rx_queues;
#endif
lut = (que_id & ((0x1 << lut_entry_width) - 1));
hlut_buf[i] = lut;
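
[Editor sketch] The loop above fills one RSS LUT entry per iteration: pick an RX queue (via the RSS indirection bucket when RSS is compiled in, otherwise round-robin) and mask the queue id down to the width of a LUT entry. The same fill, decoupled from the hardware write; names and widths here are illustrative:

#include <stdint.h>

static void
ex_fill_rss_lut(uint8_t *hlut_buf, int lut_size, int num_rx_queues,
    int lut_entry_width)
{
	for (int i = 0; i < lut_size; i++) {
		/* Round-robin queue choice, masked to the entry width */
		int que_id = i % num_rx_queues;

		hlut_buf[i] = que_id & ((0x1 << lut_entry_width) - 1);
	}
}
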
@@ -3049,56 +1963,6 @@
}
/*
-** This routine is run via an vlan config EVENT,
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA, init will
-** repopulate the real table.
-*/
-void
-ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- ++vsi->num_vlans;
- ixl_add_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine is run via an vlan
-** unconfig EVENT, remove our entry
-** in the soft vfta.
-*/
-void
-ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- --vsi->num_vlans;
- ixl_del_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
** This routine updates vlan filters. Called by init,
** it scans the filter table and then updates the hw
** after a soft reset.
@@ -3131,7 +1995,6 @@
flags = IXL_FILTER_VLAN;
flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
ixl_add_hw_filters(vsi, flags, cnt);
- return;
}
/*
@@ -3145,8 +2008,7 @@
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- /* Add broadcast address */
- ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ /* TODO: Set broadcast promiscuous here */
/*
* Prevent Tx flow control frames from being sent out by
@@ -3299,9 +2161,8 @@
int err, j = 0;
pf = vsi->back;
- dev = pf->dev;
+ dev = iflib_get_dev(vsi->ctx);
hw = &pf->hw;
- IXL_PF_LOCK_ASSERT(pf);
a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -3364,7 +2225,7 @@
pf = vsi->back;
hw = &pf->hw;
- dev = pf->dev;
+ dev = iflib_get_dev(vsi->ctx);
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -3496,11 +2357,11 @@
struct ixl_pf *pf = vsi->back;
int error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- error = ixl_enable_ring(pf, &pf->qtag, i);
- if (error)
- return (error);
- }
+ for (int i = 0; i < vsi->num_tx_queues; i++)
+ error = ixl_enable_tx_ring(pf, &pf->qtag, i);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ error = ixl_enable_rx_ring(pf, &pf->qtag, i);
return (error);
}
@@ -3586,11 +2447,12 @@
struct ixl_pf *pf = vsi->back;
int error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- error = ixl_disable_ring(pf, &pf->qtag, i);
- if (error)
- return (error);
- }
+ for (int i = 0; i < vsi->num_tx_queues; i++)
+ error = ixl_disable_tx_ring(pf, &pf->qtag, i);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ error = ixl_disable_rx_ring(pf, &pf->qtag, i);
+
return (error);
}
@@ -3666,44 +2528,57 @@
ixl_flush(hw);
}
+/* This only enables HW interrupts for the RX queues */
void
ixl_enable_intr(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
+ struct ixl_rx_queue *que = vsi->rx_queues;
- if (pf->msix > 1) {
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_enable_queue(hw, que->me);
+ if (pf->enable_msix) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ ixl_enable_queue(hw, que->rxr.me);
} else
- ixl_enable_intr0(hw);
+ ixl_enable_legacy(hw);
}
void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ ixl_disable_queue(hw, que->rxr.me);
+}
+
+void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_disable_queue(hw, que->me);
+ if (pf->enable_msix)
+ ixl_disable_adminq(hw);
+ else
+ ixl_disable_legacy(hw);
}
void
-ixl_enable_intr0(struct i40e_hw *hw)
+ixl_enable_adminq(struct i40e_hw *hw)
{
u32 reg;
- /* Use IXL_ITR_NONE so ITR isn't updated here */
reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
}
void
-ixl_disable_intr0(struct i40e_hw *hw)
+ixl_disable_adminq(struct i40e_hw *hw)
{
u32 reg;
@@ -3733,6 +2608,25 @@
}
void
+ixl_enable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+void
+ixl_disable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
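
[Editor sketch] ixl_enable_legacy() and ixl_disable_legacy() differ only in whether INTENA and CLEARPBA are set; both leave the ITR index at IXL_ITR_NONE so the write does not also update a throttle rate. A sketch of that composition; the bit positions are stand-ins, not the real I40E register layout:

#include <stdint.h>

#define EX_INTENA	(1u << 0)	/* assumed interrupt-enable bit */
#define EX_CLEARPBA	(1u << 1)	/* assumed clear-pending-bit-array bit */
#define EX_ITR_SHIFT	3		/* assumed ITR index field offset */
#define EX_ITR_NONE	3		/* "no ITR update" index */

static uint32_t
ex_dyn_ctl0(int enable)
{
	uint32_t reg = (uint32_t)EX_ITR_NONE << EX_ITR_SHIFT;

	if (enable)
		reg |= EX_INTENA | EX_CLEARPBA;
	return (reg);
}
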
+
+void
ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
@@ -3917,17 +2811,19 @@
bool is_up = false;
int error = 0;
+ // TODO: Find another way to get this info
is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
/* Teardown */
if (is_up)
- ixl_stop(pf);
+ ixl_if_stop(vsi->ctx);
error = i40e_shutdown_lan_hmc(hw);
if (error)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", error);
- ixl_disable_intr0(hw);
- ixl_teardown_adminq_msix(pf);
+ ixl_disable_adminq(hw);
+ // TODO: Replace
+ // ixl_teardown_adminq_msix(pf);
error = i40e_shutdown_adminq(hw);
if (error)
device_printf(dev,
@@ -3939,13 +2835,15 @@
device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
error);
}
+#if 0 // TODO: Replace
error = ixl_setup_adminq_msix(pf);
if (error) {
device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
error);
}
+#endif
ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
+ ixl_enable_adminq(hw);
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
@@ -3956,106 +2854,36 @@
device_printf(dev, "configure_lan_hmc failed: %d\n", error);
}
if (is_up)
- ixl_init(pf);
+ ixl_if_init(vsi->ctx);
return (0);
-}
-
-void
-ixl_handle_empr_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int count = 0;
- u32 reg;
-
- /* Typically finishes within 3-4 seconds */
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg)
- i40e_msec_delay(100);
- else
- break;
- }
- ixl_dbg(pf, IXL_DBG_INFO,
- "EMPR reset wait count: %d\n", count);
-
- device_printf(dev, "Rebuilding driver state...\n");
- ixl_rebuild_hw_structs_after_reset(pf);
- device_printf(dev, "Rebuilding driver state done.\n");
-
- atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
-}
-
-/*
-** Tasklet handler for MSIX Adminq interrupts
-** - do outside interrupt since it might sleep
-*/
-void
-ixl_do_adminq(void *context, int pending)
-{
- struct ixl_pf *pf = context;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_arq_event_info event;
- i40e_status ret;
- device_t dev = pf->dev;
- u32 loop = 0;
- u16 opcode, result;
-
- if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- /* Flag cleared at end of this function */
- ixl_handle_empr_reset(pf);
- return;
- }
-
- /* Admin Queue handling */
- event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.buf_len,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!event.msg_buf) {
- device_printf(dev, "%s: Unable to allocate memory for Admin"
- " Queue event!\n", __func__);
- return;
- }
-
- IXL_PF_LOCK(pf);
- /* clean and process any events */
- do {
- ret = i40e_clean_arq_element(hw, &event, &result);
- if (ret)
- break;
- opcode = LE16_TO_CPU(event.desc.opcode);
- ixl_dbg(pf, IXL_DBG_AQ,
- "Admin Queue event: %#06x\n", opcode);
- switch (opcode) {
- case i40e_aqc_opc_get_link_status:
- ixl_link_event(pf, &event);
- break;
- case i40e_aqc_opc_send_msg_to_pf:
-#ifdef PCI_IOV
- ixl_handle_vf_msg(pf, &event);
-#endif
- break;
- case i40e_aqc_opc_event_lan_overflow:
- default:
- break;
- }
+}
- } while (result && (loop++ < IXL_ADM_LIMIT));
+void
+ixl_handle_empr_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int count = 0;
+ u32 reg;
- free(event.msg_buf, M_DEVBUF);
+ /* Typically finishes within 3-4 seconds */
+ while (count++ < 100) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT)
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ if (reg)
+ i40e_msec_delay(100);
+ else
+ break;
+ }
+ ixl_dbg(pf, IXL_DBG_INFO,
+ "EMPR reset wait count: %d\n", count);
- /*
- * If there are still messages to process, reschedule ourselves.
- * Otherwise, re-enable our interrupt.
- */
- if (result > 0)
- taskqueue_enqueue(pf->tq, &pf->adminq);
- else
- ixl_enable_intr0(hw);
+ device_printf(dev, "Rebuilding driver state...\n");
+ ixl_rebuild_hw_structs_after_reset(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
- IXL_PF_UNLOCK(pf);
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
}
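
[Editor sketch] The reassembled ixl_handle_empr_reset() is a bounded poll: read the reset state, sleep 100 ms while it is nonzero, and give up after 100 iterations, giving a ~10 second ceiling against the typical 3-4 second reset. The shape in isolation, with hypothetical stand-ins for rd32() and i40e_msec_delay():

static int
ex_wait_for_reset(int (*read_state)(void), void (*delay_ms)(int))
{
	int count = 0;

	/* 100 polls x 100 ms gives a ~10 second upper bound */
	while (count++ < 100) {
		if (read_state() == 0)
			break;
		delay_ms(100);
	}
	return (count);
}
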
/**
@@ -4137,8 +2965,10 @@
ixl_update_eth_stats(vsi);
tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
+#if 0 // I think new iflib statistics cover this
for (int i = 0; i < vsi->num_queues; i++)
tx_discards += vsi->queues[i].txr.br->br_drops;
+#endif
/* Update ifnet stats */
IXL_SET_IPACKETS(vsi, es->rx_unicast +
@@ -4240,7 +3070,6 @@
ixl_add_device_sysctls(struct ixl_pf *pf)
{
device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *ctx_list =
@@ -4249,9 +3078,6 @@
struct sysctl_oid *debug_node;
struct sysctl_oid_list *debug_list;
- struct sysctl_oid *fec_node;
- struct sysctl_oid_list *fec_list;
-
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
@@ -4292,38 +3118,6 @@
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
- /* Add FEC sysctls for 25G adapters */
- /*
- * XXX: These settings can be changed, but that isn't supported,
- * so these are read-only for now.
- */
- if (hw->device_id == I40E_DEV_ID_25G_B
- || hw->device_id == I40E_DEV_ID_25G_SFP28) {
- fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
- OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
- fec_list = SYSCTL_CHILDREN(fec_node);
-
- SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
- pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
-
- SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
- pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
-
- SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
- pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
-
- SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
- pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
-
- SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
- pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
- }
-
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
@@ -4366,24 +3160,6 @@
OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
- SYSCTL_ADD_PROC(ctx, debug_list,
- OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
- pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
-
- SYSCTL_ADD_PROC(ctx, debug_list,
- OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
- pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
-
- if (pf->has_i2c) {
- SYSCTL_ADD_PROC(ctx, debug_list,
- OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
-
- SYSCTL_ADD_PROC(ctx, debug_list,
- OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
- }
-
#ifdef PCI_IOV
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
@@ -4401,9 +3177,8 @@
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int queues;
- IXL_PF_LOCK(pf);
+ // TODO: Does this need its own lock?
queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
- IXL_PF_UNLOCK(pf);
return sysctl_handle_int(oidp, NULL, queues, req);
}
@@ -4455,22 +3230,25 @@
return (0);
}
-char *
-ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
+int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
- int index;
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0, index = 0;
char *speeds[] = {
"Unknown",
- "100 Mbps",
- "1 Gbps",
- "10 Gbps",
- "40 Gbps",
- "20 Gbps",
- "25 Gbps",
+ "100M",
+ "1G",
+ "10G",
+ "40G",
+ "20G"
};
- switch (link_speed) {
+ // ixl_update_link_status(pf);
+
+ switch (hw->phy.link_info.link_speed) {
case I40E_LINK_SPEED_100MB:
index = 1;
break;
@@ -4486,56 +3264,17 @@
case I40E_LINK_SPEED_20GB:
index = 5;
break;
- case I40E_LINK_SPEED_25GB:
- index = 6;
- break;
case I40E_LINK_SPEED_UNKNOWN:
default:
index = 0;
break;
}
- return speeds[index];
-}
-
-int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- int error = 0;
-
- ixl_update_link_status(pf);
-
- error = sysctl_handle_string(oidp,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
- 8, req);
+ error = sysctl_handle_string(oidp, speeds[index],
+ strlen(speeds[index]), req);
return (error);
}
-static u8
-ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
-{
- static u16 speedmap[6] = {
- (I40E_LINK_SPEED_100MB | (0x1 << 8)),
- (I40E_LINK_SPEED_1GB | (0x2 << 8)),
- (I40E_LINK_SPEED_10GB | (0x4 << 8)),
- (I40E_LINK_SPEED_20GB | (0x8 << 8)),
- (I40E_LINK_SPEED_25GB | (0x10 << 8)),
- (I40E_LINK_SPEED_40GB | (0x20 << 8))
- };
- u8 retval = 0;
-
- for (int i = 0; i < 6; i++) {
- if (to_aq)
- retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
- else
- retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
- }
-
- return (retval);
-}
-
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
@@ -4558,14 +3297,23 @@
/* Prepare new config */
bzero(&config, sizeof(config));
- config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
config.phy_type = abilities.phy_type;
- config.phy_type_ext = abilities.phy_type_ext;
config.abilities = abilities.abilities
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ /* Translate into aq cmd link_speed */
+ if (speeds & 0x10)
+ config.link_speed |= I40E_LINK_SPEED_40GB;
+ if (speeds & 0x8)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
+ if (speeds & 0x4)
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (speeds & 0x2)
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (speeds & 0x1)
+ config.link_speed |= I40E_LINK_SPEED_100MB;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -4574,9 +3322,20 @@
"%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
- return (EIO);
+ return (EAGAIN);
}
+#if 0 // I think this was removed
+ /*
+ ** This seems a bit heavy handed, but we
+ ** need to get a reinit on some devices
+ */
+ IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+#endif
+
return (0);
}
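
[Editor sketch] The open-coded translation above maps the sysctl bitmap (0x1 = 100M through 0x10 = 40G) onto the AQ link_speed flags one bit at a time. The same mapping written table-driven, with illustrative EX_* stand-ins for the I40E_LINK_SPEED_* values:

#include <stdint.h>

enum { EX_100MB = 0x02, EX_1GB = 0x04, EX_10GB = 0x08,
       EX_20GB = 0x10, EX_40GB = 0x20 };	/* assumed flag values */

static uint8_t
ex_sysctl_to_aq_speed(int sysctl_bits)
{
	static const struct { int bit; uint8_t aq; } map[] = {
		{ 0x1, EX_100MB }, { 0x2, EX_1GB }, { 0x4, EX_10GB },
		{ 0x8, EX_20GB }, { 0x10, EX_40GB },
	};
	uint8_t speed = 0;

	for (int i = 0; i < (int)(sizeof(map) / sizeof(map[0])); i++)
		if (sysctl_bits & map[i].bit)
			speed |= map[i].aq;
	return (speed);
}

Requesting 0x6, for example, would then advertise 1G plus 10G.
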
@@ -4587,8 +3346,7 @@
** 0x2 - advertise 1G
** 0x4 - advertise 10G
** 0x8 - advertise 20G
-** 0x10 - advertise 25G
-** 0x20 - advertise 40G
+** 0x10 - advertise 40G
**
** Set to 0 to disable link
*/
@@ -4598,7 +3356,6 @@
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- u8 converted_speeds;
int requested_ls = 0;
int error = 0;
@@ -4607,70 +3364,82 @@
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- /* Check if changing speeds is supported */
- switch (hw->device_id) {
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- device_printf(dev, "Changing advertised speeds not supported"
- " on this device.\n");
+ /* Check for sane value */
+ if (requested_ls > 0x10) {
+ device_printf(dev, "Invalid advertised speed; "
+ "valid modes are 0x1 through 0x10\n");
return (EINVAL);
}
- if (requested_ls < 0 || requested_ls > 0xff) {
+ /* Then check for validity based on adapter type */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ /* 1G BaseT */
+ if (requested_ls & ~(0x2)) {
+ device_printf(dev,
+ "Only 1G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ /* 10G BaseT */
+ if (requested_ls & ~(0x7)) {
+ device_printf(dev,
+ "Only 100M/1G/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ /* 20G */
+ if (requested_ls & ~(0xE)) {
+ device_printf(dev,
+ "Only 1G/10G/20G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ /* 40G */
+ if (requested_ls & ~(0x10)) {
+ device_printf(dev,
+ "Only 40G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ default:
+ /* 10G (1G) */
+ if (requested_ls & ~(0x6)) {
+ device_printf(dev,
+ "Only 1/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
}
- /* Check for valid value */
- converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
- if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
- device_printf(dev, "Invalid advertised speed; "
- "valid flags are: 0x%02x\n",
- ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
- return (EINVAL);
- }
+ /* Exit if no change */
+ if (pf->advertised_speed == requested_ls)
+ return (0);
error = ixl_set_advertised_speeds(pf, requested_ls);
if (error)
return (error);
pf->advertised_speed = requested_ls;
- ixl_update_link_status(pf);
+ // ixl_update_link_status(pf);
return (0);
}
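
[Editor sketch] Every case in the per-device switch above reduces to the same test: reject any requested bit outside the device's supported set. That invariant as one helper; the masks come from the switch and the device coverage here is not exhaustive:

/*
 * Accept a requested speed bitmap only if it stays inside the
 * device's allowed set, as each case above checks by hand.
 */
static int
ex_speed_mask_ok(int requested, int allowed)
{
	return ((requested & ~allowed) == 0);
}

The 10GBaseT case, for instance, becomes ex_speed_mask_ok(requested_ls, 0x7).
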
/*
- * Input: bitmap of enum i40e_aq_link_speed
- */
-static u64
-ixl_max_aq_speed_to_value(u8 link_speeds)
-{
- if (link_speeds & I40E_LINK_SPEED_40GB)
- return IF_Gbps(40);
- if (link_speeds & I40E_LINK_SPEED_25GB)
- return IF_Gbps(25);
- if (link_speeds & I40E_LINK_SPEED_20GB)
- return IF_Gbps(20);
- if (link_speeds & I40E_LINK_SPEED_10GB)
- return IF_Gbps(10);
- if (link_speeds & I40E_LINK_SPEED_1GB)
- return IF_Gbps(1);
- if (link_speeds & I40E_LINK_SPEED_100MB)
- return IF_Mbps(100);
- else
- /* Minimum supported link speed */
- return IF_Mbps(100);
-}
-
-/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
void
-ixl_get_bus_info(struct ixl_pf *pf)
+ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- u16 link;
- u32 offset, num_ports;
- u64 max_speed;
+ u16 link;
+ u32 offset;
/* Some devices don't use PCIE */
if (hw->mac.type == I40E_MAC_X722)
@@ -4690,28 +3459,16 @@
(hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
(hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
(hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
(hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
("Unknown"));
- /*
- * If adapter is in slot with maximum supported speed,
- * no warning message needs to be printed out.
- */
- if (hw->bus.speed >= i40e_bus_speed_8000
- && hw->bus.width >= i40e_bus_width_pcie_x8)
- return;
-
- num_ports = bitcount32(hw->func_caps.valid_functions);
- max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
-
- if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
+ if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
+ (hw->bus.speed < i40e_bus_speed_8000)) {
device_printf(dev, "PCI-Express bandwidth available"
" for this device may be insufficient for"
" optimal performance.\n");
- device_printf(dev, "Please move the device to a different"
- " PCI-e link with more lanes and/or higher"
- " transfer rate.\n");
+ device_printf(dev, "For optimal performance, a x8 "
+ "PCIE Gen3 slot is required.\n");
}
}
@@ -4801,16 +3558,17 @@
}
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
- IXL_PF_LOCK(pf);
+ // TODO: Might need a different lock here
+ // IXL_PF_LOCK(pf);
status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
- IXL_PF_UNLOCK(pf);
+ // IXL_PF_UNLOCK(pf);
} else {
perrno = -EBUSY;
}
if (status)
- device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
- i40e_stat_str(hw, status), perrno);
+ device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
+ status, perrno);
/*
* -EPERM is actually ERESTART, which the kernel interprets as it needing
@@ -4822,180 +3580,61 @@
return (perrno);
}
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
+#if 0 // asq alive check might need to be re-added to ixl_if_init
void
-ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+ixl_init(void *arg)
{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
-
- INIT_DEBUGOUT("ixl_media_status: begin");
- IXL_PF_LOCK(pf);
-
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
+ struct ixl_pf *pf = arg;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
- if (!pf->link_up) {
+ /*
+ * If the aq is dead here, it probably means something outside of the driver
+ * did something to the adapter, like a PF reset.
+ * So rebuild the driver's state here if that occurs.
+ */
+ if (!i40e_check_asq_alive(&pf->hw)) {
+ device_printf(dev, "Admin Queue is down; resetting...\n");
+ IXL_PF_LOCK(pf);
+ ixl_teardown_hw_structs(pf);
+ ixl_reset(pf);
IXL_PF_UNLOCK(pf);
- return;
}
- ifmr->ifm_status |= IFM_ACTIVE;
-
- /* Hardware always does full-duplex */
- ifmr->ifm_active |= IFM_FDX;
-
- switch (hw->phy.link_info.phy_type) {
- /* 100 M */
- case I40E_PHY_TYPE_100BASE_TX:
- ifmr->ifm_active |= IFM_100_TX;
- break;
- /* 1 G */
- case I40E_PHY_TYPE_1000BASE_T:
- ifmr->ifm_active |= IFM_1000_T;
- break;
- case I40E_PHY_TYPE_1000BASE_SX:
- ifmr->ifm_active |= IFM_1000_SX;
- break;
- case I40E_PHY_TYPE_1000BASE_LX:
- ifmr->ifm_active |= IFM_1000_LX;
- break;
- case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 10 G */
- case I40E_PHY_TYPE_10GBASE_SFPP_CU:
- ifmr->ifm_active |= IFM_10G_TWINAX;
- break;
- case I40E_PHY_TYPE_10GBASE_SR:
- ifmr->ifm_active |= IFM_10G_SR;
- break;
- case I40E_PHY_TYPE_10GBASE_LR:
- ifmr->ifm_active |= IFM_10G_LR;
- break;
- case I40E_PHY_TYPE_10GBASE_T:
- ifmr->ifm_active |= IFM_10G_T;
- break;
- case I40E_PHY_TYPE_XAUI:
- case I40E_PHY_TYPE_XFI:
- case I40E_PHY_TYPE_10GBASE_AOC:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 25 G */
- case I40E_PHY_TYPE_25GBASE_KR:
- ifmr->ifm_active |= IFM_25G_KR;
- break;
- case I40E_PHY_TYPE_25GBASE_CR:
- ifmr->ifm_active |= IFM_25G_CR;
- break;
- case I40E_PHY_TYPE_25GBASE_SR:
- ifmr->ifm_active |= IFM_25G_SR;
- break;
- case I40E_PHY_TYPE_25GBASE_LR:
- ifmr->ifm_active |= IFM_UNKNOWN;
- break;
- /* 40 G */
- case I40E_PHY_TYPE_40GBASE_CR4:
- case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ifmr->ifm_active |= IFM_40G_CR4;
- break;
- case I40E_PHY_TYPE_40GBASE_SR4:
- ifmr->ifm_active |= IFM_40G_SR4;
- break;
- case I40E_PHY_TYPE_40GBASE_LR4:
- ifmr->ifm_active |= IFM_40G_LR4;
- break;
- case I40E_PHY_TYPE_XLAUI:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- case I40E_PHY_TYPE_1000BASE_KX:
- ifmr->ifm_active |= IFM_1000_KX;
- break;
- case I40E_PHY_TYPE_SGMII:
- ifmr->ifm_active |= IFM_1000_SGMII;
- break;
- /* ERJ: What's the difference between these? */
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
- ifmr->ifm_active |= IFM_10G_CR1;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ifmr->ifm_active |= IFM_10G_KX4;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ifmr->ifm_active |= IFM_10G_KR;
- break;
- case I40E_PHY_TYPE_SFI:
- ifmr->ifm_active |= IFM_10G_SFI;
- break;
- /* Our single 20G media type */
- case I40E_PHY_TYPE_20GBASE_KR2:
- ifmr->ifm_active |= IFM_20G_KR2;
- break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- ifmr->ifm_active |= IFM_40G_KR4;
- break;
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_40GBASE_AOC:
- ifmr->ifm_active |= IFM_40G_XLPPI;
- break;
- /* Unknown to driver */
- default:
- ifmr->ifm_active |= IFM_UNKNOWN;
- break;
+ /*
+ * Set up LAN queue interrupts here.
+ * Kernel interrupt setup functions cannot be called while holding a lock,
+ * so this is done outside of init_locked().
+ */
+ if (pf->msix > 1) {
+ /* Teardown existing interrupts, if they exist */
+ ixl_teardown_queue_msix(vsi);
+ ixl_free_queue_tqs(vsi);
+ /* Then set them up again */
+ error = ixl_setup_queue_msix(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ } else
+ // possibly broken
+ error = ixl_assign_vsi_legacy(pf);
+ if (error) {
+ device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
+ return;
}
- /* Report flow control status as well */
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
- ifmr->ifm_active |= IFM_ETH_TXPAUSE;
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
- ifmr->ifm_active |= IFM_ETH_RXPAUSE;
-
- IXL_PF_UNLOCK(pf);
-}
-
-void
-ixl_init(void *arg)
-{
- struct ixl_pf *pf = arg;
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
}
+#endif
-/*
- * NOTE: Fortville does not support forcing media speeds. Instead,
- * use the set_advertise sysctl to set the speeds Fortville
- * will advertise or be allowed to operate at.
- */
-int
-ixl_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DEBUGOUT("ixl_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
-
- return (ENODEV);
-}
-
+#if 0
/*********************************************************************
* Ioctl entry point
*
@@ -5021,7 +3660,6 @@
switch (command) {
case SIOCSIFADDR:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
avoid_reset = TRUE;
@@ -5058,8 +3696,7 @@
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixl_init_locked(pf);
+ ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
}
break;
@@ -5079,7 +3716,9 @@
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ixl_stop_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_LOCK(pf);
}
}
pf->if_flags = ifp->if_flags;
@@ -5100,7 +3739,7 @@
IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
- ixl_disable_rings_intr(vsi);
+ ixl_disable_intr(vsi);
ixl_add_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
@@ -5110,7 +3749,7 @@
IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
- ixl_disable_rings_intr(vsi);
+ ixl_disable_intr(vsi);
ixl_del_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
@@ -5150,68 +3789,87 @@
break;
}
-#if __FreeBSD_version >= 1003000
- case SIOCGI2C:
- {
- struct ifi2creq i2c;
- int i;
- IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
- if (!pf->has_i2c)
- return (ENOTTY);
+ default:
+ IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+void
+ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (pf->dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
- error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
- if (error != 0)
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
break;
- if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
- error = EINVAL;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
break;
- }
- if (i2c.len > sizeof(i2c.data)) {
- error = EINVAL;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
break;
- }
-
- for (i = 0; i < i2c.len; i++)
- if (ixl_read_i2c_byte(pf, i2c.offset + i,
- i2c.dev_addr, &i2c.data[i]))
- return (EIO);
-
- error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
- break;
- }
-#endif
- default:
- IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
+ }
-int
-ixl_find_i2c_interface(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- bool i2c_en, port_matched;
- u32 reg;
+ rxr->latency = rx_latency;
- for (int i = 0; i < 4; i++) {
- reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
- i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
- port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
- >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
- & BIT(hw->port);
- if (i2c_en && port_matched)
- return (i);
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
+ rxr->itr = rx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+ } else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = pf->rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
}
-
- return (-1);
+ rxr->bytes = 0;
+ rxr->packets = 0;
}
+#endif
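
[Editor sketch] The #if 0 copy of ixl_set_queue_rx_itr() keeps a three-bucket latency classification: bytes-per-ITR-tick above 10 promotes LOW to AVE, above 20 promotes AVE to BULK, and the reverse thresholds demote. The transition logic on its own; bucket names mirror IXL_*_LATENCY and the thresholds follow the code:

enum ex_lat { EX_LOW, EX_AVE, EX_BULK };

static enum ex_lat
ex_next_latency(enum ex_lat cur, int bytes_per_tick)
{
	switch (cur) {
	case EX_LOW:
		return (bytes_per_tick > 10) ? EX_AVE : EX_LOW;
	case EX_AVE:
		if (bytes_per_tick > 20)
			return (EX_BULK);
		return (bytes_per_tick <= 10) ? EX_LOW : EX_AVE;
	default:	/* EX_BULK */
		return (bytes_per_tick <= 20) ? EX_AVE : EX_BULK;
	}
}
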
static char *
-ixl_phy_type_string(u32 bit_pos, bool ext)
+ixl_phy_type_string(u32 bit_pos)
{
static char * phy_types_str[32] = {
"SGMII",
@@ -5247,59 +3905,20 @@
"20GBASE-KR2",
"Reserved (31)"
};
- static char * ext_phy_types_str[4] = {
- "25GBASE-KR",
- "25GBASE-CR",
- "25GBASE-SR",
- "25GBASE-LR"
- };
- if (ext && bit_pos > 3) return "Invalid_Ext";
if (bit_pos > 31) return "Invalid";
-
- return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
-}
-
-int
-ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
-{
- device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_aq_desc desc;
- enum i40e_status_code status;
-
- struct i40e_aqc_get_link_status *aq_link_status =
- (struct i40e_aqc_get_link_status *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
- link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
- if (status) {
- device_printf(dev,
- "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
- __func__, i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return (EIO);
- }
-
- bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
- return (0);
+ return phy_types_str[bit_pos];
}
-static char *
-ixl_phy_type_string_ls(u8 val)
-{
- if (val >= 0x1F)
- return ixl_phy_type_string(val - 0x1F, true);
- else
- return ixl_phy_type_string(val, false);
-}
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
+ struct i40e_link_status link_status;
+ enum i40e_status_code status;
struct sbuf *buf;
int error = 0;
@@ -5309,34 +3928,31 @@
return (ENOMEM);
}
- struct i40e_aqc_get_link_status link_status;
- error = ixl_aq_get_link_status(pf, &link_status);
- if (error) {
+ status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_delete(buf);
- return (error);
+ return (EIO);
}
- /* TODO: Add 25G types */
sbuf_printf(buf, "\n"
"PHY Type : 0x%02x<%s>\n"
"Speed : 0x%02x\n"
"Link info: 0x%02x\n"
"AN info : 0x%02x\n"
"Ext info : 0x%02x\n"
- "Loopback : 0x%02x\n"
"Max Frame: %d\n"
- "Config : 0x%02x\n"
- "Power : 0x%02x",
- link_status.phy_type,
- ixl_phy_type_string_ls(link_status.phy_type),
+ "Pacing : 0x%02x\n"
+ "CRC En? : %s\n",
+ link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
link_status.link_speed,
- link_status.link_info,
- link_status.an_info,
- link_status.ext_info,
- link_status.loopback,
- link_status.max_frame_size,
- link_status.config,
- link_status.power_desc);
+ link_status.link_info, link_status.an_info,
+ link_status.ext_info, link_status.max_frame_size,
+ link_status.pacing,
+ (link_status.crc_enable) ? "Yes" : "No");
error = sbuf_finish(buf);
if (error)
@@ -5364,7 +3980,7 @@
}
status = i40e_aq_get_phy_capabilities(hw,
- FALSE, FALSE, &abilities, NULL);
+ TRUE, FALSE, &abilities, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
@@ -5382,22 +3998,10 @@
sbuf_printf(buf, "<");
for (int i = 0; i < 32; i++)
if ((1 << i) & abilities.phy_type)
- sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
+ sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
sbuf_printf(buf, ">\n");
}
- sbuf_printf(buf, "PHY Ext : %02x",
- abilities.phy_type_ext);
-
- if (abilities.phy_type_ext != 0) {
- sbuf_printf(buf, "<");
- for (int i = 0; i < 4; i++)
- if ((1 << i) & abilities.phy_type_ext)
- sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
- sbuf_printf(buf, ">");
- }
- sbuf_printf(buf, "\n");
-
sbuf_printf(buf,
"Speed : %02x\n"
"Abilities: %02x\n"
@@ -5405,19 +4009,14 @@
"EEER reg : %08x\n"
"D3 Lpan : %02x\n"
"ID : %02x %02x %02x %02x\n"
- "ModType : %02x %02x %02x\n"
- "ModType E: %01x\n"
- "FEC Cfg : %02x\n"
- "Ext CC : %02x",
+ "ModType : %02x %02x %02x",
abilities.link_speed,
abilities.abilities, abilities.eee_capability,
abilities.eeer_val, abilities.d3_lpan,
abilities.phy_id[0], abilities.phy_id[1],
abilities.phy_id[2], abilities.phy_id[3],
abilities.module_type[0], abilities.module_type[1],
- abilities.module_type[2], abilities.phy_type_ext >> 5,
- abilities.phy_type_ext & 0x1F,
- abilities.ext_comp_code);
+ abilities.module_type[2]);
error = sbuf_finish(buf);
if (error)
@@ -5490,7 +4089,7 @@
char *
ixl_switch_res_type_string(u8 type)
{
- static char * ixl_switch_res_type_strings[0x14] = {
+ char * ixl_switch_res_type_strings[0x14] = {
"VEB",
"VSI",
"Perfect Match MAC address",
@@ -5794,283 +4393,3 @@
return (error);
}
-static int
-ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- u64 hena;
-
- hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
-
- return sysctl_handle_long(oidp, NULL, hena, req);
-}
-
-/*
- * Sysctl to disable firmware's link management
- *
- * 1 - Disable link management on this port
- * 0 - Re-enable link management
- *
- * On normal NVMs, firmware manages link by default.
- */
-static int
-ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_mode = -1;
- enum i40e_status_code status = 0;
- int error = 0;
-
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &requested_mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- /* Check for sane value */
- if (requested_mode < 0 || requested_mode > 1) {
- device_printf(dev, "Valid modes are 0 or 1\n");
- return (EINVAL);
- }
-
- /* Set new mode */
- status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
- if (status) {
- device_printf(dev,
- "%s: Error setting new phy debug mode %s,"
- " aq error: %s\n", __func__, i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return (EIO);
- }
-
- return (0);
-}
-
-/*
- * Sysctl to read a byte from I2C bus.
- *
- * Input: 32-bit value:
- * bits 0-7: device address (0xA0 or 0xA2)
- * bits 8-15: offset (0-255)
- * bits 16-31: unused
- * Output: 8-bit value read
- */
-static int
-ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- device_t dev = pf->dev;
- int input = -1, error = 0;
-
- device_printf(dev, "%s: start\n", __func__);
-
- u8 dev_addr, offset, output;
-
- /* Read in I2C read parameters */
- error = sysctl_handle_int(oidp, &input, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- /* Validate device address */
- dev_addr = input & 0xFF;
- if (dev_addr != 0xA0 && dev_addr != 0xA2) {
- return (EINVAL);
- }
- offset = (input >> 8) & 0xFF;
-
- error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
- if (error)
- return (error);
-
- device_printf(dev, "%02X\n", output);
- return (0);
-}
-
-/*
- * Sysctl to write a byte to the I2C bus.
- *
- * Input: 32-bit value:
- * bits 0-7: device address (0xA0 or 0xA2)
- * bits 8-15: offset (0-255)
- * bits 16-23: value to write
- * bits 24-31: unused
- * Output: 8-bit value written
- */
-static int
-ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- device_t dev = pf->dev;
- int input = -1, error = 0;
-
- u8 dev_addr, offset, value;
-
- /* Read in I2C write parameters */
- error = sysctl_handle_int(oidp, &input, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- /* Validate device address */
- dev_addr = input & 0xFF;
- if (dev_addr != 0xA0 && dev_addr != 0xA2) {
- return (EINVAL);
- }
- offset = (input >> 8) & 0xFF;
- value = (input >> 16) & 0xFF;
-
- error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
- if (error)
- return (error);
-
- device_printf(dev, "%02X written\n", value);
- return (0);
-}
-
-static int
-ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
- u8 bit_pos, int *is_set)
-{
- device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
- enum i40e_status_code status;
-
- status = i40e_aq_get_phy_capabilities(hw,
- FALSE, FALSE, abilities, NULL);
- if (status) {
- device_printf(dev,
- "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
- __func__, i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return (EIO);
- }
-
- *is_set = !!(abilities->phy_type_ext & bit_pos);
- return (0);
-}
-
-static int
-ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
- u8 bit_pos, int set)
-{
- device_t dev = pf->dev;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_aq_set_phy_config config;
- enum i40e_status_code status;
-
- /* Set new PHY config */
- memset(&config, 0, sizeof(config));
- config.fec_config = abilities->phy_type_ext & ~(bit_pos);
- if (set)
- config.fec_config |= bit_pos;
- if (config.fec_config != abilities->phy_type_ext) {
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- config.phy_type = abilities->phy_type;
- config.phy_type_ext = abilities->phy_type_ext;
- config.link_speed = abilities->link_speed;
- config.eee_capability = abilities->eee_capability;
- config.eeer = abilities->eeer_val;
- config.low_power_ctrl = abilities->d3_lpan;
- status = i40e_aq_set_phy_config(hw, &config, NULL);
-
- if (status) {
- device_printf(dev,
- "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
- __func__, i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return (EIO);
- }
- }
-
- return (0);
-}
-
-static int
-ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- int mode, error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
- if (error)
- return (error);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
-}
-
-static int
-ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- int mode, error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
- if (error)
- return (error);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
-}
-
-static int
-ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- int mode, error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
- if (error)
- return (error);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
-}
-
-static int
-ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- int mode, error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
- if (error)
- return (error);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
-}
-
-static int
-ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- int mode, error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
- if (error)
- return (error);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &mode, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
-}
-
Index: sys/dev/ixl/ixl_txrx.c
===================================================================
--- sys/dev/ixl/ixl_txrx.c
+++ sys/dev/ixl/ixl_txrx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -51,26 +51,35 @@
#endif
/* Local Prototypes */
-static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
-static void ixl_refresh_mbufs(struct ixl_queue *, int);
-static int ixl_xmit(struct ixl_queue *, struct mbuf **);
-static int ixl_tx_setup_offload(struct ixl_queue *,
- struct mbuf *, u32 *, u32 *);
-static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
-
-static inline void ixl_rx_discard(struct rx_ring *, int);
-static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u8);
-
-static inline bool ixl_tso_detect_sparse(struct mbuf *mp);
-static int ixl_tx_setup_offload(struct ixl_queue *que,
- struct mbuf *mp, u32 *cmd, u32 *off);
-static inline u32 ixl_get_tx_head(struct ixl_queue *que);
-
-#ifdef DEV_NETMAP
-#include <dev/netmap/if_ixl_netmap.h>
-int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
-#endif /* DEV_NETMAP */
+static void ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);
+
+static int ixl_isc_txd_encap(void *arg, if_pkt_info_t pi);
+static void ixl_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
+static int ixl_isc_txd_credits_update(void *arg, uint16_t qid, uint32_t cidx, bool clear);
+
+static void ixl_isc_rxd_refill(void *arg, uint16_t rxqid,
+ uint8_t flid __unused,
+ uint32_t pidx, uint64_t *paddrs,
+ caddr_t *vaddrs __unused, uint16_t count,
+ uint16_t buf_len __unused);
+static void ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ uint32_t pidx);
+static int ixl_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
+ int budget);
+static int ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+
+extern int ixl_intr(void *arg);
+
+struct if_txrx ixl_txrx = {
+ ixl_isc_txd_encap,
+ ixl_isc_txd_flush,
+ ixl_isc_txd_credits_update,
+ ixl_isc_rxd_available,
+ ixl_isc_rxd_pkt_get,
+ ixl_isc_rxd_refill,
+ ixl_isc_rxd_flush,
+ ixl_intr
+};
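
[Editor sketch] The positional if_txrx initializer above is order-dependent; a designated-initializer form makes the callback slots explicit. The ift_* field names below follow my reading of iflib's struct if_txrx at this revision, so treat them as an assumption:

struct if_txrx ixl_txrx_alt = {
	.ift_txd_encap = ixl_isc_txd_encap,
	.ift_txd_flush = ixl_isc_txd_flush,
	.ift_txd_credits_update = ixl_isc_txd_credits_update,
	.ift_rxd_available = ixl_isc_rxd_available,
	.ift_rxd_pkt_get = ixl_isc_rxd_pkt_get,
	.ift_rxd_refill = ixl_isc_rxd_refill,
	.ift_rxd_flush = ixl_isc_rxd_flush,
	.ift_legacy_intr = ixl_intr,
};
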
/*
* @key key is saved into this parameter
@@ -89,535 +98,32 @@
bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}
-/*
-** Multiqueue Transmit driver
-*/
-int
-ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_queue *que;
- struct tx_ring *txr;
- int err, i;
-#ifdef RSS
- u32 bucket_id;
-#endif
-
- /*
- ** Which queue to use:
- **
- ** When doing RSS, map it to the same outbound
- ** queue as the incoming flow would be mapped to.
- ** If everything is setup correctly, it should be
- ** the same bucket that the current CPU we're on is.
- */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
-#ifdef RSS
- if (rss_hash2bucket(m->m_pkthdr.flowid,
- M_HASHTYPE_GET(m), &bucket_id) == 0) {
- i = bucket_id % vsi->num_queues;
- } else
-#endif
- i = m->m_pkthdr.flowid % vsi->num_queues;
- } else
- i = curcpu % vsi->num_queues;
-
- que = &vsi->queues[i];
- txr = &que->txr;
-
- err = drbr_enqueue(ifp, txr->br, m);
- if (err)
- return (err);
- if (IXL_TX_TRYLOCK(txr)) {
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- } else
- taskqueue_enqueue(que->tq, &que->tx_task);
-
- return (0);
-}
-
-int
-ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
-{
- struct ixl_queue *que = txr->que;
- struct ixl_vsi *vsi = que->vsi;
- struct mbuf *next;
- int err = 0;
-
-
- if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
- vsi->link_active == 0)
- return (ENETDOWN);
-
- /* Process the transmit queue */
- while ((next = drbr_peek(ifp, txr->br)) != NULL) {
- if ((err = ixl_xmit(que, &next)) != 0) {
- if (next == NULL)
- drbr_advance(ifp, txr->br);
- else
- drbr_putback(ifp, txr->br, next);
- break;
- }
- drbr_advance(ifp, txr->br);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- }
-
- if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
- ixl_txeof(que);
-
- return (err);
-}
-
-/*
- * Called from a taskqueue to drain queued transmit packets.
- */
-void
-ixl_deferred_mq_start(void *arg, int pending)
-{
- struct ixl_queue *que = arg;
- struct tx_ring *txr = &que->txr;
- struct ixl_vsi *vsi = que->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- IXL_TX_LOCK(txr);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
-}
-
-/*
-** Flush all queue ring buffers
-*/
-void
-ixl_qflush(struct ifnet *ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
-
- for (int i = 0; i < vsi->num_queues; i++) {
- struct ixl_queue *que = &vsi->queues[i];
- struct tx_ring *txr = &que->txr;
- struct mbuf *m;
- IXL_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXL_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
-
+// TODO: Compare this version of iflib with current version in OOT driver
/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than 8
-** mbufs to deliver an mss-size chunk of data
+** segments to deliver an mss-size chunk of data
*/
-static inline bool
-ixl_tso_detect_sparse(struct mbuf *mp)
-{
- struct mbuf *m;
- int num, mss;
-
- num = 0;
- mss = mp->m_pkthdr.tso_segsz;
-
- /* Exclude first mbuf; assume it contains all headers */
- for (m = mp->m_next; m != NULL; m = m->m_next) {
- if (m == NULL)
- break;
- num++;
- mss -= m->m_len % mp->m_pkthdr.tso_segsz;
-
- if (mss < 1) {
- if (num > IXL_SPARSE_CHAIN)
- return (true);
- num = (mss == 0) ? 0 : 1;
- mss += mp->m_pkthdr.tso_segsz;
- }
- }
-
- return (false);
-}
-
-
-/*********************************************************************
- *
- * This routine maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- * - return 0 on success, positive on failure
- *
- **********************************************************************/
-#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
-
static int
-ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
+ixl_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, int segsz)
{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
- struct i40e_tx_desc *txd = NULL;
- struct mbuf *m_head, *m;
- int i, j, error, nsegs;
- int first, last = 0;
- u16 vtag = 0;
- u32 cmd, off;
- bus_dmamap_t map;
- bus_dma_tag_t tag;
- bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
-
- cmd = off = 0;
- m_head = *m_headp;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail;
- buf = &txr->buffers[first];
- map = buf->map;
- tag = txr->tx_tag;
-
- if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- /* Use larger mapping for TSO */
- tag = txr->tso_tag;
- if (ixl_tso_detect_sparse(m_head)) {
- m = m_defrag(m_head, M_NOWAIT);
- if (m == NULL) {
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
- }
- }
-
- /*
- * Map the packet for DMA.
- */
- error = bus_dmamap_load_mbuf_sg(tag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == EFBIG) {
- struct mbuf *m;
-
- m = m_defrag(*m_headp, M_NOWAIT);
- if (m == NULL) {
- que->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
-
- /* Try it again */
- error = bus_dmamap_load_mbuf_sg(tag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error != 0) {
- que->tx_dmamap_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- } else if (error != 0) {
- que->tx_dmamap_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->avail - 2) {
- txr->no_desc++;
- error = ENOBUFS;
- goto xmit_fail;
- }
- m_head = *m_headp;
-
- /* Set up the TSO/CSUM offload */
- if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
- error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
- if (error)
- goto xmit_fail;
- }
-
- cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Grab the VLAN tag */
- if (m_head->m_flags & M_VLANTAG) {
- cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- vtag = htole16(m_head->m_pkthdr.ether_vtag);
- }
-
- i = txr->next_avail;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
-
- buf = &txr->buffers[i];
- buf->tag = tag; /* Keep track of the type tag */
- txd = &txr->base[i];
- seglen = segs[j].ds_len;
-
- txd->buffer_addr = htole64(segs[j].ds_addr);
- txd->cmd_type_offset_bsz =
- htole64(I40E_TX_DESC_DTYPE_DATA
- | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
- | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
- | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
- | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));
-
- last = i; /* descriptor that will get completion IRQ */
+ int i, count, curseg;
- if (++i == que->num_desc)
- i = 0;
-
- buf->m_head = NULL;
- buf->eop_index = -1;
+ if (nsegs <= IXL_MAX_TX_SEGS-2)
+ return (0);
+ for (curseg = count = i = 0; i < nsegs; i++) {
+ curseg += segs[i].ds_len;
+ count++;
+ if (__predict_false(count == IXL_MAX_TX_SEGS-2))
+ return (1);
+ if (curseg > segsz) {
+ curseg -= segsz;
+ count = 1;
+ }
+ if (curseg == segsz)
+ curseg = count = 0;
}
- /* Set the last descriptor for report */
- txd->cmd_type_offset_bsz |=
- htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
- txr->avail -= nsegs;
- txr->next_avail = i;
-
- buf->m_head = m_head;
- /* Swap the dma map between the first and last descriptor */
- txr->buffers[first].map = buf->map;
- buf->map = map;
- bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the index of the descriptor that will be marked done */
- buf = &txr->buffers[first];
- buf->eop_index = last;
-
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- wr32(hw, txr->tail, i);
-
- /* Mark outstanding work */
- atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
return (0);
-
-xmit_fail:
- bus_dmamap_unload(tag, buf->map);
- return (error);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- *
- **********************************************************************/
-int
-ixl_allocate_tx_data(struct ixl_queue *que)
-{
- struct tx_ring *txr = &que->txr;
- struct ixl_vsi *vsi = que->vsi;
- device_t dev = vsi->dev;
- struct ixl_tx_buf *buf;
- int error = 0;
-
- /*
- * Setup DMA descriptor areas.
- */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXL_TSO_SIZE, /* maxsize */
- IXL_MAX_TX_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->tx_tag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- /* Make a special tag for TSO */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXL_TSO_SIZE, /* maxsize */
- IXL_MAX_TSO_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->tso_tag))) {
- device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
- goto fail;
- }
-
- if (!(txr->buffers =
- (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
- que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer default dma maps */
- buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
- buf->tag = txr->tx_tag;
- error = bus_dmamap_create(buf->tag, 0, &buf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-fail:
- return (error);
-}
-
-
-/*********************************************************************
- *
- * (Re)Initialize a queue transmit ring.
- * - called by init, it clears the descriptor ring,
- * and frees any stale mbufs
- *
- **********************************************************************/
-void
-ixl_init_tx_ring(struct ixl_queue *que)
-{
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(que->vsi->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
-
- /* Clear the old ring contents */
- IXL_TX_LOCK(txr);
-
-#ifdef DEV_NETMAP
- /*
- * (under lock): if in netmap mode, do some consistency
- * checks and set slot to entry 0 of the netmap ring.
- */
- slot = netmap_reset(na, NR_TX, que->me, 0);
-#endif /* DEV_NETMAP */
-
- bzero((void *)txr->base,
- (sizeof(struct i40e_tx_desc)) * que->num_desc);
-
- /* Reset indices */
- txr->next_avail = 0;
- txr->next_to_clean = 0;
-
- /* Reset watchdog status */
- txr->watchdog_timer = 0;
-
-#ifdef IXL_FDIR
- /* Initialize flow director */
- txr->atr_rate = ixl_atr_rate;
- txr->atr_count = 0;
-#endif
- /* Free any existing tx mbufs. */
- buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
- if (buf->m_head != NULL) {
- bus_dmamap_sync(buf->tag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag, buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- }
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, set the map for the packet buffer.
- * NOTE: Some drivers (not this one) also need to set
- * the physical buffer address in the NIC ring.
- * netmap_idx_n2k() maps a nic index, i, into the corresponding
- * netmap slot index, si
- */
- if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
- netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
- }
-#endif /* DEV_NETMAP */
- /* Clear the EOP index */
- buf->eop_index = -1;
- }
-
- /* Set number of descriptors available */
- txr->avail = que->num_desc;
-
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXL_TX_UNLOCK(txr);
-}
-
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-void
-ixl_free_que_tx(struct ixl_queue *que)
-{
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
-
- for (int i = 0; i < que->num_desc; i++) {
- buf = &txr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(buf->tag, buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- if (buf->map != NULL) {
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
- }
- } else if (buf->map != NULL) {
- bus_dmamap_unload(buf->tag,
- buf->map);
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
- }
- }
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
- if (txr->buffers != NULL) {
- free(txr->buffers, M_DEVBUF);
- txr->buffers = NULL;
- }
- if (txr->tx_tag != NULL) {
- bus_dma_tag_destroy(txr->tx_tag);
- txr->tx_tag = NULL;
- }
- if (txr->tso_tag != NULL) {
- bus_dma_tag_destroy(txr->tso_tag);
- txr->tso_tag = NULL;
- }
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
- return;
}
/*********************************************************************
@@ -626,66 +132,18 @@
*
**********************************************************************/
-static int
-ixl_tx_setup_offload(struct ixl_queue *que,
- struct mbuf *mp, u32 *cmd, u32 *off)
+static void
+ixl_tx_setup_offload(struct ixl_tx_queue *que,
+ if_pkt_info_t pi, u32 *cmd, u32 *off)
{
- struct ether_vlan_header *eh;
-#ifdef INET
- struct ip *ip = NULL;
-#endif
- struct tcphdr *th = NULL;
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
- int elen, ip_hlen = 0, tcp_hlen;
- u16 etype;
- u8 ipproto = 0;
- bool tso = FALSE;
-
- /* Set up the TSO context descriptor if required */
- if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
- tso = ixl_tso_setup(que, mp);
- if (tso)
- ++que->tso;
- else
- return (ENXIO);
- }
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- elen = ETHER_HDR_LEN;
- }
-
- switch (etype) {
+ switch (pi->ipi_etype) {
#ifdef INET
case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + elen);
- ip_hlen = ip->ip_hl << 2;
- ipproto = ip->ip_p;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- /* The IP checksum must be recalculated with TSO */
- if (tso)
- *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- else
- *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
break;
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + elen);
- ip_hlen = sizeof(struct ip6_hdr);
- ipproto = ip6->ip6_nxt;
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
break;
#endif
@@ -693,31 +151,26 @@
break;
}
- *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *off |= (pi->ipi_ehdrlen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ *off |= (pi->ipi_ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- switch (ipproto) {
+ switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
- tcp_hlen = th->th_off << 2;
- if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
+ if (pi->ipi_csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *off |= (tcp_hlen >> 2) <<
+ *off |= (pi->ipi_tcp_hlen >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
-#ifdef IXL_FDIR
- ixl_atr(que, th, etype);
-#endif
break;
case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
+ if (pi->ipi_csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
*off |= (sizeof(struct udphdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
break;
-
case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
+ if (pi->ipi_csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
*off |= (sizeof(struct sctphdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@@ -727,103 +180,32 @@
break;
}
- return (0);
}
-
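
For reference, the offset word built by ixl_tx_setup_offload() is three packed length fields: MAC header length in 2-byte words, IP header length in 4-byte dwords, and L4 header length in 4-byte dwords. A minimal userland sketch of that packing, assuming the shift values match the in-tree i40e_type.h (MACLEN 0, IPLEN 7, L4 14) and a hypothetical plain Ethernet + IPv4 + TCP frame:

#include <stdint.h>
#include <stdio.h>

#define MACLEN_SHIFT	0	/* assumed I40E_TX_DESC_LENGTH_MACLEN_SHIFT */
#define IPLEN_SHIFT	7	/* assumed I40E_TX_DESC_LENGTH_IPLEN_SHIFT */
#define L4LEN_SHIFT	14	/* assumed I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT */

int
main(void)
{
	uint32_t off = 0;
	int ehdrlen = 14, ip_hlen = 20, tcp_hlen = 20;

	off |= (ehdrlen >> 1) << MACLEN_SHIFT;	/* MAC length in 2-byte words */
	off |= (ip_hlen >> 2) << IPLEN_SHIFT;	/* IP length in 4-byte dwords */
	off |= (tcp_hlen >> 2) << L4LEN_SHIFT;	/* L4 length in 4-byte dwords */
	printf("off = 0x%05x\n", (unsigned)off);	/* 0x14287 for this frame */
	return (0);
}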
/**********************************************************************
*
* Setup context for hardware segmentation offload (TSO)
*
**********************************************************************/
-static bool
-ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
+static int
+ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
{
- struct tx_ring *txr = &que->txr;
+ if_softc_ctx_t scctx;
struct i40e_tx_context_desc *TXD;
- struct ixl_tx_buf *buf;
u32 cmd, mss, type, tsolen;
- u16 etype;
- int idx, elen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
-#ifdef INET
- struct ip *ip;
-#endif
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
-#if defined(INET6) || defined(INET)
- struct tcphdr *th;
-#endif
+ int idx;
u64 type_cmd_tso_mss;
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- etype = eh->evl_proto;
- } else {
- elen = ETHER_HDR_LEN;
- etype = eh->evl_encap_proto;
- }
+ // printf("%s: begin\n", __func__);
- switch (ntohs(etype)) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + elen);
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return (ENXIO);
- ip_hlen = sizeof(struct ip6_hdr);
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
- th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
- tcp_hlen = th->th_off << 2;
- /*
- * The corresponding flag is set by the stack in the IPv4
- * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
- * So, set it here because the rest of the flow requires it.
- */
- mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + elen);
- if (ip->ip_p != IPPROTO_TCP)
- return (ENXIO);
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- tcp_hlen = th->th_off << 2;
- break;
-#endif
- default:
- printf("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(etype));
- return FALSE;
- }
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
- return FALSE;
-
- idx = txr->next_avail;
- buf = &txr->buffers[idx];
- TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
- tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
+ idx = pi->ipi_pidx;
+ TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx];
+ tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
+ scctx = txr->que->vsi->shared;
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
- /* TSO MSS must not be less than 64 */
- if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) {
- que->mss_too_small++;
- mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS;
- }
- mss = mp->m_pkthdr.tso_segsz;
+ mss = pi->ipi_tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
@@ -832,617 +214,234 @@
TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
TXD->tunneling_params = htole32(0);
- buf->m_head = NULL;
- buf->eop_index = -1;
- if (++idx == que->num_desc)
- idx = 0;
-
- txr->avail--;
- txr->next_avail = idx;
-
- return TRUE;
-}
-
-/*
-** ixl_get_tx_head - Retrieve the value from the
-** location the HW records its HEAD index
-*/
-static inline u32
-ixl_get_tx_head(struct ixl_queue *que)
-{
- struct tx_ring *txr = &que->txr;
- void *head = &txr->base[que->num_desc];
- return LE32_TO_CPU(*(volatile __le32 *)head);
+	/* Advance past the context descriptor; the ring size is a power of two */
+ return ((idx + 1) & (scctx->isc_ntxd[0]-1));
}
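
The context descriptor quadword assembled above is four packed fields, and the final wrap relies on iflib sizing the ring to a power of two, so a mask substitutes for modulo. A small sketch under those assumptions (shift values assumed from i40e_type.h; the DTYPE/CMD values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DTYPE_SHIFT	0	/* assumed I40E_TXD_CTX_QW1_DTYPE_SHIFT */
#define CMD_SHIFT	4	/* assumed I40E_TXD_CTX_QW1_CMD_SHIFT */
#define TSO_LEN_SHIFT	30	/* assumed I40E_TXD_CTX_QW1_TSO_LEN_SHIFT */
#define MSS_SHIFT	50	/* assumed I40E_TXD_CTX_QW1_MSS_SHIFT */

int
main(void)
{
	uint64_t type = 1, cmd = 1;	/* CONTEXT dtype, TSO cmd (values assumed) */
	uint64_t tsolen = 65000, mss = 1448;
	uint64_t qw = (type << DTYPE_SHIFT) | (cmd << CMD_SHIFT) |
	    (tsolen << TSO_LEN_SHIFT) | (mss << MSS_SHIFT);
	uint32_t idx = 1023, ntxd = 1024;	/* ring size must be 2^n */

	printf("type_cmd_tso_mss = 0x%016jx\n", (uintmax_t)qw);
	/* mask wrap, equivalent to (idx + 1) % ntxd for power-of-two rings */
	printf("next pidx = %u\n", (idx + 1) & (ntxd - 1));
	return (0);
}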
-/**********************************************************************
+/*********************************************************************
*
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
+ * This routine maps the mbufs to tx descriptors, allowing the
+ * TX engine to transmit the packets.
+ * - return 0 on success, positive on failure
*
**********************************************************************/
-bool
-ixl_txeof(struct ixl_queue *que)
+#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+static int
+ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
+ struct ixl_vsi *vsi = arg;
+ if_softc_ctx_t scctx = vsi->shared;
+ struct ixl_tx_queue *que = &vsi->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
- u32 first, last, head, done, processed;
- struct ixl_tx_buf *buf;
- struct i40e_tx_desc *tx_desc, *eop_desc;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ struct i40e_tx_desc *txd = NULL;
+ int i, j, mask;
+ u32 cmd, off;
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
- mtx_assert(&txr->mtx, MA_OWNED);
+ cmd = off = 0;
+ i = pi->ipi_pidx;
-#ifdef DEV_NETMAP
- // XXX todo: implement moderation
- if (netmap_tx_irq(que->vsi->ifp, que->me))
- return FALSE;
-#endif /* DEF_NETMAP */
+ if (pi->ipi_flags & IPI_TX_INTR)
+		cmd |= I40E_TX_DESC_CMD_RS;	/* cmd is shifted into QW1 once, below */
- /* These are not the descriptors you seek, move along :) */
- if (txr->avail == que->num_desc) {
- atomic_store_rel_32(&txr->watchdog_timer, 0);
- return FALSE;
+ /* Set up the TSO/CSUM offload */
+ if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
+ /* Set up the TSO context descriptor if required */
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ if (ixl_tso_detect_sparse(segs, nsegs, pi->ipi_tso_segsz))
+ return (EFBIG);
+
+ i = ixl_tso_setup(txr, pi);
+ }
+ ixl_tx_setup_offload(que, pi, &cmd, &off);
}
- processed = 0;
- first = txr->next_to_clean;
- buf = &txr->buffers[first];
- tx_desc = (struct i40e_tx_desc *)&txr->base[first];
- last = buf->eop_index;
- if (last == -1)
- return FALSE;
- eop_desc = (struct i40e_tx_desc *)&txr->base[last];
+ if (pi->ipi_mflags & M_VLANTAG)
+ cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- /* Get the Head WB value */
- head = ixl_get_tx_head(que);
+ cmd |= I40E_TX_DESC_CMD_ICRC;
+ mask = scctx->isc_ntxd[0] - 1;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
- /*
- ** Get the index of the first descriptor
- ** BEYOND the EOP and call that 'done'.
- ** I do this so the comparison in the
- ** inner while loop below can be simple
- */
- if (++last == que->num_desc) last = 0;
- done = last;
-
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_POSTREAD);
- /*
- ** The HEAD index of the ring is written in a
- ** defined location, this rather than a done bit
- ** is what is used to keep track of what must be
- ** 'cleaned'.
- */
- while (first != head) {
- /* We clean the range of the packet */
- while (first != done) {
- ++txr->avail;
- ++processed;
-
- if (buf->m_head) {
- txr->bytes += /* for ITR adjustment */
- buf->m_head->m_pkthdr.len;
- txr->tx_bytes += /* for TX stats */
- buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(buf->tag,
- buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- buf->map = NULL;
- }
- buf->eop_index = -1;
+ txd = &txr->tx_base[i];
+ seglen = segs[j].ds_len;
- if (++first == que->num_desc)
- first = 0;
+ txd->buffer_addr = htole64(segs[j].ds_addr);
+ txd->cmd_type_offset_bsz =
+ htole64(I40E_TX_DESC_DTYPE_DATA
+ | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
+ | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
+ | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+ | ((u64)htole16(pi->ipi_vtag) << I40E_TXD_QW1_L2TAG1_SHIFT));
- buf = &txr->buffers[first];
- tx_desc = &txr->base[first];
- }
- ++txr->packets;
- /* See if there is more work now */
- last = buf->eop_index;
- if (last != -1) {
- eop_desc = &txr->base[last];
- /* Get next done point */
- if (++last == que->num_desc) last = 0;
- done = last;
- } else
- break;
+ i = (i+1) & mask;
}
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /* Set the last descriptor for report */
+ txd->cmd_type_offset_bsz |=
+ htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
+ pi->ipi_new_pidx = i;
- txr->next_to_clean = first;
+ ++txr->total_packets;
+ return (0);
+}
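
Each data descriptor above carries its command bits, offload offsets, buffer size, and VLAN tag in a single quadword; only the final segment additionally gets EOP|RS via IXL_TXD_CMD. A sketch of that packing, assuming the QW1 shift values from i40e_type.h (CMD 4, OFFSET 16, BUF_SZ 34, L2TAG1 48) and that DTYPE_DATA is zero:

#include <stdint.h>

#define QW1_CMD_SHIFT		4
#define QW1_OFFSET_SHIFT	16
#define QW1_TX_BUF_SZ_SHIFT	34
#define QW1_L2TAG1_SHIFT	48

/* Pack one TX data descriptor's cmd_type_offset_bsz (DTYPE_DATA == 0). */
static uint64_t
pack_tx_data_qw1(uint64_t cmd, uint64_t off, uint64_t seglen, uint64_t vtag)
{
	return ((cmd << QW1_CMD_SHIFT) | (off << QW1_OFFSET_SHIFT) |
	    (seglen << QW1_TX_BUF_SZ_SHIFT) | (vtag << QW1_L2TAG1_SHIFT));
}
/* e.g. pack_tx_data_qw1(cmd, off, segs[j].ds_len, pi->ipi_vtag) per segment */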
+static void
+ixl_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
+{
+ struct ixl_vsi *vsi = arg;
+ struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
+
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
/*
- * If there are no pending descriptors, clear the timeout.
+	 * Advance the Transmit Descriptor Tail (TDT); this tells the
+ * hardware that this frame is available to transmit.
*/
- if (txr->avail == que->num_desc) {
- atomic_store_rel_32(&txr->watchdog_timer, 0);
- return FALSE;
- }
-
- return TRUE;
+ wr32(vsi->hw, txr->tail, pidx);
}
/*********************************************************************
*
- * Refresh mbuf buffers for RX descriptor rings
- * - now keeps its own state so discards due to resource
- * exhaustion are unnecessary, if an mbuf cannot be obtained
- * it just returns, keeping its placeholder, thus it can simply
- * be recalled to try again.
+ * (Re)Initialize a queue transmit ring.
+ * - called by init, it clears the descriptor ring,
+ * and frees any stale mbufs
*
**********************************************************************/
-static void
-ixl_refresh_mbufs(struct ixl_queue *que, int limit)
+void
+ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que)
{
- struct ixl_vsi *vsi = que->vsi;
- struct rx_ring *rxr = &que->rxr;
- bus_dma_segment_t hseg[1];
- bus_dma_segment_t pseg[1];
- struct ixl_rx_buf *buf;
- struct mbuf *mh, *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_refresh;
- /* Control the loop with one beyond */
- if (++j == que->num_desc)
- j = 0;
-
- while (j != limit) {
- buf = &rxr->buffers[i];
- if (rxr->hdr_split == FALSE)
- goto no_split;
-
- if (buf->m_head == NULL) {
- mh = m_gethdr(M_NOWAIT, MT_DATA);
- if (mh == NULL)
- goto update;
- } else
- mh = buf->m_head;
-
- mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: hdr dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- buf->m_head = NULL;
- goto update;
- }
- buf->m_head = mh;
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
-
-no_split:
- if (buf->m_pack == NULL) {
- mp = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, rxr->mbuf_sz);
- if (mp == NULL)
- goto update;
- } else
- mp = buf->m_pack;
-
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: payload dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- buf->m_pack = NULL;
- goto update;
- }
- buf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
- /* Used only when doing header split */
- rxr->base[i].read.hdr_addr = 0;
-
- refreshed = TRUE;
- /* Next is precalculated */
- i = j;
- rxr->next_refresh = i;
- if (++j == que->num_desc)
- j = 0;
- }
-update:
- if (refreshed) /* Update hardware tail index */
- wr32(vsi->hw, rxr->tail, rxr->next_refresh);
- return;
-}
+ struct tx_ring *txr = &que->txr;
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per descriptor, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've defined.
- *
- **********************************************************************/
-int
-ixl_allocate_rx_data(struct ixl_queue *que)
-{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_vsi *vsi = que->vsi;
- device_t dev = vsi->dev;
- struct ixl_rx_buf *buf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
- if (!(rxr->buffers =
- (struct ixl_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- return (error);
- }
+ /* Clear the old ring contents */
+ bzero((void *)txr->tx_base,
+ (sizeof(struct i40e_tx_desc)) * vsi->shared->isc_ntxd[0]);
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MSIZE, /* maxsize */
- 1, /* nsegments */
- MSIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->htag))) {
- device_printf(dev, "Unable to create RX DMA htag\n");
- return (error);
- }
+ wr32(vsi->hw, I40E_QTX_TAIL(que->txr.me), 0);
+ wr32(vsi->hw, I40E_QTX_HEAD(que->txr.me), 0);
+}
- if ((error = bus_dma_tag_create(NULL, /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 1, /* nsegments */
- MJUM16BYTES, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA ptag\n");
- return (error);
- }
+/*
+** ixl_get_tx_head - Retrieve the value from the
+** location the HW records its HEAD index
+*/
+static inline u32
+ixl_get_tx_head(struct ixl_tx_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ void *head = &txr->tx_base[que->vsi->shared->isc_ntxd[0]];
- for (i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- error = bus_dmamap_create(rxr->htag,
- BUS_DMA_NOWAIT, &buf->hmap);
- if (error) {
- device_printf(dev, "Unable to create RX head map\n");
- break;
- }
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &buf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX pkt map\n");
- break;
- }
- }
+ // device_printf(iflib_get_dev(que->vsi->ctx), "%s: begin\n", __func__);
- return (error);
+ return LE32_TO_CPU(*(volatile __le32 *)head);
}
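
With head writeback enabled, the hardware publishes its consumer index in the 32-bit slot directly past the last descriptor, which is why the read above indexes tx_base[isc_ntxd[0]]. A userland model of that layout (the 16-byte descriptor mirrors struct i40e_tx_desc; the ring allocation must include the extra slot):

#include <stdint.h>

struct model_tx_desc { uint64_t qw0, qw1; };	/* 16 bytes, like i40e */

/* Read the head-writeback word stored one slot past the ring's end. */
static uint32_t
read_tx_head(struct model_tx_desc *base, uint32_t ntxd)
{
	return (*(volatile uint32_t *)&base[ntxd]);
}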
-
-/*********************************************************************
+/**********************************************************************
*
- * (Re)Initialize the queue receive ring and its buffers.
+ * Examine each tx_buffer in the used queue. If the hardware is done
+ * processing the packet then free associated resources. The
+ * tx_buffer is put back on the free queue.
*
**********************************************************************/
-int
-ixl_init_rx_ring(struct ixl_queue *que)
+static int
+ixl_isc_txd_credits_update(void *arg, uint16_t qid, uint32_t cidx, bool clear)
{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_vsi *vsi = que->vsi;
-#if defined(INET6) || defined(INET)
- struct ifnet *ifp = vsi->ifp;
- struct lro_ctrl *lro = &rxr->lro;
-#endif
- struct ixl_rx_buf *buf;
- bus_dma_segment_t pseg[1], hseg[1];
- int rsize, nsegs, error = 0;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(que->vsi->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
-
- IXL_RX_LOCK(rxr);
-#ifdef DEV_NETMAP
- /* same as in ixl_init_tx_ring() */
- slot = netmap_reset(na, NR_RX, que->me, 0);
-#endif /* DEV_NETMAP */
- /* Clear the ring contents */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->base, rsize);
- /* Cleanup any existing buffers */
- for (int i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, buf->hmap);
- buf->m_head->m_flags |= M_PKTHDR;
- m_freem(buf->m_head);
- }
- if (buf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, buf->pmap);
- buf->m_pack->m_flags |= M_PKTHDR;
- m_freem(buf->m_pack);
- }
- buf->m_head = NULL;
- buf->m_pack = NULL;
- }
-
- /* header split is off */
- rxr->hdr_split = FALSE;
-
- /* Now replenish the mbufs */
- for (int j = 0; j != que->num_desc; ++j) {
- struct mbuf *mh, *mp;
-
- buf = &rxr->buffers[j];
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, fill the map and set the buffer
- * address in the NIC ring, considering the offset
- * between the netmap and NIC rings (see comment in
- * ixgbe_setup_transmit_ring() ). No need to allocate
- * an mbuf, so end the block with a continue;
- */
- if (slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
- uint64_t paddr;
- void *addr;
-
- addr = PNMB(na, slot + sj, &paddr);
- netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
- /* Update descriptor and the cached value */
- rxr->base[j].read.pkt_addr = htole64(paddr);
- rxr->base[j].read.hdr_addr = 0;
- continue;
- }
-#endif /* DEV_NETMAP */
- /*
- ** Don't allocate mbufs if not
- ** doing header split, its wasteful
- */
- if (rxr->hdr_split == FALSE)
- goto skip_head;
-
- /* First the header */
- buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
- if (buf->m_head == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- m_adj(buf->m_head, ETHER_ALIGN);
- mh = buf->m_head;
- mh->m_len = mh->m_pkthdr.len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- buf->hmap, buf->m_head, hseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) /* Nothing elegant to do here */
- goto fail;
- bus_dmamap_sync(rxr->htag,
- buf->hmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
- /* Now the payload cluster */
- buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, rxr->mbuf_sz);
- if (buf->m_pack == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = buf->m_pack;
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- buf->pmap, mp, pseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- buf->pmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
- rxr->base[j].read.hdr_addr = 0;
- }
-
+ struct ixl_vsi *vsi = arg;
+ struct ixl_tx_queue *que = &vsi->tx_queues[qid];
- /* Setup our descriptor indices */
- rxr->next_check = 0;
- rxr->next_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->split = 0;
- rxr->bytes = 0;
- rxr->discard = FALSE;
+ int head, credits;
- wr32(vsi->hw, rxr->tail, que->num_desc - 1);
- ixl_flush(vsi->hw);
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
-#if defined(INET6) || defined(INET)
- /*
- ** Now set up the LRO interface:
- */
- if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
- goto fail;
- }
- INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
- rxr->lro_enabled = TRUE;
- lro->ifp = vsi->ifp;
- }
-#endif
+ /* Get the Head WB value */
+ head = ixl_get_tx_head(que);
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ credits = head - cidx;
+ if (credits < 0)
+ credits += vsi->shared->isc_ntxd[0];
-fail:
- IXL_RX_UNLOCK(rxr);
- return (error);
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: %d credits\n", __func__, credits);
+ return (credits);
}
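
The credit count above is simply the modular distance from the last cleaned index to the hardware head. A minimal restatement of that arithmetic:

/* Descriptors completed between cidx and head on a ring of ntxd slots. */
static int
tx_credits(int head, int cidx, int ntxd)
{
	int credits = head - cidx;

	if (credits < 0)
		credits += ntxd;	/* head has wrapped past the ring end */
	return (credits);
}
/* e.g. tx_credits(5, 1020, 1024) == 9 */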
-
/*********************************************************************
*
- * Free station receive ring data structures
+ * Refresh mbuf buffers for RX descriptor rings
+ * - now keeps its own state so discards due to resource
+ * exhaustion are unnecessary, if an mbuf cannot be obtained
+ * it just returns, keeping its placeholder, thus it can simply
+ * be recalled to try again.
*
**********************************************************************/
-void
-ixl_free_que_rx(struct ixl_queue *que)
+static void
+ixl_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
+ uint16_t count, uint16_t buf_len __unused)
{
- struct rx_ring *rxr = &que->rxr;
- struct ixl_rx_buf *buf;
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
-
- /* Cleanup any existing buffers */
- if (rxr->buffers != NULL) {
- for (int i = 0; i < que->num_desc; i++) {
- buf = &rxr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, buf->hmap);
- buf->m_head->m_flags |= M_PKTHDR;
- m_freem(buf->m_head);
- }
- if (buf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, buf->pmap);
- buf->m_pack->m_flags |= M_PKTHDR;
- m_freem(buf->m_pack);
- }
- buf->m_head = NULL;
- buf->m_pack = NULL;
- if (buf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, buf->hmap);
- buf->hmap = NULL;
- }
- if (buf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, buf->pmap);
- buf->pmap = NULL;
- }
- }
- if (rxr->buffers != NULL) {
- free(rxr->buffers, M_DEVBUF);
- rxr->buffers = NULL;
- }
- }
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
+ int i;
+ uint32_t next_pidx;
- if (rxr->htag != NULL) {
- bus_dma_tag_destroy(rxr->htag);
- rxr->htag = NULL;
- }
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
- INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
- return;
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == vsi->shared->isc_nrxd[0])
+ next_pidx = 0;
+ }
}
-static inline void
-ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
+static void
+ixl_isc_rxd_flush(void * arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
{
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
-#if defined(INET6) || defined(INET)
- /*
- * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
- * should be computed by hardware. Also it should not have VLAN tag in
- * ethernet header.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
- * Send to the stack if:
- ** - LRO not enabled, or
- ** - no LRO resources, or
- ** - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
-#endif
- IXL_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXL_RX_LOCK(rxr);
-}
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
+ // device_printf(iflib_get_dev(vsi->ctx), "rxqid: %d, tail: 0x%08x, pidx: %u\n", rxqid, rxr->tail, pidx);
+ wr32(vsi->hw, rxr->tail, pidx);
+}
-static inline void
-ixl_rx_discard(struct rx_ring *rxr, int i)
+// TODO: Check whether the changes borrowed from the ixgbe approach are correct
+static int
+ixl_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
{
- struct ixl_rx_buf *rbuf;
-
- rbuf = &rxr->buffers[i];
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- }
-
- /*
- ** With advanced descriptors the writeback
- ** clobbers the buffer addrs, so its easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
- if (rbuf->m_head) {
- m_free(rbuf->m_head);
- rbuf->m_head = NULL;
- }
-
- if (rbuf->m_pack) {
- m_free(rbuf->m_pack);
- rbuf->m_pack = NULL;
+ struct ixl_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
+ union i40e_rx_desc *cur;
+ u64 qword;
+ uint32_t status;
+ int cnt, i, nrxd;
+
+ // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
+
+ nrxd = vsi->shared->isc_nrxd[0];
+ for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
+ cur = &rxr->rx_base[i];
+ qword = le64toh(cur->wb.qword1.status_error_len);
+ status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
+ break;
+ if (++i == nrxd)
+ i = 0;
+ if (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))
+ cnt++;
}
- return;
+ return (cnt);
}
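
The scan above walks descriptors whose DD (descriptor done) bit is set and counts one per EOP (end of packet) bit, so a multi-descriptor packet contributes a single unit toward the budget. A simplified model over plain status words (the bit positions are illustrative, not the hardware's):

#include <stdint.h>

#define DD_BIT	(1u << 0)	/* illustrative positions only */
#define EOP_BIT	(1u << 1)

static int
model_rxd_available(const uint32_t *status, int nrxd, int idx, int budget)
{
	int cnt, i;

	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
		uint32_t st = status[i];

		if ((st & DD_BIT) == 0)		/* not yet written back */
			break;
		if (++i == nrxd)
			i = 0;
		if (st & EOP_BIT)		/* count whole packets */
			cnt++;
	}
	return (cnt);
}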
-#ifdef RSS
/*
** i40e_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
@@ -1457,122 +456,95 @@
ex = decoded.outer_frag;
if (!decoded.known)
- return M_HASHTYPE_OPAQUE_HASH;
+ return M_HASHTYPE_OPAQUE;
if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
- return M_HASHTYPE_OPAQUE_HASH;
+ return M_HASHTYPE_OPAQUE;
/* Note: anything that gets to this point is IP */
if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- if (ex)
- return M_HASHTYPE_RSS_TCP_IPV6_EX;
- else
- return M_HASHTYPE_RSS_TCP_IPV6;
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- if (ex)
- return M_HASHTYPE_RSS_UDP_IPV6_EX;
- else
- return M_HASHTYPE_RSS_UDP_IPV6;
- default:
- if (ex)
- return M_HASHTYPE_RSS_IPV6_EX;
- else
- return M_HASHTYPE_RSS_IPV6;
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ if (ex)
+ return M_HASHTYPE_RSS_TCP_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ if (ex)
+ return M_HASHTYPE_RSS_UDP_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ default:
+ if (ex)
+ return M_HASHTYPE_RSS_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_IPV6;
}
}
if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- return M_HASHTYPE_RSS_TCP_IPV4;
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- if (ex)
- return M_HASHTYPE_RSS_UDP_IPV4_EX;
- else
- return M_HASHTYPE_RSS_UDP_IPV4;
- default:
- return M_HASHTYPE_RSS_IPV4;
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ if (ex)
+ return M_HASHTYPE_RSS_UDP_IPV4_EX;
+ else
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ default:
+ return M_HASHTYPE_RSS_IPV4;
}
}
/* We should never get here!! */
- return M_HASHTYPE_OPAQUE_HASH;
+ return M_HASHTYPE_OPAQUE;
}
-#endif /* RSS */
/*********************************************************************
*
- * This routine executes in interrupt context. It replenishes
- * the mbufs in the descriptor and sends data which has been
+ * This routine executes in ithread context. It sends data which has been
* dma'ed into host memory to upper layer.
*
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
+ * Returns 0 upon success, errno on failure
*********************************************************************/
-bool
-ixl_rxeof(struct ixl_queue *que, int count)
+
+static int
+ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct ixl_vsi *vsi = que->vsi;
+ struct ixl_vsi *vsi = arg;
+ struct ixl_rx_queue *que = &vsi->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
- struct ifnet *ifp = vsi->ifp;
-#if defined(INET6) || defined(INET)
- struct lro_ctrl *lro = &rxr->lro;
-#endif
- int i, nextp, processed = 0;
union i40e_rx_desc *cur;
- struct ixl_rx_buf *rbuf, *nbuf;
-
-
- IXL_RX_LOCK(rxr);
-
-#ifdef DEV_NETMAP
- if (netmap_rx_irq(ifp, que->me, &count)) {
- IXL_RX_UNLOCK(rxr);
- return (FALSE);
- }
-#endif /* DEV_NETMAP */
-
- for (i = rxr->next_check; count != 0;) {
- struct mbuf *sendmp, *mh, *mp;
- u32 status, error;
- u16 hlen, plen, vtag;
- u64 qword;
- u8 ptype;
- bool eop;
-
- /* Sync the ring. */
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->base[i];
+ u32 status, error;
+ u16 hlen, plen, vtag;
+ u64 qword;
+ u8 ptype;
+ bool eop;
+ int i, cidx;
+
+ /* XXX: No packet split support, so hlen is unused */
+
+ cidx = ri->iri_cidx;
+ i = 0;
+ do {
+ cur = &rxr->rx_base[cidx];
qword = le64toh(cur->wb.qword1.status_error_len);
status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ >> I40E_RXD_QW1_STATUS_SHIFT;
error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ >> I40E_RXD_QW1_ERROR_SHIFT;
plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
- >> I40E_RXD_QW1_PTYPE_SHIFT;
+ >> I40E_RXD_QW1_PTYPE_SHIFT;
- if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
- ++rxr->not_done;
- break;
- }
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
+ /* we should never be called without a valid descriptor */
+ MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);
+
+ ri->iri_len += plen;
+ rxr->bytes += plen;
- count--;
- sendmp = NULL;
- nbuf = NULL;
cur->wb.qword1.status_error_len = 0;
- rbuf = &rxr->buffers[i];
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
@@ -1584,180 +556,36 @@
** note that only EOP descriptor has valid
** error results.
*/
- if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
rxr->desc_errs++;
- ixl_rx_discard(rxr, i);
- goto next_desc;
- }
-
- /* Prefetch the next buffer */
- if (!eop) {
- nextp = i + 1;
- if (nextp == que->num_desc)
- nextp = 0;
- nbuf = &rxr->buffers[nextp];
- prefetch(nbuf);
- }
-
- /*
- ** The header mbuf is ONLY used when header
- ** split is enabled, otherwise we get normal
- ** behavior, ie, both header and payload
- ** are DMA'd into the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- if (rxr->hdr_split && (rbuf->fmp == NULL)) {
- if (hlen > IXL_RX_HDR)
- hlen = IXL_RX_HDR;
- mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_head = NULL;
- /*
- ** Check the payload length, this
- ** could be zero if its a small
- ** packet.
- */
- if (plen > 0) {
- mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
- mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_pack = NULL;
- rxr->split++;
- }
- /*
- ** Now create the forward
- ** chain so when complete
- ** we wont have to.
- */
- if (eop == 0) {
- /* stash the chain head */
- nbuf->fmp = mh;
- /* Make forward chain */
- if (plen)
- mp->m_next = nbuf->m_pack;
- else
- mh->m_next = nbuf->m_pack;
- } else {
- /* Singlet, prepare to send */
- sendmp = mh;
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- } else {
- /*
- ** Either no header split, or a
- ** secondary piece of a fragmented
- ** split packet.
- */
- mp->m_len = plen;
- /*
- ** See if there is a stored head
- ** that determines what we are
- */
- sendmp = rbuf->fmp;
- rbuf->m_pack = rbuf->fmp = NULL;
-
- if (sendmp != NULL) /* secondary frag */
- sendmp->m_pkthdr.len += mp->m_len;
- else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- }
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->m_pack;
- }
- }
- ++processed;
- /* Sending this frame? */
- if (eop) {
- sendmp->m_pkthdr.rcvif = ifp;
- /* gather stats */
- rxr->rx_packets++;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- /* capture data for dynamic ITR adjustment */
- rxr->packets++;
- rxr->bytes += sendmp->m_pkthdr.len;
- /* Set VLAN tag (field only valid in eop desc) */
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixl_rx_checksum(sendmp, status, error, ptype);
-#ifdef RSS
- sendmp->m_pkthdr.flowid =
- le32toh(cur->wb.qword0.hi_dword.rss);
- M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
-#else
- sendmp->m_pkthdr.flowid = que->msix;
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
-#endif
- }
-next_desc:
- bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == que->num_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL) {
- rxr->next_check = i;
- ixl_rx_input(rxr, ifp, sendmp, ptype);
- i = rxr->next_check;
- }
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixl_refresh_mbufs(que, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixl_rx_unrefreshed(que))
- ixl_refresh_mbufs(que, i);
-
- rxr->next_check = i;
-
-#if defined(INET6) || defined(INET)
- /*
- * Flush any outstanding LRO work
- */
-#if __FreeBSD_version >= 1100105
- tcp_lro_flush_all(lro);
-#else
- struct lro_entry *queued;
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
- }
-#endif
-#endif /* defined(INET6) || defined(INET) */
-
- IXL_RX_UNLOCK(rxr);
- return (FALSE);
+ return (EBADMSG);
+ }
+ ri->iri_frags[i].irf_flid = 0;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = plen;
+		if (++cidx == vsi->shared->isc_nrxd[0])	/* RX ring size, not TX */
+ cidx = 0;
+ i++;
+ /* even a 16K packet shouldn't consume more than 8 clusters */
+ MPASS(i < 9);
+ } while (!eop);
+
+ /* capture data for dynamic ITR adjustment */
+ // TODO: Figure out why these are repeated...
+ rxr->packets++;
+ rxr->rx_packets++;
+
+ if ((vsi->ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixl_rx_checksum(ri, status, error, ptype);
+ ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
+ ri->iri_rsstype = ixl_ptype_to_hash(ptype);
+ ri->iri_vtag = vtag;
+ ri->iri_nfrags = i;
+ if (vtag)
+ ri->iri_flags |= M_VLANTAG;
+ return (0);
}
-
/*********************************************************************
*
* Verify that the hardware indicated that the checksum is valid.
@@ -1766,16 +594,15 @@
*
*********************************************************************/
static void
-ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
+ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
{
struct i40e_rx_ptype_decoded decoded;
decoded = decode_rx_desc_ptype(ptype);
-
/* Errors? */
if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
(1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
- mp->m_pkthdr.csum_flags = 0;
+ ri->iri_csum_flags = 0;
return;
}
@@ -1784,60 +611,17 @@
decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
if (status &
(1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
- mp->m_pkthdr.csum_flags = 0;
+ ri->iri_csum_flags = 0;
return;
}
-
/* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ ri->iri_csum_flags = CSUM_IP_CHECKED;
+ ri->iri_csum_flags |= CSUM_IP_VALID;
if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
- mp->m_pkthdr.csum_flags |=
+ ri->iri_csum_flags |=
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
- mp->m_pkthdr.csum_data |= htons(0xffff);
+ ri->iri_csum_data |= htons(0xffff);
}
- return;
}
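
The decision tree above: any IPE/L4E error clears the flags, IPv6 with extension headers is treated as unverified, and otherwise the IP result is reported, with the L4 result added when L3L4P is set. The same logic as a pure function (the F_* constants stand in for FreeBSD's CSUM_* mbuf flags):

#include <stdint.h>

#define F_IP_CHECKED	0x01	/* stand-in for CSUM_IP_CHECKED */
#define F_IP_VALID	0x02	/* stand-in for CSUM_IP_VALID */
#define F_DATA_VALID	0x04	/* stand-in for CSUM_DATA_VALID */
#define F_PSEUDO_HDR	0x08	/* stand-in for CSUM_PSEUDO_HDR */

static uint32_t
model_rx_csum_flags(int l3l4_error, int ipv6_exthdr, int l3l4p)
{
	if (l3l4_error)			/* IPE or L4E error bit set */
		return (0);
	if (ipv6_exthdr)		/* HW result unreliable, report none */
		return (0);
	if (l3l4p)			/* L4 checksum verified as well */
		return (F_IP_CHECKED | F_IP_VALID |
		    F_DATA_VALID | F_PSEUDO_HDR);
	return (F_IP_CHECKED | F_IP_VALID);
}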
-
-#if __FreeBSD_version >= 1100000
-uint64_t
-ixl_get_counter(if_t ifp, ift_counter cnt)
-{
- struct ixl_vsi *vsi;
-
- vsi = if_getsoftc(ifp);
-
- switch (cnt) {
- case IFCOUNTER_IPACKETS:
- return (vsi->ipackets);
- case IFCOUNTER_IERRORS:
- return (vsi->ierrors);
- case IFCOUNTER_OPACKETS:
- return (vsi->opackets);
- case IFCOUNTER_OERRORS:
- return (vsi->oerrors);
- case IFCOUNTER_COLLISIONS:
- /* Collisions are by standard impossible in 40G/10G Ethernet */
- return (0);
- case IFCOUNTER_IBYTES:
- return (vsi->ibytes);
- case IFCOUNTER_OBYTES:
- return (vsi->obytes);
- case IFCOUNTER_IMCASTS:
- return (vsi->imcasts);
- case IFCOUNTER_OMCASTS:
- return (vsi->omcasts);
- case IFCOUNTER_IQDROPS:
- return (vsi->iqdrops);
- case IFCOUNTER_OQDROPS:
- return (vsi->oqdrops);
- case IFCOUNTER_NOPROTO:
- return (vsi->noproto);
- default:
- return (if_get_counter_default(ifp, cnt));
- }
-}
-#endif
-
Index: sys/dev/ixl/ixlv.h
===================================================================
--- sys/dev/ixl/ixlv.h
+++ sys/dev/ixl/ixlv.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2001-2016, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,206 +32,12 @@
******************************************************************************/
/*$FreeBSD$*/
+#ifndef KLD_MODULE
+#include "opt_iflib.h"
+#endif
-#ifndef _IXLV_H_
-#define _IXLV_H_
-
-#include "ixlv_vc_mgr.h"
-
-#define IXLV_AQ_MAX_ERR 30
-#define IXLV_MAX_INIT_WAIT 120
-#define IXLV_MAX_FILTERS 128
-#define IXLV_MAX_QUEUES 16
-#define IXLV_AQ_TIMEOUT (1 * hz)
-#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
-
-#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
-#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
-#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
-#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
-#define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
-#define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
-#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
-#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
-#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
-#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
-#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
-#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
-#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
-#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
-#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
-
-/* printf %b arg */
-#define IXLV_FLAGS \
- "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
- "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
- "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
- "\12CONFIGURE_PROMISC\13GET_STATS"
-#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \
- "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \
- "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \
- "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \
- "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \
- "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \
- "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \
- "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \
- "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \
- "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
- "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
-
-/* Driver state */
-enum ixlv_state_t {
- IXLV_START,
- IXLV_FAILED,
- IXLV_RESET_REQUIRED,
- IXLV_RESET_PENDING,
- IXLV_VERSION_CHECK,
- IXLV_GET_RESOURCES,
- IXLV_INIT_READY,
- IXLV_INIT_START,
- IXLV_INIT_CONFIG,
- IXLV_INIT_MAPPING,
- IXLV_INIT_ENABLE,
- IXLV_INIT_COMPLETE,
- IXLV_RUNNING,
-};
-
-/* Structs */
-
-struct ixlv_mac_filter {
- SLIST_ENTRY(ixlv_mac_filter) next;
- u8 macaddr[ETHER_ADDR_LEN];
- u16 flags;
-};
-SLIST_HEAD(mac_list, ixlv_mac_filter);
-
-struct ixlv_vlan_filter {
- SLIST_ENTRY(ixlv_vlan_filter) next;
- u16 vlan;
- u16 flags;
-};
-SLIST_HEAD(vlan_list, ixlv_vlan_filter);
-
-/* Software controller structure */
-struct ixlv_sc {
- struct i40e_hw hw;
- struct i40e_osdep osdep;
- device_t dev;
-
- struct resource *pci_mem;
- struct resource *msix_mem;
-
- enum ixlv_state_t init_state;
- int init_in_progress;
-
- /*
- * Interrupt resources
- */
- void *tag;
- struct resource *res; /* For the AQ */
-
- struct ifmedia media;
- struct callout timer;
- int msix;
- int pf_version;
- int if_flags;
-
- bool link_up;
- u32 link_speed;
-
- struct mtx mtx;
-
- u32 qbase;
- u32 admvec;
- struct timeout_task timeout;
- struct task aq_irq;
- struct task aq_sched;
- struct taskqueue *tq;
-
- struct ixl_vsi vsi;
-
- /* Filter lists */
- struct mac_list *mac_filters;
- struct vlan_list *vlan_filters;
-
- /* Promiscuous mode */
- u32 promiscuous_flags;
-
- /* Admin queue task flags */
- u32 aq_wait_count;
-
- struct ixl_vc_mgr vc_mgr;
- struct ixl_vc_cmd add_mac_cmd;
- struct ixl_vc_cmd del_mac_cmd;
- struct ixl_vc_cmd config_queues_cmd;
- struct ixl_vc_cmd map_vectors_cmd;
- struct ixl_vc_cmd enable_queues_cmd;
- struct ixl_vc_cmd add_vlan_cmd;
- struct ixl_vc_cmd del_vlan_cmd;
- struct ixl_vc_cmd add_multi_cmd;
- struct ixl_vc_cmd del_multi_cmd;
- struct ixl_vc_cmd config_rss_key_cmd;
- struct ixl_vc_cmd get_rss_hena_caps_cmd;
- struct ixl_vc_cmd set_rss_hena_cmd;
- struct ixl_vc_cmd config_rss_lut_cmd;
-
- /* Virtual comm channel */
- struct i40e_virtchnl_vf_resource *vf_res;
- struct i40e_virtchnl_vsi_resource *vsi_res;
-
- /* Misc stats maintained by the driver */
- u64 watchdog_events;
- u64 admin_irq;
-
- u8 aq_buffer[IXL_AQ_BUF_SZ];
-};
-
-#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
-/*
-** This checks for a zero mac addr, something that will be likely
-** unless the Admin on the Host has created one.
-*/
-static inline bool
-ixlv_check_ether_addr(u8 *addr)
-{
- bool status = TRUE;
-
- if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
- addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
- status = FALSE;
- return (status);
-}
-
-/*
-** VF Common function prototypes
-*/
-int ixlv_send_api_ver(struct ixlv_sc *);
-int ixlv_verify_api_ver(struct ixlv_sc *);
-int ixlv_send_vf_config_msg(struct ixlv_sc *);
-int ixlv_get_vf_config(struct ixlv_sc *);
-void ixlv_init(void *);
-int ixlv_reinit_locked(struct ixlv_sc *);
-void ixlv_configure_queues(struct ixlv_sc *);
-void ixlv_enable_queues(struct ixlv_sc *);
-void ixlv_disable_queues(struct ixlv_sc *);
-void ixlv_map_queues(struct ixlv_sc *);
-void ixlv_enable_intr(struct ixl_vsi *);
-void ixlv_disable_intr(struct ixl_vsi *);
-void ixlv_add_ether_filters(struct ixlv_sc *);
-void ixlv_del_ether_filters(struct ixlv_sc *);
-void ixlv_request_stats(struct ixlv_sc *);
-void ixlv_request_reset(struct ixlv_sc *);
-void ixlv_vc_completion(struct ixlv_sc *,
- enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
-void ixlv_add_ether_filter(struct ixlv_sc *);
-void ixlv_add_vlans(struct ixlv_sc *);
-void ixlv_del_vlans(struct ixlv_sc *);
-void ixlv_update_stats_counters(struct ixlv_sc *,
- struct i40e_eth_stats *);
-void ixlv_update_link_status(struct ixlv_sc *);
-void ixlv_get_default_rss_key(u32 *, bool);
-void ixlv_config_rss_key(struct ixlv_sc *);
-void ixlv_set_rss_hena(struct ixlv_sc *);
-void ixlv_config_rss_lut(struct ixlv_sc *);
-
-#endif /* _IXLV_H_ */
+#ifdef IFLIB
+#include "iflib_ixlv.h"
+#else
+#include "legacy_ixlv.h"
+#endif
Index: sys/dev/ixl/ixlvc.c
===================================================================
--- sys/dev/ixl/ixlvc.c
+++ sys/dev/ixl/ixlvc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2001-2016, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,1224 +32,12 @@
******************************************************************************/
/*$FreeBSD$*/
-/*
-** Virtual Channel support
-** These are support functions to communication
-** between the VF and PF drivers.
-*/
-
-#include "ixl.h"
-#include "ixlv.h"
-#include "i40e_prototype.h"
-
-
-/* busy wait delay in msec */
-#define IXLV_BUSY_WAIT_DELAY 10
-#define IXLV_BUSY_WAIT_COUNT 50
-
-static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
- enum i40e_status_code);
-static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
-static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
-static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
-
-#ifdef IXL_DEBUG
-/*
-** Validate VF messages
-*/
-static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
- u8 *msg, u16 msglen)
-{
- bool err_msg_format = false;
- int valid_len;
-
- /* Validate message length. */
- switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- valid_len = sizeof(struct i40e_virtchnl_version_info);
- break;
- case I40E_VIRTCHNL_OP_RESET_VF:
- valid_len = 0;
- break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- /* Valid length in api v1.0 is 0, v1.1 is 4 */
- valid_len = 4;
- break;
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_txq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_rxq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vsi_queue_config_info *vqc =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- i40e_virtchnl_queue_pair_info));
- if (vqc->num_queue_pairs == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_irq_map_info *vimi =
- (struct i40e_virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
- if (vimi->num_vectors == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_ether_addr_list *veal =
- (struct i40e_virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct i40e_virtchnl_ether_addr);
- if (veal->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
- if (vfl->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- valid_len = sizeof(struct i40e_virtchnl_promisc_info);
- break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- /* These are always errors coming from the VF. */
- case I40E_VIRTCHNL_OP_EVENT:
- case I40E_VIRTCHNL_OP_UNKNOWN:
- default:
- return EPERM;
- break;
- }
- /* few more checks */
- if ((valid_len != msglen) || (err_msg_format))
- return EINVAL;
- else
- return 0;
-}
+#ifndef KLD_MODULE
+#include "opt_iflib.h"
#endif
-/*
-** ixlv_send_pf_msg
-**
-** Send message to PF and print status if failure.
-*/
-static int
-ixlv_send_pf_msg(struct ixlv_sc *sc,
- enum i40e_virtchnl_ops op, u8 *msg, u16 len)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- i40e_status err;
-
-#ifdef IXL_DEBUG
- /*
- ** Pre-validating messages to the PF
- */
- int val_err;
- val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
- if (val_err)
- device_printf(dev, "Error validating msg to PF for op %d,"
- " msglen %d: error %d\n", op, len, val_err);
-#endif
-
- err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
- if (err)
- device_printf(dev, "Unable to send opcode %s to PF, "
- "status %s, aq error %s\n",
- ixl_vc_opcode_str(op),
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return err;
-}
-
-
-/*
-** ixlv_send_api_ver
-**
-** Send API version admin queue message to the PF. The reply is not checked
-** in this function. Returns 0 if the message was successfully
-** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
-*/
-int
-ixlv_send_api_ver(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_version_info vvi;
-
- vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
- vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
-
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
- (u8 *)&vvi, sizeof(vvi));
-}
-
-/*
-** ixlv_verify_api_ver
-**
-** Compare API versions with the PF. Must be called after admin queue is
-** initialized. Returns 0 if API versions match, EIO if
-** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
-*/
-int
-ixlv_verify_api_ver(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_version_info *pf_vvi;
- struct i40e_hw *hw = &sc->hw;
- struct i40e_arq_event_info event;
- device_t dev = sc->dev;
- i40e_status err;
- int retries = 0;
-
- event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
- if (!event.msg_buf) {
- err = ENOMEM;
- goto out;
- }
-
- for (;;) {
- if (++retries > IXLV_AQ_MAX_ERR)
- goto out_alloc;
-
- /* Initial delay here is necessary */
- i40e_msec_pause(100);
- err = i40e_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- continue;
- else if (err) {
- err = EIO;
- goto out_alloc;
- }
-
- if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_VERSION) {
- DDPRINTF(dev, "Received unexpected op response: %d\n",
- le32toh(event.desc.cookie_high));
- /* Don't stop looking for expected response */
- continue;
- }
-
- err = (i40e_status)le32toh(event.desc.cookie_low);
- if (err) {
- err = EIO;
- goto out_alloc;
- } else
- break;
- }
-
- pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) {
- device_printf(dev, "Critical PF/VF API version mismatch!\n");
- err = EIO;
- } else
- sc->pf_version = pf_vvi->minor;
-
- /* Log PF/VF api versions */
- device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
- pf_vvi->major, pf_vvi->minor,
- I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR);
-
-out_alloc:
- free(event.msg_buf, M_DEVBUF);
-out:
- return (err);
-}
-
-/*
-** ixlv_send_vf_config_msg
-**
-** Send VF configuration request admin queue message to the PF. The reply
-** is not checked in this function. Returns 0 if the message was
-** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
-*/
-int
-ixlv_send_vf_config_msg(struct ixlv_sc *sc)
-{
- u32 caps;
-
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-
- if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
- else
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- (u8 *)&caps, sizeof(caps));
-}
-
-/*
-** ixlv_get_vf_config
-**
-** Get VF configuration from PF and populate hw structure. Must be called after
-** admin queue is initialized. Busy waits until response is received from PF,
-** with maximum timeout. Response from PF is returned in the buffer for further
-** processing by the caller.
-*/
-int
-ixlv_get_vf_config(struct ixlv_sc *sc)
-{
- struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- struct i40e_arq_event_info event;
- u16 len;
- i40e_status err = 0;
- u32 retries = 0;
-
- /* Note this assumes a single VSI */
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- sizeof(struct i40e_virtchnl_vsi_resource);
- event.buf_len = len;
- event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
- if (!event.msg_buf) {
- err = ENOMEM;
- goto out;
- }
-
- for (;;) {
- err = i40e_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- if (++retries <= IXLV_AQ_MAX_ERR)
- i40e_msec_pause(10);
- } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
- DDPRINTF(dev, "Received a response from PF,"
- " opcode %d, error %d",
- le32toh(event.desc.cookie_high),
- le32toh(event.desc.cookie_low));
- retries++;
- continue;
- } else {
- err = (i40e_status)le32toh(event.desc.cookie_low);
- if (err) {
- device_printf(dev, "%s: Error returned from PF,"
- " opcode %d, error %d\n", __func__,
- le32toh(event.desc.cookie_high),
- le32toh(event.desc.cookie_low));
- err = EIO;
- goto out_alloc;
- }
- /* We retrieved the config message, with no errors */
- break;
- }
-
- if (retries > IXLV_AQ_MAX_ERR) {
- INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
- retries);
- err = ETIMEDOUT;
- goto out_alloc;
- }
- }
-
- memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
- i40e_vf_parse_hw_config(hw, sc->vf_res);
-
-out_alloc:
- free(event.msg_buf, M_DEVBUF);
-out:
- return err;
-}
-
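The receive side above is a bounded poll: "no work" answers burn a retry and a short pause, responses with the wrong opcode are skipped, and the loop gives up with ETIMEDOUT once the retry budget is spent. A compact stand-in for that control flow, where a fake queue replaces i40e_clean_arq_element:

#include <stdio.h>
#include <errno.h>

#define MAX_RETRIES 30	/* assumed stand-in for IXLV_AQ_MAX_ERR */

static int poll_count;

/* Pretend AQ: returns EAGAIN until the response "arrives". */
static int
fake_clean_arq(int *opcode)
{
	if (++poll_count < 5)
		return (EAGAIN);
	*opcode = 3;	/* hypothetical GET_VF_RESOURCES opcode */
	return (0);
}

int
main(void)
{
	int retries = 0, opcode, err;

	for (;;) {
		err = fake_clean_arq(&opcode);
		if (err == EAGAIN) {
			if (++retries > MAX_RETRIES)
				return (ETIMEDOUT);
			/* a short i40e_msec_pause() would go here */
			continue;
		}
		if (err != 0)
			return (EIO);
		break;	/* got the response we were waiting for */
	}
	printf("response opcode %d after %d polls\n", opcode, poll_count);
	return (0);
}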
-/*
-** ixlv_configure_queues
-**
-** Request that the PF set up our queues.
-*/
-void
-ixlv_configure_queues(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int len, pairs;
-
- struct i40e_virtchnl_vsi_queue_config_info *vqci;
- struct i40e_virtchnl_queue_pair_info *vqpi;
-
- pairs = vsi->num_queues;
- len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
- (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
- vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!vqci) {
- device_printf(dev, "%s: unable to allocate memory\n", __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
- vqci->vsi_id = sc->vsi_res->vsi_id;
- vqci->num_queue_pairs = pairs;
- vqpi = vqci->qpair;
- /* Size check is not needed here - HW max is 16 queue pairs, and we
- * can fit info for 31 of them into the AQ buffer before it overflows.
- */
- for (int i = 0; i < pairs; i++, que++, vqpi++) {
- txr = &que->txr;
- rxr = &que->rxr;
- vqpi->txq.vsi_id = vqci->vsi_id;
- vqpi->txq.queue_id = i;
- vqpi->txq.ring_len = que->num_desc;
- vqpi->txq.dma_ring_addr = txr->dma.pa;
- /* Enable Head writeback */
- vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
-
- vqpi->rxq.vsi_id = vqci->vsi_id;
- vqpi->rxq.queue_id = i;
- vqpi->rxq.ring_len = que->num_desc;
- vqpi->rxq.dma_ring_addr = rxr->dma.pa;
- vqpi->rxq.max_pkt_size = vsi->max_frame_size;
- vqpi->rxq.databuffer_size = rxr->mbuf_sz;
- vqpi->rxq.splithdr_enabled = 0;
- }
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- (u8 *)vqci, len);
- free(vqci, M_DEVBUF);
-}
-
-/*
-** ixlv_enable_queues
-**
-** Request that the PF enable all of our queues.
-*/
-void
-ixlv_enable_queues(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_queue_select vqs;
-
- vqs.vsi_id = sc->vsi_res->vsi_id;
- vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
- vqs.rx_queues = vqs.tx_queues;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- (u8 *)&vqs, sizeof(vqs));
-}
-
-/*
-** ixlv_disable_queues
-**
-** Request that the PF disable all of our queues.
-*/
-void
-ixlv_disable_queues(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_queue_select vqs;
-
- vqs.vsi_id = sc->vsi_res->vsi_id;
- vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
- vqs.rx_queues = vqs.tx_queues;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- (u8 *)&vqs, sizeof(vqs));
-}
-
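Both queue-select requests build a dense bitmask covering every queue pair: for n pairs, (1 << n) - 1 sets bits 0 through n-1, so tx_queues and rx_queues select all queues at once. A quick illustration with a hypothetical pair count:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t num_queue_pairs = 4;			/* assumed VF config */
	uint32_t mask = (1U << num_queue_pairs) - 1;	/* 0xF: queues 0-3 */

	printf("tx/rx queue mask for %u pairs: 0x%x\n", num_queue_pairs, mask);
	return (0);
}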
-/*
-** ixlv_map_queues
-**
-** Request that the PF map queues to interrupt vectors. Misc causes, including
-** admin queue, are always mapped to vector 0.
-*/
-void
-ixlv_map_queues(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_irq_map_info *vm;
- int i, q, len;
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
-
- /* How many queue vectors, adminq uses one */
- q = sc->msix - 1;
-
- len = sizeof(struct i40e_virtchnl_irq_map_info) +
- (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
- vm = malloc(len, M_DEVBUF, M_NOWAIT);
- if (!vm) {
- printf("%s: unable to allocate memory\n", __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
-
- vm->num_vectors = sc->msix;
- /* Queue vectors first */
- for (i = 0; i < q; i++, que++) {
- vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
- vm->vecmap[i].vector_id = i + 1; /* first is adminq */
- vm->vecmap[i].txq_map = (1 << que->me);
- vm->vecmap[i].rxq_map = (1 << que->me);
- vm->vecmap[i].rxitr_idx = 0;
- vm->vecmap[i].txitr_idx = 1;
- }
-
- /* Misc vector last - this is only for AdminQ messages */
- vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
- vm->vecmap[i].vector_id = 0;
- vm->vecmap[i].txq_map = 0;
- vm->vecmap[i].rxq_map = 0;
- vm->vecmap[i].rxitr_idx = 0;
- vm->vecmap[i].txitr_idx = 0;
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- (u8 *)vm, len);
- free(vm, M_DEVBUF);
-}
-
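The mapping above dedicates MSI-X vector 0 to AdminQ traffic and gives each queue pair its own vector starting at 1, with single-bit txq/rxq maps. A sketch of the resulting layout for an assumed five-vector allocation:

#include <stdio.h>

int
main(void)
{
	int msix = 5;		/* assumed: 1 adminq + 4 queue vectors */
	int nqueues = msix - 1;

	printf("vector 0 -> adminq only\n");
	for (int i = 0; i < nqueues; i++)
		printf("vector %d -> txq/rxq %d (map 0x%x)\n",
		    i + 1, i, 1 << i);
	return (0);
}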
-/*
-** Scan the Filter List looking for vlans that need
-** to be added, then create the data to hand to the AQ
-** for handling.
-*/
-void
-ixlv_add_vlans(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_vlan_filter_list *v;
- struct ixlv_vlan_filter *f, *ftmp;
- device_t dev = sc->dev;
- int len, i = 0, cnt = 0;
-
- /* Get count of VLAN filters to add */
- SLIST_FOREACH(f, sc->vlan_filters, next) {
- if (f->flags & IXL_FILTER_ADD)
- cnt++;
- }
-
- if (!cnt) { /* no work... */
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
-
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
- (cnt * sizeof(u16));
-
- if (len > IXL_AQ_BUF_SZ) {
- device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
- __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
-
- v = malloc(len, M_DEVBUF, M_NOWAIT);
- if (!v) {
- device_printf(dev, "%s: unable to allocate memory\n",
- __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
-
- v->vsi_id = sc->vsi_res->vsi_id;
- v->num_elements = cnt;
-
- /* Scan the filter array */
- SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
- if (f->flags & IXL_FILTER_ADD) {
- bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
- f->flags = IXL_FILTER_USED;
- i++;
- }
- if (i == cnt)
- break;
- }
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
- free(v, M_DEVBUF);
- /* add stats? */
-}
-
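The IXL_AQ_BUF_SZ guard in the add path above (and in the delete path that follows) caps one message at the AQ buffer size; each VLAN ID costs a u16 beyond the list header, so the budget works out to roughly (buffer - header) / 2 IDs per message. A back-of-envelope version, assuming the customary 4096-byte buffer and a simplified header:

#include <stdio.h>
#include <stdint.h>

struct vlan_list_hdr {		/* stand-in for i40e_virtchnl_vlan_filter_list */
	uint16_t vsi_id;
	uint16_t num_elements;
};

int
main(void)
{
	size_t aq_buf_sz = 4096;	/* assumed value of IXL_AQ_BUF_SZ */
	size_t max_ids = (aq_buf_sz - sizeof(struct vlan_list_hdr)) /
	    sizeof(uint16_t);

	printf("max VLAN ids per AQ message: %zu\n", max_ids);
	return (0);
}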
-/*
-** Scan the Filter Table looking for vlans that need
-** to be removed, then create the data to hand to the AQ
-** for handling.
-*/
-void
-ixlv_del_vlans(struct ixlv_sc *sc)
-{
- device_t dev = sc->dev;
- struct i40e_virtchnl_vlan_filter_list *v;
- struct ixlv_vlan_filter *f, *ftmp;
- int len, i = 0, cnt = 0;
-
- /* Get count of VLAN filters to delete */
- SLIST_FOREACH(f, sc->vlan_filters, next) {
- if (f->flags & IXL_FILTER_DEL)
- cnt++;
- }
-
- if (!cnt) { /* no work... */
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
-
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
- (cnt * sizeof(u16));
-
- if (len > IXL_AQ_BUF_SZ) {
- device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
- __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
-
- v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!v) {
- device_printf(dev, "%s: unable to allocate memory\n",
- __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
-
- v->vsi_id = sc->vsi_res->vsi_id;
- v->num_elements = cnt;
-
- /* Scan the filter array */
- SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
- if (f->flags & IXL_FILTER_DEL) {
- bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
- i++;
- SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
- free(f, M_DEVBUF);
- }
- if (i == cnt)
- break;
- }
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
- free(v, M_DEVBUF);
- /* add stats? */
-}
-
-
-/*
-** This routine takes additions to the vsi filter
-** table and creates an Admin Queue call to create
-** the filters in the hardware.
-*/
-void
-ixlv_add_ether_filters(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_ether_addr_list *a;
- struct ixlv_mac_filter *f;
- device_t dev = sc->dev;
- int len, j = 0, cnt = 0;
-
- /* Get count of MAC addresses to add */
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if (f->flags & IXL_FILTER_ADD)
- cnt++;
- }
- if (cnt == 0) { /* Should not happen... */
- DDPRINTF(dev, "cnt == 0, exiting...");
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
- I40E_SUCCESS);
- return;
- }
-
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (cnt * sizeof(struct i40e_virtchnl_ether_addr));
-
- a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (a == NULL) {
- device_printf(dev, "%s: Failed to get memory for "
- "virtchnl_ether_addr_list\n", __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
- a->vsi_id = sc->vsi.id;
- a->num_elements = cnt;
-
- /* Scan the filter array */
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if (f->flags & IXL_FILTER_ADD) {
- bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
- f->flags &= ~IXL_FILTER_ADD;
- j++;
-
- DDPRINTF(dev, "ADD: " MAC_FORMAT,
- MAC_FORMAT_ARGS(f->macaddr));
- }
- if (j == cnt)
- break;
- }
- DDPRINTF(dev, "len %d, j %d, cnt %d",
- len, j, cnt);
- ixlv_send_pf_msg(sc,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
- /* add stats? */
- free(a, M_DEVBUF);
- return;
-}
-
-/*
-** This routine takes filters flagged for deletion in the
-** sc MAC filter list and creates an Admin Queue call
-** to delete those filters in the hardware.
-*/
-void
-ixlv_del_ether_filters(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_ether_addr_list *d;
- device_t dev = sc->dev;
- struct ixlv_mac_filter *f, *f_temp;
- int len, j = 0, cnt = 0;
-
- /* Get count of MAC addresses to delete */
- SLIST_FOREACH(f, sc->mac_filters, next) {
- if (f->flags & IXL_FILTER_DEL)
- cnt++;
- }
- if (cnt == 0) {
- DDPRINTF(dev, "cnt == 0, exiting...");
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
- I40E_SUCCESS);
- return;
- }
-
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (cnt * sizeof(struct i40e_virtchnl_ether_addr));
-
- d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (d == NULL) {
- device_printf(dev, "%s: Failed to get memory for "
- "virtchnl_ether_addr_list\n", __func__);
- ixl_vc_schedule_retry(&sc->vc_mgr);
- return;
- }
- d->vsi_id = sc->vsi.id;
- d->num_elements = cnt;
-
- /* Scan the filter array */
- SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
- if (f->flags & IXL_FILTER_DEL) {
- bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
- DDPRINTF(dev, "DEL: " MAC_FORMAT,
- MAC_FORMAT_ARGS(f->macaddr));
- j++;
- SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
- free(f, M_DEVBUF);
- }
- if (j == cnt)
- break;
- }
- ixlv_send_pf_msg(sc,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
- /* add stats? */
- free(d, M_DEVBUF);
- return;
-}
-
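All four filter routines above share the same two-pass shape: walk the list once to count flagged entries, size and allocate one message for exactly that many, then walk again to pack them (removing entries as it goes in the delete paths). The skeleton of that pattern, reduced to an array in place of the driver's SLIST:

#include <stdio.h>
#include <stdlib.h>

#define FLAG_ADD 0x1

struct filter {
	unsigned flags;
	unsigned short vlan;
};

int
main(void)
{
	struct filter filters[] = {
		{ FLAG_ADD, 100 }, { 0, 200 }, { FLAG_ADD, 300 },
	};
	int cnt = 0, i = 0;

	for (size_t k = 0; k < 3; k++)		/* pass 1: count */
		if (filters[k].flags & FLAG_ADD)
			cnt++;

	unsigned short *msg = calloc(cnt, sizeof(*msg));
	if (msg == NULL)
		return (1);
	for (size_t k = 0; k < 3 && i < cnt; k++)	/* pass 2: pack */
		if (filters[k].flags & FLAG_ADD)
			msg[i++] = filters[k].vlan;

	printf("packed %d ids: %u %u\n", cnt, msg[0], msg[1]);
	free(msg);
	return (0);
}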
-/*
-** ixlv_request_reset
-** Request that the PF reset this VF. No response is expected.
-*/
-void
-ixlv_request_reset(struct ixlv_sc *sc)
-{
- /*
- ** Set the reset status to "in progress" before
- ** the request, this avoids any possibility of
- ** a mistaken early detection of completion.
- */
- wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
-}
-
-/*
-** ixlv_request_stats
-** Request the statistics for this VF's VSI from PF.
-*/
-void
-ixlv_request_stats(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_queue_select vqs;
- int error = 0;
-
- vqs.vsi_id = sc->vsi_res->vsi_id;
- /* Low priority, we don't need to error check */
- error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
- (u8 *)&vqs, sizeof(vqs));
-#ifdef IXL_DEBUG
- if (error)
- device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
-#endif
-}
-
-/*
-** Updates driver's stats counters with VSI stats returned from PF.
-*/
-void
-ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
-{
- struct ixl_vsi *vsi = &sc->vsi;
- uint64_t tx_discards;
-
- tx_discards = es->tx_discards;
- for (int i = 0; i < vsi->num_queues; i++)
- tx_discards += sc->vsi.queues[i].txr.br->br_drops;
-
- /* Update ifnet stats */
- IXL_SET_IPACKETS(vsi, es->rx_unicast +
- es->rx_multicast +
- es->rx_broadcast);
- IXL_SET_OPACKETS(vsi, es->tx_unicast +
- es->tx_multicast +
- es->tx_broadcast);
- IXL_SET_IBYTES(vsi, es->rx_bytes);
- IXL_SET_OBYTES(vsi, es->tx_bytes);
- IXL_SET_IMCASTS(vsi, es->rx_multicast);
- IXL_SET_OMCASTS(vsi, es->tx_multicast);
-
- IXL_SET_OERRORS(vsi, es->tx_errors);
- IXL_SET_IQDROPS(vsi, es->rx_discards);
- IXL_SET_OQDROPS(vsi, tx_discards);
- IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
- IXL_SET_COLLISIONS(vsi, 0);
-
- vsi->eth_stats = *es;
-}
-
-void
-ixlv_config_rss_key(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_rss_key *rss_key_msg;
- int msg_len, key_length;
- u8 rss_seed[IXL_RSS_KEY_SIZE];
-
-#ifdef RSS
- /* Fetch the configured RSS key */
- rss_getkey((uint8_t *) &rss_seed);
-#else
- ixl_get_default_rss_key((u32 *)rss_seed);
-#endif
-
- /* Send the fetched key */
- key_length = IXL_RSS_KEY_SIZE;
- msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
- rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (rss_key_msg == NULL) {
- device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
- return;
- }
-
- rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
- rss_key_msg->key_len = key_length;
- bcopy(rss_seed, &rss_key_msg->key[0], key_length);
-
- DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
- rss_key_msg->vsi_id, rss_key_msg->key_len);
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
- (u8 *)rss_key_msg, msg_len);
-
- free(rss_key_msg, M_DEVBUF);
-}
-
-void
-ixlv_set_rss_hena(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_rss_hena hena;
-
- hena.hena = IXL_DEFAULT_RSS_HENA_X722;
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
- (u8 *)&hena, sizeof(hena));
-}
-
-void
-ixlv_config_rss_lut(struct ixlv_sc *sc)
-{
- struct i40e_virtchnl_rss_lut *rss_lut_msg;
- int msg_len;
- u16 lut_length;
- u32 lut;
- int i, que_id;
-
- lut_length = IXL_RSS_VSI_LUT_SIZE;
- msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
- rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (rss_lut_msg == NULL) {
- device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
- return;
- }
-
- rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
- /* Each LUT entry is a max of 1 byte, so this is easy */
- rss_lut_msg->lut_entries = lut_length;
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i < lut_length; i++) {
-#ifdef RSS
- /*
- * Fetch the RSS bucket id for the given indirection entry.
- * Cap it at the number of configured buckets (which is
- * num_queues.)
- */
- que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % sc->vsi.num_queues;
+#ifdef IFLIB
+#include <dev/ixl/iflib_ixlvc.c>
#else
- que_id = i % sc->vsi.num_queues;
-#endif
- lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
- rss_lut_msg->lut[i] = lut;
- }
-
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
- (u8 *)rss_lut_msg, msg_len);
-
- free(rss_lut_msg, M_DEVBUF);
-}
-
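Without kernel RSS, the LUT fill above simply round-robins: entry i points at queue i % num_queues, masked down to fit a one-byte entry. The distribution for assumed sizes:

#include <stdio.h>

int
main(void)
{
	int lut_size = 16;	/* assumed stand-in for IXL_RSS_VSI_LUT_SIZE */
	int num_queues = 4;	/* assumed VF queue count */

	for (int i = 0; i < lut_size; i++)
		printf("lut[%2d] = queue %d\n", i, i % num_queues);
	return (0);
}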
-/*
-** ixlv_vc_completion
-**
-** Asynchronous completion function for admin queue messages. Rather than busy
-** wait, we fire off our requests and assume that no errors will be returned.
-** This function handles the reply messages.
-*/
-void
-ixlv_vc_completion(struct ixlv_sc *sc,
- enum i40e_virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen)
-{
- device_t dev = sc->dev;
- struct ixl_vsi *vsi = &sc->vsi;
-
- if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)msg;
-
- switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
-#ifdef IXL_DEBUG
- device_printf(dev, "Link change: status %d, speed %d\n",
- vpe->event_data.link_event.link_status,
- vpe->event_data.link_event.link_speed);
+#include <dev/ixl/legacy_ixlvc.c>
#endif
- sc->link_up =
- vpe->event_data.link_event.link_status;
- sc->link_speed =
- vpe->event_data.link_event.link_speed;
- ixlv_update_link_status(sc);
- break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- device_printf(dev, "PF initiated reset!\n");
- sc->init_state = IXLV_RESET_PENDING;
- mtx_unlock(&sc->mtx);
- ixlv_init(vsi);
- mtx_lock(&sc->mtx);
- break;
- default:
- device_printf(dev, "%s: Unknown event %d from AQ\n",
- __func__, vpe->event);
- break;
- }
-
- return;
- }
-
- /* Catch-all error response */
- if (v_retval) {
- device_printf(dev,
- "%s: AQ returned error %s to our request %s!\n",
- __func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
- }
-
-#ifdef IXL_DEBUG
- if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
- DDPRINTF(dev, "opcode %d", v_opcode);
-#endif
-
- switch (v_opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS:
- ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
- v_retval);
- if (v_retval) {
- device_printf(dev, "WARNING: Error adding VF mac filter!\n");
- device_printf(dev, "WARNING: Device may not receive traffic!\n");
- }
- break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
- v_retval);
- if (v_retval == 0) {
- /* Update link status */
- ixlv_update_link_status(sc);
- /* Turn on all interrupts */
- ixlv_enable_intr(vsi);
- /* And inform the stack we're ready */
- vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
- /* TODO: Clear a state flag, so we know we're ready to run init again */
- }
- break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
- v_retval);
- if (v_retval == 0) {
- /* Turn off all interrupts */
- ixlv_disable_intr(vsi);
- /* Tell the stack that the interface is no longer active */
- vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
- v_retval);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
- v_retval);
- break;
- default:
-#ifdef IXL_DEBUG
- device_printf(dev,
- "%s: Received unexpected message %s from PF.\n",
- __func__, ixl_vc_opcode_str(v_opcode));
-#endif
- break;
- }
- return;
-}
-
-static void
-ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
-{
-
- switch (request) {
- case IXLV_FLAG_AQ_MAP_VECTORS:
- ixlv_map_queues(sc);
- break;
-
- case IXLV_FLAG_AQ_ADD_MAC_FILTER:
- ixlv_add_ether_filters(sc);
- break;
-
- case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
- ixlv_add_vlans(sc);
- break;
-
- case IXLV_FLAG_AQ_DEL_MAC_FILTER:
- ixlv_del_ether_filters(sc);
- break;
-
- case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
- ixlv_del_vlans(sc);
- break;
-
- case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
- ixlv_configure_queues(sc);
- break;
-
- case IXLV_FLAG_AQ_DISABLE_QUEUES:
- ixlv_disable_queues(sc);
- break;
-
- case IXLV_FLAG_AQ_ENABLE_QUEUES:
- ixlv_enable_queues(sc);
- break;
-
- case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
- ixlv_config_rss_key(sc);
- break;
-
- case IXLV_FLAG_AQ_SET_RSS_HENA:
- ixlv_set_rss_hena(sc);
- break;
-
- case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
- ixlv_config_rss_lut(sc);
- break;
- }
-}
-
-void
-ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
-{
- mgr->sc = sc;
- mgr->current = NULL;
- TAILQ_INIT(&mgr->pending);
- callout_init_mtx(&mgr->callout, &sc->mtx, 0);
-}
-
-static void
-ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
-{
- struct ixl_vc_cmd *cmd;
-
- cmd = mgr->current;
- mgr->current = NULL;
- cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
-
- cmd->callback(cmd, cmd->arg, err);
- ixl_vc_process_next(mgr);
-}
-
-static void
-ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
- enum i40e_status_code err)
-{
- struct ixl_vc_cmd *cmd;
-
- cmd = mgr->current;
- if (cmd == NULL || cmd->request != request)
- return;
-
- callout_stop(&mgr->callout);
- ixl_vc_process_completion(mgr, err);
-}
-
-static void
-ixl_vc_cmd_timeout(void *arg)
-{
- struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
-
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
- ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
-}
-
-static void
-ixl_vc_cmd_retry(void *arg)
-{
- struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
-
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
- ixl_vc_send_current(mgr);
-}
-
-static void
-ixl_vc_send_current(struct ixl_vc_mgr *mgr)
-{
- struct ixl_vc_cmd *cmd;
-
- cmd = mgr->current;
- ixl_vc_send_cmd(mgr->sc, cmd->request);
- callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
-}
-
-static void
-ixl_vc_process_next(struct ixl_vc_mgr *mgr)
-{
- struct ixl_vc_cmd *cmd;
-
- if (mgr->current != NULL)
- return;
-
- if (TAILQ_EMPTY(&mgr->pending))
- return;
-
- cmd = TAILQ_FIRST(&mgr->pending);
- TAILQ_REMOVE(&mgr->pending, cmd, next);
-
- mgr->current = cmd;
- ixl_vc_send_current(mgr);
-}
-
-static void
-ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
-{
-
- callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
-}
-
-void
-ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
- uint32_t req, ixl_vc_callback_t *callback, void *arg)
-{
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
-
- if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
- if (mgr->current == cmd)
- mgr->current = NULL;
- else
- TAILQ_REMOVE(&mgr->pending, cmd, next);
- }
-
- cmd->request = req;
- cmd->callback = callback;
- cmd->arg = arg;
- cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
- TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
-
- ixl_vc_process_next(mgr);
-}
-
-void
-ixl_vc_flush(struct ixl_vc_mgr *mgr)
-{
- struct ixl_vc_cmd *cmd;
-
- IXLV_CORE_LOCK_ASSERT(mgr->sc);
- KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
- ("ixlv: pending commands waiting but no command in progress"));
-
- cmd = mgr->current;
- if (cmd != NULL) {
- mgr->current = NULL;
- cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
- cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
- }
-
- while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
- TAILQ_REMOVE(&mgr->pending, cmd, next);
- cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
- cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
- }
-
- callout_stop(&mgr->callout);
-}
-
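The command manager retired above serializes virtchnl traffic: one request outstanding at a time, the rest queued FIFO, and the completion callback fires when the matching response (or a timeout) comes back, at which point the next pending request is sent. A minimal model of that discipline, with all names invented and the callout, retry, and locking machinery omitted:

#include <stdio.h>
#include <stddef.h>

struct vc_cmd {
	int request;
	void (*callback)(struct vc_cmd *, int err);
	struct vc_cmd *next;
};

struct vc_mgr {
	struct vc_cmd *current;
	struct vc_cmd *head, *tail;
};

static void
vc_process_next(struct vc_mgr *mgr)
{
	if (mgr->current != NULL || mgr->head == NULL)
		return;
	mgr->current = mgr->head;
	mgr->head = mgr->head->next;
	if (mgr->head == NULL)
		mgr->tail = NULL;
	printf("send request %d\n", mgr->current->request);
}

static void
vc_enqueue(struct vc_mgr *mgr, struct vc_cmd *cmd, int request,
    void (*cb)(struct vc_cmd *, int))
{
	cmd->request = request;
	cmd->callback = cb;
	cmd->next = NULL;
	if (mgr->tail != NULL)
		mgr->tail->next = cmd;
	else
		mgr->head = cmd;
	mgr->tail = cmd;
	vc_process_next(mgr);
}

static void
vc_complete(struct vc_mgr *mgr, int err)
{
	struct vc_cmd *cmd = mgr->current;

	mgr->current = NULL;
	cmd->callback(cmd, err);
	vc_process_next(mgr);	/* kick off the next pending request */
}

static void
done(struct vc_cmd *cmd, int err)
{
	printf("request %d completed, err %d\n", cmd->request, err);
}

int
main(void)
{
	struct vc_mgr mgr = { NULL, NULL, NULL };
	struct vc_cmd a, b;

	vc_enqueue(&mgr, &a, 1, done);	/* sent immediately */
	vc_enqueue(&mgr, &b, 2, done);	/* queued behind a */
	vc_complete(&mgr, 0);		/* a done; b is sent */
	vc_complete(&mgr, 0);		/* b done */
	return (0);
}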
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -194,7 +194,6 @@
${_ixv} \
${_ixgb} \
${_ixl} \
- ${_ixlv} \
jme \
joy \
kbdmux \
Index: sys/modules/ixl/Makefile
===================================================================
--- sys/modules/ixl/Makefile
+++ sys/modules/ixl/Makefile
@@ -3,8 +3,8 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixl
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += ixl_pf_iov.c ixl_iw.c
Index: sys/modules/ixlv/Makefile
===================================================================
--- sys/modules/ixlv/Makefile
+++ sys/modules/ixlv/Makefile
@@ -3,8 +3,8 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixlv
-SRCS = device_if.h bus_if.h pci_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
+SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
Index: sys/net/iflib.c
===================================================================
--- sys/net/iflib.c
+++ sys/net/iflib.c
@@ -386,6 +386,9 @@
caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);
+/*
+ * Return the number of ring descriptors currently in use, given the
+ * ring size, the consumer and producer indices, and the generation
+ * bit that distinguishes a full ring from an empty one when the
+ * indices are equal.
+ */
static inline int
get_inuse(int size, int cidx, int pidx, int gen)
{
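For context, get_inuse() computes ring occupancy from the consumer index, the producer index, and a generation bit that disambiguates pidx == cidx into empty versus full. A hedged standalone model of that arithmetic:

#include <stdio.h>

static int
ring_inuse(int size, int cidx, int pidx, int gen)
{
	if (pidx > cidx)
		return (pidx - cidx);
	if (pidx < cidx)
		return (size - cidx + pidx);
	return (gen ? size : 0);	/* pidx == cidx: full or empty */
}

int
main(void)
{
	printf("%d\n", ring_inuse(1024, 10, 20, 0));	/* 10 in use */
	printf("%d\n", ring_inuse(1024, 20, 10, 0));	/* 1014: wrapped */
	printf("%d\n", ring_inuse(1024, 5, 5, 1));	/* 1024: full */
	return (0);
}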
@@ -3376,7 +3379,7 @@
*/
if (avoid_reset) {
if_setflagbits(ifp, IFF_UP,0);
- if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING))
+ if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
reinit = 1;
#ifdef INET
if (!(if_getflags(ifp) & IFF_NOARP))
@@ -3471,7 +3474,7 @@
#endif
setmask |= (mask & IFCAP_FLAGS);
- if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
+ if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
if ((mask & IFCAP_WOL) &&
(if_getcapabilities(ifp) & IFCAP_WOL) != 0)
@@ -3492,7 +3495,7 @@
CTX_UNLOCK(ctx);
}
break;
- }
+ }
case SIOCGPRIVATE_0:
case SIOCSDRVSPEC:
case SIOCGDRVSPEC:
@@ -3636,7 +3639,6 @@
uint16_t main_txq;
uint16_t main_rxq;
-
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
if (sc == NULL) {
@@ -4837,6 +4839,8 @@
if (enable_msix == 0)
goto msi;
+/* Don't need to set busmaster here... */
+#if 0
/*
** When used in a virtualized environment
** PCI BUSMASTER capability may not be set
@@ -4861,6 +4865,7 @@
goto msi;
}
}
+#endif
/*
* bar == -1 => "trust me I know what I'm doing"
