D28636.id.diff
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -110,6 +110,22 @@
dev/axgbe/xgbe-phy-v2.c optional axp
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
+dev/iavf/if_iavf_iflib.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_lib.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_osdep.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_txrx_iflib.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_common.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_adminq.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_vc_common.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_vc_iflib.c optional iavf pci \
+ compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/ice/if_ice_iflib.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c optional ice pci \
@@ -172,23 +188,19 @@
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/if_iavf.c optional iavf pci \
+dev/ixl/ixl_txrx.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/iavf_vc.c optional iavf pci \
+dev/ixl/i40e_osdep.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/ixl_txrx.c optional ixl pci | iavf pci \
+dev/ixl/i40e_lan_hmc.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_osdep.c optional ixl pci | iavf pci \
+dev/ixl/i40e_hmc.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_lan_hmc.c optional ixl pci | iavf pci \
+dev/ixl/i40e_common.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_hmc.c optional ixl pci | iavf pci \
+dev/ixl/i40e_nvm.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_common.c optional ixl pci | iavf pci \
- compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_nvm.c optional ixl pci | iavf pci \
- compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_adminq.c optional ixl pci | iavf pci \
+dev/ixl/i40e_adminq.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_dcb.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
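The net effect of this hunk is to give iavf its own source list under dev/iavf (compiled with -I$S/dev/iavf) and to drop the "| iavf pci" alternatives from the shared ixl/i40e sources, so the two drivers no longer build from common objects. A minimal kernel-config sketch to exercise the new rules (illustrative only; amd64 GENERIC already carries the device line):

    include GENERIC
    ident   IAVF_TEST
    device  iavf    # pulls in the dev/iavf sources via "optional iavf pci"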
diff --git a/sys/dev/iavf/iavf_adminq.h b/sys/dev/iavf/iavf_adminq.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_adminq.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_ADMINQ_H_
+#define _IAVF_ADMINQ_H_
+
+#include "iavf_osdep.h"
+#include "iavf_status.h"
+#include "iavf_adminq_cmd.h"
+
+#define IAVF_ADMINQ_DESC(R, i) \
+ (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+
+#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
+
+struct iavf_adminq_ring {
+ struct iavf_virt_mem dma_head; /* space for dma structures */
+ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
+ struct iavf_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct iavf_dma_mem *asq_bi;
+ struct iavf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct iavf_asq_cmd_details {
+ void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct iavf_aq_desc *wb_desc;
+};
+
+#define IAVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct iavf_arq_event_info {
+ struct iavf_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct iavf_adminq_info {
+ struct iavf_adminq_ring arq; /* receive queue */
+ struct iavf_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct iavf_spinlock asq_spinlock; /* Send queue spinlock */
+ struct iavf_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum iavf_admin_queue_err asq_last_status;
+ enum iavf_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define IAVF_AQ_LARGE_BUF 512
+#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _IAVF_ADMINQ_H_ */
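For orientation, the structures above come together in iavf_init_adminq() (iavf_adminq.c, next file), which requires the four sizing fields to be set beforehand. A minimal attach-time sketch, not part of the patch (example_adminq_attach is a hypothetical helper and the sizes are illustrative, not driver defaults):

    static int
    example_adminq_attach(struct iavf_hw *hw)
    {
            /* Must be set before iavf_init_adminq(); values illustrative. */
            hw->aq.num_asq_entries = 256;
            hw->aq.num_arq_entries = 256;
            hw->aq.asq_buf_size = 4096;
            hw->aq.arq_buf_size = 4096;

            if (iavf_init_adminq(hw) != IAVF_SUCCESS)
                    return (ENXIO); /* init path unwinds rings and locks itself */
            return (0);
    }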
diff --git a/sys/dev/iavf/iavf_adminq.c b/sys/dev/iavf/iavf_adminq.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_adminq.c
@@ -0,0 +1,990 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "iavf_status.h"
+#include "iavf_type.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+
+/**
+ * iavf_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ hw->aq.asq.tail = IAVF_VF_ATQT1;
+ hw->aq.asq.head = IAVF_VF_ATQH1;
+ hw->aq.asq.len = IAVF_VF_ATQLEN1;
+ hw->aq.asq.bal = IAVF_VF_ATQBAL1;
+ hw->aq.asq.bah = IAVF_VF_ATQBAH1;
+ hw->aq.arq.tail = IAVF_VF_ARQT1;
+ hw->aq.arq.head = IAVF_VF_ARQH1;
+ hw->aq.arq.len = IAVF_VF_ARQLEN1;
+ hw->aq.arq.bal = IAVF_VF_ARQBAL1;
+ hw->aq.arq.bah = IAVF_VF_ARQBAH1;
+}
+
+/**
+ * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ iavf_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_asq_cmd_details)));
+ if (ret_code) {
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ iavf_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_asq(struct iavf_hw *hw)
+{
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * iavf_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_arq(struct iavf_hw *hw)
+{
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * iavf_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * iavf_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_asq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_asq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_asq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_config_regs;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ iavf_free_asq_bufs(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_arq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_arq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_arq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ iavf_init_spinlock(&hw->aq.asq_spinlock);
+ iavf_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ iavf_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = iavf_init_asq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = iavf_init_arq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ iavf_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (iavf_check_asq_alive(hw))
+ iavf_aq_queue_shutdown(hw, true);
+
+ iavf_shutdown_asq(hw);
+ iavf_shutdown_arq(hw);
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * iavf_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 iavf_clean_asq(struct iavf_hw *hw)
+{
+ struct iavf_adminq_ring *asq = &(hw->aq.asq);
+ struct iavf_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct iavf_aq_desc desc_cb;
+ struct iavf_aq_desc *desc;
+
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ IAVF_ADMINQ_CALLBACK cb_func =
+ (IAVF_ADMINQ_CALLBACK)details->callback;
+ iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
+ iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return IAVF_DESC_UNUSED(asq);
+}
+
+/**
+ * iavf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool iavf_asq_done(struct iavf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * iavf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+ struct iavf_dma_mem *dma_buff = NULL;
+ struct iavf_asq_cmd_details *details;
+ struct iavf_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ iavf_memcpy(details,
+ cmd_details,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined, copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
+ }
+ } else {
+ iavf_memset(details, 0,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = IAVF_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = IAVF_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (iavf_clean_asq(hw) == 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ iavf_memcpy(dma_buff->va, buff, buff_size,
+ IAVF_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (iavf_asq_done(hw))
+ break;
+ iavf_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (iavf_asq_done(hw)) {
+ iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ if (buff != NULL)
+ iavf_memcpy(buff, dma_buff->va, buff_size,
+ IAVF_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ status = IAVF_SUCCESS;
+ else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ status = IAVF_ERR_NOT_READY;
+ else
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ }
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ iavf_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
+}
+
+/**
+ * iavf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *pending)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = IAVF_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & IAVF_AQ_FLAG_ERR) {
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ iavf_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, IAVF_DMA_TO_NONDMA);
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
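A usage sketch for the send path defined above (hypothetical wrapper, not part of the patch): iavf_fill_default_direct_cmd_desc() builds the descriptor and iavf_asq_send_command() posts it, busy-waiting up to IAVF_ASQ_CMD_TIMEOUT for writeback when no async/postpone details are supplied.

    static enum iavf_status
    example_send_direct_cmd(struct iavf_hw *hw, u16 opcode)
    {
            struct iavf_aq_desc desc;

            iavf_fill_default_direct_cmd_desc(&desc, opcode);
            /* NULL buffer and zero size make this a direct command;
             * NULL cmd_details selects the synchronous wait path. */
            return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
    }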
diff --git a/sys/dev/iavf/iavf_adminq_cmd.h b/sys/dev/iavf/iavf_adminq_cmd.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_adminq_cmd.h
@@ -0,0 +1,678 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_ADMINQ_CMD_H_
+#define _IAVF_ADMINQ_CMD_H_
+
+/* This header file defines the iavf Admin Queue commands and is shared between
+ * iavf Firmware and Software. Do not change the names in this file to IAVF
+ * because this file should be diff-able against the i40e version, even
+ * though many parts have been removed in this VF version.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define IAVF_FW_API_VERSION_MAJOR 0x0001
+#define IAVF_FW_API_VERSION_MINOR_X722 0x0006
+#define IAVF_FW_API_VERSION_MINOR_X710 0x0007
+
+#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
+ IAVF_FW_API_VERSION_MINOR_X710 : \
+ IAVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define IAVF_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+
+struct iavf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IAVF_AQ_FLAG_DD_SHIFT 0
+#define IAVF_AQ_FLAG_CMP_SHIFT 1
+#define IAVF_AQ_FLAG_ERR_SHIFT 2
+#define IAVF_AQ_FLAG_VFE_SHIFT 3
+#define IAVF_AQ_FLAG_LB_SHIFT 9
+#define IAVF_AQ_FLAG_RD_SHIFT 10
+#define IAVF_AQ_FLAG_VFC_SHIFT 11
+#define IAVF_AQ_FLAG_BUF_SHIFT 12
+#define IAVF_AQ_FLAG_SI_SHIFT 13
+#define IAVF_AQ_FLAG_EI_SHIFT 14
+#define IAVF_AQ_FLAG_FE_SHIFT 15
+
+#define IAVF_AQ_FLAG_DD (1 << IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define IAVF_AQ_FLAG_CMP (1 << IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define IAVF_AQ_FLAG_ERR (1 << IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define IAVF_AQ_FLAG_VFE (1 << IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define IAVF_AQ_FLAG_LB (1 << IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define IAVF_AQ_FLAG_RD (1 << IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define IAVF_AQ_FLAG_VFC (1 << IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define IAVF_AQ_FLAG_BUF (1 << IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define IAVF_AQ_FLAG_SI (1 << IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define IAVF_AQ_FLAG_EI (1 << IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define IAVF_AQ_FLAG_FE (1 << IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum iavf_admin_queue_err {
+ IAVF_AQ_RC_OK = 0, /* success */
+ IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ IAVF_AQ_RC_ENOENT = 2, /* No such element */
+ IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ IAVF_AQ_RC_EIO = 5, /* I/O error */
+ IAVF_AQ_RC_ENXIO = 6, /* No such resource */
+ IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ IAVF_AQ_RC_EAGAIN = 8, /* Try again */
+ IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ IAVF_AQ_RC_EACCES = 10, /* Permission denied */
+ IAVF_AQ_RC_EFAULT = 11, /* Bad address */
+ IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ IAVF_AQ_RC_EEXIST = 13, /* object already exists */
+ IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IAVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum iavf_admin_queue_opc {
+ /* aq commands */
+ iavf_aqc_opc_get_version = 0x0001,
+ iavf_aqc_opc_driver_version = 0x0002,
+ iavf_aqc_opc_queue_shutdown = 0x0003,
+ iavf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ iavf_aqc_opc_request_resource = 0x0008,
+ iavf_aqc_opc_release_resource = 0x0009,
+
+ iavf_aqc_opc_list_func_capabilities = 0x000A,
+ iavf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ iavf_aqc_opc_set_proxy_config = 0x0104,
+ iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ iavf_aqc_opc_mac_address_read = 0x0107,
+ iavf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ iavf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ iavf_aqc_opc_set_wol_filter = 0x0120,
+ iavf_aqc_opc_get_wake_reason = 0x0121,
+ iavf_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ iavf_aqc_opc_get_switch_config = 0x0200,
+ iavf_aqc_opc_add_statistics = 0x0201,
+ iavf_aqc_opc_remove_statistics = 0x0202,
+ iavf_aqc_opc_set_port_parameters = 0x0203,
+ iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ iavf_aqc_opc_set_switch_config = 0x0205,
+ iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ iavf_aqc_opc_add_vsi = 0x0210,
+ iavf_aqc_opc_update_vsi_parameters = 0x0211,
+ iavf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ iavf_aqc_opc_add_pv = 0x0220,
+ iavf_aqc_opc_update_pv_parameters = 0x0221,
+ iavf_aqc_opc_get_pv_parameters = 0x0222,
+
+ iavf_aqc_opc_add_veb = 0x0230,
+ iavf_aqc_opc_update_veb_parameters = 0x0231,
+ iavf_aqc_opc_get_veb_parameters = 0x0232,
+
+ iavf_aqc_opc_delete_element = 0x0243,
+
+ iavf_aqc_opc_add_macvlan = 0x0250,
+ iavf_aqc_opc_remove_macvlan = 0x0251,
+ iavf_aqc_opc_add_vlan = 0x0252,
+ iavf_aqc_opc_remove_vlan = 0x0253,
+ iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ iavf_aqc_opc_add_tag = 0x0255,
+ iavf_aqc_opc_remove_tag = 0x0256,
+ iavf_aqc_opc_add_multicast_etag = 0x0257,
+ iavf_aqc_opc_remove_multicast_etag = 0x0258,
+ iavf_aqc_opc_update_tag = 0x0259,
+ iavf_aqc_opc_add_control_packet_filter = 0x025A,
+ iavf_aqc_opc_remove_control_packet_filter = 0x025B,
+ iavf_aqc_opc_add_cloud_filters = 0x025C,
+ iavf_aqc_opc_remove_cloud_filters = 0x025D,
+ iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
+ iavf_aqc_opc_replace_cloud_filters = 0x025F,
+
+ iavf_aqc_opc_add_mirror_rule = 0x0260,
+ iavf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ iavf_aqc_opc_write_personalization_profile = 0x0270,
+ iavf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ iavf_aqc_opc_dcb_updated = 0x0302,
+ iavf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ iavf_aqc_opc_query_vsi_bw_config = 0x0408,
+ iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ iavf_aqc_opc_query_port_ets_config = 0x0419,
+ iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ iavf_aqc_opc_suspend_port_tx = 0x041B,
+ iavf_aqc_opc_resume_port_tx = 0x041C,
+ iavf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ iavf_aqc_opc_get_phy_abilities = 0x0600,
+ iavf_aqc_opc_set_phy_config = 0x0601,
+ iavf_aqc_opc_set_mac_config = 0x0603,
+ iavf_aqc_opc_set_link_restart_an = 0x0605,
+ iavf_aqc_opc_get_link_status = 0x0607,
+ iavf_aqc_opc_set_phy_int_mask = 0x0613,
+ iavf_aqc_opc_get_local_advt_reg = 0x0614,
+ iavf_aqc_opc_set_local_advt_reg = 0x0615,
+ iavf_aqc_opc_get_partner_advt = 0x0616,
+ iavf_aqc_opc_set_lb_modes = 0x0618,
+ iavf_aqc_opc_get_phy_wol_caps = 0x0621,
+ iavf_aqc_opc_set_phy_debug = 0x0622,
+ iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ iavf_aqc_opc_run_phy_activity = 0x0626,
+ iavf_aqc_opc_set_phy_register = 0x0628,
+ iavf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ iavf_aqc_opc_nvm_read = 0x0701,
+ iavf_aqc_opc_nvm_erase = 0x0702,
+ iavf_aqc_opc_nvm_update = 0x0703,
+ iavf_aqc_opc_nvm_config_read = 0x0704,
+ iavf_aqc_opc_nvm_config_write = 0x0705,
+ iavf_aqc_opc_nvm_progress = 0x0706,
+ iavf_aqc_opc_oem_post_update = 0x0720,
+ iavf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ iavf_aqc_opc_send_msg_to_pf = 0x0801,
+ iavf_aqc_opc_send_msg_to_vf = 0x0802,
+ iavf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ iavf_aqc_opc_alternate_write = 0x0900,
+ iavf_aqc_opc_alternate_write_indirect = 0x0901,
+ iavf_aqc_opc_alternate_read = 0x0902,
+ iavf_aqc_opc_alternate_read_indirect = 0x0903,
+ iavf_aqc_opc_alternate_write_done = 0x0904,
+ iavf_aqc_opc_alternate_set_mode = 0x0905,
+ iavf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ iavf_aqc_opc_lldp_get_mib = 0x0A00,
+ iavf_aqc_opc_lldp_update_mib = 0x0A01,
+ iavf_aqc_opc_lldp_add_tlv = 0x0A02,
+ iavf_aqc_opc_lldp_update_tlv = 0x0A03,
+ iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ iavf_aqc_opc_lldp_stop = 0x0A05,
+ iavf_aqc_opc_lldp_start = 0x0A06,
+ iavf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ iavf_aqc_opc_lldp_set_local_mib = 0x0A08,
+ iavf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ iavf_aqc_opc_add_udp_tunnel = 0x0B00,
+ iavf_aqc_opc_del_udp_tunnel = 0x0B01,
+ iavf_aqc_opc_set_rss_key = 0x0B02,
+ iavf_aqc_opc_set_rss_lut = 0x0B03,
+ iavf_aqc_opc_get_rss_key = 0x0B04,
+ iavf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ iavf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ iavf_aqc_opc_oem_parameter_change = 0xFE00,
+ iavf_aqc_opc_oem_device_status_change = 0xFE01,
+ iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ iavf_aqc_opc_debug_read_reg = 0xFF03,
+ iavf_aqc_opc_debug_write_reg = 0xFF04,
+ iavf_aqc_opc_debug_modify_reg = 0xFF07,
+ iavf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
+ { iavf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
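To see why the macro works: when the struct is not exactly n bytes the ternary evaluates to 0 and "(n)/0" is rejected at compile time; when it matches, the division is by 1 and the resulting enum is simply never used. A hypothetical illustration:

    struct example_cmd {
            __le32 words[4];        /* exactly the required 16 bytes */
    };
    IAVF_CHECK_CMD_LENGTH(example_cmd);     /* compiles cleanly */
    /* Shrinking words[] to 3 entries would make the check divide by zero. */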
+
+/* Queue Shutdown (direct 0x0003) */
+struct iavf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define IAVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
+
+#define IAVF_AQC_WOL_PRESERVE_STATUS 0x200
+#define IAVF_AQC_MC_MAG_EN 0x0100
+#define IAVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
+
+struct iavf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
+#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANs include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses iavf_aqc_switch_seid for the descriptor
+ */
+struct iavf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
+
+#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
+#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
+#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
+#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
+#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
+
+enum iavf_aq_link_speed {
+ IAVF_LINK_SPEED_UNKNOWN = 0,
+ IAVF_LINK_SPEED_100MB = (1 << IAVF_LINK_SPEED_100MB_SHIFT),
+ IAVF_LINK_SPEED_1GB = (1 << IAVF_LINK_SPEED_1000MB_SHIFT),
+ IAVF_LINK_SPEED_10GB = (1 << IAVF_LINK_SPEED_10GB_SHIFT),
+ IAVF_LINK_SPEED_40GB = (1 << IAVF_LINK_SPEED_40GB_SHIFT),
+ IAVF_LINK_SPEED_20GB = (1 << IAVF_LINK_SPEED_20GB_SHIFT),
+ IAVF_LINK_SPEED_25GB = (1 << IAVF_LINK_SPEED_25GB_SHIFT),
+};
+
+#define IAVF_AQ_LINK_UP_FUNCTION 0x01
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct iavf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define IAVF_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define IAVF_AQC_CEE_APP_FCOE_MASK (0x7 << IAVF_AQC_CEE_APP_FCOE_SHIFT)
+#define IAVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define IAVF_AQC_CEE_APP_ISCSI_MASK (0x7 << IAVF_AQC_CEE_APP_ISCSI_SHIFT)
+#define IAVF_AQC_CEE_APP_FIP_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_FIP_MASK (0x7 << IAVF_AQC_CEE_APP_FIP_SHIFT)
+
+#define IAVF_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define IAVF_AQC_CEE_PG_STATUS_MASK (0x7 << IAVF_AQC_CEE_PG_STATUS_SHIFT)
+#define IAVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define IAVF_AQC_CEE_PFC_STATUS_MASK (0x7 << IAVF_AQC_CEE_PFC_STATUS_SHIFT)
+#define IAVF_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_STATUS_MASK (0x7 << IAVF_AQC_CEE_APP_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << IAVF_AQC_CEE_FCOE_STATUS_SHIFT)
+#define IAVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define IAVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << IAVF_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define IAVF_AQC_CEE_FIP_STATUS_MASK (0x7 << IAVF_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct iavf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct iavf_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+IAVF_CHECK_STRUCT_LEN(0x18, iavf_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct iavf_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x20, iavf_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBx
+ */
+struct iavf_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_set_local_mib);
+
+struct iavf_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x10, iavf_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBx
+ */
+struct iavf_aqc_lldp_stop_start_specific_agent {
+#define IAVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define IAVF_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << IAVF_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_stop_start_specific_agent);
+
+struct iavf_aqc_get_set_rss_key {
+#define IAVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
+
+struct iavf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
+
+struct iavf_aqc_get_set_rss_lut {
+#define IAVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
+#endif /* _IAVF_ADMINQ_CMD_H_ */
diff --git a/sys/dev/iavf/iavf_alloc.h b/sys/dev/iavf/iavf_alloc.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_alloc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_ALLOC_H_
+#define _IAVF_ALLOC_H_
+
+struct iavf_hw;
+
+/* Memory allocation types */
+enum iavf_memory_type {
+ iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ iavf_mem_asq_buf = 1,
+ iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ iavf_mem_pd = 5, /* Page Descriptor */
+ iavf_mem_bp = 6, /* Backing Page - 4KB */
+ iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ iavf_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem);
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem,
+ u32 size);
+enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem);
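
[Editor's sketch, not part of this patch] Typical use of these hooks, assuming
the usual va/pa fields on struct iavf_dma_mem; the ring size and error handling
are illustrative only:

static enum iavf_status
iavf_example_alloc_ring(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	enum iavf_status status;

	/* 4 KB alignment is typical for descriptor rings. */
	status = iavf_allocate_dma_mem(hw, mem, iavf_mem_atq_ring,
	    1024 * 32, 4096);
	if (status != IAVF_SUCCESS)
		return status;
	/* ... hand mem->va / mem->pa to the hardware ring setup ... */
	return iavf_free_dma_mem(hw, mem);
}
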
+
+#endif /* _IAVF_ALLOC_H_ */
diff --git a/sys/dev/iavf/iavf_common.c b/sys/dev/iavf/iavf_common.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_common.c
@@ -0,0 +1,1053 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "iavf_type.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+#include "virtchnl.h"
+
+/**
+ * iavf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_set_mac_type\n");
+
+ if (hw->vendor_id == IAVF_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IAVF_DEV_ID_X722_VF:
+ hw->mac.type = IAVF_MAC_X722_VF;
+ break;
+ case IAVF_DEV_ID_VF:
+ case IAVF_DEV_ID_VF_HV:
+ case IAVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = IAVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = IAVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("iavf_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * iavf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case IAVF_AQ_RC_OK:
+ return "OK";
+ case IAVF_AQ_RC_EPERM:
+ return "IAVF_AQ_RC_EPERM";
+ case IAVF_AQ_RC_ENOENT:
+ return "IAVF_AQ_RC_ENOENT";
+ case IAVF_AQ_RC_ESRCH:
+ return "IAVF_AQ_RC_ESRCH";
+ case IAVF_AQ_RC_EINTR:
+ return "IAVF_AQ_RC_EINTR";
+ case IAVF_AQ_RC_EIO:
+ return "IAVF_AQ_RC_EIO";
+ case IAVF_AQ_RC_ENXIO:
+ return "IAVF_AQ_RC_ENXIO";
+ case IAVF_AQ_RC_E2BIG:
+ return "IAVF_AQ_RC_E2BIG";
+ case IAVF_AQ_RC_EAGAIN:
+ return "IAVF_AQ_RC_EAGAIN";
+ case IAVF_AQ_RC_ENOMEM:
+ return "IAVF_AQ_RC_ENOMEM";
+ case IAVF_AQ_RC_EACCES:
+ return "IAVF_AQ_RC_EACCES";
+ case IAVF_AQ_RC_EFAULT:
+ return "IAVF_AQ_RC_EFAULT";
+ case IAVF_AQ_RC_EBUSY:
+ return "IAVF_AQ_RC_EBUSY";
+ case IAVF_AQ_RC_EEXIST:
+ return "IAVF_AQ_RC_EEXIST";
+ case IAVF_AQ_RC_EINVAL:
+ return "IAVF_AQ_RC_EINVAL";
+ case IAVF_AQ_RC_ENOTTY:
+ return "IAVF_AQ_RC_ENOTTY";
+ case IAVF_AQ_RC_ENOSPC:
+ return "IAVF_AQ_RC_ENOSPC";
+ case IAVF_AQ_RC_ENOSYS:
+ return "IAVF_AQ_RC_ENOSYS";
+ case IAVF_AQ_RC_ERANGE:
+ return "IAVF_AQ_RC_ERANGE";
+ case IAVF_AQ_RC_EFLUSHED:
+ return "IAVF_AQ_RC_EFLUSHED";
+ case IAVF_AQ_RC_BAD_ADDR:
+ return "IAVF_AQ_RC_BAD_ADDR";
+ case IAVF_AQ_RC_EMODE:
+ return "IAVF_AQ_RC_EMODE";
+ case IAVF_AQ_RC_EFBIG:
+ return "IAVF_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
+{
+ switch (stat_err) {
+ case IAVF_SUCCESS:
+ return "OK";
+ case IAVF_ERR_NVM:
+ return "IAVF_ERR_NVM";
+ case IAVF_ERR_NVM_CHECKSUM:
+ return "IAVF_ERR_NVM_CHECKSUM";
+ case IAVF_ERR_PHY:
+ return "IAVF_ERR_PHY";
+ case IAVF_ERR_CONFIG:
+ return "IAVF_ERR_CONFIG";
+ case IAVF_ERR_PARAM:
+ return "IAVF_ERR_PARAM";
+ case IAVF_ERR_MAC_TYPE:
+ return "IAVF_ERR_MAC_TYPE";
+ case IAVF_ERR_UNKNOWN_PHY:
+ return "IAVF_ERR_UNKNOWN_PHY";
+ case IAVF_ERR_LINK_SETUP:
+ return "IAVF_ERR_LINK_SETUP";
+ case IAVF_ERR_ADAPTER_STOPPED:
+ return "IAVF_ERR_ADAPTER_STOPPED";
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ return "IAVF_ERR_INVALID_MAC_ADDR";
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
+ case IAVF_ERR_MASTER_REQUESTS_PENDING:
+ return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ return "IAVF_ERR_INVALID_LINK_SETTINGS";
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
+ case IAVF_ERR_RESET_FAILED:
+ return "IAVF_ERR_RESET_FAILED";
+ case IAVF_ERR_SWFW_SYNC:
+ return "IAVF_ERR_SWFW_SYNC";
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ return "IAVF_ERR_NO_AVAILABLE_VSI";
+ case IAVF_ERR_NO_MEMORY:
+ return "IAVF_ERR_NO_MEMORY";
+ case IAVF_ERR_BAD_PTR:
+ return "IAVF_ERR_BAD_PTR";
+ case IAVF_ERR_RING_FULL:
+ return "IAVF_ERR_RING_FULL";
+ case IAVF_ERR_INVALID_PD_ID:
+ return "IAVF_ERR_INVALID_PD_ID";
+ case IAVF_ERR_INVALID_QP_ID:
+ return "IAVF_ERR_INVALID_QP_ID";
+ case IAVF_ERR_INVALID_CQ_ID:
+ return "IAVF_ERR_INVALID_CQ_ID";
+ case IAVF_ERR_INVALID_CEQ_ID:
+ return "IAVF_ERR_INVALID_CEQ_ID";
+ case IAVF_ERR_INVALID_AEQ_ID:
+ return "IAVF_ERR_INVALID_AEQ_ID";
+ case IAVF_ERR_INVALID_SIZE:
+ return "IAVF_ERR_INVALID_SIZE";
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ return "IAVF_ERR_INVALID_ARP_INDEX";
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ return "IAVF_ERR_INVALID_FPM_FUNC_ID";
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ return "IAVF_ERR_QP_INVALID_MSG_SIZE";
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ return "IAVF_ERR_INVALID_FRAG_COUNT";
+ case IAVF_ERR_QUEUE_EMPTY:
+ return "IAVF_ERR_QUEUE_EMPTY";
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ return "IAVF_ERR_INVALID_ALIGNMENT";
+ case IAVF_ERR_FLUSHED_QUEUE:
+ return "IAVF_ERR_FLUSHED_QUEUE";
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
+ case IAVF_ERR_TIMEOUT:
+ return "IAVF_ERR_TIMEOUT";
+ case IAVF_ERR_OPCODE_MISMATCH:
+ return "IAVF_ERR_OPCODE_MISMATCH";
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ return "IAVF_ERR_CQP_COMPL_ERROR";
+ case IAVF_ERR_INVALID_VF_ID:
+ return "IAVF_ERR_INVALID_VF_ID";
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ return "IAVF_ERR_INVALID_HMCFN_ID";
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ return "IAVF_ERR_BACKING_PAGE_ERROR";
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ return "IAVF_ERR_INVALID_PBLE_INDEX";
+ case IAVF_ERR_INVALID_SD_INDEX:
+ return "IAVF_ERR_INVALID_SD_INDEX";
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case IAVF_ERR_INVALID_SD_TYPE:
+ return "IAVF_ERR_INVALID_SD_TYPE";
+ case IAVF_ERR_MEMCPY_FAILED:
+ return "IAVF_ERR_MEMCPY_FAILED";
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case IAVF_ERR_SRQ_ENABLED:
+ return "IAVF_ERR_SRQ_ENABLED";
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_ERROR";
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return "IAVF_ERR_BUF_TOO_SHORT";
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ return "IAVF_ERR_ADMIN_QUEUE_FULL";
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case IAVF_ERR_BAD_IWARP_CQE:
+ return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_NVM_BLANK_MODE:
+ return "IAVF_ERR_NVM_BLANK_MODE";
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ return "IAVF_ERR_NOT_IMPLEMENTED";
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ return "IAVF_ERR_DIAG_TEST_FAILED";
+ case IAVF_ERR_NOT_READY:
+ return "IAVF_ERR_NOT_READY";
+ case IAVF_NOT_SUPPORTED:
+ return "IAVF_NOT_SUPPORTED";
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ return "IAVF_ERR_FIRMWARE_API_VERSION";
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ iavf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ iavf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * iavf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the send queue is enabled, else false.
+ **/
+bool iavf_check_asq_alive(struct iavf_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * iavf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw,
+ bool unloading)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_queue_shutdown *cmd =
+ (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(IAVF_AQ_DRIVER_UNLOADING);
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_lut *cmd_resp =
+ (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_lut);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * iavf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
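
[Editor's sketch, not part of this patch] A caller programming a VSI-type LUT
through the wrapper above might look like this; the 64-entry table size is
illustrative:

static enum iavf_status
iavf_example_program_lut(struct iavf_hw *hw, u16 vsi_id, u16 num_queues)
{
	u8 lut[64];	/* illustrative LUT size */
	u16 i;

	/* Spread traffic round-robin across the available queues. */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;
	/* false selects the VSI table type (see pf_lut above). */
	return iavf_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
}
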
+
+/**
+ * iavf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_key *cmd_resp =
+ (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_key);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * iavf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
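
[Editor's sketch, not part of this patch] Programming the key via the wrapper
above; only the 40-byte standard key is filled here and the extended hash key
is left zeroed:

static enum iavf_status
iavf_example_program_key(struct iavf_hw *hw, u16 vsi_id, u8 *seed)
{
	struct iavf_aqc_get_set_rss_key_data key = { { 0 } };

	/* seed must provide at least 0x28 (40) bytes. */
	iavf_memcpy(key.standard_rss_key, seed,
	    sizeof(key.standard_rss_key), IAVF_NONDMA_TO_NONDMA);
	return iavf_aq_set_rss_key(hw, vsi_id, &key);
}
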
+
+/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical workflow:
+ *
+ * IF NOT iavf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum iavf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
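
[Editor's sketch, not part of this patch] The first two steps of the workflow
above, written out; this assumes the decode struct fields follow the IAVF_PTT()
initializer order:

static inline bool
iavf_example_ptype_has_outer_ip(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = iavf_ptype_lookup[ptype];

	if (!decoded.known)
		return false;	/* packet type is unknown */
	return (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP);
}
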
+
+/* macro to make the table lines short */
+#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ IAVF_RX_PTYPE_##OUTER_FRAG, \
+ IAVF_RX_PTYPE_TUNNEL_##T, \
+ IAVF_RX_PTYPE_TUNNEL_END_##TE, \
+ IAVF_RX_PTYPE_##TEF, \
+ IAVF_RX_PTYPE_INNER_PROT_##I, \
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
+#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
+#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+ /* L2 Packet types */
+ IAVF_PTT_UNUSED_ENTRY(0),
+ IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(4),
+ IAVF_PTT_UNUSED_ENTRY(5),
+ IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(8),
+ IAVF_PTT_UNUSED_ENTRY(9),
+ IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(25),
+ IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(32),
+ IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(39),
+ IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(47),
+ IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(54),
+ IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(62),
+ IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(69),
+ IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(77),
+ IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(84),
+ IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(91),
+ IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(98),
+ IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(105),
+ IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(113),
+ IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(120),
+ IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(128),
+ IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(135),
+ IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(143),
+ IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(150),
+ IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ IAVF_PTT_UNUSED_ENTRY(154),
+ IAVF_PTT_UNUSED_ENTRY(155),
+ IAVF_PTT_UNUSED_ENTRY(156),
+ IAVF_PTT_UNUSED_ENTRY(157),
+ IAVF_PTT_UNUSED_ENTRY(158),
+ IAVF_PTT_UNUSED_ENTRY(159),
+
+ IAVF_PTT_UNUSED_ENTRY(160),
+ IAVF_PTT_UNUSED_ENTRY(161),
+ IAVF_PTT_UNUSED_ENTRY(162),
+ IAVF_PTT_UNUSED_ENTRY(163),
+ IAVF_PTT_UNUSED_ENTRY(164),
+ IAVF_PTT_UNUSED_ENTRY(165),
+ IAVF_PTT_UNUSED_ENTRY(166),
+ IAVF_PTT_UNUSED_ENTRY(167),
+ IAVF_PTT_UNUSED_ENTRY(168),
+ IAVF_PTT_UNUSED_ENTRY(169),
+
+ IAVF_PTT_UNUSED_ENTRY(170),
+ IAVF_PTT_UNUSED_ENTRY(171),
+ IAVF_PTT_UNUSED_ENTRY(172),
+ IAVF_PTT_UNUSED_ENTRY(173),
+ IAVF_PTT_UNUSED_ENTRY(174),
+ IAVF_PTT_UNUSED_ENTRY(175),
+ IAVF_PTT_UNUSED_ENTRY(176),
+ IAVF_PTT_UNUSED_ENTRY(177),
+ IAVF_PTT_UNUSED_ENTRY(178),
+ IAVF_PTT_UNUSED_ENTRY(179),
+
+ IAVF_PTT_UNUSED_ENTRY(180),
+ IAVF_PTT_UNUSED_ENTRY(181),
+ IAVF_PTT_UNUSED_ENTRY(182),
+ IAVF_PTT_UNUSED_ENTRY(183),
+ IAVF_PTT_UNUSED_ENTRY(184),
+ IAVF_PTT_UNUSED_ENTRY(185),
+ IAVF_PTT_UNUSED_ENTRY(186),
+ IAVF_PTT_UNUSED_ENTRY(187),
+ IAVF_PTT_UNUSED_ENTRY(188),
+ IAVF_PTT_UNUSED_ENTRY(189),
+
+ IAVF_PTT_UNUSED_ENTRY(190),
+ IAVF_PTT_UNUSED_ENTRY(191),
+ IAVF_PTT_UNUSED_ENTRY(192),
+ IAVF_PTT_UNUSED_ENTRY(193),
+ IAVF_PTT_UNUSED_ENTRY(194),
+ IAVF_PTT_UNUSED_ENTRY(195),
+ IAVF_PTT_UNUSED_ENTRY(196),
+ IAVF_PTT_UNUSED_ENTRY(197),
+ IAVF_PTT_UNUSED_ENTRY(198),
+ IAVF_PTT_UNUSED_ENTRY(199),
+
+ IAVF_PTT_UNUSED_ENTRY(200),
+ IAVF_PTT_UNUSED_ENTRY(201),
+ IAVF_PTT_UNUSED_ENTRY(202),
+ IAVF_PTT_UNUSED_ENTRY(203),
+ IAVF_PTT_UNUSED_ENTRY(204),
+ IAVF_PTT_UNUSED_ENTRY(205),
+ IAVF_PTT_UNUSED_ENTRY(206),
+ IAVF_PTT_UNUSED_ENTRY(207),
+ IAVF_PTT_UNUSED_ENTRY(208),
+ IAVF_PTT_UNUSED_ENTRY(209),
+
+ IAVF_PTT_UNUSED_ENTRY(210),
+ IAVF_PTT_UNUSED_ENTRY(211),
+ IAVF_PTT_UNUSED_ENTRY(212),
+ IAVF_PTT_UNUSED_ENTRY(213),
+ IAVF_PTT_UNUSED_ENTRY(214),
+ IAVF_PTT_UNUSED_ENTRY(215),
+ IAVF_PTT_UNUSED_ENTRY(216),
+ IAVF_PTT_UNUSED_ENTRY(217),
+ IAVF_PTT_UNUSED_ENTRY(218),
+ IAVF_PTT_UNUSED_ENTRY(219),
+
+ IAVF_PTT_UNUSED_ENTRY(220),
+ IAVF_PTT_UNUSED_ENTRY(221),
+ IAVF_PTT_UNUSED_ENTRY(222),
+ IAVF_PTT_UNUSED_ENTRY(223),
+ IAVF_PTT_UNUSED_ENTRY(224),
+ IAVF_PTT_UNUSED_ENTRY(225),
+ IAVF_PTT_UNUSED_ENTRY(226),
+ IAVF_PTT_UNUSED_ENTRY(227),
+ IAVF_PTT_UNUSED_ENTRY(228),
+ IAVF_PTT_UNUSED_ENTRY(229),
+
+ IAVF_PTT_UNUSED_ENTRY(230),
+ IAVF_PTT_UNUSED_ENTRY(231),
+ IAVF_PTT_UNUSED_ENTRY(232),
+ IAVF_PTT_UNUSED_ENTRY(233),
+ IAVF_PTT_UNUSED_ENTRY(234),
+ IAVF_PTT_UNUSED_ENTRY(235),
+ IAVF_PTT_UNUSED_ENTRY(236),
+ IAVF_PTT_UNUSED_ENTRY(237),
+ IAVF_PTT_UNUSED_ENTRY(238),
+ IAVF_PTT_UNUSED_ENTRY(239),
+
+ IAVF_PTT_UNUSED_ENTRY(240),
+ IAVF_PTT_UNUSED_ENTRY(241),
+ IAVF_PTT_UNUSED_ENTRY(242),
+ IAVF_PTT_UNUSED_ENTRY(243),
+ IAVF_PTT_UNUSED_ENTRY(244),
+ IAVF_PTT_UNUSED_ENTRY(245),
+ IAVF_PTT_UNUSED_ENTRY(246),
+ IAVF_PTT_UNUSED_ENTRY(247),
+ IAVF_PTT_UNUSED_ENTRY(248),
+ IAVF_PTT_UNUSED_ENTRY(249),
+
+ IAVF_PTT_UNUSED_ENTRY(250),
+ IAVF_PTT_UNUSED_ENTRY(251),
+ IAVF_PTT_UNUSED_ENTRY(252),
+ IAVF_PTT_UNUSED_ENTRY(253),
+ IAVF_PTT_UNUSED_ENTRY(254),
+ IAVF_PTT_UNUSED_ENTRY(255)
+};
+
+/**
+ * iavf_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (IAVF_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = IAVF_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * iavf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_asq_cmd_details details;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(IAVF_AQ_FLAG_BUF
+ | IAVF_AQ_FLAG_RD));
+ if (msglen > IAVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ iavf_memset(&details, 0, sizeof(details), IAVF_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = iavf_asq_send_command(hw, (struct iavf_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * iavf_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ iavf_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ iavf_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * iavf_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
+{
+ return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ IAVF_SUCCESS, NULL, 0, NULL);
+}
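
[Editor's sketch, not part of this patch] The sequence the comment above
prescribes, assuming the adminq entry points defined elsewhere in this patch:

static void
iavf_example_request_reset(struct iavf_hw *hw)
{
	(void)iavf_vf_reset(hw);
	(void)iavf_shutdown_adminq(hw);
	/* ... wait for the PF to signal that the reset completed ... */
	(void)iavf_init_adminq(hw);
}
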
+
+/**
+ * iavf_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all Wake on LAN (WoL) filters
+ **/
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_clear_all_wol_filters);
+
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
diff --git a/sys/dev/iavf/iavf_debug.h b/sys/dev/iavf/iavf_debug.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_debug.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_debug.h
+ * @brief Debug macros
+ *
+ * Contains definitions for useful debug macros which can be enabled by
+ * building with IAVF_DEBUG defined.
+ */
+#ifndef _IAVF_DEBUG_H_
+#define _IAVF_DEBUG_H_
+
+#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_FORMAT_ARGS(mac_addr) \
+ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
+ (mac_addr)[4], (mac_addr)[5]
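
[Editor's note, not part of this patch] The two macros pair up, e.g.:

	device_printf(dev, "hardware address: " MAC_FORMAT "\n",
	    MAC_FORMAT_ARGS(hw->mac.addr));
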
+
+#ifdef IAVF_DEBUG
+
+#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
+
+/* Defines for printing generic debug information */
+#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
+#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
+
+/* Defines for printing specific debug information */
+#define DEBUG_INIT 1
+#define DEBUG_IOCTL 1
+#define DEBUG_HW 1
+
+#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
+#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
+ if_printf(ifp, S "\n", ##__VA_ARGS__)
+#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
+
+#else /* no IAVF_DEBUG */
+#define DEBUG_INIT 0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW 0
+
+#define DPRINTF(...)
+#define DDPRINTF(...)
+#define IDPRINTF(...)
+
+#define INIT_DEBUGOUT(...)
+#define INIT_DBG_DEV(...)
+#define INIT_DBG_IF(...)
+#define IOCTL_DEBUGOUT(...)
+#define IOCTL_DBG_IF2(...)
+#define IOCTL_DBG_IF(...)
+#define HW_DEBUGOUT(...)
+#endif /* IAVF_DEBUG */
+
+/**
+ * @enum iavf_dbg_mask
+ * @brief Bitmask values for various debug messages
+ *
+ * Enumeration of possible debug message categories, represented as a bitmask.
+ *
+ * Bits are set in the softc dbg_mask field indicating which messages are
+ * enabled.
+ *
+ * Used by the debug print macros to compare a message's category against the
+ * enabled bits in dbg_mask and decide whether to print it.
+ */
+enum iavf_dbg_mask {
+ IAVF_DBG_INFO = 0x00000001,
+ IAVF_DBG_EN_DIS = 0x00000002,
+ IAVF_DBG_AQ = 0x00000004,
+ IAVF_DBG_INIT = 0x00000008,
+ IAVF_DBG_FILTER = 0x00000010,
+
+ IAVF_DBG_RSS = 0x00000100,
+
+ IAVF_DBG_VC = 0x00001000,
+
+ IAVF_DBG_SWITCH_INFO = 0x00010000,
+
+ IAVF_DBG_ALL = 0xFFFFFFFF
+};
+
+/* Debug printing */
+void iavf_debug_core(device_t dev, uint32_t enabled_mask, uint32_t mask, char *fmt, ...) __printflike(4,5);
+
+#define iavf_dbg(sc, m, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, m, s, ##__VA_ARGS__)
+#define iavf_dbg_init(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INIT, s, ##__VA_ARGS__)
+#define iavf_dbg_info(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INFO, s, ##__VA_ARGS__)
+#define iavf_dbg_vc(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_VC, s, ##__VA_ARGS__)
+#define iavf_dbg_filter(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_FILTER, s, ##__VA_ARGS__)
+#define iavf_dbg_rss(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_RSS, s, ##__VA_ARGS__)
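
[Editor's sketch, not part of this patch] With sc->dbg_mask set to, say,
IAVF_DBG_INIT | IAVF_DBG_VC, only matching categories print:

	iavf_dbg_init(sc, "allocated %d tx and %d rx queues\n",
	    vsi->num_tx_queues, vsi->num_rx_queues);
	/* Suppressed unless IAVF_DBG_AQ is also set in dbg_mask: */
	iavf_dbg(sc, IAVF_DBG_AQ, "sent AQ opcode 0x%04x\n", opcode);
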
+
+#endif /* _IAVF_DEBUG_H_ */
diff --git a/sys/dev/iavf/iavf_devids.h b/sys/dev/iavf/iavf_devids.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_devids.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_DEVIDS_H_
+#define _IAVF_DEVIDS_H_
+
+/* Vendor ID */
+#define IAVF_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs for the VF driver */
+#define IAVF_DEV_ID_VF 0x154C
+#define IAVF_DEV_ID_VF_HV 0x1571
+#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889
+#define IAVF_DEV_ID_X722_VF 0x37CD
+
+#endif /* _IAVF_DEVIDS_H_ */
diff --git a/sys/dev/iavf/iavf_drv_info.h b/sys/dev/iavf/iavf_drv_info.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_drv_info.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_drv_info.h
+ * @brief device IDs and driver version
+ *
+ * Contains the device IDs tables and the driver version string.
+ *
+ * It must be included after iavf_legacy.h or iavf_iflib.h, and is expected to
+ * be included exactly once in the associated if_iavf file. Thus, it does not
+ * have the standard header guard.
+ */
+
+/**
+ * @var iavf_driver_version
+ * @brief driver version string
+ *
+ * Driver version information, used for display as part of an informational
+ * sysctl.
+ */
+const char iavf_driver_version[] = "3.0.26-k";
+
+#define PVIDV(vendor, devid, name) \
+ PVID(vendor, devid, name " - 3.0.26-k")
+#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 3.0.26-k")
+
+/**
+ * @var iavf_vendor_info_array
+ * @brief array of PCI devices supported by this driver
+ *
+ * Array of PCI devices which are supported by this driver. Used to determine
+ * whether a given device should be loaded by this driver. This information is
+ * also exported as part of the module information for other tools to analyze.
+ *
+ * @remark Each type of device ID needs to be listed from most-specific entry
+ * to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before
+ * the PVIDV() for it.
+ */
+static pci_vendor_info_t iavf_vendor_info_array[] = {
+ PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF,
+ "Intel(R) Ethernet Virtual Function 700 Series"),
+ PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF,
+ "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
+ PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF,
+ "Intel(R) Ethernet Adaptive Virtual Function"),
+ PVID_END
+};
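
[Editor's note, not part of this patch] To make the ordering remark concrete: a
hypothetical OEM-specific entry (subvendor/subdevice/revision values invented
for illustration) would have to be listed before the generic entry for the same
device ID:

	PVIDV_OEM(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF,
	    0x8086, 0x0001, 0, "Hypothetical OEM-branded 700 Series VF"),
	PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF,
	    "Intel(R) Ethernet Virtual Function 700 Series"),
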
diff --git a/sys/dev/iavf/iavf_iflib.h b/sys/dev/iavf/iavf_iflib.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_iflib.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_iflib.h
+ * @brief main header for the iflib driver
+ *
+ * Contains definitions for various driver structures used throughout the
+ * driver code. This header is used by the iflib implementation.
+ */
+#ifndef _IAVF_IFLIB_H_
+#define _IAVF_IFLIB_H_
+
+#include "iavf_opts.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf_ring.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+#include <sys/eventhandler.h>
+#include <sys/syslog.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <sys/pciio.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <sys/sbuf.h>
+#include <machine/smp.h>
+#include <machine/stdarg.h>
+#include <net/ethernet.h>
+#include <net/iflib.h>
+#include "ifdi_if.h"
+
+#include "iavf_lib.h"
+
+#define IAVF_CSUM_TCP \
+ (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
+#define IAVF_CSUM_UDP \
+ (CSUM_IP_UDP|CSUM_IP6_UDP)
+#define IAVF_CSUM_SCTP \
+ (CSUM_IP_SCTP|CSUM_IP6_SCTP)
+#define IAVF_CSUM_IPV4 \
+ (CSUM_IP|CSUM_IP_TSO)
+
+#define IAVF_CAPS \
+ (IFCAP_TSO4 | IFCAP_TSO6 | \
+ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
+ IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
+ IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
+ IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
+
+#define iavf_sc_from_ctx(_ctx) \
+ ((struct iavf_sc *)iflib_get_softc(_ctx))
+
+/* Use the correct assert function for each lock type */
+#define IFLIB_CTX_ASSERT(_ctx) \
+ sx_assert(iflib_ctx_lock_get(_ctx), SA_XLOCKED)
+
+#define IAVF_VC_LOCK(_sc) mtx_lock(&(_sc)->vc_mtx)
+#define IAVF_VC_UNLOCK(_sc) mtx_unlock(&(_sc)->vc_mtx)
+#define IAVF_VC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->vc_mtx)
+#define IAVF_VC_TRYLOCK(_sc) mtx_trylock(&(_sc)->vc_mtx)
+#define IAVF_VC_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->vc_mtx, MA_OWNED)
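+
+/*
+ * Illustrative only: a typical virtchnl critical section guarded by these
+ * macros (sc is assumed to be a valid softc pointer) looks like:
+ *
+ *	IAVF_VC_LOCK(sc);
+ *	... update virtchnl-related state ...
+ *	IAVF_VC_UNLOCK(sc);
+ */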
+
+/**
+ * @struct tx_ring
+ * @brief Transmit ring control struct
+ *
+ * Structure used to track the hardware Tx ring data.
+ */
+struct tx_ring {
+ struct iavf_tx_queue *que;
+ u32 tail;
+ struct iavf_tx_desc *tx_base;
+ u64 tx_paddr;
+ u32 packets;
+ u32 me;
+
+ /*
+ * For reporting completed packet status
+	 * in descriptor writeback mode
+ */
+ qidx_t *tx_rsq;
+ qidx_t tx_rs_cidx;
+ qidx_t tx_rs_pidx;
+ qidx_t tx_cidx_processed;
+
+ /* Used for Dynamic ITR calculation */
+ u32 bytes;
+ u32 itr;
+ u32 latency;
+
+ /* Soft Stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 mss_too_small;
+};
+
+/**
+ * @struct rx_ring
+ * @brief Receive ring control struct
+ *
+ * Structure used to track the hardware Rx ring data.
+ */
+struct rx_ring {
+ struct iavf_rx_queue *que;
+ union iavf_rx_desc *rx_base;
+ uint64_t rx_paddr;
+ bool discard;
+ u32 itr;
+ u32 latency;
+ u32 mbuf_sz;
+ u32 tail;
+ u32 me;
+
+ /* Used for Dynamic ITR calculation */
+ u32 packets;
+ u32 bytes;
+
+ /* Soft stats */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 desc_errs;
+};
+
+/**
+ * @struct iavf_tx_queue
+ * @brief Driver Tx queue structure
+ *
+ * Structure to track the Tx ring, IRQ, MSI-X vector, and some software stats
+ * for a Tx queue.
+ */
+struct iavf_tx_queue {
+ struct iavf_vsi *vsi;
+ struct tx_ring txr;
+ struct if_irq que_irq;
+ u32 msix;
+
+ /* Stats */
+ u64 irqs;
+ u64 tso;
+ u32 pkt_too_small;
+};
+
+/**
+ * @struct iavf_rx_queue
+ * @brief Driver Rx queue structure
+ *
+ * Structure to track the Rx ring, IRQ, MSI-X vector, and some software stats
+ * for an Rx queue.
+ */
+struct iavf_rx_queue {
+ struct iavf_vsi *vsi;
+ struct rx_ring rxr;
+ struct if_irq que_irq;
+ u32 msix;
+
+ /* Stats */
+ u64 irqs;
+};
+
+/**
+ * @struct iavf_vsi
+ * @brief Virtual Station Interface
+ *
+ * Data tracking a VSI for an iavf device.
+ */
+struct iavf_vsi {
+ if_ctx_t ctx;
+ if_softc_ctx_t shared;
+ struct ifnet *ifp;
+ struct iavf_sc *back;
+ device_t dev;
+ struct iavf_hw *hw;
+
+ int id;
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+ u16 max_frame_size;
+ bool enable_head_writeback;
+
+ bool link_active;
+
+ struct iavf_tx_queue *tx_queues;
+ struct iavf_rx_queue *rx_queues;
+ struct if_irq irq;
+
+ u16 num_vlans;
+ u16 num_macs;
+
+ /* Per-VSI stats from hardware */
+ struct iavf_eth_stats eth_stats;
+ struct iavf_eth_stats eth_stats_offsets;
+ bool stat_offsets_loaded;
+ /* VSI stat counters */
+ u64 ipackets;
+ u64 ierrors;
+ u64 opackets;
+ u64 oerrors;
+ u64 ibytes;
+ u64 obytes;
+ u64 imcasts;
+ u64 omcasts;
+ u64 iqdrops;
+ u64 oqdrops;
+ u64 noproto;
+
+ /* Misc. */
+ u64 flags;
+ struct sysctl_oid *vsi_node;
+ struct sysctl_ctx_list sysctl_ctx;
+};
+
+/**
+ * @struct iavf_mac_filter
+ * @brief MAC Address filter data
+ *
+ * Entry in the MAC filter list describing a MAC address filter used to
+ * program hardware to filter a specific MAC address.
+ */
+struct iavf_mac_filter {
+ SLIST_ENTRY(iavf_mac_filter) next;
+ u8 macaddr[ETHER_ADDR_LEN];
+ u16 flags;
+};
+
+/**
+ * @struct mac_list
+ * @brief MAC filter list head
+ *
+ * List head type for a singly-linked list of MAC address filters.
+ */
+SLIST_HEAD(mac_list, iavf_mac_filter);
+
+/**
+ * @struct iavf_vlan_filter
+ * @brief VLAN filter data
+ *
+ * Entry in the VLAN filter list describing a VLAN filter used to
+ * program hardware to filter traffic on a specific VLAN.
+ */
+struct iavf_vlan_filter {
+ SLIST_ENTRY(iavf_vlan_filter) next;
+ u16 vlan;
+ u16 flags;
+};
+
+/**
+ * @struct vlan_list
+ * @brief VLAN filter list head
+ *
+ * List head type for a singly-linked list of VLAN filters.
+ */
+SLIST_HEAD(vlan_list, iavf_vlan_filter);
+
+/**
+ * @struct iavf_sc
+ * @brief Main context structure for the iavf driver
+ *
+ * Software context structure used to store information about a single device
+ * that is loaded by the iavf driver.
+ */
+struct iavf_sc {
+ struct iavf_vsi vsi;
+
+ struct iavf_hw hw;
+ struct iavf_osdep osdep;
+ device_t dev;
+
+ struct resource *pci_mem;
+
+ /* driver state flags, only access using atomic functions */
+ u32 state;
+
+ struct ifmedia *media;
+ struct virtchnl_version_info version;
+ enum iavf_dbg_mask dbg_mask;
+ u16 promisc_flags;
+
+ bool link_up;
+ union {
+ enum virtchnl_link_speed link_speed;
+ u32 link_speed_adv;
+ };
+
+ /* Tunable settings */
+ int tx_itr;
+ int rx_itr;
+ int dynamic_tx_itr;
+ int dynamic_rx_itr;
+
+ /* Filter lists */
+ struct mac_list *mac_filters;
+ struct vlan_list *vlan_filters;
+
+ /* Virtual comm channel */
+ struct virtchnl_vf_resource *vf_res;
+ struct virtchnl_vsi_resource *vsi_res;
+
+ /* Misc stats maintained by the driver */
+ u64 admin_irq;
+
+ /* Buffer used for reading AQ responses */
+ u8 aq_buffer[IAVF_AQ_BUF_SZ];
+
+ /* State flag used in init/stop */
+ u32 queues_enabled;
+ u8 enable_queues_chan;
+ u8 disable_queues_chan;
+
+ /* For virtchnl message processing task */
+ struct task vc_task;
+ struct taskqueue *vc_tq;
+ char vc_mtx_name[16];
+ struct mtx vc_mtx;
+};
+
+/* Function prototypes */
+void iavf_init_tx_ring(struct iavf_vsi *vsi, struct iavf_tx_queue *que);
+void iavf_get_default_rss_key(u32 *);
+const char * iavf_vc_stat_str(struct iavf_hw *hw,
+ enum virtchnl_status_code stat_err);
+void iavf_init_tx_rsqs(struct iavf_vsi *vsi);
+void iavf_init_tx_cidx(struct iavf_vsi *vsi);
+u64 iavf_max_vc_speed_to_value(u8 link_speeds);
+void iavf_add_vsi_sysctls(device_t dev, struct iavf_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name);
+void iavf_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct iavf_eth_stats *eth_stats);
+void iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi);
+
+void iavf_enable_intr(struct iavf_vsi *);
+void iavf_disable_intr(struct iavf_vsi *);
+#endif /* _IAVF_IFLIB_H_ */
diff --git a/sys/dev/iavf/iavf_lib.h b/sys/dev/iavf/iavf_lib.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_lib.h
@@ -0,0 +1,512 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_lib.h
+ * @brief header for structures and functions common to legacy and iflib
+ *
+ * Contains definitions and function declarations which are shared between the
+ * legacy and iflib driver implementations.
+ */
+#ifndef _IAVF_LIB_H_
+#define _IAVF_LIB_H_
+
+#include <sys/malloc.h>
+#include <machine/stdarg.h>
+#include <sys/sysctl.h>
+
+#include "iavf_debug.h"
+#include "iavf_osdep.h"
+#include "iavf_type.h"
+#include "iavf_prototype.h"
+
+MALLOC_DECLARE(M_IAVF);
+
+/*
+ * Ring Descriptors. Valid Range: 64-4096. Default Value: 1024. This value
+ * is the number of Tx/Rx descriptors allocated by the driver. Increasing
+ * this value allows the driver to queue more operations.
+ *
+ * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
+ * The driver currently always uses 32 byte Rx descriptors.
+ */
+#define IAVF_DEFAULT_RING 1024
+#define IAVF_MAX_RING 4096
+#define IAVF_MIN_RING 64
+#define IAVF_RING_INCREMENT 32
+
+#define IAVF_AQ_LEN 256
+#define IAVF_AQ_LEN_MAX 1024
+
+/*
+** Default number of entries in Tx queue buf_ring.
+*/
+#define DEFAULT_TXBRSZ 4096
+
+/* Byte alignment for Tx/Rx descriptor rings */
+#define DBA_ALIGN 128
+
+/*
+ * Max number of multicast MAC addrs added to the driver's
+ * internal lists before converting to promiscuous mode
+ */
+#define MAX_MULTICAST_ADDR 128
+
+#define IAVF_MSIX_BAR 3
+#define IAVF_ADM_LIMIT 2
+#define IAVF_TSO_SIZE ((255*1024)-1)
+#define IAVF_AQ_BUF_SZ ((u32) 4096)
+#define IAVF_RX_HDR 128
+#define IAVF_RX_LIMIT 512
+#define IAVF_RX_ITR 0
+#define IAVF_TX_ITR 1
+/**
+ * The maximum packet length allowed to be sent or received by the adapter.
+ */
+#define IAVF_MAX_FRAME 9728
+/**
+ * The minimum packet length allowed to be sent by the adapter.
+ */
+#define IAVF_MIN_FRAME 17
+#define IAVF_MAX_TX_SEGS 8
+#define IAVF_MAX_RX_SEGS 5
+#define IAVF_MAX_TSO_SEGS 128
+#define IAVF_SPARSE_CHAIN 7
+#define IAVF_MIN_TSO_MSS 64
+#define IAVF_MAX_TSO_MSS 9668
+#define IAVF_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
+#define IAVF_AQ_MAX_ERR 30
+#define IAVF_MAX_INIT_WAIT 120
+#define IAVF_AQ_TIMEOUT (1 * hz)
+#define IAVF_ADV_LINK_SPEED_SCALE ((u64)1000000)
+#define IAVF_MAX_DIS_Q_RETRY 10
+
+#define IAVF_RSS_KEY_SIZE_REG 13
+#define IAVF_RSS_KEY_SIZE (IAVF_RSS_KEY_SIZE_REG * 4)
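+/* i.e. thirteen 32-bit key registers hold a 52-byte RSS hash key */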
+#define IAVF_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */
+#define IAVF_RSS_VSI_LUT_ENTRY_MASK 0x3F
+#define IAVF_RSS_VF_LUT_ENTRY_MASK 0xF
+
+/* Maximum MTU size */
+#define IAVF_MAX_MTU (IAVF_MAX_FRAME - \
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
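+/* With IAVF_MAX_FRAME = 9728, this works out to 9728 - 14 (Ethernet
+ * header) - 4 (CRC) - 4 (VLAN tag) = 9706 bytes. */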
+
+/*
+ * Hardware requires that TSO packets have a segment size of at least 64
+ * bytes. To avoid sending bad frames to the hardware, the driver forces
+ * the MSS of all TSO packets to be at least 64 bytes.
+ *
+ * However, if the MTU is reduced below a certain size, then the enforced
+ * minimum MSS can produce segmented frames with a packet size larger than
+ * the MTU.
+ *
+ * Avoid this by preventing the MTU from being lowered below this limit.
+ * Alternative solutions would require changing the TCP stack to disable
+ * segmentation offload when the requested segment size drops below 64 bytes.
+ */
+#define IAVF_MIN_MTU 112
+
+/*
+ * Interrupt Moderation parameters
+ * Multiply ITR values by 2 for real ITR value
+ */
+#define IAVF_MAX_ITR 0x0FF0
+#define IAVF_ITR_100K 0x0005
+#define IAVF_ITR_20K 0x0019
+#define IAVF_ITR_8K 0x003E
+#define IAVF_ITR_4K 0x007A
+#define IAVF_ITR_1K 0x01F4
+#define IAVF_ITR_DYNAMIC 0x8000
+#define IAVF_LOW_LATENCY 0
+#define IAVF_AVE_LATENCY 1
+#define IAVF_BULK_LATENCY 2
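+
+/*
+ * Example: per the note above, the ITR registers count in 2 usec units,
+ * so a raw value of IAVF_ITR_8K (0x3E == 62) corresponds to a 124 usec
+ * interval, i.e. roughly 8000 interrupts per second.
+ */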
+
+/* MacVlan Flags */
+#define IAVF_FILTER_USED (u16)(1 << 0)
+#define IAVF_FILTER_VLAN (u16)(1 << 1)
+#define IAVF_FILTER_ADD (u16)(1 << 2)
+#define IAVF_FILTER_DEL (u16)(1 << 3)
+#define IAVF_FILTER_MC (u16)(1 << 4)
+/* used in the vlan field of the filter when not a vlan */
+#define IAVF_VLAN_ANY -1
+
+#define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
+#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
+
+/* Misc flags for iavf_vsi.flags */
+#define IAVF_FLAGS_KEEP_TSO4 (1 << 0)
+#define IAVF_FLAGS_KEEP_TSO6 (1 << 1)
+
+#define IAVF_DEFAULT_RSS_HENA_BASE (\
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6))
+
+#define IAVF_DEFAULT_ADV_RSS_HENA (\
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
+
+#define IAVF_DEFAULT_RSS_HENA_XL710 (\
+ IAVF_DEFAULT_RSS_HENA_BASE | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HENA_X722 (\
+ IAVF_DEFAULT_RSS_HENA_XL710 | \
+ IAVF_DEFAULT_ADV_RSS_HENA)
+
+#define IAVF_DEFAULT_RSS_HENA_AVF (\
+ IAVF_DEFAULT_RSS_HENA_BASE | \
+ IAVF_DEFAULT_ADV_RSS_HENA)
+
+/* Pre-11 counter(9) compatibility */
+#if __FreeBSD_version >= 1100036
+#define IAVF_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count)
+#define IAVF_SET_IERRORS(vsi, count) (vsi)->ierrors = (count)
+#define IAVF_SET_OPACKETS(vsi, count) (vsi)->opackets = (count)
+#define IAVF_SET_OERRORS(vsi, count) (vsi)->oerrors = (count)
+#define IAVF_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. */
+#define IAVF_SET_IBYTES(vsi, count) (vsi)->ibytes = (count)
+#define IAVF_SET_OBYTES(vsi, count) (vsi)->obytes = (count)
+#define IAVF_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count)
+#define IAVF_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count)
+#define IAVF_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count)
+#define IAVF_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count)
+#define IAVF_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
+#else
+#define IAVF_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count)
+#define IAVF_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count)
+#define IAVF_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count)
+#define IAVF_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count)
+#define IAVF_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count)
+#define IAVF_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count)
+#define IAVF_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count)
+#define IAVF_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count)
+#define IAVF_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count)
+#define IAVF_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count)
+#define IAVF_SET_OQDROPS(vsi, count)	(vsi)->ifp->if_snd.ifq_drops = (count)
+#define IAVF_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
+#endif
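+
+/*
+ * Illustrative only: after fetching updated stats from the PF, an update
+ * path can feed the macros above like so (es assumed to point at the
+ * VSI's struct iavf_eth_stats):
+ *
+ *	IAVF_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast +
+ *	    es->rx_broadcast);
+ */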
+
+/* For stats sysctl naming */
+#define IAVF_QUEUE_NAME_LEN 32
+
+#define IAVF_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define IAVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define IAVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
+#define IAVF_FLAG_AQ_GET_STATS (u32)(1 << 10)
+#define IAVF_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
+#define IAVF_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
+#define IAVF_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
+#define IAVF_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
+
+#define IAVF_CAP_ADV_LINK_SPEED(_sc) \
+ ((_sc)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+
+#define IAVF_NRXQS(_vsi) ((_vsi)->num_rx_queues)
+#define IAVF_NTXQS(_vsi) ((_vsi)->num_tx_queues)
+
+/**
+ * printf %b flag args
+ */
+#define IAVF_FLAGS \
+ "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
+ "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
+ "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
+ "\12CONFIGURE_PROMISC\13GET_STATS\14CONFIG_RSS_KEY" \
+ "\15SET_RSS_HENA\16GET_RSS_HENA_CAPS\17CONFIG_RSS_LUT"
+/**
+ * printf %b flag args for offloads from virtchnl.h
+ */
+#define IAVF_PRINTF_VF_OFFLOAD_FLAGS \
+ "\20\1L2" \
+ "\2IWARP" \
+ "\3FCOE" \
+ "\4RSS_AQ" \
+ "\5RSS_REG" \
+ "\6WB_ON_ITR" \
+ "\7REQ_QUEUES" \
+ "\10ADV_LINK_SPEED" \
+ "\21VLAN" \
+ "\22RX_POLLING" \
+ "\23RSS_PCTYPE_V2" \
+ "\24RSS_PF" \
+ "\25ENCAP" \
+ "\26ENCAP_CSUM" \
+ "\27RX_ENCAP_CSUM" \
+ "\30ADQ"
+
+/**
+ * @enum iavf_ext_link_speed
+ * @brief Extended link speed enumeration
+ *
+ * Enumeration of possible link speeds that the device could be operating in.
+ * Contains an extended list compared to virtchnl_link_speed, including
+ * additional higher speeds such as 50GB and 100GB.
+ *
+ * The enumeration is used to convert between the old virtchnl_link_speed, the
+ * newer advanced speed reporting value specified in Mb/s, and the ifmedia
+ * link speeds reported to the operating system.
+ */
+enum iavf_ext_link_speed {
+ IAVF_EXT_LINK_SPEED_UNKNOWN,
+ IAVF_EXT_LINK_SPEED_10MB,
+ IAVF_EXT_LINK_SPEED_100MB,
+ IAVF_EXT_LINK_SPEED_1000MB,
+ IAVF_EXT_LINK_SPEED_2500MB,
+ IAVF_EXT_LINK_SPEED_5GB,
+ IAVF_EXT_LINK_SPEED_10GB,
+ IAVF_EXT_LINK_SPEED_20GB,
+ IAVF_EXT_LINK_SPEED_25GB,
+ IAVF_EXT_LINK_SPEED_40GB,
+ IAVF_EXT_LINK_SPEED_50GB,
+ IAVF_EXT_LINK_SPEED_100GB,
+};
+
+/**
+ * @struct iavf_sysctl_info
+ * @brief sysctl statistic info
+ *
+ * Structure describing a single statistics sysctl, used for reporting
+ * specific hardware and software statistics via the sysctl interface.
+ */
+struct iavf_sysctl_info {
+ u64 *stat;
+ char *name;
+ char *description;
+};
+
+/* Forward struct declarations */
+struct iavf_sc;
+struct iavf_vsi;
+
+/**
+ * @enum iavf_state
+ * @brief Driver state flags
+ *
+ * Used to indicate the status of various driver events. Intended to be
+ * modified only using atomic operations, so that we can use it even in places
+ * which aren't locked.
+ */
+enum iavf_state {
+ IAVF_STATE_INITIALIZED,
+ IAVF_STATE_RESET_REQUIRED,
+ IAVF_STATE_RESET_PENDING,
+ IAVF_STATE_RUNNING,
+ /* This entry must be last */
+ IAVF_STATE_LAST,
+};
+
+/* Functions for setting and checking driver state. Note that these
+ * functions take bit positions, not bitmasks. The atomic_testandset_32 and
+ * atomic_testandclear_32 operations require bit positions, while
+ * atomic_set_32 and atomic_clear_32 require bitmasks. This mismatch can
+ * easily lead to programming errors, so we provide wrapper functions to
+ * avoid them.
+ */
+
+/**
+ * iavf_set_state - Set the specified state
+ * @s: the state bitmap
+ * @bit: the state to set
+ *
+ * Atomically update the state bitmap with the specified bit set.
+ */
+static inline void
+iavf_set_state(volatile u32 *s, enum iavf_state bit)
+{
+ /* atomic_set_32 expects a bitmask */
+ atomic_set_32(s, BIT(bit));
+}
+
+/**
+ * iavf_clear_state - Clear the specified state
+ * @s: the state bitmap
+ * @bit: the state to clear
+ *
+ * Atomically update the state bitmap with the specified bit cleared.
+ */
+static inline void
+iavf_clear_state(volatile u32 *s, enum iavf_state bit)
+{
+ /* atomic_clear_32 expects a bitmask */
+ atomic_clear_32(s, BIT(bit));
+}
+
+/**
+ * iavf_testandset_state - Test and set the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * Atomically update the state bitmap, setting the specified bit.
+ *
+ * @returns the previous value of the bit.
+ */
+static inline u32
+iavf_testandset_state(volatile u32 *s, enum iavf_state bit)
+{
+ /* atomic_testandset_32 expects a bit position */
+ return atomic_testandset_32(s, bit);
+}
+
+/**
+ * iavf_testandclear_state - Test and clear the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * Atomically update the state bitmap, clearing the specified bit.
+ *
+ * @returns the previous value of the bit.
+ */
+static inline u32
+iavf_testandclear_state(volatile u32 *s, enum iavf_state bit)
+{
+ /* atomic_testandclear_32 expects a bit position */
+ return atomic_testandclear_32(s, bit);
+}
+
+/**
+ * iavf_test_state - Test the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * @returns true if the state is set, false otherwise.
+ *
+ * @remark Use this only if the flow does not need to update the state. If you
+ * must update the state as well, prefer iavf_testandset_state or
+ * iavf_testandclear_state.
+ */
+static inline u32
+iavf_test_state(volatile u32 *s, enum iavf_state bit)
+{
+ return (*s & BIT(bit)) ? true : false;
+}
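+
+/*
+ * Illustrative only: all of the helpers above take bit positions, so a
+ * typical caller (assuming a valid softc) looks like:
+ *
+ *	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
+ *	if (iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
+ *		... device is running ...
+ *	iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);
+ */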
+
+/**
+ * cmp_etheraddr - Compare two ethernet addresses
+ * @ea1: first ethernet address
+ * @ea2: second ethernet address
+ *
+ * Compares two ethernet addresses.
+ *
+ * @returns true if the addresses are equal, false otherwise.
+ */
+static inline bool
+cmp_etheraddr(const u8 *ea1, const u8 *ea2)
+{
+ bool cmp = FALSE;
+
+ if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
+ (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
+ (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
+ cmp = TRUE;
+
+ return (cmp);
+}
+
+int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
+int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
+void iavf_update_link_status(struct iavf_sc *);
+bool iavf_driver_is_detaching(struct iavf_sc *sc);
+void iavf_msec_pause(int msecs);
+void iavf_get_default_rss_key(u32 *key);
+int iavf_allocate_pci_resources_common(struct iavf_sc *sc);
+int iavf_reset_complete(struct iavf_hw *hw);
+int iavf_setup_vc(struct iavf_sc *sc);
+int iavf_reset(struct iavf_sc *sc);
+void iavf_enable_adminq_irq(struct iavf_hw *hw);
+void iavf_disable_adminq_irq(struct iavf_hw *hw);
+int iavf_vf_config(struct iavf_sc *sc);
+void iavf_print_device_info(struct iavf_sc *sc);
+int iavf_get_vsi_res_from_vf_res(struct iavf_sc *sc);
+void iavf_set_mac_addresses(struct iavf_sc *sc);
+void iavf_init_filters(struct iavf_sc *sc);
+void iavf_free_filters(struct iavf_sc *sc);
+void iavf_add_device_sysctls_common(struct iavf_sc *sc);
+void iavf_configure_tx_itr(struct iavf_sc *sc);
+void iavf_configure_rx_itr(struct iavf_sc *sc);
+struct sysctl_oid_list *
+ iavf_create_debug_sysctl_tree(struct iavf_sc *sc);
+void iavf_add_debug_sysctls_common(struct iavf_sc *sc,
+ struct sysctl_oid_list *debug_list);
+void iavf_add_vsi_sysctls(device_t dev, struct iavf_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name);
+void iavf_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child, struct iavf_eth_stats *eth_stats);
+void iavf_media_status_common(struct iavf_sc *sc,
+ struct ifmediareq *ifmr);
+int iavf_media_change_common(struct ifnet *ifp);
+void iavf_set_initial_baudrate(struct ifnet *ifp);
+u64 iavf_max_vc_speed_to_value(u8 link_speeds);
+void iavf_config_rss_reg(struct iavf_sc *sc);
+void iavf_config_rss_pf(struct iavf_sc *sc);
+void iavf_config_rss(struct iavf_sc *sc);
+int iavf_config_promisc(struct iavf_sc *sc, int flags);
+void iavf_init_multi(struct iavf_sc *sc);
+void iavf_multi_set(struct iavf_sc *sc);
+int iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags);
+struct iavf_mac_filter *
+ iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr);
+struct iavf_mac_filter *
+ iavf_get_mac_filter(struct iavf_sc *sc);
+u64 iavf_baudrate_from_link_speed(struct iavf_sc *sc);
+void iavf_add_vlan_filter(struct iavf_sc *sc, u16 vtag);
+int iavf_mark_del_vlan_filter(struct iavf_sc *sc, u16 vtag);
+void iavf_update_msix_devinfo(device_t dev);
+void iavf_disable_queues_with_retries(struct iavf_sc *);
+
+int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
+int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
+int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
+int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
+
+#endif /* _IAVF_LIB_H_ */
diff --git a/sys/dev/iavf/iavf_lib.c b/sys/dev/iavf/iavf_lib.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_lib.c
@@ -0,0 +1,1531 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_lib.c
+ * @brief library code common to both legacy and iflib
+ *
+ * Contains functions common to the iflib and legacy drivers. Includes
+ * hardware initialization and control functions, as well as sysctl handlers
+ * for the sysctls which are shared between the legacy and iflib drivers.
+ */
+#include "iavf_iflib.h"
+#include "iavf_vc_common.h"
+
+static void iavf_init_hw(struct iavf_hw *hw, device_t dev);
+static u_int iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int cnt);
+
+/**
+ * iavf_msec_pause - Pause for at least the specified number of milliseconds
+ * @msecs: number of milliseconds to pause for
+ *
+ * Pause execution of the current thread for a specified number of
+ * milliseconds. Used to enforce minimum delay times when waiting for various
+ * hardware events.
+ */
+void
+iavf_msec_pause(int msecs)
+{
+ pause("iavf_msec_pause", MSEC_2_TICKS(msecs));
+}
+
+/**
+ * iavf_get_default_rss_key - Get the default RSS key for this driver
+ * @key: output parameter to store the key in
+ *
+ * Copies the driver's default RSS key into the provided key variable.
+ *
+ * @pre assumes that key is not NULL and has at least IAVF_RSS_KEY_SIZE
+ * storage space.
+ */
+void
+iavf_get_default_rss_key(u32 *key)
+{
+ MPASS(key != NULL);
+
+ u32 rss_seed[IAVF_RSS_KEY_SIZE_REG] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
+ 0x0, 0x0, 0x0};
+
+ bcopy(rss_seed, key, IAVF_RSS_KEY_SIZE);
+}
+
+/**
+ * iavf_allocate_pci_resources_common - Allocate PCI resources
+ * @sc: the private device softc pointer
+ *
+ * @pre sc->dev is set
+ *
+ * Allocates the common PCI resources used by the driver.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_allocate_pci_resources_common(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int rid;
+
+ /* Map PCI BAR0 */
+ rid = PCIR_BAR(0);
+ sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(sc->pci_mem)) {
+ device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
+ return (ENXIO);
+ }
+
+ iavf_init_hw(hw, dev);
+
+ /* Save off register access information */
+ sc->osdep.mem_bus_space_tag =
+ rman_get_bustag(sc->pci_mem);
+ sc->osdep.mem_bus_space_handle =
+ rman_get_bushandle(sc->pci_mem);
+ sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
+ sc->osdep.flush_reg = IAVF_VFGEN_RSTAT;
+ sc->osdep.dev = dev;
+
+ sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
+ sc->hw.back = &sc->osdep;
+
+ return (0);
+}
+
+/**
+ * iavf_init_hw - Initialize the device HW
+ * @hw: device hardware structure
+ * @dev: the stack device_t pointer
+ *
+ * Attach helper function. Gathers information about the (virtual) hardware
+ * for use elsewhere in the driver.
+ */
+static void
+iavf_init_hw(struct iavf_hw *hw, device_t dev)
+{
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+}
+
+/**
+ * iavf_sysctl_current_speed - Sysctl to display the current device speed
+ * @oidp: sysctl oid pointer
+ * @arg1: pointer to the device softc typecasted to void *
+ * @arg2: unused sysctl argument
+ * @req: sysctl request structure
+ *
+ * Reads the current speed reported from the physical device into a string for
+ * display by the current_speed sysctl.
+ *
+ * @returns zero or an error code on failure.
+ */
+int
+iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (IAVF_CAP_ADV_LINK_SPEED(sc))
+ error = sysctl_handle_string(oidp,
+ __DECONST(char *, iavf_ext_speed_to_str(iavf_adv_speed_to_ext_speed(sc->link_speed_adv))),
+ 8, req);
+ else
+ error = sysctl_handle_string(oidp,
+ __DECONST(char *, iavf_vc_speed_to_string(sc->link_speed)),
+ 8, req);
+
+ return (error);
+}
+
+/**
+ * iavf_reset_complete - Wait for a device reset to complete
+ * @hw: pointer to the hardware structure
+ *
+ * Reads the reset registers and waits until they indicate that a device reset
+ * is complete.
+ *
+ * @pre this function may call pause() and must not be called from a context
+ * that cannot sleep.
+ *
+ * @returns zero on success, or EBUSY if it times out waiting for reset.
+ */
+int
+iavf_reset_complete(struct iavf_hw *hw)
+{
+ u32 reg;
+
+ /* Wait up to ~10 seconds */
+ for (int i = 0; i < 100; i++) {
+ reg = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
+
+ if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg == VIRTCHNL_VFR_COMPLETED))
+ return (0);
+ iavf_msec_pause(100);
+ }
+
+ return (EBUSY);
+}
+
+/**
+ * iavf_setup_vc - Setup virtchnl communication
+ * @sc: device private softc
+ *
+ * iavf_attach() helper function. Initializes the admin queue and attempts to
+ * establish contact with the PF by retrying the initial "API version" message
+ * several times or until the PF responds.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_setup_vc(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int error = 0, ret_error = 0, asq_retries = 0;
+	bool send_api_ver_retried = false;
+
+ /* Need to set these AQ parameters before initializing AQ */
+ hw->aq.num_arq_entries = IAVF_AQ_LEN;
+ hw->aq.num_asq_entries = IAVF_AQ_LEN;
+ hw->aq.arq_buf_size = IAVF_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IAVF_AQ_BUF_SZ;
+
+ for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
+ /* Initialize admin queue */
+ error = iavf_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: init_adminq failed: %d\n",
+ __func__, error);
+ ret_error = 1;
+ continue;
+ }
+
+ iavf_dbg_init(sc, "Initialized Admin Queue; starting"
+ " send_api_ver attempt %d", i+1);
+
+retry_send:
+ /* Send VF's API version */
+ error = iavf_send_api_ver(sc);
+ if (error) {
+ iavf_shutdown_adminq(hw);
+ ret_error = 2;
+ device_printf(dev, "%s: unable to send api"
+ " version to PF on attempt %d, error %d\n",
+ __func__, i+1, error);
+ }
+
+ asq_retries = 0;
+ while (!iavf_asq_done(hw)) {
+ if (++asq_retries > IAVF_AQ_MAX_ERR) {
+ iavf_shutdown_adminq(hw);
+ device_printf(dev, "Admin Queue timeout "
+ "(waiting for send_api_ver), %d more tries...\n",
+ IAVF_AQ_MAX_ERR - (i + 1));
+ ret_error = 3;
+ break;
+ }
+ iavf_msec_pause(10);
+ }
+ if (asq_retries > IAVF_AQ_MAX_ERR)
+ continue;
+
+ iavf_dbg_init(sc, "Sent API version message to PF");
+
+ /* Verify that the VF accepts the PF's API version */
+ error = iavf_verify_api_ver(sc);
+ if (error == ETIMEDOUT) {
+ if (!send_api_ver_retried) {
+ /* Resend message, one more time */
+ send_api_ver_retried = true;
+ device_printf(dev,
+ "%s: Timeout while verifying API version on first"
+ " try!\n", __func__);
+ goto retry_send;
+ } else {
+ device_printf(dev,
+ "%s: Timeout while verifying API version on second"
+ " try!\n", __func__);
+ ret_error = 4;
+ break;
+ }
+ }
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to verify API version,"
+ " error %d\n", __func__, error);
+ ret_error = 5;
+ }
+ break;
+ }
+
+ if (ret_error >= 4)
+ iavf_shutdown_adminq(hw);
+ return (ret_error);
+}
+
+/**
+ * iavf_reset - Requests a VF reset from the PF.
+ * @sc: device private softc
+ *
+ * @pre Requires the VF's Admin Queue to be initialized.
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_reset(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int error = 0;
+
+ /* Ask the PF to reset us if we are initiating */
+ if (!iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING))
+ iavf_request_reset(sc);
+
+ iavf_msec_pause(100);
+ error = iavf_reset_complete(hw);
+ if (error) {
+ device_printf(dev, "%s: VF reset failed\n",
+ __func__);
+ return (error);
+ }
+ pci_enable_busmaster(dev);
+
+ error = iavf_shutdown_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: shutdown_adminq failed: %d\n",
+ __func__, error);
+ return (error);
+ }
+
+ error = iavf_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: init_adminq failed: %d\n",
+ __func__, error);
+ return (error);
+ }
+
+ /* IFLIB: This is called only in the iflib driver */
+ iavf_enable_adminq_irq(hw);
+ return (0);
+}
+
+/**
+ * iavf_enable_adminq_irq - Enable the administrative interrupt
+ * @hw: pointer to the hardware structure
+ *
+ * Writes to registers to enable the administrative interrupt cause, in order
+ * to handle non-queue related interrupt events.
+ */
+void
+iavf_enable_adminq_irq(struct iavf_hw *hw)
+{
+ wr32(hw, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
+ /* flush */
+ rd32(hw, IAVF_VFGEN_RSTAT);
+}
+
+/**
+ * iavf_disable_adminq_irq - Disable the administrative interrupt cause
+ * @hw: pointer to the hardware structure
+ *
+ * Writes to registers to disable the administrative interrupt cause.
+ */
+void
+iavf_disable_adminq_irq(struct iavf_hw *hw)
+{
+ wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
+ wr32(hw, IAVF_VFINT_ICR0_ENA1, 0);
+ iavf_flush(hw);
+}
+
+/**
+ * iavf_vf_config - Configure this VF over the virtchnl
+ * @sc: device private softc
+ *
+ * iavf_attach() helper function. Asks the PF for this VF's configuration, and
+ * saves the information if it receives it.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_vf_config(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int bufsz, error = 0, ret_error = 0;
+ int asq_retries, retried = 0;
+
+retry_config:
+ error = iavf_send_vf_config_msg(sc);
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to send VF config request, attempt %d,"
+ " error %d\n", __func__, retried + 1, error);
+ ret_error = 2;
+ }
+
+ asq_retries = 0;
+ while (!iavf_asq_done(hw)) {
+ if (++asq_retries > IAVF_AQ_MAX_ERR) {
+ device_printf(dev, "%s: Admin Queue timeout "
+ "(waiting for send_vf_config_msg), attempt %d\n",
+ __func__, retried + 1);
+ ret_error = 3;
+ goto fail;
+ }
+ iavf_msec_pause(10);
+ }
+
+ iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
+ retried + 1);
+
+ if (!sc->vf_res) {
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ sc->vf_res = (struct virtchnl_vf_resource *)malloc(bufsz, M_IAVF, M_NOWAIT);
+ if (!sc->vf_res) {
+ device_printf(dev,
+ "%s: Unable to allocate memory for VF configuration"
+ " message from PF on attempt %d\n", __func__, retried + 1);
+ ret_error = 1;
+ goto fail;
+ }
+ }
+
+ /* Check for VF config response */
+ error = iavf_get_vf_config(sc);
+ if (error == ETIMEDOUT) {
+ /* The 1st time we timeout, send the configuration message again */
+ if (!retried) {
+ retried++;
+ goto retry_config;
+ }
+ device_printf(dev,
+ "%s: iavf_get_vf_config() timed out waiting for a response\n",
+ __func__);
+ }
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to get VF configuration from PF after %d tries!\n",
+ __func__, retried + 1);
+ ret_error = 4;
+ }
+ goto done;
+
+fail:
+ free(sc->vf_res, M_IAVF);
+done:
+ return (ret_error);
+}
+
+/**
+ * iavf_print_device_info - Print some device parameters at attach
+ * @sc: device private softc
+ *
+ * Log a message about this virtual device's capabilities at attach time.
+ */
+void
+iavf_print_device_info(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+
+ device_printf(dev,
+ "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
+ sc->vf_res->num_vsis,
+ sc->vf_res->num_queue_pairs,
+ sc->vf_res->max_vectors,
+ sc->vf_res->rss_key_size,
+ sc->vf_res->rss_lut_size);
+ iavf_dbg_info(sc, "Capabilities=%b\n",
+ sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
+}
+
+/**
+ * iavf_get_vsi_res_from_vf_res - Get VSI parameters and info for this VF
+ * @sc: device private softc
+ *
+ * Get the VSI parameters and information from the general VF resource info
+ * received by the physical device.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_get_vsi_res_from_vf_res(struct iavf_sc *sc)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ device_t dev = sc->dev;
+
+ sc->vsi_res = NULL;
+
+ for (int i = 0; i < sc->vf_res->num_vsis; i++) {
+		/*
+		 * XXX: We only expect one LAN VSI; if several are reported,
+		 * the last one found is used.
+		 */
+ if (sc->vf_res->vsi_res[i].vsi_type == IAVF_VSI_SRIOV)
+ sc->vsi_res = &sc->vf_res->vsi_res[i];
+ }
+ if (!sc->vsi_res) {
+ device_printf(dev, "%s: no LAN VSI found\n", __func__);
+ return (EIO);
+ }
+
+ vsi->id = sc->vsi_res->vsi_id;
+ return (0);
+}
+
+/**
+ * iavf_set_mac_addresses - Set the MAC address for this interface
+ * @sc: device private softc
+ *
+ * Set the permanent MAC address field in the HW structure. If a MAC address
+ * has not yet been set for this device by the physical function, generate one
+ * randomly.
+ */
+void
+iavf_set_mac_addresses(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u8 addr[ETHER_ADDR_LEN];
+
+ /* If no mac address was assigned just make a random one */
+ if (ETHER_IS_ZERO(hw->mac.addr)) {
+ arc4rand(&addr, sizeof(addr), 0);
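+		/* Clear the multicast bit and set the locally-administered
+		 * bit, per the IEEE 802 local MAC address convention. */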
+ addr[0] &= 0xFE;
+ addr[0] |= 0x02;
+ memcpy(hw->mac.addr, addr, sizeof(addr));
+ device_printf(dev, "Generated random MAC address\n");
+ }
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETHER_ADDR_LEN);
+}
+
+/**
+ * iavf_init_filters - Initialize filter structures
+ * @sc: device private softc
+ *
+ * Initialize the MAC and VLAN filter list heads.
+ *
+ * @remark this is intended to be called only once during the device attach
+ * process.
+ *
+ * @pre Because it uses M_WAITOK, this function should only be called in
+ * a context that is safe to sleep.
+ */
+void
+iavf_init_filters(struct iavf_sc *sc)
+{
+	/* Only the list heads are allocated here; entries are added later */
+	sc->mac_filters = (struct mac_list *)malloc(sizeof(struct mac_list),
+	    M_IAVF, M_WAITOK | M_ZERO);
+	SLIST_INIT(sc->mac_filters);
+	sc->vlan_filters = (struct vlan_list *)malloc(sizeof(struct vlan_list),
+	    M_IAVF, M_WAITOK | M_ZERO);
+	SLIST_INIT(sc->vlan_filters);
+}
+
+/**
+ * iavf_free_filters - Release filter lists
+ * @sc: device private softc
+ *
+ * Free the MAC and VLAN filter lists.
+ *
+ * @remark this is intended to be called only once during the device detach
+ * process.
+ */
+void
+iavf_free_filters(struct iavf_sc *sc)
+{
+ struct iavf_mac_filter *f;
+ struct iavf_vlan_filter *v;
+
+ while (!SLIST_EMPTY(sc->mac_filters)) {
+ f = SLIST_FIRST(sc->mac_filters);
+ SLIST_REMOVE_HEAD(sc->mac_filters, next);
+ free(f, M_IAVF);
+ }
+ free(sc->mac_filters, M_IAVF);
+ while (!SLIST_EMPTY(sc->vlan_filters)) {
+ v = SLIST_FIRST(sc->vlan_filters);
+ SLIST_REMOVE_HEAD(sc->vlan_filters, next);
+ free(v, M_IAVF);
+ }
+ free(sc->vlan_filters, M_IAVF);
+}
+
+/**
+ * iavf_add_device_sysctls_common - Initialize common device sysctls
+ * @sc: device private softc
+ *
+ * Setup sysctls common to both the iflib and legacy drivers.
+ */
+void
+iavf_add_device_sysctls_common(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ sc, 0, iavf_sysctl_tx_itr, "I",
+ "Immediately set TX ITR value for all queues");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ sc, 0, iavf_sysctl_rx_itr, "I",
+ "Immediately set RX ITR value for all queues");
+
+ SYSCTL_ADD_UQUAD(ctx, ctx_list,
+ OID_AUTO, "admin_irq", CTLFLAG_RD,
+ &sc->admin_irq, "Admin Queue IRQ Handled");
+}
+
+/**
+ * iavf_add_debug_sysctls_common - Initialize common debug sysctls
+ * @sc: device private softc
+ * @debug_list: pointer to debug sysctl node
+ *
+ * Setup sysctls used for debugging the device driver into the debug sysctl
+ * node.
+ */
+void
+iavf_add_debug_sysctls_common(struct iavf_sc *sc, struct sysctl_oid_list *debug_list)
+{
+ device_t dev = sc->dev;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
+ &sc->hw.debug_mask, 0, "Shared code debug message level");
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "core_debug_mask", CTLFLAG_RW,
+ (unsigned int *)&sc->dbg_mask, 0, "Non-shared code debug message level");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");
+}
+
+/**
+ * iavf_sysctl_tx_itr - Sysctl to set the Tx ITR value
+ * @oidp: sysctl oid pointer
+ * @arg1: pointer to the device softc
+ * @arg2: unused sysctl argument
+ * @req: sysctl req pointer
+ *
+ * On read, returns the Tx ITR value for all of the VF queues. On write,
+ * update the Tx ITR registers with the new Tx ITR value.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ device_t dev = sc->dev;
+ int requested_tx_itr;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ requested_tx_itr = sc->tx_itr;
+ error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_tx_itr < 0 || requested_tx_itr > IAVF_MAX_ITR) {
+ device_printf(dev,
+ "Invalid TX itr value; value must be between 0 and %d\n",
+ IAVF_MAX_ITR);
+ return (EINVAL);
+ }
+
+ sc->tx_itr = requested_tx_itr;
+ iavf_configure_tx_itr(sc);
+
+ return (error);
+}
+
+/**
+ * iavf_sysctl_rx_itr - Sysctl to set the Rx ITR value
+ * @oidp: sysctl oid pointer
+ * @arg1: pointer to the device softc
+ * @arg2: unused sysctl argument
+ * @req: sysctl req pointer
+ *
+ * On read, returns the Rx ITR value for all of the VF queues. On write,
+ * update the ITR registers with the new Rx ITR value.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ device_t dev = sc->dev;
+ int requested_rx_itr;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ requested_rx_itr = sc->rx_itr;
+ error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_rx_itr < 0 || requested_rx_itr > IAVF_MAX_ITR) {
+ device_printf(dev,
+ "Invalid RX itr value; value must be between 0 and %d\n",
+ IAVF_MAX_ITR);
+ return (EINVAL);
+ }
+
+ sc->rx_itr = requested_rx_itr;
+ iavf_configure_rx_itr(sc);
+
+ return (error);
+}
+
+/**
+ * iavf_configure_tx_itr - Configure the Tx ITR
+ * @sc: device private softc
+ *
+ * Updates the ITR registers with a new Tx ITR setting.
+ */
+void
+iavf_configure_tx_itr(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_tx_queue *que = vsi->tx_queues;
+
+ vsi->tx_itr_setting = sc->tx_itr;
+
+ for (int i = 0; i < IAVF_NTXQS(vsi); i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IAVF_AVE_LATENCY;
+ }
+}
+
+/**
+ * iavf_configure_rx_itr - Configure the Rx ITR
+ * @sc: device private softc
+ *
+ * Updates the ITR registers with a new Rx ITR setting.
+ */
+void
+iavf_configure_rx_itr(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_rx_queue *que = vsi->rx_queues;
+
+ vsi->rx_itr_setting = sc->rx_itr;
+
+ for (int i = 0; i < IAVF_NRXQS(vsi); i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IAVF_AVE_LATENCY;
+ }
+}
+
+/**
+ * iavf_create_debug_sysctl_tree - Create a debug sysctl node
+ * @sc: device private softc
+ *
+ * Create a sysctl node meant to hold sysctls used to print debug information.
+ * Mark it as CTLFLAG_SKIP so that these sysctls do not show up in the
+ * "sysctl -a" output.
+ *
+ * @returns a pointer to the created sysctl node.
+ */
+struct sysctl_oid_list *
+iavf_create_debug_sysctl_tree(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+ struct sysctl_oid *debug_node;
+
+ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
+
+ return (SYSCTL_CHILDREN(debug_node));
+}
+
+/**
+ * iavf_add_vsi_sysctls - Add sysctls for a given VSI
+ * @dev: device pointer
+ * @vsi: pointer to the VSI
+ * @ctx: sysctl context to add to
+ * @sysctl_name: name of the sysctl node (containing the VSI number)
+ *
+ * Adds a new sysctl node for holding specific sysctls for the given VSI.
+ */
+void
+iavf_add_vsi_sysctls(device_t dev, struct iavf_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name)
+{
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+ struct sysctl_oid_list *vsi_list;
+
+ tree = device_get_sysctl_tree(dev);
+ child = SYSCTL_CHILDREN(tree);
+ vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ iavf_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
+}
+
+/**
+ * iavf_sysctl_sw_filter_list - Dump software filters
+ * @oidp: sysctl oid pointer
+ * @arg1: pointer to the device softc
+ * @arg2: unused sysctl argument
+ * @req: sysctl req pointer
+ *
+ * On read, generates a string which lists the MAC and VLAN filters added to
+ * this virtual device. Useful for debugging to see whether or not the
+ * expected filters have been configured by software.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ struct iavf_mac_filter *f;
+ struct iavf_vlan_filter *v;
+ device_t dev = sc->dev;
+ int ftl_len, ftl_counter = 0, error = 0;
+ struct sbuf *buf;
+
+ UNREFERENCED_2PARAMETER(arg2, oidp);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_printf(buf, "\n");
+
+ /* Print MAC filters */
+ sbuf_printf(buf, "MAC Filters:\n");
+ ftl_len = 0;
+ SLIST_FOREACH(f, sc->mac_filters, next)
+ ftl_len++;
+ if (ftl_len < 1)
+ sbuf_printf(buf, "(none)\n");
+ else {
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ sbuf_printf(buf,
+ MAC_FORMAT ", flags %#06x\n",
+ MAC_FORMAT_ARGS(f->macaddr), f->flags);
+ }
+ }
+
+ /* Print VLAN filters */
+ sbuf_printf(buf, "VLAN Filters:\n");
+ ftl_len = 0;
+ SLIST_FOREACH(v, sc->vlan_filters, next)
+ ftl_len++;
+ if (ftl_len < 1)
+ sbuf_printf(buf, "(none)");
+ else {
+ SLIST_FOREACH(v, sc->vlan_filters, next) {
+ sbuf_printf(buf,
+ "%d, flags %#06x",
+ v->vlan, v->flags);
+ /* don't print '\n' for last entry */
+ if (++ftl_counter != ftl_len)
+ sbuf_printf(buf, "\n");
+ }
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+/**
+ * iavf_media_status_common - Get media status for this device
+ * @sc: device softc pointer
+ * @ifmr: ifmedia request structure
+ *
+ * Report the media status for this device into the given ifmr structure.
+ */
+void
+iavf_media_status_common(struct iavf_sc *sc, struct ifmediareq *ifmr)
+{
+ enum iavf_ext_link_speed ext_speed;
+
+ iavf_update_link_status(sc);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!sc->link_up)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ /* Hardware is always full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+
+	/*
+	 * Based on the link speed reported by the PF over the AdminQ, choose
+	 * a PHY type to report. This isn't 100% correct since we don't really
+	 * know the underlying PHY type of the PF, but at least we can report
+	 * a valid link speed...
+	 */
+ if (IAVF_CAP_ADV_LINK_SPEED(sc))
+ ext_speed = iavf_adv_speed_to_ext_speed(sc->link_speed_adv);
+ else
+ ext_speed = iavf_vc_speed_to_ext_speed(sc->link_speed);
+
+ ifmr->ifm_active |= iavf_ext_speed_to_ifmedia(ext_speed);
+}
+
+/**
+ * iavf_media_change_common - Change the media type for this device
+ * @ifp: ifnet structure
+ *
+ * @returns ENODEV because changing the media and speed is not supported.
+ */
+int
+iavf_media_change_common(struct ifnet *ifp)
+{
+ if_printf(ifp, "Changing speed is not supported\n");
+
+ return (ENODEV);
+}
+
+/**
+ * iavf_set_initial_baudrate - Set the initial device baudrate
+ * @ifp: ifnet structure
+ *
+ * Set the baudrate for this ifnet structure to the expected initial value of
+ * 40Gbps. This may be updated to a lower baudrate after the physical
+ * function reports its speed to us over the virtchnl interface.
+ */
+void
+iavf_set_initial_baudrate(struct ifnet *ifp)
+{
+#if __FreeBSD_version >= 1100000
+ if_setbaudrate(ifp, IF_Gbps(40));
+#else
+ if_initbaudrate(ifp, IF_Gbps(40));
+#endif
+}
+
+/**
+ * iavf_add_sysctls_eth_stats - Add ethernet statistics sysctls
+ * @ctx: the sysctl ctx to add to
+ * @child: the node to add the sysctls to
+ * @eth_stats: ethernet stats structure
+ *
+ * Creates sysctls that report the values of the provided ethernet stats
+ * structure.
+ */
+void
+iavf_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct iavf_eth_stats *eth_stats)
+{
+ struct iavf_sysctl_info ctls[] =
+ {
+ {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&eth_stats->rx_unknown_protocol, "rx_unknown_proto",
+ "RX unknown protocol packets"},
+ {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&eth_stats->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&eth_stats->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ {&eth_stats->tx_errors, "tx_errors", "TX packet errors"},
+		/* end-of-list sentinel */
+		{0, 0, 0}
+ };
+
+ struct iavf_sysctl_info *entry = ctls;
+
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+/**
+ * iavf_max_vc_speed_to_value - Convert link speed to IF speed value
+ * @link_speeds: bitmap of supported link speeds
+ *
+ * @returns the link speed value for the highest speed reported in the
+ * link_speeds bitmap.
+ */
+u64
+iavf_max_vc_speed_to_value(u8 link_speeds)
+{
+ if (link_speeds & VIRTCHNL_LINK_SPEED_40GB)
+ return IF_Gbps(40);
+ if (link_speeds & VIRTCHNL_LINK_SPEED_25GB)
+ return IF_Gbps(25);
+ if (link_speeds & VIRTCHNL_LINK_SPEED_20GB)
+ return IF_Gbps(20);
+ if (link_speeds & VIRTCHNL_LINK_SPEED_10GB)
+ return IF_Gbps(10);
+ if (link_speeds & VIRTCHNL_LINK_SPEED_1GB)
+ return IF_Gbps(1);
+	/* Fall back to the minimum supported link speed */
+	return IF_Mbps(100);
+}
+
+/**
+ * iavf_config_rss_reg - Configure RSS using registers
+ * @sc: device private softc
+ *
+ * Configures RSS for this function using the device registers. Called if the
+ * PF does not support configuring RSS over the virtchnl interface.
+ */
+void
+iavf_config_rss_reg(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ struct iavf_vsi *vsi = &sc->vsi;
+ u32 lut = 0;
+ u64 set_hena = 0, hena;
+ int i, j, que_id;
+ u32 rss_seed[IAVF_RSS_KEY_SIZE_REG];
+#ifdef RSS
+ u32 rss_hash_config;
+#endif
+
+ /* Don't set up RSS if using a single queue */
+ if (IAVF_NRXQS(vsi) == 1) {
+ wr32(hw, IAVF_VFQF_HENA(0), 0);
+ wr32(hw, IAVF_VFQF_HENA(1), 0);
+ iavf_flush(hw);
+ return;
+ }
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#else
+ iavf_get_default_rss_key(rss_seed);
+#endif
+
+ /* Fill out hash function seed */
+ for (i = 0; i < IAVF_RSS_KEY_SIZE_REG; i++)
+ wr32(hw, IAVF_VFQF_HKEY(i), rss_seed[i]);
+
+ /* Enable PCTYPES for RSS: */
+#ifdef RSS
+ rss_hash_config = rss_gethashconfig();
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV4_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV4_UDP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_FRAG_IPV6);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV6_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+ set_hena |= ((u64)1 << IAVF_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
+ set_hena = IAVF_DEFAULT_RSS_HENA_XL710;
+#endif
+ hena = (u64)rd32(hw, IAVF_VFQF_HENA(0)) |
+ ((u64)rd32(hw, IAVF_VFQF_HENA(1)) << 32);
+ hena |= set_hena;
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)hena);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with the Rx queue indices in round-robin fashion */
+ for (i = 0, j = 0; i < IAVF_RSS_VSI_LUT_SIZE; i++, j++) {
+ if (j == IAVF_NRXQS(vsi))
+ j = 0;
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_rx_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % IAVF_NRXQS(vsi);
+#else
+ que_id = j;
+#endif
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (que_id & IAVF_RSS_VF_LUT_ENTRY_MASK);
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3) {
+ wr32(hw, IAVF_VFQF_HLUT(i >> 2), lut);
+ iavf_dbg_rss(sc, "%s: HLUT(%2d): %#010x", __func__,
+ i, lut);
+ }
+ }
+ iavf_flush(hw);
+}
+
+/**
+ * iavf_config_rss_pf - Configure RSS using PF virtchnl messages
+ * @sc: device private softc
+ *
+ * Configure RSS by sending virtchnl messages to the PF.
+ */
+void
+iavf_config_rss_pf(struct iavf_sc *sc)
+{
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);
+
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);
+
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
+}
+
+/**
+ * iavf_config_rss - setup RSS
+ * @sc: device private softc
+ *
+ * Configures RSS using the method determined by capability flags in the VF
+ * resources structure sent from the PF over the virtchnl interface.
+ *
+ * @remark RSS keys and table are cleared on VF reset.
+ */
+void
+iavf_config_rss(struct iavf_sc *sc)
+{
+ if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
+ iavf_dbg_info(sc, "Setting up RSS using VF registers...\n");
+ iavf_config_rss_reg(sc);
+ } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ iavf_dbg_info(sc, "Setting up RSS using messages to PF...\n");
+ iavf_config_rss_pf(sc);
+ } else
+		device_printf(sc->dev, "PF did not offer a supported RSS configuration method.\n");
+}
+
+/**
+ * iavf_config_promisc - setup promiscuous mode
+ * @sc: device private softc
+ * @flags: promiscuous flags to configure
+ *
+ * Request that the PF enable the specified promiscuous modes.
+ *
+ * @returns zero; the request itself is completed asynchronously.
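+ *
+ * A minimal call sketch, assuming ifp is this VF's ifnet:
+ *
+ * @code{.c}
+ * iavf_config_promisc(sc, if_getflags(ifp));
+ * @endcode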
+ */
+int
+iavf_config_promisc(struct iavf_sc *sc, int flags)
+{
+ struct ifnet *ifp = sc->vsi.ifp;
+
+ sc->promisc_flags = 0;
+
+	if ((flags & IFF_ALLMULTI) ||
+ if_llmaddr_count(ifp) == MAX_MULTICAST_ADDR)
+ sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
+ if (flags & IFF_PROMISC)
+ sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;
+
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
+
+ return (0);
+}
+
+/**
+ * iavf_mc_filter_apply - Program a MAC filter for this VF
+ * @arg: pointer to the device softc
+ * @sdl: MAC multicast address
+ * @cnt: unused parameter
+ *
+ * Program a MAC address multicast filter for this device. Intended
+ * to be used with the map-like function if_foreach_llmaddr().
+ *
+ * @returns 1 on success, or 0 on failure
+ */
+static u_int
+iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int cnt __unused)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg;
+ int error;
+
+ error = iavf_add_mac_filter(sc, (u8*)LLADDR(sdl), IAVF_FILTER_MC);
+
+ return (!error);
+}
+
+/**
+ * iavf_init_multi - Initialize multicast address filters
+ * @sc: device private softc
+ *
+ * Called during initialization to reset multicast address filters to a known
+ * fresh state by deleting all currently active filters.
+ */
+void
+iavf_init_multi(struct iavf_sc *sc)
+{
+ struct iavf_mac_filter *f;
+ int mcnt = 0;
+
+ /* First clear any multicast filters */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if ((f->flags & IAVF_FILTER_USED)
+ && (f->flags & IAVF_FILTER_MC)) {
+ f->flags |= IAVF_FILTER_DEL;
+ mcnt++;
+ }
+ }
+ if (mcnt > 0)
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
+}
+
+/**
+ * iavf_multi_set - Set multicast filters
+ * @sc: device private softc
+ *
+ * Set multicast MAC filters for this device. If there are too many filters,
+ * this will request the device to go into multicast promiscuous mode instead.
+ */
+void
+iavf_multi_set(struct iavf_sc *sc)
+{
+ if_t ifp = sc->vsi.ifp;
+ int mcnt = 0;
+
+ IOCTL_DEBUGOUT("iavf_multi_set: begin");
+
+ mcnt = if_llmaddr_count(ifp);
+ if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
+		/* Delete MC filters and enable multicast promisc instead */
+ iavf_init_multi(sc);
+ sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
+ return;
+ }
+
+ /* If there aren't too many filters, delete existing MC filters */
+ iavf_init_multi(sc);
+
+ /* And (re-)install filters for all mcast addresses */
+ mcnt = if_foreach_llmaddr(ifp, iavf_mc_filter_apply, sc);
+
+ if (mcnt > 0)
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
+}
+
+/**
+ * iavf_add_mac_filter - Add a MAC filter to the sc MAC list
+ * @sc: device private softc
+ * @macaddr: MAC address to add
+ * @flags: filter flags
+ *
+ * Add a new MAC filter to the softc MAC filter list. These will later be sent
+ * to the physical function (and ultimately hardware) via the virtchnl
+ * interface.
+ *
+ * @returns zero on success, EEXIST if the filter already exists, and ENOMEM
+ * if we ran out of memory allocating the filter structure.
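+ *
+ * A usage sketch, assuming addr points at a six-byte MAC address; the new
+ * filter is only pushed to the PF once the corresponding virtchnl message
+ * is sent:
+ *
+ * @code{.c}
+ * if (iavf_add_mac_filter(sc, addr, IAVF_FILTER_MC) == 0)
+ *	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
+ * @endcode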
+ */
+int
+iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
+{
+ struct iavf_mac_filter *f;
+
+ /* Does one already exist? */
+ f = iavf_find_mac_filter(sc, macaddr);
+ if (f != NULL) {
+ iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
+ MAC_FORMAT_ARGS(macaddr));
+ return (EEXIST);
+ }
+
+ /* If not, get a new empty filter */
+ f = iavf_get_mac_filter(sc);
+ if (f == NULL) {
+		device_printf(sc->dev, "%s: failed to allocate a new MAC filter\n",
+		    __func__);
+ return (ENOMEM);
+ }
+
+ iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
+ MAC_FORMAT_ARGS(macaddr));
+
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->flags |= (IAVF_FILTER_ADD | IAVF_FILTER_USED);
+ f->flags |= flags;
+ return (0);
+}
+
+/**
+ * iavf_find_mac_filter - Find a MAC filter with the given address
+ * @sc: device private softc
+ * @macaddr: the MAC address to find
+ *
+ * Finds the filter structure in the MAC filter list with the corresponding
+ * MAC address.
+ *
+ * @returns a pointer to the filter structure, or NULL if no such filter
+ * exists in the list yet.
+ */
+struct iavf_mac_filter *
+iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
+{
+ struct iavf_mac_filter *f;
+ bool match = FALSE;
+
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (cmp_etheraddr(f->macaddr, macaddr)) {
+ match = TRUE;
+ break;
+ }
+ }
+
+ if (!match)
+ f = NULL;
+ return (f);
+}
+
+/**
+ * iavf_get_mac_filter - Get a new MAC address filter
+ * @sc: device private softc
+ *
+ * Allocates a new filter structure and inserts it into the MAC filter list.
+ *
+ * @post the caller must fill in the structure details after calling this
+ * function, but does not need to insert it into the linked list.
+ *
+ * @returns a pointer to the new filter structure, or NULL if we failed to
+ * allocate it.
+ */
+struct iavf_mac_filter *
+iavf_get_mac_filter(struct iavf_sc *sc)
+{
+ struct iavf_mac_filter *f;
+
+ f = (struct iavf_mac_filter *)malloc(sizeof(struct iavf_mac_filter),
+ M_IAVF, M_NOWAIT | M_ZERO);
+ if (f)
+ SLIST_INSERT_HEAD(sc->mac_filters, f, next);
+
+ return (f);
+}
+
+/**
+ * iavf_baudrate_from_link_speed - Convert link speed to baudrate
+ * @sc: device private softc
+ *
+ * @post The link_speed_adv field is in Mbps, so it is multiplied by
+ * 1,000,000 before it's returned.
+ *
+ * @returns the adapter link speed in bits/sec
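+ *
+ * For example, a link_speed_adv of 25000 (25 Gbps expressed in Mbps)
+ * yields 25000 * IAVF_ADV_LINK_SPEED_SCALE = 25,000,000,000 bits/sec.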
+ */
+u64
+iavf_baudrate_from_link_speed(struct iavf_sc *sc)
+{
+ if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ return (sc->link_speed_adv * IAVF_ADV_LINK_SPEED_SCALE);
+ else
+ return iavf_max_vc_speed_to_value(sc->link_speed);
+}
+
+/**
+ * iavf_add_vlan_filter - Add a VLAN filter to the softc VLAN list
+ * @sc: device private softc
+ * @vtag: the VLAN id to filter
+ *
+ * Allocate a new VLAN filter structure and insert it into the VLAN list.
+ */
+void
+iavf_add_vlan_filter(struct iavf_sc *sc, u16 vtag)
+{
+ struct iavf_vlan_filter *v;
+
+ v = (struct iavf_vlan_filter *)malloc(sizeof(struct iavf_vlan_filter),
+ M_IAVF, M_WAITOK | M_ZERO);
+ SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
+ v->vlan = vtag;
+ v->flags = IAVF_FILTER_ADD;
+}
+
+/**
+ * iavf_mark_del_vlan_filter - Mark a given VLAN id for deletion
+ * @sc: device private softc
+ * @vtag: the VLAN id to delete
+ *
+ * Marks all VLAN filters matching the given vtag for deletion.
+ *
+ * @returns the number of filters marked for deletion.
+ *
+ * @remark the filters are not removed immediately, but will be removed from
+ * the list by another function that synchronizes over the virtchnl interface.
+ */
+int
+iavf_mark_del_vlan_filter(struct iavf_sc *sc, u16 vtag)
+{
+ struct iavf_vlan_filter *v;
+ int i = 0;
+
+ SLIST_FOREACH(v, sc->vlan_filters, next) {
+ if (v->vlan == vtag) {
+ v->flags = IAVF_FILTER_DEL;
+ ++i;
+ }
+ }
+
+ return (i);
+}
+
+/**
+ * iavf_update_msix_devinfo - Fix MSIX values for pci_msix_count()
+ * @dev: pointer to kernel device
+ *
+ * Fix cached MSI-X control register information. This is a workaround
+ * for an issue where VFs spawned in non-passthrough mode on FreeBSD
+ * will have their PCI information cached before the PF driver
+ * finishes updating it.
+ *
+ * @pre Must be called before pci_msix_count()
+ */
+void
+iavf_update_msix_devinfo(device_t dev)
+{
+ struct pci_devinfo *dinfo;
+ u32 msix_ctrl;
+
+ dinfo = (struct pci_devinfo *)device_get_ivars(dev);
+ /* We can hardcode this offset since we know the device */
+ msix_ctrl = pci_read_config(dev, 0x70 + PCIR_MSIX_CTRL, 2);
+ dinfo->cfg.msix.msix_ctrl = msix_ctrl;
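+	/* The MSI-X table size field encodes the vector count minus one */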
+ dinfo->cfg.msix.msix_msgnum = (msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
+}
+
+/**
+ * iavf_disable_queues_with_retries - Send PF multiple DISABLE_QUEUES messages
+ * @sc: device softc
+ *
+ * Send a virtual channel message to the PF to DISABLE_QUEUES, but resend it up
+ * to IAVF_MAX_DIS_Q_RETRY times if the response says that it wasn't
+ * successful. This is intended to work around a bug that can appear on the PF.
+ */
+void
+iavf_disable_queues_with_retries(struct iavf_sc *sc)
+{
+ bool in_detach = iavf_driver_is_detaching(sc);
+ int max_attempts = IAVF_MAX_DIS_Q_RETRY;
+ int msg_count = 0;
+
+	/*
+	 * While the driver is detaching, it doesn't care whether the queue
+	 * disable finishes successfully or not, so send just one message
+	 * to notify the PF driver.
+	 */
+ if (in_detach)
+ max_attempts = 1;
+
+ while ((msg_count < max_attempts) &&
+ atomic_load_acq_32(&sc->queues_enabled)) {
+ msg_count++;
+ iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
+ }
+
+ /* Possibly print messages about retry attempts and issues */
+ if (msg_count > 1)
+ iavf_dbg_vc(sc, "DISABLE_QUEUES messages sent: %d\n",
+ msg_count);
+
+ if (!in_detach && msg_count >= max_attempts)
+ device_printf(sc->dev, "%s: DISABLE_QUEUES may have failed\n",
+ __func__);
+}
diff --git a/sys/dev/iavf/iavf_opts.h b/sys/dev/iavf/iavf_opts.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_opts.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_opts.h
+ * @brief header including the kernel option files
+ *
+ * Contains includes for the opt_*.h header files which define macros
+ * indicating whether certain kernel functionality is enabled based on kernel
+ * configuration.
+ */
+#ifndef _IAVF_OPTS_H_
+#define _IAVF_OPTS_H_
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
+#endif
diff --git a/sys/dev/iavf/iavf_osdep.h b/sys/dev/iavf/iavf_osdep.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_osdep.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_osdep.h
+ * @brief OS compatibility layer definitions
+ *
+ * Contains macros and definitions used to implement an OS compatibility layer
+ * used by some of the hardware files.
+ */
+#ifndef _IAVF_OSDEP_H_
+#define _IAVF_OSDEP_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "iavf_status.h"
+#include "iavf_debug.h"
+
+#define iavf_usec_delay(x) DELAY(x)
+#define iavf_msec_delay(x) DELAY(1000 * (x))
+
+#define DBG 0
+#define DEBUGFUNC(F) DEBUGOUT(F);
+#if DBG
+ #define DEBUGOUT(S) printf(S "\n")
+ #define DEBUGOUT1(S,A) printf(S "\n",A)
+ #define DEBUGOUT2(S,A,B) printf(S "\n",A,B)
+ #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C)
+	#define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F)
+	#define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G)
+#else
+ #define DEBUGOUT(S)
+ #define DEBUGOUT1(S,A)
+ #define DEBUGOUT2(S,A,B)
+ #define DEBUGOUT3(S,A,B,C)
+ #define DEBUGOUT6(S,A,B,C,D,E,F)
+ #define DEBUGOUT7(S,A,B,C,D,E,F,G)
+#endif
+
+#define UNREFERENCED_PARAMETER(_p) _p = _p
+#define UNREFERENCED_1PARAMETER(_p) do { \
+ UNREFERENCED_PARAMETER(_p); \
+} while (0)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+} while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+} while (0)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+ UNREFERENCED_PARAMETER(_s); \
+} while (0)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+ UNREFERENCED_PARAMETER(_s); \
+ UNREFERENCED_PARAMETER(_t); \
+} while (0)
+
+#define STATIC static
+#define INLINE inline
+
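+/*
+ * The fourth argument to these wrappers is a memory-type hint used by the
+ * shared code on other platforms; it is not needed on FreeBSD, so it is
+ * simply dropped.
+ */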
+#define iavf_memset(a, b, c, d) memset((a), (b), (c))
+#define iavf_memcpy(a, b, c, d) memcpy((a), (b), (c))
+
+#define CPU_TO_LE16(o) htole16(o)
+#define CPU_TO_LE32(s) htole32(s)
+#define CPU_TO_LE64(h) htole64(h)
+#define LE16_TO_CPU(a) le16toh(a)
+#define LE32_TO_CPU(c) le32toh(c)
+#define LE64_TO_CPU(k) le64toh(k)
+
+/**
+ * @typedef u8
+ * @brief compatibility typedef for uint8_t
+ */
+typedef uint8_t u8;
+
+/**
+ * @typedef s8
+ * @brief compatibility typedef for int8_t
+ */
+typedef int8_t s8;
+
+/**
+ * @typedef u16
+ * @brief compatibility typedef for uint16_t
+ */
+typedef uint16_t u16;
+
+/**
+ * @typedef s16
+ * @brief compatibility typedef for int16_t
+ */
+typedef int16_t s16;
+
+/**
+ * @typedef u32
+ * @brief compatibility typedef for uint32_t
+ */
+typedef uint32_t u32;
+
+/**
+ * @typedef s32
+ * @brief compatibility typedef for int32_t
+ */
+typedef int32_t s32;
+
+/**
+ * @typedef u64
+ * @brief compatibility typedef for uint64_t
+ */
+typedef uint64_t u64;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+/**
+ * @struct iavf_spinlock
+ * @brief OS wrapper for a non-sleeping lock
+ *
+ * Wrapper used to provide an implementation of a non-sleeping lock.
+ */
+struct iavf_spinlock {
+ struct mtx mutex;
+};
+
+/**
+ * @struct iavf_osdep
+ * @brief Storage for data used by the osdep interface
+ *
+ * Contains data used by the osdep layer. Accessed via the hw->back pointer.
+ */
+struct iavf_osdep {
+ bus_space_tag_t mem_bus_space_tag;
+ bus_space_handle_t mem_bus_space_handle;
+ bus_size_t mem_bus_space_size;
+ uint32_t flush_reg;
+ int i2c_intfc_num;
+ device_t dev;
+};
+
+/**
+ * @struct iavf_dma_mem
+ * @brief DMA memory map
+ *
+ * Structure representing a DMA memory mapping.
+ */
+struct iavf_dma_mem {
+ void *va;
+ u64 pa;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_size_t size;
+ int nseg;
+ int flags;
+};
+
+/**
+ * @struct iavf_virt_mem
+ * @brief Virtual memory
+ *
+ * Structure representing some virtual memory.
+ */
+struct iavf_virt_mem {
+ void *va;
+ u32 size;
+};
+
+struct iavf_hw; /* forward decl */
+u16 iavf_read_pci_cfg(struct iavf_hw *, u32);
+void iavf_write_pci_cfg(struct iavf_hw *, u32, u16);
+
+/*
+ * iavf_debug - OS dependent version of shared code debug printing
+ */
+#define iavf_debug(h, m, s, ...) iavf_debug_shared(h, m, s, ##__VA_ARGS__)
+void iavf_debug_shared(struct iavf_hw *hw, uint64_t mask,
+ char *fmt_str, ...) __printflike(3, 4);
+
+/*
+ * This hardware supports either 16-byte or 32-byte Rx descriptors;
+ * the driver only uses the 32-byte kind.
+ */
+#define iavf_rx_desc iavf_32byte_rx_desc
+
+uint32_t iavf_rd32(struct iavf_hw *hw, uint32_t reg);
+void iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val);
+void iavf_flush(struct iavf_hw *hw);
+#define rd32(hw, reg) iavf_rd32(hw, reg)
+#define wr32(hw, reg, val) iavf_wr32(hw, reg, val)
+
+#endif /* _IAVF_OSDEP_H_ */
diff --git a/sys/dev/iavf/iavf_osdep.c b/sys/dev/iavf/iavf_osdep.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_osdep.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_osdep.c
+ * @brief OS compatibility layer
+ *
+ * Contains definitions for various functions used to provide an OS
+ * independent layer for sharing code between drivers on different operating
+ * systems.
+ */
+#include <machine/stdarg.h>
+
+#include "iavf_iflib.h"
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+
+/**
+ * iavf_dmamap_cb - DMA mapping callback function
+ * @arg: pointer to return the segment address
+ * @segs: the segments array
+ * @nseg: number of segments in the array
+ * @error: error code
+ *
+ * Callback used by the bus DMA code to obtain the segment address.
+ */
+static void
+iavf_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg __unused,
+ int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+}
+
+/**
+ * iavf_allocate_virt_mem - Allocate virtual memory
+ * @hw: hardware structure
+ * @mem: structure describing the memory allocation
+ * @size: size of the allocation
+ *
+ * OS compatibility function to allocate virtual memory.
+ *
+ * @returns zero on success, or nonzero if the allocation failed.
+ */
+enum iavf_status
+iavf_allocate_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem,
+ u32 size)
+{
+ mem->va = malloc(size, M_IAVF, M_NOWAIT | M_ZERO);
+	return (mem->va == NULL);
+}
+
+/**
+ * iavf_free_virt_mem - Free virtual memory
+ * @hw: hardware structure
+ * @mem: structure describing the memory to free
+ *
+ * OS compatibility function to free virtual memory
+ *
+ * @returns zero.
+ */
+enum iavf_status
+iavf_free_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem)
+{
+ free(mem->va, M_IAVF);
+ mem->va = NULL;
+
+	return (0);
+}
+
+/**
+ * iavf_allocate_dma_mem - Allocate DMA memory
+ * @hw: hardware structure
+ * @mem: structure describing the memory allocation
+ * @type: unused type parameter specifying the type of allocation
+ * @size: size of the allocation
+ * @alignment: alignment requirements for the allocation
+ *
+ * Allocates DMA memory by using bus_dma_tag_create to create a DMA tag, and
+ * then bus_dmamem_alloc to allocate the associated memory.
+ *
+ * @returns zero on success, or a status code on failure.
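+ *
+ * A minimal usage sketch; the memory type argument is unused on FreeBSD,
+ * so iavf_mem_reserved stands in here for any enum iavf_memory_type value:
+ *
+ * @code{.c}
+ * struct iavf_dma_mem mem = { 0 };
+ *
+ * if (iavf_allocate_dma_mem(hw, &mem, iavf_mem_reserved, 4096, 4096))
+ *	device_printf(dev, "DMA allocation failed\n");
+ * @endcode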
+ */
+enum iavf_status
+iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
+ enum iavf_memory_type type __unused, u64 size, u32 alignment)
+{
+ device_t dev = ((struct iavf_osdep *)hw->back)->dev;
+ int err;
+
+ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &mem->tag);
+ if (err != 0) {
+ device_printf(dev,
+ "iavf_allocate_dma: bus_dma_tag_create failed, "
+ "error %u\n", err);
+ goto fail_0;
+ }
+ err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
+ if (err != 0) {
+ device_printf(dev,
+ "iavf_allocate_dma: bus_dmamem_alloc failed, "
+ "error %u\n", err);
+ goto fail_1;
+ }
+ err = bus_dmamap_load(mem->tag, mem->map, mem->va,
+ size,
+ iavf_dmamap_cb,
+ &mem->pa,
+ BUS_DMA_NOWAIT);
+ if (err != 0) {
+ device_printf(dev,
+ "iavf_allocate_dma: bus_dmamap_load failed, "
+ "error %u\n", err);
+ goto fail_2;
+ }
+ mem->nseg = 1;
+ mem->size = size;
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ return (0);
+fail_2:
+ bus_dmamem_free(mem->tag, mem->va, mem->map);
+fail_1:
+ bus_dma_tag_destroy(mem->tag);
+fail_0:
+ mem->map = NULL;
+ mem->tag = NULL;
+ return (err);
+}
+
+/**
+ * iavf_free_dma_mem - Free DMA memory allocation
+ * @hw: hardware structure
+ * @mem: pointer to memory structure previously allocated
+ *
+ * Releases DMA memory that was previously allocated by iavf_allocate_dma_mem.
+ *
+ * @returns zero.
+ */
+enum iavf_status
+iavf_free_dma_mem(struct iavf_hw *hw __unused, struct iavf_dma_mem *mem)
+{
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(mem->tag, mem->map);
+ bus_dmamem_free(mem->tag, mem->va, mem->map);
+ bus_dma_tag_destroy(mem->tag);
+ return (0);
+}
+
+/**
+ * iavf_init_spinlock - Initialize a spinlock
+ * @lock: OS compatibility lock structure
+ *
+ * Use the mutex layer to initialize a spin lock that can be used via the OS
+ * compatibility layer accessors.
+ *
+ * @remark we pass MTX_DUPOK because the mutex name will not be unique. An
+ * alternative would be to somehow generate a name, such as by passing in the
+ * __FILE__ and __LINE__ values from a macro.
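+ *
+ * A hypothetical sketch of that alternative, which this driver does not
+ * use:
+ *
+ * @code{.c}
+ * #define IAVF_STR(x)  #x
+ * #define IAVF_XSTR(x) IAVF_STR(x)
+ * #define IAVF_INIT_SPINLOCK(lock) \
+ *	mtx_init(&(lock)->mutex, __FILE__ ":" IAVF_XSTR(__LINE__), \
+ *	    "iavf spinlock", MTX_DEF)
+ * @endcode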
+ */
+void
+iavf_init_spinlock(struct iavf_spinlock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ "iavf spinlock", MTX_DEF | MTX_DUPOK);
+}
+
+/**
+ * iavf_acquire_spinlock - Acquire a spin lock
+ * @lock: OS compatibility lock structure
+ *
+ * Acquire a spin lock using mtx_lock.
+ */
+void
+iavf_acquire_spinlock(struct iavf_spinlock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+/**
+ * iavf_release_spinlock - Release a spin lock
+ * @lock: OS compatibility lock structure
+ *
+ * Release a spin lock using mtx_unlock.
+ */
+void
+iavf_release_spinlock(struct iavf_spinlock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+/**
+ * iavf_destroy_spinlock - Destroy a spin lock
+ * @lock: OS compatibility lock structure
+ *
+ * Destroy (deinitialize) a spin lock by calling mtx_destroy.
+ *
+ * @remark we only destroy the lock if it was initialized. This means that
+ * calling iavf_destroy_spinlock on a lock that was already destroyed or was
+ * never initialized is not considered a bug.
+ */
+void
+iavf_destroy_spinlock(struct iavf_spinlock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
+
+/**
+ * iavf_debug_shared - Log a debug message if enabled
+ * @hw: device hardware structure
+ * @mask: bit indicating the type of the message
+ * @fmt: printf format string
+ *
+ * Checks if the mask is enabled in the hw->debug_mask. If so, prints
+ * a message to the console using vprintf().
+ */
+void
+iavf_debug_shared(struct iavf_hw *hw, uint64_t mask, char *fmt, ...)
+{
+ va_list args;
+ device_t dev;
+
+	if (!(mask & hw->debug_mask))
+ return;
+
+ dev = ((struct iavf_osdep *)hw->back)->dev;
+
+ /* Re-implement device_printf() */
+ device_print_prettyname(dev);
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+/**
+ * iavf_read_pci_cfg - Read a PCI config register
+ * @hw: device hardware structure
+ * @reg: the PCI register to read
+ *
+ * Calls pci_read_config to read the given PCI register from the PCI config
+ * space.
+ *
+ * @returns the value of the register.
+ */
+u16
+iavf_read_pci_cfg(struct iavf_hw *hw, u32 reg)
+{
+ u16 value;
+
+ value = pci_read_config(((struct iavf_osdep *)hw->back)->dev,
+ reg, 2);
+
+ return (value);
+}
+
+/**
+ * iavf_write_pci_cfg - Write a PCI config register
+ * @hw: device hardware structure
+ * @reg: the PCI register to write
+ * @value: the value to write
+ *
+ * Calls pci_write_config to write to a given PCI register in the PCI config
+ * space.
+ */
+void
+iavf_write_pci_cfg(struct iavf_hw *hw, u32 reg, u16 value)
+{
+ pci_write_config(((struct iavf_osdep *)hw->back)->dev,
+ reg, value, 2);
+}
+
+/**
+ * iavf_rd32 - Read a 32bit hardware register value
+ * @hw: the private hardware structure
+ * @reg: register address to read
+ *
+ * Read the specified 32bit register value from BAR0 and return its contents.
+ *
+ * @returns the value of the 32bit register.
+ */
+inline uint32_t
+iavf_rd32(struct iavf_hw *hw, uint32_t reg)
+{
+ struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
+
+ KASSERT(reg < osdep->mem_bus_space_size,
+ ("iavf: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
+
+ return (bus_space_read_4(osdep->mem_bus_space_tag,
+ osdep->mem_bus_space_handle, reg));
+}
+
+/**
+ * iavf_wr32 - Write a 32bit hardware register
+ * @hw: the private hardware structure
+ * @reg: the register address to write to
+ * @val: the 32bit value to write
+ *
+ * Write the specified 32bit value to a register address in BAR0.
+ */
+inline void
+iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val)
+{
+ struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
+
+ KASSERT(reg < osdep->mem_bus_space_size,
+ ("iavf: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
+
+ bus_space_write_4(osdep->mem_bus_space_tag,
+ osdep->mem_bus_space_handle, reg, val);
+}
+
+/**
+ * iavf_flush - Flush register writes
+ * @hw: private hardware structure
+ *
+ * Forces the completion of outstanding PCI register writes by reading from
+ * a specific hardware register.
+ */
+inline void
+iavf_flush(struct iavf_hw *hw)
+{
+ struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
+
+ rd32(hw, osdep->flush_reg);
+}
+
+/**
+ * iavf_debug_core - Debug printf for core driver code
+ * @dev: the device_t to log under
+ * @enabled_mask: the mask of enabled messages
+ * @mask: the mask of the requested message to print
+ * @fmt: printf format string
+ *
+ * If enabled_mask has the bit from the mask set, print a message to the
+ * console using the specified format. This is used to conditionally enable
+ * log messages at run time by toggling the enabled_mask in the device
+ * structure.
+ */
+void
+iavf_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...)
+{
+ va_list args;
+
+ if (!(mask & enabled_mask))
+ return;
+
+ /* Re-implement device_printf() */
+ device_print_prettyname(dev);
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
diff --git a/sys/dev/iavf/iavf_prototype.h b/sys/dev/iavf/iavf_prototype.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_prototype.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_PROTOTYPE_H_
+#define _IAVF_PROTOTYPE_H_
+
+#include "iavf_type.h"
+#include "iavf_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_init_asq(struct iavf_hw *hw);
+enum iavf_status iavf_init_arq(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
+u16 iavf_clean_asq(struct iavf_hw *hw);
+void iavf_free_adminq_asq(struct iavf_hw *hw);
+void iavf_free_adminq_arq(struct iavf_hw *hw);
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
+void iavf_adminq_init_ring_data(struct iavf_hw *hw);
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *events_pending);
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details);
+bool iavf_asq_done(struct iavf_hw *hw);
+
+/* debug function for adminq */
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void iavf_idle_aq(struct iavf_hw *hw);
+bool iavf_check_asq_alive(struct iavf_hw *hw);
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
+
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+
+extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
+
+STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return iavf_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void iavf_init_spinlock(struct iavf_spinlock *sp);
+void iavf_acquire_spinlock(struct iavf_spinlock *sp);
+void iavf_release_spinlock(struct iavf_spinlock *sp);
+void iavf_destroy_spinlock(struct iavf_spinlock *sp);
+
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details);
+#endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/sys/dev/iavf/iavf_register.h b/sys/dev/iavf/iavf_register.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_register.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_REGISTER_H_
+#define _IAVF_REGISTER_H_
+
+#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define IAVF_VF_ARQH1_ARQH_SHIFT 0
+#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
+#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define IAVF_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define IAVF_VFINT_ICR01_QUEUE_0_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_QUEUE_0_SHIFT)
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define IAVF_VFINT_ICR01_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR01_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_ADMINQ_SHIFT)
+#define IAVF_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY_MAX_INDEX 12
+#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_VFQF_HLUT_MAX_INDEX 15
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+
+#endif /* _IAVF_REGISTER_H_ */
diff --git a/sys/dev/iavf/iavf_status.h b/sys/dev/iavf/iavf_status.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_status.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_STATUS_H_
+#define _IAVF_STATUS_H_
+
+/* Error Codes */
+enum iavf_status {
+ IAVF_SUCCESS = 0,
+ IAVF_ERR_NVM = -1,
+ IAVF_ERR_NVM_CHECKSUM = -2,
+ IAVF_ERR_PHY = -3,
+ IAVF_ERR_CONFIG = -4,
+ IAVF_ERR_PARAM = -5,
+ IAVF_ERR_MAC_TYPE = -6,
+ IAVF_ERR_UNKNOWN_PHY = -7,
+ IAVF_ERR_LINK_SETUP = -8,
+ IAVF_ERR_ADAPTER_STOPPED = -9,
+ IAVF_ERR_INVALID_MAC_ADDR = -10,
+ IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_INVALID_LINK_SETTINGS = -13,
+ IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ IAVF_ERR_RESET_FAILED = -15,
+ IAVF_ERR_SWFW_SYNC = -16,
+ IAVF_ERR_NO_AVAILABLE_VSI = -17,
+ IAVF_ERR_NO_MEMORY = -18,
+ IAVF_ERR_BAD_PTR = -19,
+ IAVF_ERR_RING_FULL = -20,
+ IAVF_ERR_INVALID_PD_ID = -21,
+ IAVF_ERR_INVALID_QP_ID = -22,
+ IAVF_ERR_INVALID_CQ_ID = -23,
+ IAVF_ERR_INVALID_CEQ_ID = -24,
+ IAVF_ERR_INVALID_AEQ_ID = -25,
+ IAVF_ERR_INVALID_SIZE = -26,
+ IAVF_ERR_INVALID_ARP_INDEX = -27,
+ IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ IAVF_ERR_INVALID_FRAG_COUNT = -31,
+ IAVF_ERR_QUEUE_EMPTY = -32,
+ IAVF_ERR_INVALID_ALIGNMENT = -33,
+ IAVF_ERR_FLUSHED_QUEUE = -34,
+ IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ IAVF_ERR_TIMEOUT = -37,
+ IAVF_ERR_OPCODE_MISMATCH = -38,
+ IAVF_ERR_CQP_COMPL_ERROR = -39,
+ IAVF_ERR_INVALID_VF_ID = -40,
+ IAVF_ERR_INVALID_HMCFN_ID = -41,
+ IAVF_ERR_BACKING_PAGE_ERROR = -42,
+ IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ IAVF_ERR_INVALID_PBLE_INDEX = -44,
+ IAVF_ERR_INVALID_SD_INDEX = -45,
+ IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ IAVF_ERR_INVALID_SD_TYPE = -47,
+ IAVF_ERR_MEMCPY_FAILED = -48,
+ IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ IAVF_ERR_SRQ_ENABLED = -52,
+ IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ IAVF_ERR_BUF_TOO_SHORT = -55,
+ IAVF_ERR_ADMIN_QUEUE_FULL = -56,
+ IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_NVM_BLANK_MODE = -59,
+ IAVF_ERR_NOT_IMPLEMENTED = -60,
+ IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ IAVF_ERR_DIAG_TEST_FAILED = -62,
+ IAVF_ERR_NOT_READY = -63,
+ IAVF_NOT_SUPPORTED = -64,
+ IAVF_ERR_FIRMWARE_API_VERSION = -65,
+ IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _IAVF_STATUS_H_ */
diff --git a/sys/dev/iavf/iavf_sysctls_common.h b/sys/dev/iavf/iavf_sysctls_common.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_sysctls_common.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_sysctls_common.h
+ * @brief Sysctls common to the legacy and iflib drivers
+ *
+ * Contains global sysctl definitions which are shared between the legacy and
+ * iflib driver implementations.
+ */
+#ifndef _IAVF_SYSCTLS_COMMON_H_
+#define _IAVF_SYSCTLS_COMMON_H_
+
+#include <sys/sysctl.h>
+
+/* Root node for tunables */
+static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
+ "IAVF driver parameters");
+
+/**
+ * @var iavf_enable_head_writeback
+ * @brief Sysctl to control Tx descriptor completion method
+ *
+ * Global sysctl value indicating whether to enable the head writeback method
+ * of Tx descriptor completion notification.
+ *
+ * @remark Head writeback has been deprecated and only works on 700-series
+ * virtual functions.
+ */
+static int iavf_enable_head_writeback = 0;
+SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
+ &iavf_enable_head_writeback, 0,
+ "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors. For 700 series VFs only.");
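+
+/*
+ * Being a loader tunable (CTLFLAG_RDTUN), this can be set from
+ * loader.conf(5), for example:
+ *
+ *   hw.iavf.enable_head_writeback="1"
+ */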
+
+/**
+ * @var iavf_core_debug_mask
+ * @brief Debug mask for driver messages
+ *
+ * Global sysctl value used to control what set of debug messages are printed.
+ * Used by messages in core driver code.
+ */
+static int iavf_core_debug_mask = 0;
+SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
+ &iavf_core_debug_mask, 0,
+ "Display debug statements that are printed in non-shared code");
+
+/**
+ * @var iavf_shared_debug_mask
+ * @brief Debug mask for shared code messages
+ *
+ * Global sysctl value used to control what set of debug messages are printed.
+ * Used by messages in shared device logic code.
+ */
+static int iavf_shared_debug_mask = 0;
+SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
+ &iavf_shared_debug_mask, 0,
+ "Display debug statements that are printed in shared code");
+
+/**
+ * @var iavf_rx_itr
+ * @brief Rx interrupt throttling rate
+ *
+ * Controls the default interrupt throttling rate for receive interrupts.
+ */
+int iavf_rx_itr = IAVF_ITR_8K;
+SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
+ &iavf_rx_itr, 0, "RX Interrupt Rate");
+
+/**
+ * @var iavf_tx_itr
+ * @brief Tx interrupt throttling rate
+ *
+ * Controls the default interrupt throttling rate for transmit interrupts.
+ */
+int iavf_tx_itr = IAVF_ITR_4K;
+SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
+ &iavf_tx_itr, 0, "TX Interrupt Rate");
+
+/**
+ * iavf_save_tunables - Sanity check and save off tunable values
+ * @sc: device softc
+ *
+ * @pre "iavf_drv_info.h" is included before this file
+ * @pre dev pointer in sc is valid
+ */
+static void
+iavf_save_tunables(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+ u16 pci_device_id = pci_get_device(dev);
+
+ /* Save tunable information */
+ sc->dbg_mask = (enum iavf_dbg_mask)iavf_core_debug_mask;
+ sc->hw.debug_mask = iavf_shared_debug_mask;
+
+ if (pci_device_id == IAVF_DEV_ID_VF ||
+ pci_device_id == IAVF_DEV_ID_X722_VF)
+ sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
+ else if (iavf_enable_head_writeback) {
+ device_printf(dev, "Head writeback can only be enabled on 700 series Virtual Functions\n");
+ device_printf(dev, "Using descriptor writeback instead...\n");
+ sc->vsi.enable_head_writeback = 0;
+ }
+
+ if (iavf_tx_itr < 0 || iavf_tx_itr > IAVF_MAX_ITR) {
+ device_printf(dev, "Invalid tx_itr value of %d set!\n",
+ iavf_tx_itr);
+ device_printf(dev, "tx_itr must be between %d and %d, "
+ "inclusive\n",
+ 0, IAVF_MAX_ITR);
+ device_printf(dev, "Using default value of %d instead\n",
+ IAVF_ITR_4K);
+ sc->tx_itr = IAVF_ITR_4K;
+ } else
+ sc->tx_itr = iavf_tx_itr;
+
+ if (iavf_rx_itr < 0 || iavf_rx_itr > IAVF_MAX_ITR) {
+ device_printf(dev, "Invalid rx_itr value of %d set!\n",
+ iavf_rx_itr);
+ device_printf(dev, "rx_itr must be between %d and %d, "
+ "inclusive\n",
+ 0, IAVF_MAX_ITR);
+ device_printf(dev, "Using default value of %d instead\n",
+ IAVF_ITR_8K);
+ sc->rx_itr = IAVF_ITR_8K;
+ } else
+ sc->rx_itr = iavf_rx_itr;
+}
+#endif /* _IAVF_SYSCTLS_COMMON_H_ */
diff --git a/sys/dev/iavf/iavf_sysctls_iflib.h b/sys/dev/iavf/iavf_sysctls_iflib.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_sysctls_iflib.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_sysctls_iflib.h
+ * @brief global sysctls for the iflib driver
+ *
+ * Contains sysctl definitions which are used by the iflib driver
+ * implementation. Sysctls which are unique to the iflib driver should be
+ * declared in this file.
+ */
+#ifndef _IAVF_SYSCTLS_IFLIB_H_
+#define _IAVF_SYSCTLS_IFLIB_H_
+
+#include "iavf_sysctls_common.h"
+
+#endif /* _IAVF_SYSCTLS_IFLIB_H_ */
diff --git a/sys/dev/iavf/iavf_txrx_common.h b/sys/dev/iavf/iavf_txrx_common.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_txrx_common.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_txrx_common.h
+ * @brief Tx/Rx hotpath functions common to legacy and iflib
+ *
+ * Contains implementations for functions used in the hotpath for both the
+ * legacy and iflib driver implementations.
+ */
+#ifndef _IAVF_TXRX_COMMON_H_
+#define _IAVF_TXRX_COMMON_H_
+
+#include "iavf_iflib.h"
+
+static inline int iavf_ptype_to_hash(u8 ptype);
+
+/**
+ * iavf_ptype_to_hash - parse the packet type
+ * @ptype: packet type
+ *
+ * Determine the appropriate hash for a given packet type
+ *
+ * @returns the M_HASHTYPE_* value for the given packet type.
+ */
+static inline int
+iavf_ptype_to_hash(u8 ptype)
+{
+ struct iavf_rx_ptype_decoded decoded;
+
+ decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return M_HASHTYPE_OPAQUE;
+
+ if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_L2)
+ return M_HASHTYPE_OPAQUE;
+
+ /* Note: anything that gets to this point is IP */
+ if (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6) {
+ switch (decoded.inner_prot) {
+ case IAVF_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case IAVF_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ default:
+ return M_HASHTYPE_RSS_IPV6;
+ }
+ }
+ if (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4) {
+ switch (decoded.inner_prot) {
+ case IAVF_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case IAVF_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ default:
+ return M_HASHTYPE_RSS_IPV4;
+ }
+ }
+ /* We should never get here! */
+ return M_HASHTYPE_OPAQUE;
+}
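+
+/*
+ * Example usage: the Rx hotpath extracts the 8-bit ptype field from
+ * descriptor qword 1 and feeds it to this helper to report the RSS hash
+ * type to the stack, as done in iavf_isc_rxd_pkt_get():
+ *
+ *	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK)
+ *	    >> IAVF_RXD_QW1_PTYPE_SHIFT;
+ *	ri->iri_rsstype = iavf_ptype_to_hash(ptype);
+ */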
+
+#endif /* _IAVF_TXRX_COMMON_H_ */
diff --git a/sys/dev/iavf/iavf_txrx_iflib.c b/sys/dev/iavf/iavf_txrx_iflib.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_txrx_iflib.c
@@ -0,0 +1,789 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_txrx_iflib.c
+ * @brief Tx/Rx hotpath implementation for the iflib driver
+ *
+ * Contains functions used to implement the Tx and Rx hotpaths of the iflib
+ * driver implementation.
+ */
+#include "iavf_iflib.h"
+#include "iavf_txrx_common.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+/* Local Prototypes */
+static void iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);
+
+static int iavf_isc_txd_encap(void *arg, if_pkt_info_t pi);
+static void iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int iavf_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear);
+static int iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear);
+
+static void iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+static void iavf_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ qidx_t pidx);
+static int iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
+ qidx_t budget);
+static int iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+
+/**
+ * @var iavf_txrx_hwb
+ * @brief iflib Tx/Rx operations for head write back
+ *
+ * iflib ops structure for when operating the device in head write back mode.
+ */
+struct if_txrx iavf_txrx_hwb = {
+ iavf_isc_txd_encap,
+ iavf_isc_txd_flush,
+ iavf_isc_txd_credits_update_hwb,
+ iavf_isc_rxd_available,
+ iavf_isc_rxd_pkt_get,
+ iavf_isc_rxd_refill,
+ iavf_isc_rxd_flush,
+ NULL
+};
+
+/**
+ * @var iavf_txrx_dwb
+ * @brief iflib Tx/Rx operations for descriptor write back
+ *
+ * iflib ops structure for when operating the device in descriptor write back
+ * mode.
+ */
+struct if_txrx iavf_txrx_dwb = {
+ iavf_isc_txd_encap,
+ iavf_isc_txd_flush,
+ iavf_isc_txd_credits_update_dwb,
+ iavf_isc_rxd_available,
+ iavf_isc_rxd_pkt_get,
+ iavf_isc_rxd_refill,
+ iavf_isc_rxd_flush,
+ NULL
+};
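+
+/*
+ * Illustrative sketch: the positional initializers above depend on the
+ * member order of struct if_txrx. Assuming the ift_* member names from
+ * sys/net/iflib.h, the descriptor write back table is equivalent to the
+ * designated-initializer form below, which is immune to member
+ * reordering:
+ *
+ *	struct if_txrx iavf_txrx_dwb = {
+ *		.ift_txd_encap = iavf_isc_txd_encap,
+ *		.ift_txd_flush = iavf_isc_txd_flush,
+ *		.ift_txd_credits_update = iavf_isc_txd_credits_update_dwb,
+ *		.ift_rxd_available = iavf_isc_rxd_available,
+ *		.ift_rxd_pkt_get = iavf_isc_rxd_pkt_get,
+ *		.ift_rxd_refill = iavf_isc_rxd_refill,
+ *		.ift_rxd_flush = iavf_isc_rxd_flush,
+ *		.ift_legacy_intr = NULL,
+ *	};
+ */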
+
+/**
+ * iavf_is_tx_desc_done - Check if a Tx descriptor is ready
+ * @txr: the Tx ring to check in
+ * @idx: ring index to check
+ *
+ * @returns true if the descriptor has been written back by hardware, and
+ * false otherwise.
+ */
+static bool
+iavf_is_tx_desc_done(struct tx_ring *txr, int idx)
+{
+ return (((txr->tx_base[idx].cmd_type_offset_bsz >> IAVF_TXD_QW1_DTYPE_SHIFT)
+ & IAVF_TXD_QW1_DTYPE_MASK) == IAVF_TX_DESC_DTYPE_DESC_DONE);
+}
+
+/**
+ * iavf_tso_detect_sparse - detect TSO packets with too many segments
+ * @segs: packet segments array
+ * @nsegs: number of packet segments
+ * @pi: packet information
+ *
+ * Hardware only transmits packets with a maximum of 8 descriptors. For TSO
+ * packets, hardware needs to be able to build the split packets using 8 or
+ * fewer descriptors. Additionally, the header must be contained within at
+ * most 3 descriptors.
+ *
+ * To verify this, we walk the headers to find out how many descriptors the
+ * headers require (usually 1). Then we ensure that, for each TSO segment, its
+ * data plus the headers are contained within 8 or fewer descriptors.
+ *
+ * @returns zero if the packet is valid, one otherwise.
+ */
+static int
+iavf_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi)
+{
+ int count, curseg, i, hlen, segsz, seglen, tsolen;
+
+ if (nsegs <= IAVF_MAX_TX_SEGS-2)
+ return (0);
+ segsz = pi->ipi_tso_segsz;
+ curseg = count = 0;
+
+ hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
+ tsolen = pi->ipi_len - hlen;
+
+ i = 0;
+ curseg = segs[0].ds_len;
+ while (hlen > 0) {
+ count++;
+ if (count > IAVF_MAX_TX_SEGS - 2)
+ return (1);
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs))
+ return (1);
+
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, hlen);
+ curseg -= seglen;
+ hlen -= seglen;
+ }
+ while (tsolen > 0) {
+ segsz = pi->ipi_tso_segsz;
+ while (segsz > 0 && tsolen != 0) {
+ count++;
+ if (count > IAVF_MAX_TX_SEGS - 2) {
+ return (1);
+ }
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs)) {
+ return (1);
+ }
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, segsz);
+ segsz -= seglen;
+ curseg -= seglen;
+ tsolen -= seglen;
+ }
+ count = 0;
+ }
+
+ return (0);
+}
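+
+/*
+ * Worked example: with IAVF_MAX_TX_SEGS == 8, each TSO segment (one MSS
+ * of payload plus the replicated headers) may consume at most
+ * IAVF_MAX_TX_SEGS - 2 descriptors. A 1448-byte MSS carried in 512-byte
+ * DMA segments needs roughly ceil(1448/512) + 1 = 4 descriptors and
+ * passes; the same MSS scattered over 128-byte segments needs about 12,
+ * so the packet is flagged sparse and iavf_isc_txd_encap() returns
+ * EFBIG, letting iflib defragment the mbuf chain and retry.
+ */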
+
+/**
+ * iavf_tx_setup_offload - Setup Tx offload parameters
+ * @que: pointer to the Tx queue
+ * @pi: Tx packet info
+ * @cmd: pointer to command descriptor value
+ * @off: pointer to offset descriptor value
+ *
+ * Based on packet type and Tx offloads requested, sets up the command and
+ * offset values for a Tx descriptor to enable the requested offloads.
+ */
+static void
+iavf_tx_setup_offload(struct iavf_tx_queue *que __unused,
+ if_pkt_info_t pi, u32 *cmd, u32 *off)
+{
+ switch (pi->ipi_etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (pi->ipi_csum_flags & IAVF_CSUM_IPV4)
+ *cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ *cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ *cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ *off |= (pi->ipi_ehdrlen >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+ *off |= (pi->ipi_ip_hlen >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & IAVF_CSUM_TCP) {
+ *cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *off |= (pi->ipi_tcp_hlen >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ /* Check for NO_HEAD MDD event */
+ MPASS(pi->ipi_tcp_hlen != 0);
+ }
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & IAVF_CSUM_UDP) {
+ *cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+ *off |= (sizeof(struct udphdr) >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & IAVF_CSUM_SCTP) {
+ *cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *off |= (sizeof(struct sctphdr) >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ /* Fall Thru */
+ default:
+ break;
+ }
+}
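+
+/*
+ * Worked example: for a TCP/IPv4 packet with both IP and TCP checksum
+ * offload requested, a 14-byte Ethernet header, a 20-byte IP header and
+ * a 20-byte TCP header, this produces
+ *
+ *	cmd = IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM | IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *	off = ((14 >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) |
+ *	    ((20 >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) |
+ *	    ((20 >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
+ *
+ * i.e. the MAC length is encoded in 2-byte words and the IP/L4 lengths
+ * in 4-byte dwords.
+ */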
+
+/**
+ * iavf_tso_setup - Setup TSO context descriptor
+ * @txr: the Tx ring to process
+ * @pi: packet info structure
+ *
+ * Enable hardware segmentation offload (TSO) for a given packet by creating
+ * a context descriptor with the necessary details for offloading.
+ *
+ * @returns the new ring index to use for the data descriptor.
+ */
+static int
+iavf_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
+{
+ if_softc_ctx_t scctx;
+ struct iavf_tx_context_desc *TXD;
+ u32 cmd, mss, type, tsolen;
+ int idx, total_hdr_len;
+ u64 type_cmd_tso_mss;
+
+ idx = pi->ipi_pidx;
+ TXD = (struct iavf_tx_context_desc *) &txr->tx_base[idx];
+ total_hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
+ tsolen = pi->ipi_len - total_hdr_len;
+ scctx = txr->que->vsi->shared;
+
+ type = IAVF_TX_DESC_DTYPE_CONTEXT;
+ cmd = IAVF_TX_CTX_DESC_TSO;
+ /*
+ * TSO MSS must not be less than 64; this prevents a
+ * BAD_LSO_MSS MDD event when the MSS is too small.
+ */
+ if (pi->ipi_tso_segsz < IAVF_MIN_TSO_MSS) {
+ txr->mss_too_small++;
+ pi->ipi_tso_segsz = IAVF_MIN_TSO_MSS;
+ }
+ mss = pi->ipi_tso_segsz;
+
+	/* Check for BAD_LSO_MSS MDD event (mss too large) */
+ MPASS(mss <= IAVF_MAX_TSO_MSS);
+ /* Check for NO_HEAD MDD event (header lengths are 0) */
+ MPASS(pi->ipi_ehdrlen != 0);
+ MPASS(pi->ipi_ip_hlen != 0);
+ /* Partial check for BAD_LSO_LEN MDD event */
+ MPASS(tsolen != 0);
+ /* Partial check for WRONG_SIZE MDD event (during TSO) */
+ MPASS(total_hdr_len + mss <= IAVF_MAX_FRAME);
+
+ type_cmd_tso_mss = ((u64)type << IAVF_TXD_CTX_QW1_DTYPE_SHIFT) |
+ ((u64)cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)tsolen << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+ TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
+
+ TXD->tunneling_params = htole32(0);
+ txr->que->tso++;
+
+ return ((idx + 1) & (scctx->isc_ntxd[0]-1));
+}
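+
+/*
+ * Worked example: for tsolen = 2896 and mss = 1448, the context
+ * quadword packs DTYPE 0x1 (CONTEXT) into bits 3:0, the TSO command bit
+ * at bit 4, the TSO length at bit 30 and the MSS at bit 50:
+ *
+ *	type_cmd_tso_mss = (0x1ULL << 0) | (0x1ULL << 4) |
+ *	    (2896ULL << 30) | (1448ULL << 50);
+ */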
+
+#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
+
+/**
+ * iavf_isc_txd_encap - Encapsulate a Tx packet into descriptors
+ * @arg: void pointer to the VSI structure
+ * @pi: packet info to encapsulate
+ *
+ * This routine maps the mbufs to tx descriptors, allowing the
+ * TX engine to transmit the packets.
+ *
+ * @returns 0 on success, positive on failure
+ */
+static int
+iavf_isc_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct iavf_vsi *vsi = arg;
+ if_softc_ctx_t scctx = vsi->shared;
+ struct iavf_tx_queue *que = &vsi->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ struct iavf_tx_desc *txd = NULL;
+ int i, j, mask, pidx_last;
+ u32 cmd, off, tx_intr;
+
+ if (__predict_false(pi->ipi_len < IAVF_MIN_FRAME)) {
+ que->pkt_too_small++;
+ return (EINVAL);
+ }
+
+ cmd = off = 0;
+ i = pi->ipi_pidx;
+
+ tx_intr = (pi->ipi_flags & IPI_TX_INTR);
+
+ /* Set up the TSO/CSUM offload */
+ if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
+ /* Set up the TSO context descriptor if required */
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ /* Prevent MAX_BUFF MDD event (for TSO) */
+ if (iavf_tso_detect_sparse(segs, nsegs, pi))
+ return (EFBIG);
+ i = iavf_tso_setup(txr, pi);
+ }
+ iavf_tx_setup_offload(que, pi, &cmd, &off);
+ }
+ if (pi->ipi_mflags & M_VLANTAG)
+ cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
+
+ cmd |= IAVF_TX_DESC_CMD_ICRC;
+ mask = scctx->isc_ntxd[0] - 1;
+ /* Check for WRONG_SIZE MDD event */
+ MPASS(pi->ipi_len >= IAVF_MIN_FRAME);
+#ifdef INVARIANTS
+ if (!(pi->ipi_csum_flags & CSUM_TSO))
+ MPASS(pi->ipi_len <= IAVF_MAX_FRAME);
+#endif
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+
+ txd = &txr->tx_base[i];
+ seglen = segs[j].ds_len;
+
+ /* Check for ZERO_BSIZE MDD event */
+ MPASS(seglen != 0);
+
+ txd->buffer_addr = htole64(segs[j].ds_addr);
+ txd->cmd_type_offset_bsz =
+ htole64(IAVF_TX_DESC_DTYPE_DATA
+ | ((u64)cmd << IAVF_TXD_QW1_CMD_SHIFT)
+ | ((u64)off << IAVF_TXD_QW1_OFFSET_SHIFT)
+ | ((u64)seglen << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+ | ((u64)htole16(pi->ipi_vtag) << IAVF_TXD_QW1_L2TAG1_SHIFT));
+
+ txr->tx_bytes += seglen;
+ pidx_last = i;
+ i = (i+1) & mask;
+ }
+ /* Set the last descriptor for report */
+ txd->cmd_type_offset_bsz |=
+ htole64(((u64)IAVF_TXD_CMD << IAVF_TXD_QW1_CMD_SHIFT));
+ /* Add to report status array (if using TX interrupts) */
+ if (!vsi->enable_head_writeback && tx_intr) {
+ txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
+ txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & mask;
+ MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
+ }
+ pi->ipi_new_pidx = i;
+
+ ++txr->tx_packets;
+ return (0);
+}
+
+/**
+ * iavf_isc_txd_flush - Flush Tx ring
+ * @arg: void pointer to the VSI
+ * @txqid: the Tx queue to flush
+ * @pidx: the ring index to flush to
+ *
+ * Advance the Transmit Descriptor Tail (TDT); this tells the hardware
+ * that this frame is available to transmit.
+ */
+static void
+iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
+{
+ struct iavf_vsi *vsi = arg;
+ struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
+
+ /* Check for ENDLESS_TX MDD event */
+ MPASS(pidx < vsi->shared->isc_ntxd[0]);
+ wr32(vsi->hw, txr->tail, pidx);
+}
+
+/**
+ * iavf_init_tx_ring - Initialize queue Tx ring
+ * @vsi: pointer to the VSI
+ * @que: pointer to queue to initialize
+ *
+ * (Re)Initialize a queue transmit ring by clearing its memory.
+ */
+void
+iavf_init_tx_ring(struct iavf_vsi *vsi, struct iavf_tx_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+
+ /* Clear the old ring contents */
+ bzero((void *)txr->tx_base,
+ (sizeof(struct iavf_tx_desc)) *
+ (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));
+
+ wr32(vsi->hw, txr->tail, 0);
+}
+
+/**
+ * iavf_get_tx_head - Get the index of the head of a ring
+ * @que: queue to read
+ *
+ * Retrieve the value from the location where the HW records its HEAD index.
+ *
+ * @returns the index of the HW head of the Tx queue
+ */
+static inline u32
+iavf_get_tx_head(struct iavf_tx_queue *que)
+{
+ if_softc_ctx_t scctx = que->vsi->shared;
+ struct tx_ring *txr = &que->txr;
+ void *head = &txr->tx_base[scctx->isc_ntxd[0]];
+
+ return LE32_TO_CPU(*(volatile __le32 *)head);
+}
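+
+/*
+ * Note: in head write back mode the head index is written by hardware
+ * into the extra descriptor slot one past the end of the ring (index
+ * isc_ntxd[0]); this is also why iavf_init_tx_ring() clears
+ * isc_ntxd[0] + 1 descriptors when enable_head_writeback is set.
+ */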
+
+/**
+ * iavf_isc_txd_credits_update_hwb - Update Tx ring credits
+ * @arg: void pointer to the VSI
+ * @qid: the queue id to update
+ * @clear: whether to update or only report current status
+ *
+ * Checks the number of Tx descriptors in the queue that can be cleaned.
+ *
+ * If clear is true, the iflib stack has cleaned the packets and is
+ * notifying the driver to update its processed ring pointer.
+ *
+ * @returns the number of descriptors in the ring that can be cleaned.
+ *
+ * @remark this function is intended for the head write back mode.
+ */
+static int
+iavf_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear)
+{
+ struct iavf_vsi *vsi = arg;
+ if_softc_ctx_t scctx = vsi->shared;
+ struct iavf_tx_queue *que = &vsi->tx_queues[qid];
+ struct tx_ring *txr = &que->txr;
+ int head, credits;
+
+ /* Get the Head WB value */
+ head = iavf_get_tx_head(que);
+
+ credits = head - txr->tx_cidx_processed;
+ if (credits < 0)
+ credits += scctx->isc_ntxd[0];
+ if (clear)
+ txr->tx_cidx_processed = head;
+
+ return (credits);
+}
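+
+/*
+ * Worked example: with isc_ntxd[0] == 1024, tx_cidx_processed == 1020
+ * and a head write back value of 4, the raw difference 4 - 1020 = -1016
+ * wraps to -1016 + 1024 = 8 cleanable descriptors.
+ */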
+
+/**
+ * iavf_isc_txd_credits_update_dwb - Update Tx ring credits
+ * @arg: void pointer to the VSI
+ * @txqid: the queue id to update
+ * @clear: whether to update or only report current status
+ *
+ * Checks the number of Tx descriptors in the queue that can be cleaned.
+ *
+ * If clear is true, the iflib stack has cleaned the packets and is
+ * notifying the driver to update its processed ring pointer.
+ *
+ * @returns the number of descriptors in the ring that can be cleaned.
+ *
+ * @remark this function is intended for the descriptor write back mode.
+ */
+static int
+iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
+{
+ struct iavf_vsi *vsi = arg;
+ struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
+ if_softc_ctx_t scctx = vsi->shared;
+ struct tx_ring *txr = &tx_que->txr;
+
+ qidx_t processed = 0;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ bool is_done;
+
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx == txr->tx_rs_pidx)
+ return (0);
+ cur = txr->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ is_done = iavf_is_tx_desc_done(txr, cur);
+
+ if (!is_done)
+ return (0);
+
+	/* If clear is false, just let the caller know that there
+	 * are descriptors to reclaim */
+ if (!clear)
+ return (1);
+
+ prev = txr->tx_cidx_processed;
+ ntxd = scctx->isc_ntxd[0];
+ do {
+ MPASS(prev != cur);
+ delta = (int32_t)cur - (int32_t)prev;
+ if (delta < 0)
+ delta += ntxd;
+ MPASS(delta > 0);
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd-1);
+ if (rs_cidx == txr->tx_rs_pidx)
+ break;
+ cur = txr->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ is_done = iavf_is_tx_desc_done(txr, cur);
+ } while (is_done);
+
+ txr->tx_rs_cidx = rs_cidx;
+ txr->tx_cidx_processed = prev;
+
+ return (processed);
+}
+
+/**
+ * iavf_isc_rxd_refill - Prepare descriptors for re-use
+ * @arg: void pointer to the VSI
+ * @iru: the Rx descriptor update structure
+ *
+ * Update Rx descriptors for a given queue so that they can be re-used by
+ * hardware for future packets.
+ */
+static void
+iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru)
+{
+ struct iavf_vsi *vsi = arg;
+ if_softc_ctx_t scctx = vsi->shared;
+ struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
+ uint64_t *paddrs;
+ uint16_t next_pidx, pidx;
+ uint16_t count;
+ int i;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
+
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == scctx->isc_nrxd[0])
+ next_pidx = 0;
+ }
+}
+
+/**
+ * iavf_isc_rxd_flush - Notify hardware of new Rx descriptors
+ * @arg: void pointer to the VSI
+ * @rxqid: Rx queue to update
+ * @flid: unused parameter
+ * @pidx: ring index to update to
+ *
+ * Updates the tail pointer of the Rx ring, notifying hardware of new
+ * descriptors available for receiving packets.
+ */
+static void
+iavf_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
+{
+ struct iavf_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
+
+ wr32(vsi->hw, rxr->tail, pidx);
+}
+
+/**
+ * iavf_isc_rxd_available - Calculate number of available Rx descriptors
+ * @arg: void pointer to the VSI
+ * @rxqid: Rx queue to check
+ * @idx: starting index to check from
+ * @budget: maximum Rx budget
+ *
+ * Determines how many packets are ready to be processed in the Rx queue, up
+ * to the specified budget.
+ *
+ * @returns the number of packets ready to be processed, up to the budget.
+ */
+static int
+iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
+{
+ struct iavf_vsi *vsi = arg;
+ struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
+ union iavf_rx_desc *rxd;
+ u64 qword;
+ uint32_t status;
+ int cnt, i, nrxd;
+
+ nrxd = vsi->shared->isc_nrxd[0];
+
+ for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
+ rxd = &rxr->rx_base[i];
+ qword = le64toh(rxd->wb.qword1.status_error_len);
+ status = (qword & IAVF_RXD_QW1_STATUS_MASK)
+ >> IAVF_RXD_QW1_STATUS_SHIFT;
+
+ if ((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) == 0)
+ break;
+ if (++i == nrxd)
+ i = 0;
+ if (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))
+ cnt++;
+ }
+
+ return (cnt);
+}
+
+/**
+ * iavf_isc_rxd_pkt_get - Decapsulate packet from Rx descriptors
+ * @arg: void pointer to the VSI
+ * @ri: packet info structure
+ *
+ * Read packet data from the Rx ring descriptors and fill in the packet info
+ * structure so that the iflib stack can process the packet.
+ *
+ * @remark this routine executes in ithread context.
+ *
+ * @returns zero on success, or EBADMSG if the packet is corrupted.
+ */
+static int
+iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+{
+ struct iavf_vsi *vsi = arg;
+ struct iavf_rx_queue *que = &vsi->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ union iavf_rx_desc *cur;
+ u32 status, error;
+ u16 plen, vtag;
+ u64 qword;
+ u8 ptype;
+ bool eop;
+ int i, cidx;
+
+ cidx = ri->iri_cidx;
+ i = 0;
+ do {
+ /* 5 descriptor receive limit */
+ MPASS(i < IAVF_MAX_RX_SEGS);
+
+ cur = &rxr->rx_base[cidx];
+ qword = le64toh(cur->wb.qword1.status_error_len);
+ status = (qword & IAVF_RXD_QW1_STATUS_MASK)
+ >> IAVF_RXD_QW1_STATUS_SHIFT;
+ error = (qword & IAVF_RXD_QW1_ERROR_MASK)
+ >> IAVF_RXD_QW1_ERROR_SHIFT;
+ plen = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK)
+ >> IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK)
+ >> IAVF_RXD_QW1_PTYPE_SHIFT;
+
+ /* we should never be called without a valid descriptor */
+ MPASS((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) != 0);
+
+ ri->iri_len += plen;
+ rxr->rx_bytes += plen;
+
+ cur->wb.qword1.status_error_len = 0;
+ eop = (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT));
+ if (status & (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT))
+ vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
+ else
+ vtag = 0;
+
+	/*
+	 * Make sure bad packets are discarded; note that only the EOP
+	 * descriptor has valid error results.
+	 */
+ if (eop && (error & (1 << IAVF_RX_DESC_ERROR_RXE_SHIFT))) {
+ rxr->desc_errs++;
+ return (EBADMSG);
+ }
+ ri->iri_frags[i].irf_flid = 0;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = plen;
+ if (++cidx == vsi->shared->isc_nrxd[0])
+ cidx = 0;
+ i++;
+ } while (!eop);
+
+ /* capture data for dynamic ITR adjustment */
+ rxr->packets++;
+ rxr->rx_packets++;
+
+ if ((if_getcapenable(vsi->ifp) & IFCAP_RXCSUM) != 0)
+ iavf_rx_checksum(ri, status, error, ptype);
+ ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
+ ri->iri_rsstype = iavf_ptype_to_hash(ptype);
+ ri->iri_vtag = vtag;
+ ri->iri_nfrags = i;
+ if (vtag)
+ ri->iri_flags |= M_VLANTAG;
+ return (0);
+}
+
+/**
+ * iavf_rx_checksum - Handle Rx hardware checksum indication
+ * @ri: Rx packet info structure
+ * @status: status from Rx descriptor
+ * @error: error from Rx descriptor
+ * @ptype: packet type
+ *
+ * Verify that the hardware indicated that the checksum is valid.
+ * Inform the stack about the status of the checksum so that the stack
+ * doesn't spend time verifying it.
+ */
+static void
+iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
+{
+ struct iavf_rx_ptype_decoded decoded;
+
+ ri->iri_csum_flags = 0;
+
+ /* No L3 or L4 checksum was calculated */
+ if (!(status & (1 << IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ decoded = decode_rx_desc_ptype(ptype);
+
+ /* IPv6 with extension headers likely have bad csum */
+ if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6) {
+ if (status &
+ (1 << IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
+ ri->iri_csum_flags = 0;
+ return;
+ }
+ }
+
+ ri->iri_csum_flags |= CSUM_L3_CALC;
+
+ /* IPv4 checksum error */
+ if (error & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT))
+ return;
+
+ ri->iri_csum_flags |= CSUM_L3_VALID;
+ ri->iri_csum_flags |= CSUM_L4_CALC;
+
+ /* L4 checksum error */
+ if (error & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ return;
+
+ ri->iri_csum_flags |= CSUM_L4_VALID;
+ ri->iri_csum_data |= htons(0xffff);
+}
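+
+/*
+ * Worked example: a good TCP/IPv4 packet (L3L4P set, no IPE or L4E
+ * errors) leaves iri_csum_flags set to CSUM_L3_CALC | CSUM_L3_VALID |
+ * CSUM_L4_CALC | CSUM_L4_VALID with iri_csum_data containing 0xffff,
+ * so the stack skips both the L3 and L4 software checksum checks.
+ */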
diff --git a/sys/dev/iavf/iavf_type.h b/sys/dev/iavf/iavf_type.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_type.h
@@ -0,0 +1,1037 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _IAVF_TYPE_H_
+#define _IAVF_TYPE_H_
+
+#include "iavf_status.h"
+#include "iavf_osdep.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_devids.h"
+
+#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+#ifndef IAVF_MASK
+/* IAVF_MASK is a macro used on 32 bit registers */
+#define IAVF_MASK(mask, shift) (mask << shift)
+#endif
+
+#define IAVF_MAX_PF 16
+#define IAVF_MAX_PF_VSI 64
+#define IAVF_MAX_PF_QP 128
+#define IAVF_MAX_VSI_QP 16
+#define IAVF_MAX_VF_VSI 4
+#define IAVF_MAX_CHAINED_RX_BUFFERS 5
+
+/* something less than 1 minute */
+#define IAVF_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Check whether address is multicast. */
+#define IAVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IAVF_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* forward declaration */
+struct iavf_hw;
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+
+#define ETH_ALEN 6
+/* Data type manipulation macros. */
+#define IAVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define IAVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define IAVF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define IAVF_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define IAVF_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define IAVF_LO_BYTE(x) ((u8)((x) & 0xFF))
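+
+/* For example, IAVF_HI_WORD(0x12345678) == 0x1234 and
+ * IAVF_LO_BYTE(0x12345678) == 0x78.
+ */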
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define IAVF_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define IAVF_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define IAVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
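+
+/* For example, with count == 512, next_to_clean == 10 and
+ * next_to_use == 500, IAVF_DESC_UNUSED() yields
+ * 512 + 10 - 500 - 1 = 21 unused descriptors.
+ */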
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define IAVF_QTX_CTL_VF_QUEUE 0x0
+#define IAVF_QTX_CTL_VM_QUEUE 0x1
+#define IAVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum iavf_debug_mask {
+ IAVF_DEBUG_INIT = 0x00000001,
+ IAVF_DEBUG_RELEASE = 0x00000002,
+
+ IAVF_DEBUG_LINK = 0x00000010,
+ IAVF_DEBUG_PHY = 0x00000020,
+ IAVF_DEBUG_HMC = 0x00000040,
+ IAVF_DEBUG_NVM = 0x00000080,
+ IAVF_DEBUG_LAN = 0x00000100,
+ IAVF_DEBUG_FLOW = 0x00000200,
+ IAVF_DEBUG_DCB = 0x00000400,
+ IAVF_DEBUG_DIAG = 0x00000800,
+ IAVF_DEBUG_FD = 0x00001000,
+ IAVF_DEBUG_PACKAGE = 0x00002000,
+
+ IAVF_DEBUG_IWARP = 0x00F00000,
+
+ IAVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ IAVF_DEBUG_AQ_COMMAND = 0x06000000,
+ IAVF_DEBUG_AQ = 0x0F000000,
+
+ IAVF_DEBUG_USER = 0xF0000000,
+
+ IAVF_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define IAVF_PCI_LINK_STATUS 0xB2
+#define IAVF_PCI_LINK_WIDTH 0x3F0
+#define IAVF_PCI_LINK_WIDTH_1 0x10
+#define IAVF_PCI_LINK_WIDTH_2 0x20
+#define IAVF_PCI_LINK_WIDTH_4 0x40
+#define IAVF_PCI_LINK_WIDTH_8 0x80
+#define IAVF_PCI_LINK_SPEED 0xF
+#define IAVF_PCI_LINK_SPEED_2500 0x1
+#define IAVF_PCI_LINK_SPEED_5000 0x2
+#define IAVF_PCI_LINK_SPEED_8000 0x3
+
+#define IAVF_MDIO_CLAUSE22_STCODE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_READ_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_MDIO_CLAUSE45_STCODE_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_MASK IAVF_MASK(3, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_PHY_COM_REG_PAGE 0x1E
+#define IAVF_PHY_LED_LINK_MODE_MASK 0xF0
+#define IAVF_PHY_LED_MANUAL_ON 0x100
+#define IAVF_PHY_LED_PROV_REG_1 0xC430
+#define IAVF_PHY_LED_MODE_MASK 0xFFFF
+#define IAVF_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum iavf_memset_type {
+ IAVF_NONDMA_MEM = 0,
+ IAVF_DMA_MEM
+};
+
+/* Memcpy types */
+enum iavf_memcpy_type {
+ IAVF_NONDMA_TO_NONDMA = 0,
+ IAVF_NONDMA_TO_DMA,
+ IAVF_DMA_TO_DMA,
+ IAVF_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum iavf_mac_type {
+ IAVF_MAC_UNKNOWN = 0,
+ IAVF_MAC_XL710,
+ IAVF_MAC_VF,
+ IAVF_MAC_X722,
+ IAVF_MAC_X722_VF,
+ IAVF_MAC_GENERIC,
+};
+
+enum iavf_vsi_type {
+ IAVF_VSI_MAIN = 0,
+ IAVF_VSI_VMDQ1 = 1,
+ IAVF_VSI_VMDQ2 = 2,
+ IAVF_VSI_CTRL = 3,
+ IAVF_VSI_FCOE = 4,
+ IAVF_VSI_MIRROR = 5,
+ IAVF_VSI_SRIOV = 6,
+ IAVF_VSI_FDIR = 7,
+ IAVF_VSI_IWARP = 8,
+ IAVF_VSI_TYPE_UNKNOWN
+};
+
+enum iavf_queue_type {
+ IAVF_QUEUE_TYPE_RX = 0,
+ IAVF_QUEUE_TYPE_TX,
+ IAVF_QUEUE_TYPE_PE_CEQ,
+ IAVF_QUEUE_TYPE_UNKNOWN
+};
+
+#define IAVF_HW_CAP_MAX_GPIO 30
+#define IAVF_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define IAVF_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum iavf_acpi_programming_method {
+ IAVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ IAVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define IAVF_WOL_SUPPORT_MASK 0x1
+#define IAVF_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define IAVF_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct iavf_hw_capabilities {
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define IAVF_CLOUD_FILTER_MODE1 0x6
+#define IAVF_CLOUD_FILTER_MODE2 0x7
+#define IAVF_CLOUD_FILTER_MODE3 0x8
+#define IAVF_SWITCH_MODE_MASK 0xF
+
+ bool dcb;
+ bool fcoe;
+ bool iwarp;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors_vf;
+ bool apm_wol_support;
+ enum iavf_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct iavf_mac_info {
+ enum iavf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+#define IAVF_NVM_EXEC_GET_AQ_RESULT 0x0
+#define IAVF_NVM_EXEC_FEATURES 0xe
+#define IAVF_NVM_EXEC_STATUS 0xf
+
+/* NVMUpdate features API */
+#define IAVF_NVMUPD_FEATURES_API_VER_MAJOR 0
+#define IAVF_NVMUPD_FEATURES_API_VER_MINOR 14
+#define IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
+
+#define IAVF_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
+
+struct iavf_nvmupd_features {
+ u8 major;
+ u8 minor;
+ u16 size;
+ u8 features[IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
+#define IAVF_MODULE_SFF_DIAG_CAPAB 0x40
+/* PCI bus types */
+enum iavf_bus_type {
+ iavf_bus_type_unknown = 0,
+ iavf_bus_type_pci,
+ iavf_bus_type_pcix,
+ iavf_bus_type_pci_express,
+ iavf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum iavf_bus_speed {
+ iavf_bus_speed_unknown = 0,
+ iavf_bus_speed_33 = 33,
+ iavf_bus_speed_66 = 66,
+ iavf_bus_speed_100 = 100,
+ iavf_bus_speed_120 = 120,
+ iavf_bus_speed_133 = 133,
+ iavf_bus_speed_2500 = 2500,
+ iavf_bus_speed_5000 = 5000,
+ iavf_bus_speed_8000 = 8000,
+ iavf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum iavf_bus_width {
+ iavf_bus_width_unknown = 0,
+ iavf_bus_width_pcie_x1 = 1,
+ iavf_bus_width_pcie_x2 = 2,
+ iavf_bus_width_pcie_x4 = 4,
+ iavf_bus_width_pcie_x8 = 8,
+ iavf_bus_width_32 = 32,
+ iavf_bus_width_64 = 64,
+ iavf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct iavf_bus_info {
+ enum iavf_bus_speed speed;
+ enum iavf_bus_width width;
+ enum iavf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+#define IAVF_MAX_USER_PRIORITY 8
+#define IAVF_TLV_STATUS_OPER 0x1
+#define IAVF_TLV_STATUS_SYNC 0x2
+#define IAVF_TLV_STATUS_ERR 0x4
+#define IAVF_CEE_OPER_MAX_APPS 3
+#define IAVF_APP_PROTOID_FCOE 0x8906
+#define IAVF_APP_PROTOID_ISCSI 0x0cbc
+#define IAVF_APP_PROTOID_FIP 0x8914
+#define IAVF_APP_SEL_ETHTYPE 0x1
+#define IAVF_APP_SEL_TCPIP 0x2
+#define IAVF_CEE_APP_SEL_ETHTYPE 0x0
+#define IAVF_CEE_APP_SEL_TCPIP 0x1
+
+/* Port hardware description */
+struct iavf_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct iavf_mac_info mac;
+ struct iavf_bus_info bus;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ /* capabilities for entire device and PCI func */
+ struct iavf_hw_capabilities dev_caps;
+
+ /* Admin Queue info */
+ struct iavf_adminq_info aq;
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define IAVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define IAVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define IAVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define IAVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define IAVF_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+ u64 flags;
+
+ /* NVMUpdate features */
+ struct iavf_nvmupd_features nvmupd_features;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+struct iavf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union iavf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union iavf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define IAVF_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define IAVF_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ IAVF_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define IAVF_RXD_QW0_FCOEINDX_SHIFT 0
+#define IAVF_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ IAVF_RXD_QW0_FCOEINDX_SHIFT)
+
+enum iavf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define IAVF_RXD_QW1_STATUS_SHIFT 0
+#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
+ << IAVF_RXD_QW1_STATUS_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT IAVF_RX_DESC_STATUS_UMBCAST_SHIFT
+#define IAVF_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum iavf_rx_desc_fltstat_values {
+ IAVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ IAVF_RX_DESC_FLTSTAT_RSV = 2,
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define IAVF_RXD_PACKET_TYPE_UNICAST 0
+#define IAVF_RXD_PACKET_TYPE_MULTICAST 1
+#define IAVF_RXD_PACKET_TYPE_BROADCAST 2
+#define IAVF_RXD_PACKET_TYPE_MIRRORED 3
+
+#define IAVF_RXD_QW1_ERROR_SHIFT 19
+#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
+
+enum iavf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum iavf_rx_desc_error_l3l4e_fcoe_masks {
+ IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ IAVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define IAVF_RXD_QW1_PTYPE_SHIFT 30
+#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum iavf_rx_l2_ptype {
+ IAVF_RX_PTYPE_L2_RESERVED = 0,
+ IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IAVF_RX_PTYPE_L2_ARP = 11,
+ IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153,
+ IAVF_RX_PTYPE_PARSER_ABORTED = 255
+};
+
+struct iavf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum iavf_rx_ptype_outer_ip {
+ IAVF_RX_PTYPE_OUTER_L2 = 0,
+ IAVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum iavf_rx_ptype_outer_ip_ver {
+ IAVF_RX_PTYPE_OUTER_NONE = 0,
+ IAVF_RX_PTYPE_OUTER_IPV4 = 0,
+ IAVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum iavf_rx_ptype_outer_fragmented {
+ IAVF_RX_PTYPE_NOT_FRAG = 0,
+ IAVF_RX_PTYPE_FRAG = 1
+};
+
+enum iavf_rx_ptype_tunnel_type {
+ IAVF_RX_PTYPE_TUNNEL_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum iavf_rx_ptype_tunnel_end_prot {
+ IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum iavf_rx_ptype_inner_prot {
+ IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum iavf_rx_ptype_payload_layer {
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define IAVF_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define IAVF_RX_PTYPE_SHIFT 56
+
+#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define IAVF_RXD_QW1_NEXTP_SHIFT 38
+#define IAVF_RXD_QW1_NEXTP_MASK (0x1FFFULL << IAVF_RXD_QW1_NEXTP_SHIFT)
+
+#define IAVF_RXD_QW2_EXT_STATUS_SHIFT 0
+#define IAVF_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ IAVF_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum iavf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define IAVF_RXD_QW2_L2TAG2_SHIFT 0
+#define IAVF_RXD_QW2_L2TAG2_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG2_SHIFT)
+
+#define IAVF_RXD_QW2_L2TAG3_SHIFT 16
+#define IAVF_RXD_QW2_L2TAG3_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG3_SHIFT)
+
+enum iavf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum iavf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum iavf_rx_prog_status_desc_prog_id_masks {
+ IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum iavf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define IAVF_TWO_BIT_MASK 0x3
+#define IAVF_THREE_BIT_MASK 0x7
+#define IAVF_FOUR_BIT_MASK 0xF
+#define IAVF_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct iavf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
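+
+/*
+ * Per the shift/mask definitions below, cmd_type_offset_bsz packs DTYPE
+ * into bits 3:0, CMD into bits 13:4, OFFSET into bits 33:16, the buffer
+ * size into bits 47:34 and L2TAG1 into bits 63:48.
+ */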
+
+#define IAVF_TXD_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+enum iavf_tx_desc_dtype_value {
+ IAVF_TX_DESC_DTYPE_DATA = 0x0,
+ IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ IAVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define IAVF_TXD_QW1_CMD_SHIFT 4
+#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
+
+enum iavf_tx_desc_cmd_bits {
+ IAVF_TX_DESC_CMD_EOP = 0x0001,
+ IAVF_TX_DESC_CMD_RS = 0x0002,
+ IAVF_TX_DESC_CMD_ICRC = 0x0004,
+ IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ IAVF_TX_DESC_CMD_DUMMY = 0x0010,
+ IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ IAVF_TX_DESC_CMD_FCOET = 0x0080,
+ IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define IAVF_TXD_QW1_OFFSET_SHIFT 16
+#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ IAVF_TXD_QW1_OFFSET_SHIFT)
+
+enum iavf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define IAVF_TXD_QW1_MACLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_QW1_IPLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_QW1_L4LEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_QW1_FCLEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_QW1_L2TAG1_SHIFT 48
+#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct iavf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum iavf_tx_ctx_desc_cmd_bits {
+ IAVF_TX_CTX_DESC_TSO = 0x01,
+ IAVF_TX_CTX_DESC_TSYN = 0x02,
+ IAVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IAVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+struct iavf_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define IAVF_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_NOP_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_NOP_QW1_CMD_SHIFT 4
+#define IAVF_TXD_NOP_QW1_CMD_MASK (0x7FUL << IAVF_TXD_NOP_QW1_CMD_SHIFT)
+
+enum iavf_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_NOP_DESC_EOP_SHIFT = 0,
+ IAVF_TX_NOP_DESC_RS_SHIFT = 1,
+ IAVF_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+/* Packet Classifier Types for filters */
+enum iavf_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ IAVF_FILTER_PCTYPE_FCOE_OX = 48,
+ IAVF_FILTER_PCTYPE_FCOE_RX = 49,
+ IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+#define IAVF_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+/* Referenced by IAVF_TXD_FLTR_QW1_ATR_SHIFT below but otherwise
+ * undefined in this header; the value 4 matches the corresponding
+ * i40e filter-programming descriptor definition.
+ */
+#define IAVF_TXD_FLTR_QW1_CMD_SHIFT	4
+
+#define IAVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ IAVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define IAVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(IAVF_TXD_FLTR_QW1_ATR_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ IAVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum iavf_tx_ctx_desc_eipt_offload {
+ IAVF_TX_CTX_EXT_IP_NONE = 0x0,
+ IAVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IAVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)
+#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define IAVF_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+ IAVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ IAVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct iavf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+#define IAVF_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define IAVF_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define IAVF_SR_OPTION_ROM_PTR 0x05
+#define IAVF_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define IAVF_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define IAVF_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define IAVF_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define IAVF_SR_RO_PCIE_LCB_PTR 0x0A
+#define IAVF_SR_EMP_IMAGE_PTR 0x0B
+#define IAVF_SR_PE_IMAGE_PTR 0x0C
+#define IAVF_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define IAVF_SR_MNG_CONFIG_PTR 0x0E
+#define IAVF_SR_PBA_FLAGS 0x15
+#define IAVF_SR_PBA_BLOCK_PTR 0x16
+#define IAVF_SR_BOOT_CONFIG_PTR 0x17
+#define IAVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define IAVF_SR_NVM_MAP_VERSION 0x29
+#define IAVF_SR_NVM_IMAGE_VERSION 0x2A
+#define IAVF_SR_NVM_STRUCTURE_VERSION 0x2B
+#define IAVF_SR_PXE_SETUP_PTR 0x30
+#define IAVF_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define IAVF_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define IAVF_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define IAVF_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define IAVF_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define IAVF_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define IAVF_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define IAVF_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define IAVF_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define IAVF_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define IAVF_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define IAVF_SR_EMP_SR_SETTINGS_PTR 0x48
+#define IAVF_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define IAVF_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define IAVF_SR_IMMEDIATE_VALUES_PTR 0x4E
+#define IAVF_SR_OCP_CFG_WORD0 0x2B
+#define IAVF_SR_OCP_ENABLED BIT(15)
+#define IAVF_SR_BUF_ALIGNMENT 4096
+
+struct iavf_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define IAVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define IAVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define IAVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define IAVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define IAVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define IAVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define IAVF_ALT_BW_VALUE_MASK 0xFF
+#define IAVF_ALT_BW_RELATIVE_MASK 0x40000000
+#define IAVF_ALT_BW_VALID_MASK 0x80000000
+
+#define IAVF_DDP_TRACKID_RDONLY 0
+#define IAVF_DDP_TRACKID_INVALID 0xFFFFFFFF
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+struct iavf_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct iavf_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+#endif /* _IAVF_TYPE_H_ */
diff --git a/sys/dev/iavf/iavf_vc_common.h b/sys/dev/iavf/iavf_vc_common.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_vc_common.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_vc_common.h
+ * @brief header for the virtchnl interface
+ *
+ * Contains function declarations for the virtchnl PF to VF communication
+ * interface.
+ */
+#ifndef _IAVF_VC_COMMON_H_
+#define _IAVF_VC_COMMON_H_
+
+#include "iavf_iflib.h"
+
+int iavf_send_pf_msg(struct iavf_sc *sc,
+ enum virtchnl_ops op, u8 *msg, u16 len);
+int iavf_verify_api_ver(struct iavf_sc *sc);
+int iavf_send_api_ver(struct iavf_sc *sc);
+int iavf_enable_queues(struct iavf_sc *sc);
+int iavf_disable_queues(struct iavf_sc *sc);
+int iavf_add_vlans(struct iavf_sc *sc);
+int iavf_send_vf_config_msg(struct iavf_sc *sc);
+int iavf_get_vf_config(struct iavf_sc *sc);
+int iavf_del_vlans(struct iavf_sc *sc);
+int iavf_add_ether_filters(struct iavf_sc *sc);
+int iavf_del_ether_filters(struct iavf_sc *sc);
+int iavf_request_reset(struct iavf_sc *sc);
+int iavf_request_stats(struct iavf_sc *sc);
+void iavf_update_stats_counters(struct iavf_sc *sc, struct iavf_eth_stats *es);
+int iavf_config_rss_key(struct iavf_sc *sc);
+int iavf_set_rss_hena(struct iavf_sc *sc);
+int iavf_config_rss_lut(struct iavf_sc *sc);
+int iavf_config_promisc_mode(struct iavf_sc *sc);
+void *iavf_vc_get_op_chan(struct iavf_sc *sc, uint32_t request);
+int iavf_vc_send_cmd(struct iavf_sc *sc, uint32_t request);
+const char * iavf_vc_stat_str(struct iavf_hw *hw,
+ enum virtchnl_status_code stat_err);
+const char * iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed);
+const char * iavf_vc_opcode_str(uint16_t op);
+void iavf_vc_completion(struct iavf_sc *sc,
+    enum virtchnl_ops v_opcode,
+    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+enum iavf_ext_link_speed iavf_adv_speed_to_ext_speed(u32 adv_link_speed);
+u32 iavf_ext_speed_to_ifmedia(enum iavf_ext_link_speed link_speed);
+enum iavf_ext_link_speed iavf_vc_speed_to_ext_speed(enum virtchnl_link_speed link_speed);
+const char * iavf_ext_speed_to_str(enum iavf_ext_link_speed link_speed);
+
+int iavf_configure_queues(struct iavf_sc *sc);
+int iavf_map_queues(struct iavf_sc *sc);
+
+#endif /* _IAVF_VC_COMMON_H_ */
diff --git a/sys/dev/iavf/iavf_vc_common.c b/sys/dev/iavf/iavf_vc_common.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_vc_common.c
@@ -0,0 +1,1330 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_vc_common.c
+ * @brief Common virtchnl interface functions
+ *
+ * Contains functions implementing the virtchnl interface for connecting to
+ * the PF driver. This file contains the functions which are common between
+ * the legacy and iflib driver implementations.
+ */
+#include "iavf_vc_common.h"
+
+/* busy wait delay in msec */
+#define IAVF_BUSY_WAIT_DELAY 10
+#define IAVF_BUSY_WAIT_COUNT 50
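+/* 50 iterations of 10 msec gives a maximum busy wait of 500 msec */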
+
+/* Static function decls */
+static void iavf_handle_link_event(struct iavf_sc *sc,
+ struct virtchnl_pf_event *vpe);
+
+/**
+ * iavf_send_pf_msg - Send virtchnl message to PF device
+ * @sc: device softc
+ * @op: the op to send
+ * @msg: message contents
+ * @len: length of the message
+ *
+ * Send a message to the PF device over the virtchnl connection. Print
+ * a status code if the message reports an error.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_send_pf_msg(struct iavf_sc *sc,
+ enum virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum iavf_status status;
+ int val_err;
+
+	/* Validate the message before sending it to the PF */
+ val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len);
+ if (val_err)
+ device_printf(dev, "Error validating msg to PF for op %d,"
+ " msglen %d: error %d\n", op, len, val_err);
+
+ if (!iavf_check_asq_alive(hw)) {
+ if (op != VIRTCHNL_OP_GET_STATS)
+ device_printf(dev, "Unable to send opcode %s to PF, "
+ "ASQ is not alive\n", iavf_vc_opcode_str(op));
+ return (0);
+ }
+
+ if (op != VIRTCHNL_OP_GET_STATS)
+ iavf_dbg_vc(sc,
+ "Sending msg (op=%s[%d]) to PF\n",
+ iavf_vc_opcode_str(op), op);
+
+ status = iavf_aq_send_msg_to_pf(hw, op, IAVF_SUCCESS, msg, len, NULL);
+ if (status && op != VIRTCHNL_OP_GET_STATS)
+ device_printf(dev, "Unable to send opcode %s to PF, "
+ "status %s, aq error %s\n",
+ iavf_vc_opcode_str(op),
+ iavf_stat_str(hw, status),
+ iavf_aq_str(hw, hw->aq.asq_last_status));
+
+ return (status);
+}
+
+/**
+ * iavf_send_api_ver - Send the API version we support to the PF
+ * @sc: device softc
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function.
+ *
+ * @returns 0 if the message was successfully sent, or one of the
+ * IAVF_ERR_ADMIN_QUEUE_* statuses if not.
+ */
+int
+iavf_send_api_ver(struct iavf_sc *sc)
+{
+ struct virtchnl_version_info vvi;
+
+ vvi.major = VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = VIRTCHNL_VERSION_MINOR;
+
+ return iavf_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
+ (u8 *)&vvi, sizeof(vvi));
+}
+
+/**
+ * iavf_verify_api_ver - Verify the PF supports our API version
+ * @sc: device softc
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized.
+ *
+ * @returns 0 if API versions match, EIO if they do not, or
+ * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ */
+int
+iavf_verify_api_ver(struct iavf_sc *sc)
+{
+ struct virtchnl_version_info *pf_vvi;
+ struct iavf_hw *hw = &sc->hw;
+ struct iavf_arq_event_info event;
+ enum iavf_status status;
+ device_t dev = sc->dev;
+ int error = 0;
+ int retries = 0;
+
+ event.buf_len = IAVF_AQ_BUF_SZ;
+ event.msg_buf = (u8 *)malloc(event.buf_len, M_IAVF, M_WAITOK);
+
+ for (;;) {
+ if (++retries > IAVF_AQ_MAX_ERR)
+ goto out_alloc;
+
+ /* Initial delay here is necessary */
+ iavf_msec_pause(100);
+ status = iavf_clean_arq_element(hw, &event, NULL);
+ if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ continue;
+ else if (status) {
+ error = EIO;
+ goto out_alloc;
+ }
+
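+		/* The PF echoes the request opcode in the descriptor's cookie_high */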
+ if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ VIRTCHNL_OP_VERSION) {
+ iavf_dbg_vc(sc, "%s: Received unexpected op response: %d\n",
+ __func__, le32toh(event.desc.cookie_high));
+ /* Don't stop looking for expected response */
+ continue;
+ }
+
+ status = (enum iavf_status)le32toh(event.desc.cookie_low);
+ if (status) {
+ error = EIO;
+ goto out_alloc;
+ } else
+ break;
+ }
+
+ pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
+ device_printf(dev, "Critical PF/VF API version mismatch!\n");
+ error = EIO;
+ } else {
+ sc->version.major = pf_vvi->major;
+ sc->version.minor = pf_vvi->minor;
+ }
+
+ /* Log PF/VF api versions */
+ device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
+ pf_vvi->major, pf_vvi->minor,
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
+
+out_alloc:
+ free(event.msg_buf, M_IAVF);
+ return (error);
+}
+
+/**
+ * iavf_send_vf_config_msg - Send VF configuration request
+ * @sc: device softc
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function.
+ *
+ * @returns 0 if the message was successfully sent, or one of the
+ * IAVF_ERR_ADMIN_QUEUE_* statuses if not.
+ */
+int
+iavf_send_vf_config_msg(struct iavf_sc *sc)
+{
+ u32 caps;
+
+ /* Support the base mode functionality, as well as advanced
+ * speed reporting capability.
+ */
+ caps = VF_BASE_MODE_OFFLOADS |
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ iavf_dbg_info(sc, "Sending offload flags: 0x%b\n",
+ caps, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
+
+ if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+ else
+ return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
+}
+
+/**
+ * iavf_get_vf_config - Get the VF configuration from the PF
+ * @sc: device softc
+ *
+ * Get the VF configuration from the PF and populate the hw structure. Must be
+ * called after the admin queue is initialized. Busy-waits until a response is
+ * received from the PF, up to a maximum timeout. The PF's response is returned
+ * in the buffer for further processing by the caller.
+ *
+ * @returns zero on success, or an error code on failure
+ */
+int
+iavf_get_vf_config(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum iavf_status status = IAVF_SUCCESS;
+ struct iavf_arq_event_info event;
+ u16 len;
+ u32 retries = 0;
+ int error = 0;
+
+ /* Note this assumes a single VSI */
+ len = sizeof(struct virtchnl_vf_resource) +
+ sizeof(struct virtchnl_vsi_resource);
+ event.buf_len = len;
+ event.msg_buf = (u8 *)malloc(event.buf_len, M_IAVF, M_WAITOK);
+
+ for (;;) {
+ status = iavf_clean_arq_element(hw, &event, NULL);
+ if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (++retries <= IAVF_AQ_MAX_ERR)
+ iavf_msec_pause(10);
+ } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ VIRTCHNL_OP_GET_VF_RESOURCES) {
+ iavf_dbg_vc(sc, "%s: Received a response from PF,"
+			    " opcode %d, error %d\n",
+ __func__,
+ le32toh(event.desc.cookie_high),
+ le32toh(event.desc.cookie_low));
+ retries++;
+ continue;
+ } else {
+ status = (enum iavf_status)le32toh(event.desc.cookie_low);
+ if (status) {
+ device_printf(dev, "%s: Error returned from PF,"
+ " opcode %d, error %d\n", __func__,
+ le32toh(event.desc.cookie_high),
+ le32toh(event.desc.cookie_low));
+ error = EIO;
+ goto out_alloc;
+ }
+ /* We retrieved the config message, with no errors */
+ break;
+ }
+
+ if (retries > IAVF_AQ_MAX_ERR) {
+ iavf_dbg_vc(sc,
+		    "%s: Did not receive response after %d tries.\n",
+ __func__, retries);
+ error = ETIMEDOUT;
+ goto out_alloc;
+ }
+ }
+
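+	/* Copy no more than our buffer can hold, in case the reply is larger */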
+ memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
+ iavf_vf_parse_hw_config(hw, sc->vf_res);
+
+out_alloc:
+ free(event.msg_buf, M_IAVF);
+ return (error);
+}
+
+/**
+ * iavf_enable_queues - Enable queues
+ * @sc: device softc
+ *
+ * Request that the PF enable all of our queues.
+ *
+ * @remark the reply from the PF is not checked by this function.
+ *
+ * @returns zero
+ */
+int
+iavf_enable_queues(struct iavf_sc *sc)
+{
+ struct virtchnl_queue_select vqs;
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ vqs.vsi_id = sc->vsi_res->vsi_id;
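+	/* Build a mask with one bit set per Tx queue; Rx mirrors the same mask */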
+ vqs.tx_queues = (1 << IAVF_NTXQS(vsi)) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ return (0);
+}
+
+/**
+ * iavf_disable_queues - Disable queues
+ * @sc: device softc
+ *
+ * Request that the PF disable all of our queues.
+ *
+ * @remark the reply from the PF is not checked by this function.
+ *
+ * @returns zero
+ */
+int
+iavf_disable_queues(struct iavf_sc *sc)
+{
+ struct virtchnl_queue_select vqs;
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ vqs.vsi_id = sc->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << IAVF_NTXQS(vsi)) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ return (0);
+}
+
+/**
+ * iavf_add_vlans - Add VLAN filters
+ * @sc: device softc
+ *
+ * Scan the VLAN filter list for filters that need to be added, then build
+ * the request data to hand to the AQ for handling.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_add_vlans(struct iavf_sc *sc)
+{
+ struct virtchnl_vlan_filter_list *v;
+ struct iavf_vlan_filter *f, *ftmp;
+ device_t dev = sc->dev;
+ int i = 0, cnt = 0;
+ u32 len;
+
+ /* Get count of VLAN filters to add */
+ SLIST_FOREACH(f, sc->vlan_filters, next) {
+ if (f->flags & IAVF_FILTER_ADD)
+ cnt++;
+ }
+
+ if (!cnt) /* no work... */
+ return (ENOENT);
+
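+	/* The message is a list header followed by one u16 VLAN ID per filter */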
+ len = sizeof(struct virtchnl_vlan_filter_list) +
+ (cnt * sizeof(u16));
+
+ if (len > IAVF_AQ_BUF_SZ) {
+ device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+ __func__);
+ return (EFBIG);
+ }
+
+ v = (struct virtchnl_vlan_filter_list *)malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (!v) {
+ device_printf(dev, "%s: unable to allocate memory\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ v->vsi_id = sc->vsi_res->vsi_id;
+ v->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+ if (f->flags & IAVF_FILTER_ADD) {
+ bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+ f->flags = IAVF_FILTER_USED;
+ i++;
+ }
+ if (i == cnt)
+ break;
+ }
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
+ free(v, M_IAVF);
+ /* add stats? */
+ return (0);
+}
+
+/**
+ * iavf_del_vlans - Delete VLAN filters
+ * @sc: device softc
+ *
+ * Scan the VLAN filter list for filters that need to be removed, then build
+ * the request data to hand to the AQ for handling.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_del_vlans(struct iavf_sc *sc)
+{
+ struct virtchnl_vlan_filter_list *v;
+ struct iavf_vlan_filter *f, *ftmp;
+ device_t dev = sc->dev;
+ int i = 0, cnt = 0;
+ u32 len;
+
+ /* Get count of VLAN filters to delete */
+ SLIST_FOREACH(f, sc->vlan_filters, next) {
+ if (f->flags & IAVF_FILTER_DEL)
+ cnt++;
+ }
+
+ if (!cnt) /* no work... */
+ return (ENOENT);
+
+ len = sizeof(struct virtchnl_vlan_filter_list) +
+ (cnt * sizeof(u16));
+
+ if (len > IAVF_AQ_BUF_SZ) {
+ device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+ __func__);
+ return (EFBIG);
+ }
+
+ v = (struct virtchnl_vlan_filter_list *)
+ malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (!v) {
+ device_printf(dev, "%s: unable to allocate memory\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ v->vsi_id = sc->vsi_res->vsi_id;
+ v->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+ if (f->flags & IAVF_FILTER_DEL) {
+ bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+ i++;
+ SLIST_REMOVE(sc->vlan_filters, f, iavf_vlan_filter, next);
+ free(f, M_IAVF);
+ }
+ if (i == cnt)
+ break;
+ }
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
+ free(v, M_IAVF);
+ /* add stats? */
+ return (0);
+}
+
+/**
+ * iavf_add_ether_filters - Add MAC filters
+ * @sc: device softc
+ *
+ * This routine takes additions to the VSI filter
+ * table and creates an Admin Queue call to create
+ * the filters in the hardware.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_add_ether_filters(struct iavf_sc *sc)
+{
+ struct virtchnl_ether_addr_list *a;
+ struct iavf_mac_filter *f;
+ device_t dev = sc->dev;
+ int len, j = 0, cnt = 0;
+ int error;
+
+ /* Get count of MAC addresses to add */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IAVF_FILTER_ADD)
+ cnt++;
+ }
+ if (cnt == 0) { /* Should not happen... */
+ iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
+ return (ENOENT);
+ }
+
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct virtchnl_ether_addr));
+
+ a = (struct virtchnl_ether_addr_list *)
+ malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (a == NULL) {
+ device_printf(dev, "%s: Failed to get memory for "
+ "virtchnl_ether_addr_list\n", __func__);
+ return (ENOMEM);
+ }
+ a->vsi_id = sc->vsi.id;
+ a->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IAVF_FILTER_ADD) {
+ bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
+ f->flags &= ~IAVF_FILTER_ADD;
+ j++;
+
+ iavf_dbg_vc(sc, "%s: ADD: " MAC_FORMAT "\n",
+ __func__, MAC_FORMAT_ARGS(f->macaddr));
+ }
+ if (j == cnt)
+ break;
+ }
+ iavf_dbg_vc(sc, "%s: len %d, j %d, cnt %d\n", __func__,
+ len, j, cnt);
+
+ error = iavf_send_pf_msg(sc,
+ VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
+ /* add stats? */
+ free(a, M_IAVF);
+ return (error);
+}
+
+/**
+ * iavf_del_ether_filters - Delete MAC filters
+ * @sc: device softc
+ *
+ * This routine takes filters flagged for deletion in the
+ * sc MAC filter list and creates an Admin Queue call
+ * to delete those filters in the hardware.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_del_ether_filters(struct iavf_sc *sc)
+{
+ struct virtchnl_ether_addr_list *d;
+ struct iavf_mac_filter *f, *f_temp;
+ device_t dev = sc->dev;
+ int len, j = 0, cnt = 0;
+
+ /* Get count of MAC addresses to delete */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IAVF_FILTER_DEL)
+ cnt++;
+ }
+ if (cnt == 0) {
+ iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
+ return (ENOENT);
+ }
+
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct virtchnl_ether_addr));
+
+ d = (struct virtchnl_ether_addr_list *)
+ malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+ device_printf(dev, "%s: Failed to get memory for "
+ "virtchnl_ether_addr_list\n", __func__);
+ return (ENOMEM);
+ }
+ d->vsi_id = sc->vsi.id;
+ d->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
+ if (f->flags & IAVF_FILTER_DEL) {
+ bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
+ iavf_dbg_vc(sc, "DEL: " MAC_FORMAT "\n",
+ MAC_FORMAT_ARGS(f->macaddr));
+ j++;
+ SLIST_REMOVE(sc->mac_filters, f, iavf_mac_filter, next);
+ free(f, M_IAVF);
+ }
+ if (j == cnt)
+ break;
+ }
+ iavf_send_pf_msg(sc,
+ VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
+ /* add stats? */
+ free(d, M_IAVF);
+ return (0);
+}
+
+/**
+ * iavf_request_reset - Request a device reset
+ * @sc: device softc
+ *
+ * Request that the PF reset this VF. No response is expected.
+ *
+ * @returns zero
+ */
+int
+iavf_request_reset(struct iavf_sc *sc)
+{
+ /*
+ ** Set the reset status to "in progress" before
+ ** the request, this avoids any possibility of
+ ** a mistaken early detection of completion.
+ */
+ wr32(&sc->hw, IAVF_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ return (0);
+}
+
+/**
+ * iavf_request_stats - Request VF stats
+ * @sc: device softc
+ *
+ * Request the statistics for this VF's VSI from PF.
+ *
+ * @remark prints an error message on failure to obtain stats, but does not
+ * return with an error code.
+ *
+ * @returns zero
+ */
+int
+iavf_request_stats(struct iavf_sc *sc)
+{
+ struct virtchnl_queue_select vqs;
+ int error = 0;
+
+ vqs.vsi_id = sc->vsi_res->vsi_id;
+	/* Low priority; log any send error but do not fail */
+ error = iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs));
+ if (error)
+ device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
+
+ return (0);
+}
+
+/**
+ * iavf_update_stats_counters - Update driver statistics
+ * @sc: device softc
+ * @es: ethernet stats storage
+ *
+ * Updates driver's stats counters with VSI stats returned from PF.
+ */
+void
+iavf_update_stats_counters(struct iavf_sc *sc, struct iavf_eth_stats *es)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ uint64_t tx_discards;
+
+ tx_discards = es->tx_discards;
+
+ /* Update ifnet stats */
+ IAVF_SET_IPACKETS(vsi, es->rx_unicast +
+ es->rx_multicast +
+ es->rx_broadcast);
+ IAVF_SET_OPACKETS(vsi, es->tx_unicast +
+ es->tx_multicast +
+ es->tx_broadcast);
+ IAVF_SET_IBYTES(vsi, es->rx_bytes);
+ IAVF_SET_OBYTES(vsi, es->tx_bytes);
+ IAVF_SET_IMCASTS(vsi, es->rx_multicast);
+ IAVF_SET_OMCASTS(vsi, es->tx_multicast);
+
+ IAVF_SET_OERRORS(vsi, es->tx_errors);
+ IAVF_SET_IQDROPS(vsi, es->rx_discards);
+ IAVF_SET_OQDROPS(vsi, tx_discards);
+ IAVF_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+ IAVF_SET_COLLISIONS(vsi, 0);
+
+ vsi->eth_stats = *es;
+}
+
+/**
+ * iavf_config_rss_key - Configure RSS key over virtchnl
+ * @sc: device softc
+ *
+ * Send a message to the PF to configure the RSS key using the virtchnl
+ * interface.
+ *
+ * @remark this does not check the reply from the PF.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_config_rss_key(struct iavf_sc *sc)
+{
+ struct virtchnl_rss_key *rss_key_msg;
+ int msg_len, key_length;
+ u8 rss_seed[IAVF_RSS_KEY_SIZE];
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#else
+ iavf_get_default_rss_key((u32 *)rss_seed);
+#endif
+
+ /* Send the fetched key */
+ key_length = IAVF_RSS_KEY_SIZE;
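+	/* The struct's one-byte key[] placeholder is already counted, hence the -1 */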
+ msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
+ rss_key_msg = (struct virtchnl_rss_key *)
+ malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (rss_key_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
+ return (ENOMEM);
+ }
+
+ rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
+ rss_key_msg->key_len = key_length;
+ bcopy(rss_seed, &rss_key_msg->key[0], key_length);
+
+ iavf_dbg_vc(sc, "%s: vsi_id %d, key_len %d\n", __func__,
+ rss_key_msg->vsi_id, rss_key_msg->key_len);
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)rss_key_msg, msg_len);
+
+ free(rss_key_msg, M_IAVF);
+ return (0);
+}
+
+/**
+ * iavf_set_rss_hena - Configure the RSS HENA
+ * @sc: device softc
+ *
+ * Configure the RSS HENA values by sending a virtchnl message to the PF
+ *
+ * @remark the reply from the PF is not checked by this function.
+ *
+ * @returns zero
+ */
+int
+iavf_set_rss_hena(struct iavf_sc *sc)
+{
+ struct virtchnl_rss_hena hena;
+ struct iavf_hw *hw = &sc->hw;
+
+ if (hw->mac.type == IAVF_MAC_VF)
+ hena.hena = IAVF_DEFAULT_RSS_HENA_AVF;
+ else if (hw->mac.type == IAVF_MAC_X722_VF)
+ hena.hena = IAVF_DEFAULT_RSS_HENA_X722;
+ else
+ hena.hena = IAVF_DEFAULT_RSS_HENA_BASE;
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&hena, sizeof(hena));
+ return (0);
+}
+
+/**
+ * iavf_config_rss_lut - Configure RSS lookup table
+ * @sc: device softc
+ *
+ * Configure the RSS lookup table by sending a virtchnl message to the PF.
+ *
+ * @remark the reply from the PF is not checked in this function.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_config_rss_lut(struct iavf_sc *sc)
+{
+ struct virtchnl_rss_lut *rss_lut_msg;
+ int msg_len;
+ u16 lut_length;
+ u32 lut;
+ int i, que_id;
+
+ lut_length = IAVF_RSS_VSI_LUT_SIZE;
+ msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
+ rss_lut_msg = (struct virtchnl_rss_lut *)
+ malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (rss_lut_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
+ return (ENOMEM);
+ }
+
+ rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
+ /* Each LUT entry is a max of 1 byte, so this is easy */
+ rss_lut_msg->lut_entries = lut_length;
+
+	/* Populate the LUT with Rx queue indices in round-robin fashion */
+ for (i = 0; i < lut_length; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % sc->vsi.num_rx_queues;
+#else
+ que_id = i % sc->vsi.num_rx_queues;
+#endif
+ lut = que_id & IAVF_RSS_VSI_LUT_ENTRY_MASK;
+ rss_lut_msg->lut[i] = lut;
+ }
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)rss_lut_msg, msg_len);
+
+ free(rss_lut_msg, M_IAVF);
+ return (0);
+}
+
+/**
+ * iavf_config_promisc_mode - Configure promiscuous mode
+ * @sc: device softc
+ *
+ * Configure the device into promiscuous mode by sending a virtchnl message to
+ * the PF.
+ *
+ * @remark the reply from the PF is not checked in this function.
+ *
+ * @returns zero
+ */
+int
+iavf_config_promisc_mode(struct iavf_sc *sc)
+{
+ struct virtchnl_promisc_info pinfo;
+
+ pinfo.vsi_id = sc->vsi_res->vsi_id;
+ pinfo.flags = sc->promisc_flags;
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&pinfo, sizeof(pinfo));
+ return (0);
+}
+
+/**
+ * iavf_vc_send_cmd - Convert request into virtchnl calls
+ * @sc: device softc
+ * @request: the requested command to run
+ *
+ * Send the proper virtchnl call based on the request value.
+ *
+ * @returns zero on success, or an error code on failure. Note that unknown
+ * requests will return zero.
+ */
+int
+iavf_vc_send_cmd(struct iavf_sc *sc, uint32_t request)
+{
+ switch (request) {
+ case IAVF_FLAG_AQ_MAP_VECTORS:
+ return iavf_map_queues(sc);
+
+ case IAVF_FLAG_AQ_ADD_MAC_FILTER:
+ return iavf_add_ether_filters(sc);
+
+ case IAVF_FLAG_AQ_ADD_VLAN_FILTER:
+ return iavf_add_vlans(sc);
+
+ case IAVF_FLAG_AQ_DEL_MAC_FILTER:
+ return iavf_del_ether_filters(sc);
+
+ case IAVF_FLAG_AQ_DEL_VLAN_FILTER:
+ return iavf_del_vlans(sc);
+
+ case IAVF_FLAG_AQ_CONFIGURE_QUEUES:
+ return iavf_configure_queues(sc);
+
+ case IAVF_FLAG_AQ_DISABLE_QUEUES:
+ return iavf_disable_queues(sc);
+
+ case IAVF_FLAG_AQ_ENABLE_QUEUES:
+ return iavf_enable_queues(sc);
+
+ case IAVF_FLAG_AQ_CONFIG_RSS_KEY:
+ return iavf_config_rss_key(sc);
+
+ case IAVF_FLAG_AQ_SET_RSS_HENA:
+ return iavf_set_rss_hena(sc);
+
+ case IAVF_FLAG_AQ_CONFIG_RSS_LUT:
+ return iavf_config_rss_lut(sc);
+
+ case IAVF_FLAG_AQ_CONFIGURE_PROMISC:
+ return iavf_config_promisc_mode(sc);
+ }
+
+ return (0);
+}
+
+/**
+ * iavf_vc_get_op_chan - Get op channel for a request
+ * @sc: device softc
+ * @request: the request type
+ *
+ * @returns the op channel for the given request, or NULL if no channel is
+ * used.
+ */
+void *
+iavf_vc_get_op_chan(struct iavf_sc *sc, uint32_t request)
+{
+ switch (request) {
+ case IAVF_FLAG_AQ_ENABLE_QUEUES:
+ return (&sc->enable_queues_chan);
+ case IAVF_FLAG_AQ_DISABLE_QUEUES:
+ return (&sc->disable_queues_chan);
+ default:
+ return (NULL);
+ }
+}
+
+/**
+ * iavf_vc_stat_str - convert virtchnl status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ *
+ * @returns the human readable string representing the specified error code.
+ **/
+const char *
+iavf_vc_stat_str(struct iavf_hw *hw, enum virtchnl_status_code stat_err)
+{
+ switch (stat_err) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return "OK";
+ case VIRTCHNL_ERR_PARAM:
+ return "VIRTCHNL_ERR_PARAM";
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
+ case VIRTCHNL_STATUS_NOT_SUPPORTED:
+ return "VIRTCHNL_STATUS_NOT_SUPPORTED";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_adv_speed_to_ext_speed - Convert numeric speed to iavf speed enum
+ * @adv_link_speed: link speed in Mb/s
+ *
+ * Converts the link speed from the "advanced" link speed virtchnl op into the
+ * closest approximation of the internal iavf link speed, rounded down.
+ *
+ * @returns the link speed as an iavf_ext_link_speed enum value
+ */
+enum iavf_ext_link_speed
+iavf_adv_speed_to_ext_speed(u32 adv_link_speed)
+{
+ if (adv_link_speed >= 100000)
+ return IAVF_EXT_LINK_SPEED_100GB;
+ if (adv_link_speed >= 50000)
+ return IAVF_EXT_LINK_SPEED_50GB;
+ if (adv_link_speed >= 40000)
+ return IAVF_EXT_LINK_SPEED_40GB;
+ if (adv_link_speed >= 25000)
+ return IAVF_EXT_LINK_SPEED_25GB;
+ if (adv_link_speed >= 20000)
+ return IAVF_EXT_LINK_SPEED_20GB;
+ if (adv_link_speed >= 10000)
+ return IAVF_EXT_LINK_SPEED_10GB;
+ if (adv_link_speed >= 5000)
+ return IAVF_EXT_LINK_SPEED_5GB;
+ if (adv_link_speed >= 2500)
+ return IAVF_EXT_LINK_SPEED_2500MB;
+ if (adv_link_speed >= 1000)
+ return IAVF_EXT_LINK_SPEED_1000MB;
+ if (adv_link_speed >= 100)
+ return IAVF_EXT_LINK_SPEED_100MB;
+ if (adv_link_speed >= 10)
+ return IAVF_EXT_LINK_SPEED_10MB;
+
+ return IAVF_EXT_LINK_SPEED_UNKNOWN;
+}
+
+/**
+ * iavf_ext_speed_to_ifmedia - Convert internal iavf speed to ifmedia value
+ * @link_speed: the link speed
+ *
+ * @remark this is sort of a hack, because we don't actually know what media
+ * type the VF is running on. In an ideal world we might just report the media
+ * type as "virtual" and have another mechanism for reporting the link
+ * speed.
+ *
+ * @returns a suitable ifmedia type for the given link speed.
+ */
+u32
+iavf_ext_speed_to_ifmedia(enum iavf_ext_link_speed link_speed)
+{
+ switch (link_speed) {
+ case IAVF_EXT_LINK_SPEED_100GB:
+ return IFM_100G_SR4;
+ case IAVF_EXT_LINK_SPEED_50GB:
+ return IFM_50G_SR2;
+ case IAVF_EXT_LINK_SPEED_40GB:
+ return IFM_40G_SR4;
+ case IAVF_EXT_LINK_SPEED_25GB:
+ return IFM_25G_SR;
+ case IAVF_EXT_LINK_SPEED_20GB:
+ return IFM_20G_KR2;
+ case IAVF_EXT_LINK_SPEED_10GB:
+ return IFM_10G_SR;
+ case IAVF_EXT_LINK_SPEED_5GB:
+ return IFM_5000_T;
+ case IAVF_EXT_LINK_SPEED_2500MB:
+ return IFM_2500_T;
+ case IAVF_EXT_LINK_SPEED_1000MB:
+ return IFM_1000_T;
+ case IAVF_EXT_LINK_SPEED_100MB:
+ return IFM_100_TX;
+ case IAVF_EXT_LINK_SPEED_10MB:
+ return IFM_10_T;
+ case IAVF_EXT_LINK_SPEED_UNKNOWN:
+ default:
+ return IFM_UNKNOWN;
+ }
+}
+
+/**
+ * iavf_vc_speed_to_ext_speed - Convert virtchnl speed enum to native iavf
+ * driver speed representation.
+ * @link_speed: link speed enum value
+ *
+ * @returns the link speed in the native iavf format.
+ */
+enum iavf_ext_link_speed
+iavf_vc_speed_to_ext_speed(enum virtchnl_link_speed link_speed)
+{
+ switch (link_speed) {
+ case VIRTCHNL_LINK_SPEED_40GB:
+ return IAVF_EXT_LINK_SPEED_40GB;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ return IAVF_EXT_LINK_SPEED_25GB;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ return IAVF_EXT_LINK_SPEED_20GB;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ return IAVF_EXT_LINK_SPEED_10GB;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ return IAVF_EXT_LINK_SPEED_1000MB;
+ case VIRTCHNL_LINK_SPEED_100MB:
+ return IAVF_EXT_LINK_SPEED_100MB;
+ case VIRTCHNL_LINK_SPEED_UNKNOWN:
+ default:
+ return IAVF_EXT_LINK_SPEED_UNKNOWN;
+ }
+}
+
+/**
+ * iavf_vc_speed_to_string - Convert virtchnl speed to a string
+ * @link_speed: the speed to convert
+ *
+ * @returns string representing the link speed as reported by the virtchnl
+ * interface.
+ */
+const char *
+iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
+{
+ return iavf_ext_speed_to_str(iavf_vc_speed_to_ext_speed(link_speed));
+}
+
+/**
+ * iavf_ext_speed_to_str - Convert iavf speed enum to string representation
+ * @link_speed: link speed enum value
+ *
+ * XXX: This is an iavf-modified copy of ice_aq_speed_to_str()
+ *
+ * @returns the string representation of the given link speed.
+ */
+const char *
+iavf_ext_speed_to_str(enum iavf_ext_link_speed link_speed)
+{
+ switch (link_speed) {
+ case IAVF_EXT_LINK_SPEED_100GB:
+ return "100 Gbps";
+ case IAVF_EXT_LINK_SPEED_50GB:
+ return "50 Gbps";
+ case IAVF_EXT_LINK_SPEED_40GB:
+ return "40 Gbps";
+ case IAVF_EXT_LINK_SPEED_25GB:
+ return "25 Gbps";
+ case IAVF_EXT_LINK_SPEED_20GB:
+ return "20 Gbps";
+ case IAVF_EXT_LINK_SPEED_10GB:
+ return "10 Gbps";
+ case IAVF_EXT_LINK_SPEED_5GB:
+ return "5 Gbps";
+ case IAVF_EXT_LINK_SPEED_2500MB:
+ return "2.5 Gbps";
+ case IAVF_EXT_LINK_SPEED_1000MB:
+ return "1 Gbps";
+ case IAVF_EXT_LINK_SPEED_100MB:
+ return "100 Mbps";
+ case IAVF_EXT_LINK_SPEED_10MB:
+ return "10 Mbps";
+ case IAVF_EXT_LINK_SPEED_UNKNOWN:
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * iavf_vc_opcode_str - Convert virtchnl opcode to string
+ * @op: the virtchnl op code
+ *
+ * @returns the string representation of the given virtchnl op code
+ */
+const char *
+iavf_vc_opcode_str(uint16_t op)
+{
+ switch (op) {
+ case VIRTCHNL_OP_VERSION:
+ return ("VERSION");
+ case VIRTCHNL_OP_RESET_VF:
+ return ("RESET_VF");
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ return ("GET_VF_RESOURCES");
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ return ("CONFIG_TX_QUEUE");
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ return ("CONFIG_RX_QUEUE");
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ return ("CONFIG_VSI_QUEUES");
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ return ("CONFIG_IRQ_MAP");
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ return ("ENABLE_QUEUES");
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ return ("DISABLE_QUEUES");
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ return ("ADD_ETH_ADDR");
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ return ("DEL_ETH_ADDR");
+ case VIRTCHNL_OP_ADD_VLAN:
+ return ("ADD_VLAN");
+ case VIRTCHNL_OP_DEL_VLAN:
+ return ("DEL_VLAN");
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ return ("CONFIG_PROMISCUOUS_MODE");
+ case VIRTCHNL_OP_GET_STATS:
+ return ("GET_STATS");
+ case VIRTCHNL_OP_RSVD:
+ return ("RSVD");
+ case VIRTCHNL_OP_EVENT:
+ return ("EVENT");
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ return ("CONFIG_RSS_KEY");
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ return ("CONFIG_RSS_LUT");
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ return ("GET_RSS_HENA_CAPS");
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ return ("SET_RSS_HENA");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+/**
+ * iavf_vc_completion - Handle PF reply messages
+ * @sc: device softc
+ * @v_opcode: virtchnl op code
+ * @v_retval: virtchnl return value
+ * @msg: the message body received from the PF
+ * @msglen: length of the msg buffer
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ */
+void
+iavf_vc_completion(struct iavf_sc *sc,
+ enum virtchnl_ops v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen __unused)
+{
+ device_t dev = sc->dev;
+
+ if (v_opcode != VIRTCHNL_OP_GET_STATS)
+ iavf_dbg_vc(sc, "%s: opcode %s\n", __func__,
+ iavf_vc_opcode_str(v_opcode));
+
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
+
+ switch (vpe->event) {
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ iavf_handle_link_event(sc, vpe);
+ break;
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ device_printf(dev, "PF initiated reset!\n");
+ iavf_set_state(&sc->state, IAVF_STATE_RESET_PENDING);
+ break;
+ default:
+ iavf_dbg_vc(sc, "Unknown event %d from AQ\n",
+ vpe->event);
+ break;
+ }
+
+ return;
+ }
+
+ /* Catch-all error response */
+ if (v_retval) {
+ bool print_error = true;
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ device_printf(dev, "WARNING: Error adding VF mac filter!\n");
+ device_printf(dev, "WARNING: Device may not receive traffic!\n");
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
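+			/* Wake the sleeping caller even on failure so it does not hang */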
+ sc->enable_queues_chan = 1;
+ wakeup_one(&sc->enable_queues_chan);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ sc->disable_queues_chan = 1;
+ wakeup_one(&sc->disable_queues_chan);
+ /* This may fail, but it does not necessarily mean that
+ * something is critically wrong.
+ */
+ if (!(sc->dbg_mask & IAVF_DBG_VC))
+ print_error = false;
+ break;
+ default:
+ break;
+ }
+
+ if (print_error)
+ device_printf(dev,
+ "%s: AQ returned error %s to our request %s!\n",
+ __func__, iavf_vc_stat_str(&sc->hw, v_retval),
+ iavf_vc_opcode_str(v_opcode));
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_GET_STATS:
+ iavf_update_stats_counters(sc, (struct iavf_eth_stats *)msg);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ atomic_store_rel_32(&sc->queues_enabled, 1);
+ sc->enable_queues_chan = 1;
+ wakeup_one(&sc->enable_queues_chan);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ atomic_store_rel_32(&sc->queues_enabled, 0);
+ sc->disable_queues_chan = 1;
+ wakeup_one(&sc->disable_queues_chan);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ break;
+ default:
+ iavf_dbg_vc(sc,
+ "Received unexpected message %s from PF.\n",
+ iavf_vc_opcode_str(v_opcode));
+ break;
+ }
+}
+
+/**
+ * iavf_handle_link_event - Handle link event virtchnl message
+ * @sc: device softc
+ * @vpe: virtchnl PF link event structure
+ *
+ * Process a virtchnl PF link event and update the driver and stack status of
+ * the link event.
+ */
+static void
+iavf_handle_link_event(struct iavf_sc *sc, struct virtchnl_pf_event *vpe)
+{
+ MPASS(vpe->event == VIRTCHNL_EVENT_LINK_CHANGE);
+
+	if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ iavf_dbg_vc(sc, "Link change (adv): status %d, speed %u\n",
+ vpe->event_data.link_event_adv.link_status,
+ vpe->event_data.link_event_adv.link_speed);
+ sc->link_up =
+ vpe->event_data.link_event_adv.link_status;
+ sc->link_speed_adv =
+ vpe->event_data.link_event_adv.link_speed;
+	} else {
+ iavf_dbg_vc(sc, "Link change: status %d, speed %x\n",
+ vpe->event_data.link_event.link_status,
+ vpe->event_data.link_event.link_speed);
+ sc->link_up =
+ vpe->event_data.link_event.link_status;
+ sc->link_speed =
+ vpe->event_data.link_event.link_speed;
+ }
+
+ iavf_update_link_status(sc);
+}
diff --git a/sys/dev/iavf/iavf_vc_iflib.c b/sys/dev/iavf/iavf_vc_iflib.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/iavf_vc_iflib.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file iavf_vc_iflib.c
+ * @brief iflib-specific Virtchnl interface functions
+ *
+ * Contains functions implementing the virtchnl interface for communicating
+ * with the PF driver. This file contains definitions specific to the iflib
+ * driver implementation.
+ */
+
+#include "iavf_iflib.h"
+#include "iavf_vc_common.h"
+
+/**
+ * iavf_configure_queues - Configure queues
+ * @sc: device softc
+ *
+ * Request that the PF set up our queues.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_configure_queues(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct iavf_vsi *vsi = &sc->vsi;
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
+ struct iavf_tx_queue *tx_que = vsi->tx_queues;
+ struct iavf_rx_queue *rx_que = vsi->rx_queues;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int len, pairs;
+
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
+
+ /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
+ * queues of a pair need to be configured */
+ pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
+ len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ (sizeof(struct virtchnl_queue_pair_info) * pairs);
+ vqci = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
+ if (!vqci) {
+ device_printf(dev, "%s: unable to allocate memory\n", __func__);
+ return (ENOMEM);
+ }
+ vqci->vsi_id = sc->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+	/* Size check is not needed here - HW max is 16 queue pairs, and we
+	 * can fit info for 31 of them into the AQ buffer before it overflows.
+	 *
+	 * TODO: the above is wrong now; X722 VFs can have 256 queues
+	 */
+ for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
+ txr = &tx_que->txr;
+ rxr = &rx_que->rxr;
+
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = scctx->isc_ntxd[0];
+ vqpi->txq.dma_ring_addr = txr->tx_paddr;
+		/* Configure head writeback, if enabled */
+ if (!vsi->enable_head_writeback) {
+ vqpi->txq.headwb_enabled = 0;
+ vqpi->txq.dma_headwb_addr = 0;
+ } else {
+ vqpi->txq.headwb_enabled = 1;
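+			/* The head writeback area sits immediately after the descriptor ring */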
+ vqpi->txq.dma_headwb_addr = txr->tx_paddr +
+ sizeof(struct iavf_tx_desc) * scctx->isc_ntxd[0];
+ }
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = scctx->isc_nrxd[0];
+ vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
+ vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
+ vqpi->rxq.databuffer_size = rxr->mbuf_sz;
+ vqpi->rxq.splithdr_enabled = 0;
+ }
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ free(vqci, M_IAVF);
+
+ return (0);
+}
+
+/**
+ * iavf_map_queues - Map queues to interrupt vectors
+ * @sc: device softc
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_map_queues(struct iavf_sc *sc)
+{
+ struct virtchnl_irq_map_info *vm;
+ int i, q, len;
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_rx_queue *rx_que = vsi->rx_queues;
+ if_softc_ctx_t scctx = vsi->shared;
+ device_t dev = sc->dev;
+
+	/* XXX: What happens if we only get 1 MSI-X vector? */
+ MPASS(scctx->isc_vectors > 1);
+
+ /* How many queue vectors, adminq uses one */
+	/* XXX: How do we know how many interrupt vectors we have? */
+ q = scctx->isc_vectors - 1;
+
+ len = sizeof(struct virtchnl_irq_map_info) +
+ (scctx->isc_vectors * sizeof(struct virtchnl_vector_map));
+ vm = malloc(len, M_IAVF, M_NOWAIT);
+ if (!vm) {
+ device_printf(dev, "%s: unable to allocate memory\n", __func__);
+ return (ENOMEM);
+ }
+
+ vm->num_vectors = scctx->isc_vectors;
+ /* Queue vectors first */
+ for (i = 0; i < q; i++, rx_que++) {
+ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+ vm->vecmap[i].vector_id = i + 1; /* first is adminq */
+		/* TODO: Re-examine this */
+ vm->vecmap[i].txq_map = (1 << rx_que->rxr.me);
+ vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me);
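+		/* By convention, Rx throttles with ITR index 0 and Tx with ITR index 1 */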
+ vm->vecmap[i].rxitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 1;
+ }
+
+ /* Misc vector last - this is only for AdminQ messages */
+ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+ vm->vecmap[i].vector_id = 0;
+ vm->vecmap[i].txq_map = 0;
+ vm->vecmap[i].rxq_map = 0;
+ vm->vecmap[i].rxitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 0;
+
+ iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vm, len);
+ free(vm, M_IAVF);
+
+ return (0);
+}
diff --git a/sys/dev/iavf/if_iavf_iflib.c b/sys/dev/iavf/if_iavf_iflib.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/if_iavf_iflib.c
@@ -0,0 +1,2138 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file if_iavf_iflib.c
+ * @brief iflib driver implementation
+ *
+ * Contains the main entry point for the iflib driver implementation. It
+ * implements the various ifdi driver methods, and sets up the module and
+ * driver values to load an iflib driver.
+ */
+
+#include "iavf_iflib.h"
+#include "iavf_vc_common.h"
+
+#include "iavf_drv_info.h"
+#include "iavf_sysctls_iflib.h"
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static void *iavf_register(device_t dev);
+static int iavf_if_attach_pre(if_ctx_t ctx);
+static int iavf_if_attach_post(if_ctx_t ctx);
+static int iavf_if_detach(if_ctx_t ctx);
+static int iavf_if_shutdown(if_ctx_t ctx);
+static int iavf_if_suspend(if_ctx_t ctx);
+static int iavf_if_resume(if_ctx_t ctx);
+static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
+static void iavf_if_enable_intr(if_ctx_t ctx);
+static void iavf_if_disable_intr(if_ctx_t ctx);
+static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+static void iavf_if_queues_free(if_ctx_t ctx);
+static void iavf_if_update_admin_status(if_ctx_t ctx);
+static void iavf_if_multi_set(if_ctx_t ctx);
+static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
+static int iavf_if_media_change(if_ctx_t ctx);
+static int iavf_if_promisc_set(if_ctx_t ctx, int flags);
+static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
+static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
+static void iavf_if_init(if_ctx_t ctx);
+static void iavf_if_stop(if_ctx_t ctx);
+
+static int iavf_allocate_pci_resources(struct iavf_sc *);
+static void iavf_free_pci_resources(struct iavf_sc *);
+static void iavf_setup_interface(struct iavf_sc *);
+static void iavf_add_device_sysctls(struct iavf_sc *);
+static void iavf_enable_queue_irq(struct iavf_hw *, int);
+static void iavf_disable_queue_irq(struct iavf_hw *, int);
+static void iavf_stop(struct iavf_sc *);
+
+static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
+static int iavf_msix_que(void *);
+static int iavf_msix_adminq(void *);
+static void iavf_configure_itr(struct iavf_sc *sc);
+
+static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
+#ifdef IAVF_DEBUG
+static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
+static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
+#endif
+
+static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
+static void iavf_vc_task(void *arg, int pending __unused);
+static int iavf_setup_vc_tq(struct iavf_sc *sc);
+static int iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+/**
+ * @var iavf_methods
+ * @brief device methods for the iavf driver
+ *
+ * Device method callbacks used to interact with the driver. For iflib this
+ * primarily resolves to the default iflib implementations.
+ */
+static device_method_t iavf_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_register, iavf_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD_END
+};
+
+static driver_t iavf_driver = {
+ "iavf", iavf_methods, sizeof(struct iavf_sc),
+};
+
+devclass_t iavf_devclass;
+DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
+MODULE_VERSION(iavf, 1);
+
+MODULE_DEPEND(iavf, pci, 1, 1, 1);
+MODULE_DEPEND(iavf, ether, 1, 1, 1);
+MODULE_DEPEND(iavf, iflib, 1, 1, 1);
+
+IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);
+
+/**
+ * @var M_IAVF
+ * @brief main iavf driver allocation type
+ *
+ * malloc(9) allocation type used by the majority of memory allocations in the
+ * iavf iflib driver.
+ */
+MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
+
+static device_method_t iavf_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
+ DEVMETHOD(ifdi_detach, iavf_if_detach),
+ DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
+ DEVMETHOD(ifdi_suspend, iavf_if_suspend),
+ DEVMETHOD(ifdi_resume, iavf_if_resume),
+ DEVMETHOD(ifdi_init, iavf_if_init),
+ DEVMETHOD(ifdi_stop, iavf_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
+ DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
+ DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
+ DEVMETHOD(ifdi_media_status, iavf_if_media_status),
+ DEVMETHOD(ifdi_media_change, iavf_if_media_change),
+ DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
+ DEVMETHOD(ifdi_timer, iavf_if_timer),
+ DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
+ DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
+ DEVMETHOD_END
+};
+
+static driver_t iavf_if_driver = {
+ "iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
+};
+
+extern struct if_txrx iavf_txrx_hwb;
+extern struct if_txrx iavf_txrx_dwb;
+
+static struct if_shared_ctx iavf_sctx = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE,
+ .isc_tx_maxsize = IAVF_MAX_FRAME,
+ .isc_tx_maxsegsize = IAVF_MAX_FRAME,
+ .isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
+ .isc_rx_maxsize = IAVF_MAX_FRAME,
+ .isc_rx_nsegments = IAVF_MAX_RX_SEGS,
+ .isc_rx_maxsegsize = IAVF_MAX_FRAME,
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = iavf_vendor_info_array,
+ .isc_driver_version = __DECONST(char *, iavf_driver_version),
+ .isc_driver = &iavf_if_driver,
+ .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,
+
+ .isc_nrxd_min = {IAVF_MIN_RING},
+ .isc_ntxd_min = {IAVF_MIN_RING},
+ .isc_nrxd_max = {IAVF_MAX_RING},
+ .isc_ntxd_max = {IAVF_MAX_RING},
+ .isc_nrxd_default = {IAVF_DEFAULT_RING},
+ .isc_ntxd_default = {IAVF_DEFAULT_RING},
+};
+
+/*** Functions ***/
+
+/**
+ * iavf_register - iflib callback to obtain the shared context pointer
+ * @dev: the device being registered
+ *
+ * Called when the driver is first being attached to the device. This function
+ * is used by iflib to obtain a pointer to the shared context structure which
+ * describes the device features.
+ *
+ * @returns a pointer to the iavf shared context structure.
+ */
+static void *
+iavf_register(device_t dev __unused)
+{
+ return (&iavf_sctx);
+}
+
+/**
+ * iavf_allocate_pci_resources - Allocate PCI resources
+ * @sc: the device private softc
+ *
+ * Allocate PCI resources used by the iflib driver.
+ *
+ * @returns zero or a non-zero error code on failure
+ */
+static int
+iavf_allocate_pci_resources(struct iavf_sc *sc)
+{
+ return (iavf_allocate_pci_resources_common(sc));
+}
+
+/**
+ * iavf_if_attach_pre - Begin attaching the device to the driver
+ * @ctx: the iflib context pointer
+ *
+ * Called by iflib to begin the attach process. Allocates resources and
+ * initializes the hardware for operation.
+ *
+ * @returns zero or a non-zero error code on failure.
+ */
+static int
+iavf_if_attach_pre(if_ctx_t ctx)
+{
+ device_t dev;
+ struct iavf_sc *sc;
+ struct iavf_hw *hw;
+ struct iavf_vsi *vsi;
+ if_softc_ctx_t scctx;
+ int error = 0;
+
+ /* Setup pointers */
+ dev = iflib_get_dev(ctx);
+ sc = iavf_sc_from_ctx(ctx);
+
+ vsi = &sc->vsi;
+ vsi->back = sc;
+ sc->dev = sc->osdep.dev = dev;
+ hw = &sc->hw;
+
+ vsi->dev = dev;
+ vsi->hw = &sc->hw;
+ vsi->num_vlans = 0;
+ vsi->ctx = ctx;
+ sc->media = iflib_get_media(ctx);
+ vsi->ifp = iflib_get_ifp(ctx);
+ vsi->shared = scctx = iflib_get_softc_ctx(ctx);
+
+ iavf_save_tunables(sc);
+
+ /* Setup VC mutex */
+ snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name),
+ "%s:vc", device_get_nameunit(dev));
+ mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF);
+
+ /* Do PCI setup - map BAR0, etc */
+ error = iavf_allocate_pci_resources(sc);
+ if (error) {
+ device_printf(dev, "%s: Allocation of PCI resources failed\n",
+ __func__);
+ goto err_early;
+ }
+
+ iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
+
+ error = iavf_set_mac_type(hw);
+ if (error) {
+ device_printf(dev, "%s: set_mac_type failed: %d\n",
+ __func__, error);
+ goto err_pci_res;
+ }
+
+ error = iavf_reset_complete(hw);
+ if (error) {
+ device_printf(dev, "%s: Device is still being reset\n",
+ __func__);
+ goto err_pci_res;
+ }
+
+ iavf_dbg_init(sc, "VF Device is ready for configuration\n");
+
+ /* Sets up Admin Queue */
+ error = iavf_setup_vc(sc);
+ if (error) {
+ device_printf(dev, "%s: Error setting up PF comms, %d\n",
+ __func__, error);
+ goto err_pci_res;
+ }
+
+ iavf_dbg_init(sc, "PF API version verified\n");
+
+ /* Need API version before sending reset message */
+ error = iavf_reset(sc);
+ if (error) {
+ device_printf(dev, "VF reset failed; reload the driver\n");
+ goto err_aq;
+ }
+
+ iavf_dbg_init(sc, "VF reset complete\n");
+
+ /* Ask for VF config from PF */
+ error = iavf_vf_config(sc);
+ if (error) {
+ device_printf(dev, "Error getting configuration from PF: %d\n",
+ error);
+ goto err_aq;
+ }
+
+ iavf_print_device_info(sc);
+
+ error = iavf_get_vsi_res_from_vf_res(sc);
+ if (error)
+ goto err_res_buf;
+
+ iavf_dbg_init(sc, "Resource Acquisition complete\n");
+
+ /* Setup taskqueue to service VC messages */
+ error = iavf_setup_vc_tq(sc);
+ if (error)
+ goto err_vc_tq;
+
+ iavf_set_mac_addresses(sc);
+ iflib_set_mac(ctx, hw->mac.addr);
+
+ /* Allocate filter lists */
+ iavf_init_filters(sc);
+
+ /* Fill out more iflib parameters */
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
+ sc->vsi_res->num_queue_pairs;
+ if (vsi->enable_head_writeback) {
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN);
+ scctx->isc_txrx = &iavf_txrx_hwb;
+ } else {
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct iavf_tx_desc), DBA_ALIGN);
+ scctx->isc_txrx = &iavf_txrx_dwb;
+ }
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
+ * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
+ scctx->isc_msix_bar = PCIR_BAR(IAVF_MSIX_BAR);
+ scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
+ scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
+ scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE;
+ scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE;
+ scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
+ scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
+
+ /* Update OS cache of MSIX control register values */
+ iavf_update_msix_devinfo(dev);
+
+ return (0);
+
+err_vc_tq:
+ taskqueue_free(sc->vc_tq);
+err_res_buf:
+ free(sc->vf_res, M_IAVF);
+err_aq:
+ iavf_shutdown_adminq(hw);
+err_pci_res:
+ iavf_free_pci_resources(sc);
+err_early:
+ IAVF_VC_LOCK_DESTROY(sc);
+ return (error);
+}
+
+/**
+ * iavf_vc_task - task used to process VC messages
+ * @arg: device softc
+ * @pending: unused
+ *
+ * Processes the admin queue, in order to process the virtual
+ * channel messages received from the PF.
+ */
+static void
+iavf_vc_task(void *arg, int pending __unused)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg;
+ u16 var;
+
+ iavf_process_adminq(sc, &var);
+}
+
+/**
+ * iavf_setup_vc_tq - Setup task queues
+ * @sc: device softc
+ *
+ * Create taskqueue and tasklet for processing virtual channel messages. This
+ * is done in a separate non-iflib taskqueue so that the iflib context lock
+ * does not need to be held for VC messages to be processed.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_setup_vc_tq(struct iavf_sc *sc)
+{
+ device_t dev = sc->dev;
+ int error = 0;
+
+ TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);
+
+ sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->vc_tq);
+ if (!sc->vc_tq) {
+ device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
+ return (ENOMEM);
+ }
+ error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
+ device_get_nameunit(dev));
+ if (error) {
+ device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
+ error);
+ taskqueue_free(sc->vc_tq);
+ return (error);
+ }
+
+ return (error);
+}
+
+/**
+ * iavf_if_attach_post - Finish attaching the device to the driver
+ * @ctx: the iflib context pointer
+ *
+ * Called by iflib after it has set up queues and interrupts. Used to finish up
+ * the attach process for a device. Attach logic which must occur after Tx and
+ * Rx queues are setup belongs here.
+ *
+ * @returns zero or a non-zero error code on failure
+ */
+static int
+iavf_if_attach_post(if_ctx_t ctx)
+{
+ device_t dev;
+ struct iavf_sc *sc;
+ struct iavf_hw *hw;
+ struct iavf_vsi *vsi;
+ int error = 0;
+
+ dev = iflib_get_dev(ctx);
+ INIT_DBG_DEV(dev, "begin");
+
+ sc = iavf_sc_from_ctx(ctx);
+ vsi = &sc->vsi;
+ hw = &sc->hw;
+
+ /* Save off determined number of queues for interface */
+ vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
+ vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
+
+ /* Setup the stack interface */
+ iavf_setup_interface(sc);
+
+ iavf_dbg_init(sc, "Interface setup complete\n");
+
+ /* Initialize statistics & add sysctls */
+ bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
+ iavf_add_device_sysctls(sc);
+
+ atomic_store_rel_32(&sc->queues_enabled, 0);
+ iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);
+
+ /* We want AQ enabled early for init */
+ iavf_enable_adminq_irq(hw);
+
+ INIT_DBG_DEV(dev, "end");
+
+ return (error);
+}
+
+/**
+ * iavf_if_detach - Detach a device from the driver
+ * @ctx: the iflib context of the device to detach
+ *
+ * Called by iflib to detach a given device from the driver. Clean up any
+ * resources associated with the driver and shut the device down.
+ *
+ * @remark iflib always ignores the return value of IFDI_DETACH, so this
+ * function is effectively not allowed to fail. Instead, it should clean up
+ * and release as much as possible even if something goes wrong.
+ *
+ * @returns zero
+ */
+static int
+iavf_if_detach(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum iavf_status status;
+
+ INIT_DBG_DEV(dev, "begin");
+
+ iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);
+
+ /* Drain admin queue taskqueue */
+ taskqueue_free(sc->vc_tq);
+ IAVF_VC_LOCK_DESTROY(sc);
+
+ /* Remove all the media and link information */
+ ifmedia_removeall(sc->media);
+
+ iavf_disable_adminq_irq(hw);
+ status = iavf_shutdown_adminq(&sc->hw);
+ if (status != IAVF_SUCCESS) {
+ device_printf(dev,
+ "iavf_shutdown_adminq() failed with status %s\n",
+ iavf_stat_str(hw, status));
+ }
+
+ free(sc->vf_res, M_IAVF);
+ sc->vf_res = NULL;
+ iavf_free_pci_resources(sc);
+ iavf_free_filters(sc);
+
+ INIT_DBG_DEV(dev, "end");
+ return (0);
+}
+
+/**
+ * iavf_if_shutdown - called by iflib to handle shutdown
+ * @ctx: the iflib context pointer
+ *
+ * Callback for the IFDI_SHUTDOWN iflib function.
+ *
+ * @returns zero or an error code on failure
+ */
+static int
+iavf_if_shutdown(if_ctx_t ctx __unused)
+{
+ return (0);
+}
+
+/**
+ * iavf_if_suspend - called by iflib to handle suspend
+ * @ctx: the iflib context pointer
+ *
+ * Callback for the IFDI_SUSPEND iflib function.
+ *
+ * @returns zero or an error code on failure
+ */
+static int
+iavf_if_suspend(if_ctx_t ctx __unused)
+{
+ return (0);
+}
+
+/**
+ * iavf_if_resume - called by iflib to handle resume
+ * @ctx: the iflib context pointer
+ *
+ * Callback for the IFDI_RESUME iflib function.
+ *
+ * @returns zero or an error code on failure
+ */
+static int
+iavf_if_resume(if_ctx_t ctx __unused)
+{
+ return (0);
+}
+
+/**
+ * iavf_vc_sleep_wait - Sleep for a response from a VC message
+ * @sc: device softc
+ * @op: the op code to sleep on
+ *
+ * Sleep until a response from the PF for the VC message sent by the
+ * given op.
+ *
+ * @returns zero on success, or EWOULDBLOCK if the sleep times out.
+ */
+static int
+iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
+{
+ int error = 0;
+
+ IAVF_VC_LOCK_ASSERT(sc);
+
+ iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
+
+ error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
+ &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
+
+ return (error);
+}
+
+/**
+ * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
+ * @sc: device softc
+ * @op: the op code to send
+ *
+ * Send a virtchnl message to the PF, and sleep or busy wait for a response
+ * from the PF, depending on iflib context lock type.
+ *
+ * @remark on kernels that can indicate detachment to the driver, this
+ * function does not wait for a response if the device is being detached
+ *
+ * @returns zero or an error code on failure.
+ */
+int
+iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
+{
+ if_ctx_t ctx = sc->vsi.ctx;
+ int error = 0;
+
+ IAVF_VC_LOCK(sc);
+ error = iavf_vc_send_cmd(sc, op);
+ if (error != 0) {
+ iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
+ goto release_lock;
+ }
+
+ /* Don't wait for a response if the device is being detached. */
+ if (!iflib_in_detach(ctx)) {
+ error = iavf_vc_sleep_wait(sc, op);
+ IAVF_VC_LOCK_ASSERT(sc);
+
+ if (error == EWOULDBLOCK)
+ device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
+ }
+release_lock:
+ IAVF_VC_UNLOCK(sc);
+ return (error);
+}
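+
+/*
+ * For example, iavf_if_init below enables the LAN queues and blocks until
+ * the PF responds or IAVF_AQ_TIMEOUT expires:
+ *
+ *   iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
+ */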
+
+/**
+ * iavf_send_vc_msg - Send a virtchnl message to the PF
+ * @sc: device softc
+ * @op: the op code to send
+ *
+ * Send a virtchnl message to the PF and do not wait for a response.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
+{
+ int error = 0;
+
+ error = iavf_vc_send_cmd(sc, op);
+ if (error != 0)
+ iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
+
+ return (error);
+}
+
+/**
+ * iavf_init_queues - initialize Tx and Rx queues
+ * @vsi: the VSI to initialize
+ *
+ * Refresh the Tx and Rx ring contents and update the tail pointers for each
+ * queue.
+ */
+static void
+iavf_init_queues(struct iavf_vsi *vsi)
+{
+ struct iavf_tx_queue *tx_que = vsi->tx_queues;
+ struct iavf_rx_queue *rx_que = vsi->rx_queues;
+ struct rx_ring *rxr;
+ uint32_t mbuf_sz;
+
+ mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
+ MPASS(mbuf_sz <= UINT16_MAX);
+
+ for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
+ iavf_init_tx_ring(vsi, tx_que);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
+ rxr = &rx_que->rxr;
+
+ rxr->mbuf_sz = mbuf_sz;
+ wr32(vsi->hw, rxr->tail, 0);
+ }
+}
+
+/**
+ * iavf_if_init - Initialize device for operation
+ * @ctx: the iflib context pointer
+ *
+ * Initializes a device for operation. Called by iflib in response to an
+ * interface up event from the stack.
+ *
+ * @remark this function does not return a value and thus cannot indicate
+ * failure to initialize.
+ */
+static void
+iavf_if_init(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_hw *hw = &sc->hw;
+ if_t ifp = iflib_get_ifp(ctx);
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ enum iavf_status status;
+ device_t dev = sc->dev;
+ int error = 0;
+
+ INIT_DBG_IF(ifp, "begin");
+
+ IFLIB_CTX_ASSERT(ctx);
+
+ error = iavf_reset_complete(hw);
+ if (error) {
+ device_printf(sc->dev, "%s: VF reset failed\n",
+ __func__);
+ }
+
+ if (!iavf_check_asq_alive(hw)) {
+ iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
+ pci_enable_busmaster(dev);
+
+ status = iavf_shutdown_adminq(hw);
+ if (status != IAVF_SUCCESS) {
+ device_printf(dev,
+ "%s: iavf_shutdown_adminq failed: %s\n",
+ __func__, iavf_stat_str(hw, status));
+ return;
+ }
+
+ status = iavf_init_adminq(hw);
+ if (status != IAVF_SUCCESS) {
+ device_printf(dev,
+ "%s: iavf_init_adminq failed: %s\n",
+ __func__, iavf_stat_str(hw, status));
+ return;
+ }
+ }
+
+ /* Make sure queues are disabled */
+ iavf_disable_queues_with_retries(sc);
+
+ bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
+ if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
+ (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
+ error = iavf_del_mac_filter(sc, hw->mac.addr);
+ if (error == 0)
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
+
+ bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
+ }
+
+ error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
+ if (!error || error == EEXIST)
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
+ iflib_set_mac(ctx, hw->mac.addr);
+
+ /* Prepare the queues for operation */
+ iavf_init_queues(vsi);
+
+ /* Set initial ITR values */
+ iavf_configure_itr(sc);
+
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);
+
+ /* Set up RSS */
+ iavf_config_rss(sc);
+
+ /* Map vectors */
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);
+
+ /* Init SW TX ring indices */
+ if (vsi->enable_head_writeback)
+ iavf_init_tx_cidx(vsi);
+ else
+ iavf_init_tx_rsqs(vsi);
+
+ /* Configure promiscuous mode */
+ iavf_config_promisc(sc, if_getflags(ifp));
+
+ /* Enable queues */
+ iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
+
+ iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
+}
+
+/**
+ * iavf_if_msix_intr_assign - Assign MSI-X interrupts
+ * @ctx: the iflib context pointer
+ * @msix: the number of MSI-X vectors available
+ *
+ * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
+ * sets up vectors for each Tx and Rx queue, as well as the administrative
+ * control interrupt.
+ *
+ * @returns zero or an error code on failure
+ */
+static int
+iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_rx_queue *rx_que = vsi->rx_queues;
+ struct iavf_tx_queue *tx_que = vsi->tx_queues;
+ int err, i, rid, vector = 0;
+ char buf[16];
+
+ MPASS(vsi->shared->isc_nrxqsets > 0);
+ MPASS(vsi->shared->isc_ntxqsets > 0);
+
+ /* The Admin Queue is vector 0 */
+ rid = vector + 1;
+ err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
+ iavf_msix_adminq, sc, 0, "aq");
+ if (err) {
+ iflib_irq_free(ctx, &vsi->irq);
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register Admin Que handler");
+ return (err);
+ }
+
+ /* Now set up the stations */
+ for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
+ rid = vector + 1;
+
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
+ IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
+ if (err) {
+ device_printf(iflib_get_dev(ctx),
+ "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
+ vsi->num_rx_queues = i + 1;
+ goto fail;
+ }
+ rx_que->msix = vector;
+ }
+
+ bzero(buf, sizeof(buf));
+
+ for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ iflib_softirq_alloc_generic(ctx,
+ &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
+ IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
+
+ tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
+ }
+
+ return (0);
+fail:
+ iflib_irq_free(ctx, &vsi->irq);
+ rx_que = vsi->rx_queues;
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(ctx, &rx_que->que_irq);
+ return (err);
+}
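+
+/*
+ * The resulting vector layout, for illustration with 4 Rx and 4 Tx queues:
+ * vector 0 services the Admin Queue, vectors 1..isc_nrxqsets service the Rx
+ * queues, and Tx queue i shares the vector of Rx queue (i % isc_nrxqsets):
+ *
+ *   vector 0: adminq
+ *   vector 1: rxq0, txq0
+ *   vector 2: rxq1, txq1
+ *   vector 3: rxq2, txq2
+ *   vector 4: rxq3, txq3
+ */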
+
+/**
+ * iavf_if_enable_intr - Enable all interrupts for a device
+ * @ctx: the iflib context pointer
+ *
+ * Called by iflib to request enabling all interrupts.
+ */
+static void
+iavf_if_enable_intr(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ iavf_enable_intr(vsi);
+}
+
+/**
+ * iavf_if_disable_intr - Disable all interrupts for a device
+ * @ctx: the iflib context pointer
+ *
+ * Called by iflib to request disabling all interrupts.
+ */
+static void
+iavf_if_disable_intr(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ iavf_disable_intr(vsi);
+}
+
+/**
+ * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
+ * @ctx: the iflib context pointer
+ * @rxqid: Rx queue index
+ *
+ * Enables the interrupt associated with a specified Rx queue.
+ *
+ * @returns zero
+ */
+static int
+iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_hw *hw = vsi->hw;
+ struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid];
+
+ iavf_enable_queue_irq(hw, rx_que->msix - 1);
+ return (0);
+}
+
+/**
+ * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt
+ * @ctx: the iflib context pointer
+ * @txqid: Tx queue index
+ *
+ * Enables the interrupt associated with a specified Tx queue.
+ *
+ * @returns zero
+ */
+static int
+iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_hw *hw = vsi->hw;
+ struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
+
+ iavf_enable_queue_irq(hw, tx_que->msix - 1);
+ return (0);
+}
+
+/**
+ * iavf_if_tx_queues_alloc - Allocate Tx queue memory
+ * @ctx: the iflib context pointer
+ * @vaddrs: Array of virtual addresses
+ * @paddrs: Array of physical addresses
+ * @ntxqs: the number of Tx queues per group (should always be 1)
+ * @ntxqsets: the number of Tx queues
+ *
+ * Allocates memory for the specified number of Tx queues. This includes
+ * memory for the queue structures and the report status array for the queues.
+ * The virtual and physical addresses are saved for later use during
+ * initialization.
+ *
+ * @returns zero or a non-zero error code on failure
+ */
+static int
+iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ if_softc_ctx_t scctx = vsi->shared;
+ struct iavf_tx_queue *que;
+ int i, j, error = 0;
+
+ MPASS(scctx->isc_ntxqsets > 0);
+ MPASS(ntxqs == 1);
+ MPASS(scctx->isc_ntxqsets == ntxqsets);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->tx_queues =
+ (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) * ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ txr->me = i;
+ que->vsi = vsi;
+
+ if (!vsi->enable_head_writeback) {
+ /* Allocate report status array */
+ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
+ device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ /* Init report status array */
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ }
+ /* get the virtual and physical address of the hardware queues */
+ txr->tail = IAVF_QTX_TAIL1(txr->me);
+ txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs];
+ txr->tx_paddr = paddrs[i * ntxqs];
+ txr->que = que;
+ }
+
+ return (0);
+fail:
+ iavf_if_queues_free(ctx);
+ return (error);
+}
+
+/**
+ * iavf_if_rx_queues_alloc - Allocate Rx queue memory
+ * @ctx: the iflib context pointer
+ * @vaddrs: Array of virtual addresses
+ * @paddrs: Array of physical addresses
+ * @nrxqs: number of Rx queues per group (should always be 1)
+ * @nrxqsets: the number of Rx queues to allocate
+ *
+ * Called by iflib to allocate driver memory for a number of Rx queues.
+ * Allocates memory for the driver's private Rx queue data structure, and saves
+ * the physical and virtual addresses for later use.
+ *
+ * @returns zero or a non-zero error code on failure
+ */
+static int
+iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_rx_queue *que;
+ int i, error = 0;
+
+#ifdef INVARIANTS
+ if_softc_ctx_t scctx = vsi->shared;
+ MPASS(scctx->isc_nrxqsets > 0);
+ MPASS(nrxqs == 1);
+ MPASS(scctx->isc_nrxqsets == nrxqsets);
+#endif
+
+ /* Allocate queue structure memory */
+ if (!(vsi->rx_queues =
+ (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
+ nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ rxr->me = i;
+ que->vsi = vsi;
+
+ /* get the virtual and physical address of the hardware queues */
+ rxr->tail = IAVF_QRX_TAIL1(rxr->me);
+ rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
+ rxr->rx_paddr = paddrs[i * nrxqs];
+ rxr->que = que;
+ }
+
+ return (0);
+fail:
+ iavf_if_queues_free(ctx);
+ return (error);
+}
+
+/**
+ * iavf_if_queues_free - Free driver queue memory
+ * @ctx: the iflib context pointer
+ *
+ * Called by iflib to release memory allocated by the driver when setting up
+ * Tx and Rx queues.
+ *
+ * @remark The ordering of this function and iavf_if_detach is not guaranteed.
+ * It is possible for this function to be called either before or after
+ * iavf_if_detach. Thus, care must be taken to ensure that either ordering of
+ * iavf_if_detach and iavf_if_queues_free is safe.
+ */
+static void
+iavf_if_queues_free(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ if (!vsi->enable_head_writeback) {
+ struct iavf_tx_queue *que;
+ int i = 0;
+
+ for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ if (txr->tx_rsq != NULL) {
+ free(txr->tx_rsq, M_IAVF);
+ txr->tx_rsq = NULL;
+ }
+ }
+ }
+
+ if (vsi->tx_queues != NULL) {
+ free(vsi->tx_queues, M_IAVF);
+ vsi->tx_queues = NULL;
+ }
+ if (vsi->rx_queues != NULL) {
+ free(vsi->rx_queues, M_IAVF);
+ vsi->rx_queues = NULL;
+ }
+}
+
+/**
+ * iavf_check_aq_errors - Check for AdminQ errors
+ * @sc: device softc
+ *
+ * Check the AdminQ registers for errors, and determine whether or not a reset
+ * may be required to resolve them.
+ *
+ * @post if there are errors, the VF device will be stopped and a reset will
+ * be requested.
+ *
+ * @returns zero if there are no issues, EBUSY if the device is resetting,
+ * or EIO if there are any AQ errors.
+ */
+static int
+iavf_check_aq_errors(struct iavf_sc *sc)
+{
+ struct iavf_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u32 reg, oldreg;
+ u8 aq_error = false;
+
+ oldreg = reg = rd32(hw, hw->aq.arq.len);
+
+ /* Check if device is in reset */
+ if (reg == 0xdeadbeef || reg == 0xffffffff) {
+ device_printf(dev, "VF in reset\n");
+ return (EBUSY);
+ }
+
+ /* Check for Admin queue errors */
+ if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
+ device_printf(dev, "ARQ VF Error detected\n");
+ reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
+ aq_error = true;
+ }
+ if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
+ device_printf(dev, "ARQ Overflow Error detected\n");
+ reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
+ aq_error = true;
+ }
+ if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
+ device_printf(dev, "ARQ Critical Error detected\n");
+ reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
+ aq_error = true;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.arq.len, reg);
+
+ oldreg = reg = rd32(hw, hw->aq.asq.len);
+ if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
+ device_printf(dev, "ASQ VF Error detected\n");
+ reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
+ aq_error = true;
+ }
+ if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
+ device_printf(dev, "ASQ Overflow Error detected\n");
+ reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
+ aq_error = true;
+ }
+ if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ device_printf(dev, "ASQ Critical Error detected\n");
+ reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
+ aq_error = true;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.asq.len, reg);
+
+ return (aq_error ? EIO : 0);
+}
+
+/**
+ * iavf_process_adminq - Process adminq responses from the PF
+ * @sc: device softc
+ * @pending: output parameter indicating how many messages remain
+ *
+ * Process the adminq to handle replies from the PF over the virtchnl
+ * connection.
+ *
+ * @returns zero or an iavf_status code on failure
+ */
+static enum iavf_status
+iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+ struct iavf_arq_event_info event;
+ struct iavf_hw *hw = &sc->hw;
+ struct virtchnl_msg *v_msg;
+ int error = 0, loop = 0;
+ u32 reg;
+
+ if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ goto reenable_interrupt;
+ }
+
+ error = iavf_check_aq_errors(sc);
+ if (error) {
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ goto reenable_interrupt;
+ }
+
+ event.buf_len = IAVF_AQ_BUF_SZ;
+ event.msg_buf = sc->aq_buffer;
+ bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
+ v_msg = (struct virtchnl_msg *)&event.desc;
+
+ IAVF_VC_LOCK(sc);
+ /* clean and process any events */
+ do {
+ status = iavf_clean_arq_element(hw, &event, pending);
+ /*
+ * Also covers normal case when iavf_clean_arq_element()
+ * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
+ */
+ if (status)
+ break;
+ iavf_vc_completion(sc, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf, event.msg_len);
+ bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
+ } while (*pending && (loop++ < IAVF_ADM_LIMIT));
+ IAVF_VC_UNLOCK(sc);
+
+reenable_interrupt:
+ /* Re-enable admin queue interrupt cause */
+ reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
+ reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
+ wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);
+
+ return (status);
+}
+
+/**
+ * iavf_if_update_admin_status - Administrative status task
+ * @ctx: iflib context
+ *
+ * Called by iflib to handle administrative status events. The iavf driver
+ * uses this to process the adminq virtchnl messages outside of interrupt
+ * context.
+ */
+static void
+iavf_if_update_admin_status(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_hw *hw = &sc->hw;
+ u16 pending = 0;
+
+ iavf_process_adminq(sc, &pending);
+ iavf_update_link_status(sc);
+
+ /*
+ * If there are still messages to process, reschedule.
+ * Otherwise, re-enable the Admin Queue interrupt.
+ */
+ if (pending > 0)
+ iflib_admin_intr_deferred(ctx);
+ else
+ iavf_enable_adminq_irq(hw);
+}
+
+/**
+ * iavf_if_multi_set - Set multicast address filters
+ * @ctx: iflib context
+ *
+ * Called by iflib to update the current list of multicast filters for the
+ * device.
+ */
+static void
+iavf_if_multi_set(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+
+ iavf_multi_set(sc);
+}
+
+/**
+ * iavf_if_mtu_set - Set the device MTU
+ * @ctx: iflib context
+ * @mtu: MTU value to set
+ *
+ * Called by iflib to set the device MTU.
+ *
+ * @returns zero on success, or EINVAL if the MTU is invalid.
+ */
+static int
+iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ IOCTL_DEBUGOUT("ioctl: SiOCSIFMTU (Set Interface MTU)");
+ if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) {
+ device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n",
+ mtu, IAVF_MIN_MTU, IAVF_MAX_MTU);
+ return (EINVAL);
+ }
+
+ vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ ETHER_VLAN_ENCAP_LEN;
+
+ return (0);
+}
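+
+/*
+ * For example, the default MTU of 1500 yields a maximum frame size of
+ * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) + ETHER_VLAN_ENCAP_LEN (4),
+ * or 1522 bytes.
+ */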
+
+/**
+ * iavf_if_media_status - Report current media status
+ * @ctx: iflib context
+ * @ifmr: ifmedia request structure
+ *
+ * Called by iflib to report the current media status in the ifmr.
+ */
+static void
+iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+
+ iavf_media_status_common(sc, ifmr);
+}
+
+/**
+ * iavf_if_media_change - Change the current media settings
+ * @ctx: iflib context
+ *
+ * Called by iflib to change the current media settings.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_if_media_change(if_ctx_t ctx)
+{
+ return (iavf_media_change_common(iflib_get_ifp(ctx)));
+}
+
+/**
+ * iavf_if_promisc_set - Set device promiscuous mode
+ * @ctx: iflib context
+ * @flags: promiscuous configuration
+ *
+ * Called by iflib to request that the device enter promiscuous mode.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_if_promisc_set(if_ctx_t ctx, int flags)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+
+ return (iavf_config_promisc(sc, flags));
+}
+
+/**
+ * iavf_if_timer - Periodic timer called by iflib
+ * @ctx: iflib context
+ * @qid: The queue being triggered
+ *
+ * Called by iflib periodically as a timer task, so that the driver can handle
+ * periodic work.
+ *
+ * @remark this timer is only called while the interface is up, even if
+ * IFLIB_ADMIN_ALWAYS_RUN is set.
+ */
+static void
+iavf_if_timer(if_ctx_t ctx, uint16_t qid)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_hw *hw = &sc->hw;
+ u32 val;
+
+ if (qid != 0)
+ return;
+
+ /* Check for when PF triggers a VF reset */
+ val = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (val != VIRTCHNL_VFR_VFACTIVE
+ && val != VIRTCHNL_VFR_COMPLETED) {
+ iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
+ return;
+ }
+
+ /* Fire off the adminq task */
+ iflib_admin_intr_deferred(ctx);
+
+ /* Update stats */
+ iavf_request_stats(sc);
+}
+
+/**
+ * iavf_if_vlan_register - Register a VLAN
+ * @ctx: iflib context
+ * @vtag: the VLAN to register
+ *
+ * Register a VLAN filter for a given vtag.
+ */
+static void
+iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ /* Add VLAN 0 to list, for untagged traffic */
+ if (vsi->num_vlans == 0)
+ iavf_add_vlan_filter(sc, 0);
+
+ iavf_add_vlan_filter(sc, vtag);
+
+ ++vsi->num_vlans;
+
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+}
+
+/**
+ * iavf_if_vlan_unregister - Unregister a VLAN
+ * @ctx: iflib context
+ * @vtag: the VLAN to remove
+ *
+ * Unregister (remove) a VLAN filter for the given vtag.
+ */
+static void
+iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ int i = 0;
+
+ if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0)) /* Invalid */
+ return;
+
+ i = iavf_mark_del_vlan_filter(sc, vtag);
+ vsi->num_vlans -= i;
+
+ /* Remove VLAN filter 0 if the last VLAN is being removed */
+ if (vsi->num_vlans == 0)
+ i += iavf_mark_del_vlan_filter(sc, 0);
+
+ if (i > 0)
+ iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+}
+
+/**
+ * iavf_if_get_counter - Get network statistic counters
+ * @ctx: iflib context
+ * @cnt: The counter to obtain
+ *
+ * Called by iflib to obtain the value of the specified counter.
+ *
+ * @returns the uint64_t counter value.
+ */
+static uint64_t
+iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+ struct iavf_vsi *vsi = &sc->vsi;
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (vsi->ipackets);
+ case IFCOUNTER_IERRORS:
+ return (vsi->ierrors);
+ case IFCOUNTER_OPACKETS:
+ return (vsi->opackets);
+ case IFCOUNTER_OERRORS:
+ return (vsi->oerrors);
+ case IFCOUNTER_COLLISIONS:
+ /* Collisions are impossible in full-duplex 10G/40G Ethernet */
+ return (0);
+ case IFCOUNTER_IBYTES:
+ return (vsi->ibytes);
+ case IFCOUNTER_OBYTES:
+ return (vsi->obytes);
+ case IFCOUNTER_IMCASTS:
+ return (vsi->imcasts);
+ case IFCOUNTER_OMCASTS:
+ return (vsi->omcasts);
+ case IFCOUNTER_IQDROPS:
+ return (vsi->iqdrops);
+ case IFCOUNTER_OQDROPS:
+ return (vsi->oqdrops);
+ case IFCOUNTER_NOPROTO:
+ return (vsi->noproto);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
+/**
+ * iavf_free_pci_resources - Free PCI resources
+ * @sc: device softc
+ *
+ * Called to release the PCI resources allocated during attach. May be called
+ * in the error flow of attach_pre, or during detach as part of cleanup.
+ */
+static void
+iavf_free_pci_resources(struct iavf_sc *sc)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ struct iavf_rx_queue *rx_que = vsi->rx_queues;
+ device_t dev = sc->dev;
+
+ /* We may get here before stations are set up */
+ if (rx_que == NULL)
+ goto early;
+
+ /* Release all interrupts */
+ iflib_irq_free(vsi->ctx, &vsi->irq);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(vsi->ctx, &rx_que->que_irq);
+
+early:
+ if (sc->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->pci_mem), sc->pci_mem);
+}
+
+/**
+ * iavf_setup_interface - Setup the device interface
+ * @sc: device softc
+ *
+ * Called to set up some device interface settings, such as the ifmedia
+ * structure.
+ */
+static void
+iavf_setup_interface(struct iavf_sc *sc)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ if_ctx_t ctx = vsi->ctx;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+
+ iavf_dbg_init(sc, "begin\n");
+
+ vsi->shared->isc_max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ iavf_set_initial_baudrate(ifp);
+
+ ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
+}
+
+/**
+ * iavf_msix_adminq - Admin Queue interrupt handler
+ * @arg: void pointer to the device softc
+ *
+ * Interrupt handler for the non-queue interrupt causes. Primarily this will
+ * be the adminq interrupt, but also includes other miscellaneous causes.
+ *
+ * @returns FILTER_HANDLED; any adminq work is deferred to the VC taskqueue
+ * rather than scheduled onto an iflib filter thread.
+ */
+static int
+iavf_msix_adminq(void *arg)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg;
+ struct iavf_hw *hw = &sc->hw;
+ u32 reg, mask;
+
+ ++sc->admin_irq;
+
+ if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
+ return (FILTER_HANDLED);
+
+ reg = rd32(hw, IAVF_VFINT_ICR01);
+ /*
+ * For masking off interrupt causes that need to be handled before
+ * they can be re-enabled
+ */
+ mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);
+
+ /* Check on the cause */
+ if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
+ mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
+
+ /* Process messages outside of the iflib context lock */
+ taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
+ }
+
+ wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
+ iavf_enable_adminq_irq(hw);
+
+ return (FILTER_HANDLED);
+}
+
+/**
+ * iavf_enable_intr - Enable device interrupts
+ * @vsi: the main VSI
+ *
+ * Called to enable all queue interrupts.
+ */
+void
+iavf_enable_intr(struct iavf_vsi *vsi)
+{
+ struct iavf_hw *hw = vsi->hw;
+ struct iavf_rx_queue *que = vsi->rx_queues;
+
+ iavf_enable_adminq_irq(hw);
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ iavf_enable_queue_irq(hw, que->rxr.me);
+}
+
+/**
+ * iavf_disable_intr - Disable device interrupts
+ * @vsi: the main VSI
+ *
+ * Called to disable all interrupts
+ *
+ * @remark we never disable the admin status interrupt.
+ */
+void
+iavf_disable_intr(struct iavf_vsi *vsi)
+{
+ struct iavf_hw *hw = vsi->hw;
+ struct iavf_rx_queue *que = vsi->rx_queues;
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ iavf_disable_queue_irq(hw, que->rxr.me);
+}
+
+/**
+ * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
+ * @hw: hardware structure
+ * @id: IRQ vector to enable
+ *
+ * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
+ */
+static void
+iavf_enable_queue_irq(struct iavf_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
+}
+
+/**
+ * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
+ * @hw: hardware structure
+ * @id: IRQ vector to disable
+ *
+ * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
+ */
+static void
+iavf_disable_queue_irq(struct iavf_hw *hw, int id)
+{
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+ rd32(hw, IAVF_VFGEN_RSTAT);
+}
+
+/**
+ * iavf_configure_itr - Get initial ITR values from tunable values.
+ * @sc: device softc
+ *
+ * Load the initial tunable values for the ITR configuration.
+ */
+static void
+iavf_configure_itr(struct iavf_sc *sc)
+{
+ iavf_configure_tx_itr(sc);
+ iavf_configure_rx_itr(sc);
+}
+
+/**
+ * iavf_set_queue_rx_itr - Update Rx ITR value
+ * @que: Rx queue to update
+ *
+ * Provides an update to the queue Rx interrupt moderation value.
+ */
+static void
+iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
+{
+ struct iavf_vsi *vsi = que->vsi;
+ struct iavf_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
+ que->rxr.me), rxr->itr);
+ }
+}
+
+/**
+ * iavf_msix_que - Main Rx queue interrupt handler
+ * @arg: void pointer to the Rx queue
+ *
+ * Main MSI-X interrupt handler for Rx queue interrupts
+ *
+ * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
+ * otherwise returns FILTER_HANDLED.
+ */
+static int
+iavf_msix_que(void *arg)
+{
+ struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
+ struct iavf_sc *sc = rx_que->vsi->back;
+
+ ++rx_que->irqs;
+
+ if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
+ return (FILTER_HANDLED);
+
+ iavf_set_queue_rx_itr(rx_que);
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+/**
+ * iavf_update_link_status - Update iflib Link status
+ * @sc: device softc
+ *
+ * Notify the iflib stack of changes in link status. Called after the device
+ * receives a virtchnl message indicating a change in link status.
+ */
+void
+iavf_update_link_status(struct iavf_sc *sc)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ u64 baudrate;
+
+ if (sc->link_up){
+ if (vsi->link_active == FALSE) {
+ vsi->link_active = TRUE;
+ baudrate = iavf_baudrate_from_link_speed(sc);
+ iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate);
+ iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ vsi->link_active = FALSE;
+ iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
+ }
+ }
+}
+
+/**
+ * iavf_stop - Stop the interface
+ * @sc: device softc
+ *
+ * This routine disables all traffic on the adapter by disabling interrupts
+ * and sending a message to the PF to tell it to stop the hardware
+ * Tx/Rx LAN queues.
+ */
+static void
+iavf_stop(struct iavf_sc *sc)
+{
+ struct ifnet *ifp;
+
+ ifp = sc->vsi.ifp;
+
+ iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);
+
+ iavf_disable_intr(&sc->vsi);
+
+ iavf_disable_queues_with_retries(sc);
+}
+
+/**
+ * iavf_if_stop - iflib stop handler
+ * @ctx: iflib context
+ *
+ * Call iavf_stop to stop the interface.
+ */
+static void
+iavf_if_stop(if_ctx_t ctx)
+{
+ struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
+
+ iavf_stop(sc);
+}
+
+/**
+ * iavf_del_mac_filter - Delete a MAC filter
+ * @sc: device softc
+ * @macaddr: MAC address to remove
+ *
+ * Marks a MAC filter for deletion.
+ *
+ * @returns zero if the filter existed, or ENOENT if it did not.
+ */
+static int
+iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
+{
+ struct iavf_mac_filter *f;
+
+ f = iavf_find_mac_filter(sc, macaddr);
+ if (f == NULL)
+ return (ENOENT);
+
+ f->flags |= IAVF_FILTER_DEL;
+ return (0);
+}
+
+/**
+ * iavf_init_tx_rsqs - Initialize Report Status array
+ * @vsi: the main VSI
+ *
+ * Set the Report Status queue fields to zero in order to initialize the
+ * queues for transmit.
+ */
+void
+iavf_init_tx_rsqs(struct iavf_vsi *vsi)
+{
+ if_softc_ctx_t scctx = vsi->shared;
+ struct iavf_tx_queue *tx_que;
+ int i, j;
+
+ for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+
+ txr->tx_rs_cidx = txr->tx_rs_pidx;
+
+ /* Initialize the last processed descriptor to be the end of
+ * the ring, rather than the start, so that we avoid an
+ * off-by-one error when calculating how many descriptors are
+ * done in the credits_update function.
+ */
+ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
+
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ }
+}
+
+/**
+ * iavf_init_tx_cidx - Initialize Tx cidx values
+ * @vsi: the main VSI
+ *
+ * Initialize the tx_cidx_processed values for Tx queues in order to
+ * initialize the Tx queues for transmit.
+ */
+void
+iavf_init_tx_cidx(struct iavf_vsi *vsi)
+{
+ if_softc_ctx_t scctx = vsi->shared;
+ struct iavf_tx_queue *tx_que;
+ int i;
+
+ for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+
+ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
+ }
+}
+
+/**
+ * iavf_add_device_sysctls - Add device sysctls for configuration
+ * @sc: device softc
+ *
+ * Add the main sysctl nodes and sysctls for device configuration.
+ */
+static void
+iavf_add_device_sysctls(struct iavf_sc *sc)
+{
+ struct iavf_vsi *vsi = &sc->vsi;
+ device_t dev = sc->dev;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *debug_list;
+
+ iavf_add_device_sysctls_common(sc);
+
+ debug_list = iavf_create_debug_sysctl_tree(sc);
+
+ iavf_add_debug_sysctls_common(sc, debug_list);
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
+
+#ifdef IAVF_DEBUG
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
+ sc, 0, iavf_sysctl_vf_reset, "I", "Request a VF reset from PF");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
+ sc, 0, iavf_sysctl_vflr_reset, "I", "Request a VFLR reset from HW");
+#endif
+
+ /* Add stats sysctls */
+ iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi");
+
+ iavf_add_queues_sysctls(dev, vsi);
+}
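+
+/*
+ * For illustration, assuming the debug tree created by
+ * iavf_create_debug_sysctl_tree() is named "debug", the mapping added above
+ * for unit 0 could be read from userland as:
+ *
+ *   sysctl dev.iavf.0.debug.queue_interrupt_table
+ */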
+
+/**
+ * iavf_add_queues_sysctls - Add per-queue sysctls
+ * @dev: device pointer
+ * @vsi: the main VSI
+ *
+ * Add sysctls for each Tx and Rx queue.
+ */
+void
+iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *vsi_list, *queue_list;
+ struct sysctl_oid *queue_node;
+ char queue_namebuf[32];
+
+ struct iavf_rx_queue *rx_que;
+ struct iavf_tx_queue *tx_que;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_rx_queues; q++) {
+ bzero(queue_namebuf, sizeof(queue_namebuf));
+ snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ rx_que = &(vsi->rx_queues[q]);
+ rxr = &(rx_que->rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(rx_que->irqs),
+ "irqs on this queue (both Tx and Rx)");
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
+ CTLFLAG_RD, &(rxr->desc_errs),
+ "Queue Rx Descriptor Errors");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ }
+ for (int q = 0; q < vsi->num_tx_queues; q++) {
+ bzero(queue_namebuf, sizeof(queue_namebuf));
+ snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ tx_que = &(vsi->tx_queues[q]);
+ txr = &(tx_que->txr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
+ CTLFLAG_RD, &(tx_que->tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
+ CTLFLAG_RD, &(txr->mss_too_small),
+ "TSO sends with an MSS less than 64");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
+ CTLFLAG_RD, &(txr->tx_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+ }
+}
+
+/**
+ * iavf_driver_is_detaching - Check if the driver is detaching/unloading
+ * @sc: device private softc
+ *
+ * @returns true if the driver is detaching, false otherwise.
+ *
+ * @remark on newer kernels, take advantage of iflib_in_detach in order to
+ * report detachment correctly as early as possible.
+ *
+ * @remark this function is used by various code paths that want to avoid
+ * running if the driver is about to be removed. This includes sysctls and
+ * other driver access points. Note that it does not fully resolve
+ * detach-based race conditions as it is possible for a thread to race with
+ * iflib_in_detach.
+ */
+bool
+iavf_driver_is_detaching(struct iavf_sc *sc)
+{
+ return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
+ iflib_in_detach(sc->vsi.ctx));
+}
+
+/**
+ * iavf_sysctl_queue_interrupt_table - Sysctl for displaying Tx queue mapping
+ * @oidp: sysctl oid structure
+ * @arg1: void pointer to device softc
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Print out the mapping of Tx and Rx queue indexes to MSI-X vectors.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ struct iavf_vsi *vsi = &sc->vsi;
+ device_t dev = sc->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ struct iavf_rx_queue *rx_que;
+ struct iavf_tx_queue *tx_que;
+
+ UNREFERENCED_2PARAMETER(arg2, oidp);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ for (int i = 0; i < vsi->num_rx_queues; i++) {
+ rx_que = &vsi->rx_queues[i];
+ sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
+ }
+ for (int i = 0; i < vsi->num_tx_queues; i++) {
+ tx_que = &vsi->tx_queues[i];
+ sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+#ifdef IAVF_DEBUG
+#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
+
+/**
+ * iavf_sysctl_vf_reset - Request a VF reset
+ * @oidp: sysctl oid pointer
+ * @arg1: void pointer to device softc
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Request a VF reset for the device.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ int do_reset = 0, error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ error = sysctl_handle_int(oidp, &do_reset, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (do_reset == 1) {
+ iavf_reset(sc);
+ if (CTX_ACTIVE(sc->vsi.ctx))
+ iflib_request_reset(sc->vsi.ctx);
+ }
+
+ return (error);
+}
+
+/**
+ * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
+ * @oidp: sysctl oid pointer
+ * @arg1: void pointer to device softc
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Sysctl callback to trigger a PCIe FLR.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+static int
+iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
+{
+ struct iavf_sc *sc = (struct iavf_sc *)arg1;
+ device_t dev = sc->dev;
+ int do_reset = 0, error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (iavf_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ error = sysctl_handle_int(oidp, &do_reset, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (do_reset == 1) {
+ if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
+ device_printf(dev, "PCIE FLR failed\n");
+ error = EIO;
+ }
+ else if (CTX_ACTIVE(sc->vsi.ctx))
+ iflib_request_reset(sc->vsi.ctx);
+ }
+
+ return (error);
+}
+#undef CTX_ACTIVE
+#endif
diff --git a/sys/dev/iavf/virtchnl.h b/sys/dev/iavf/virtchnl.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iavf/virtchnl.h
@@ -0,0 +1,991 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2021, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
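+
+/* As a sketch of the order described above, a typical bring-up exchanges:
+ *
+ *   VIRTCHNL_OP_VERSION            (validate the PF API version)
+ *   VIRTCHNL_OP_RESET_VF           (request a reset; no response is sent)
+ *   VIRTCHNL_OP_GET_VF_RESOURCES   (learn VSIs, queues, and capabilities)
+ *   VIRTCHNL_OP_CONFIG_VSI_QUEUES  (configure Tx/Rx queue pairs)
+ *   VIRTCHNL_OP_CONFIG_IRQ_MAP     (map queues to interrupt vectors)
+ *   VIRTCHNL_OP_ENABLE_QUEUES      (begin processing traffic)
+ */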
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcode 34 is reserved */
+ /* opcodes 39, 40, 41, 42 and 43 are reserved */
+ /* opcodes 44, 45, 46, 47, 48 and 49 are reserved */
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
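+
+/* Illustrative sketch (hypothetical struct, not part of this header): a size
+ * mismatch trips these macros at compile time because the enum initializer
+ * then divides by zero.
+ *
+ *	struct example_two_bytes { u8 a; u8 b; };
+ *	VIRTCHNL_CHECK_STRUCT_LEN(2, example_two_bytes);  // OK: sizeof == 2
+ *	VIRTCHNL_CHECK_STRUCT_LEN(4, example_two_bytes);  // error: (4)/0
+ */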
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
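+
+/* A minimal sketch of the negotiation rule above (pf_reply and dev are
+ * assumed caller state; pf_reply would hold the PF's VIRTCHNL_OP_VERSION
+ * response):
+ *
+ *	if (pf_reply.major != VIRTCHNL_VERSION_MAJOR)
+ *		return (EINVAL);	// major mismatch: VF cannot operate
+ *	if (pf_reply.minor != VIRTCHNL_VERSION_MINOR)
+ *		device_printf(dev, "PF/VF minor version mismatch\n");
+ */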
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use the VIRTCHNL interface for VF-PF communication.
+ * The VIRTCHNL_VSI_SRIOV vsi_type should always be 6 for backward
+ * compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
+#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
+ /* 0X04000000, 0X08000000 and 0X10000000 are reserved */
+ /* 0X40000000 is reserved */
+ /* 0X80000000 is reserved */
+
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
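+
+/* A minimal sketch of a 1.1 resource request per the comment above: the
+ * payload is just the capability bitmap (iavf_send_pf_msg() is a
+ * hypothetical transport helper, not part of this header).
+ *
+ *	u32 caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_VLAN |
+ *	    VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ *
+ *	iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
+ *	    (u8 *)&caps, sizeof(caps));
+ */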
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF: setting it to 1
+ * disables CRC stripping for each queue where it is set. The
+ * VIRTCHNL_VF_OFFLOAD_CRC offload must have been negotiated before
+ * this message is sent, or the PF will ignore the request. The flag
+ * should be set identically for all of a VF's queues.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u8 crc_disable;
+ u8 pad1[3];
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
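+
+/* Sizing note, with a minimal sketch: the validator below expects a message
+ * carrying N pairs to be sizeof(struct virtchnl_vsi_queue_config_info) plus
+ * N * sizeof(struct virtchnl_queue_pair_info), even though qpair[] already
+ * declares one element (malloc() stands in for the caller's allocator;
+ * n and vsi_id are assumed caller state).
+ *
+ *	size_t len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ *	    n * sizeof(struct virtchnl_queue_pair_info);
+ *	struct virtchnl_vsi_queue_config_info *vqc = malloc(len);
+ *
+ *	vqc->vsi_id = vsi_id;
+ *	vqc->num_queue_pairs = n;
+ *	// fill vqc->qpair[0..n-1], then send len bytes to the PF
+ */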
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init, but any
+ * additional queues must be negotiated. This is a best-effort request, as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
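+
+/* A minimal sketch honoring the vector 0 rule above: queue 0's RX and TX
+ * interrupts go to MSI-X vector 1 (vsi_id is assumed caller state).
+ *
+ *	struct virtchnl_irq_map_info info = {0};
+ *
+ *	info.num_vectors = 1;
+ *	info.vecmap[0].vsi_id = vsi_id;
+ *	info.vecmap[0].vector_id = 1;		// vector 0 is for "other" causes
+ *	info.vecmap[0].rxq_map = BIT(0);	// bitmap of RX queues
+ *	info.vecmap[0].txq_map = BIT(0);	// bitmap of TX queues
+ */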
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
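+
+/* A minimal sketch: enable the first num_queues TX/RX pairs by setting the
+ * matching bits (num_queues and vsi_id are assumed caller state).
+ *
+ *	struct virtchnl_queue_select qsel = {0};
+ *
+ *	qsel.vsi_id = vsi_id;
+ *	qsel.rx_queues = (u32)((1UL << num_queues) - 1);
+ *	qsel.tx_queues = qsel.rx_queues;
+ */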
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
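+
+/* Sizing note, as a short sketch: the validator below counts key[]/lut[]
+ * from its first byte, so a key of key_len bytes travels in
+ * sizeof(struct virtchnl_rss_key) + key_len - 1 bytes (the 52-byte length
+ * and malloc() here are assumptions, not requirements of this header).
+ *
+ *	u16 key_len = 52;
+ *	size_t len = sizeof(struct virtchnl_rss_key) + key_len - 1;
+ *	struct virtchnl_rss_key *vrk = malloc(len);
+ *
+ *	vrk->vsi_id = vsi_id;
+ *	vrk->key_len = key_len;
+ *	// copy key_len bytes of hash key into vrk->key, then send len bytes
+ */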
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* These are used by the PF driver to enforce how many channels can be
+ * supported. When the ADQ_V2 capability is negotiated, up to 16 channels are
+ * allowed; otherwise the PF driver allows at most 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* vlan_prio is part of this 16 bit field even from the OS perspective:
+ * bits 11..0 hold the actual vlan_id and bits 14..12 hold vlan_prio.
+ * If vlan_prio offload is decided on in the future, pass that
+ * information in bits 14..12 of the "vlan_id" field.
+ */
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+ VIRTCHNL_UDP_V4_FLOW,
+ VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event; otherwise use link_event_adv
+ * to get the speed and link information. The ability to understand
+ * new speeds is indicated by the VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+ * flag in the vf_cap_flags field of struct virtchnl_vf_resource,
+ * which can be used to determine which link event struct to use
+ * below.
+ */
+ struct {
+ enum virtchnl_link_speed link_speed;
+ u8 link_status;
+ } link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
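+
+/* A minimal sketch of consuming a link-change event per the comment above
+ * (adv_speed_cap is assumed to be derived from vf_cap_flags at resource
+ * negotiation time; event points at a received struct virtchnl_pf_event):
+ *
+ *	if (event->event == VIRTCHNL_EVENT_LINK_CHANGE) {
+ *		if (adv_speed_cap) {
+ *			link_up = event->event_data.link_event_adv.link_status;
+ *			mbps = event->event_data.link_event_adv.link_speed;
+ *		} else {
+ *			link_up = event->event_data.link_event.link_status;
+ *			speed = event->event_data.link_event.link_speed;
+ *		}
+ *	}
+ */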
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+/* Since VF messages are limited to a u16 in size, precalculate the maximum
+ * number of nested elements in the variable-length virtchnl structures that
+ * the virtual channel can handle in a single message.
+ */
+enum virtchnl_vector_limits {
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+ sizeof(struct virtchnl_queue_pair_info),
+
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+ sizeof(struct virtchnl_vector_map),
+
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr),
+
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+ sizeof(u16),
+
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
+ sizeof(struct virtchnl_iwarp_qv_info),
+
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+ sizeof(struct virtchnl_channel_info),
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
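+
+/* A minimal polling sketch for the reset flow described under
+ * VIRTCHNL_OP_RESET_VF (rd32(), the VFGEN_RSTAT offset, and the delay
+ * helper are hypothetical register-access details, not part of this
+ * header):
+ *
+ *	u32 rstat;
+ *
+ *	do {
+ *		delay_msec(10);
+ *		rstat = rd32(hw, VFGEN_RSTAT) & 0x3;	// DEADBEEF masks to 3
+ *	} while (rstat != VIRTCHNL_VFR_COMPLETED &&
+ *	    rstat != VIRTCHNL_VFR_VFACTIVE);
+ */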
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ u32 valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+
+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+
+ if (vimi->num_vectors == 0 || vimi->num_vectors >
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ if (veal->num_elements == 0 || veal->num_elements >
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+
+ if (vfl->num_elements == 0 || vfl->num_elements >
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += vfl->num_elements * sizeof(u16);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+
+ if (qv->num_vectors == 0 || qv->num_vectors >
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len == 0) {
+ /* zero length is allowed as input */
+ break;
+ }
+
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries == 0) {
+ /* zero entries is allowed as input */
+ break;
+ }
+
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+
+ if (vti->num_tc == 0 || vti->num_tc >
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
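+
+/* A minimal sketch of use on the receiving side: validate before dispatch
+ * (the helpers named here are hypothetical, not part of this header).
+ *
+ *	int err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode,
+ *	    msg, msglen);
+ *	if (err)
+ *		send_vf_err_reply(vf, v_opcode, err);
+ *	else
+ *		dispatch_vf_op(vf, v_opcode, msg, msglen);
+ */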
+#endif /* _VIRTCHNL_H_ */
diff --git a/sys/modules/iavf/Makefile b/sys/modules/iavf/Makefile
--- a/sys/modules/iavf/Makefile
+++ b/sys/modules/iavf/Makefile
@@ -1,17 +1,18 @@
#$FreeBSD$
-.PATH: ${SRCTOP}/sys/dev/ixl
+.PATH: ${SRCTOP}/sys/dev/iavf
KMOD = if_iavf
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h opt_global.h
-SRCS += if_iavf.c iavf_vc.c ixl_txrx.c i40e_osdep.c
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
+SRCS += if_iavf_iflib.c iavf_lib.c iavf_osdep.c iavf_txrx_iflib.c
+SRCS += iavf_vc_common.c iavf_vc_iflib.c
# Shared source
-SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
+SRCS += iavf_adminq.c iavf_common.c
# Debug messages / sysctls
-# CFLAGS += -DIXL_DEBUG
+# CFLAGS += -DIAVF_DEBUG
# Enable asserts and other debugging facilities
# CFLAGS += -DINVARIANTS -DINVARIANTS_SUPPORT -DWITNESS
