D30729.diff

diff --git a/share/man/man4/man4.aarch64/Makefile b/share/man/man4/man4.aarch64/Makefile
--- a/share/man/man4/man4.aarch64/Makefile
+++ b/share/man/man4/man4.aarch64/Makefile
@@ -11,6 +11,7 @@
aw_spi.4 \
aw_syscon.4 \
bcm283x_pwm.4 \
+ enetc.4 \
rk_gpio.4 \
rk_grf.4 \
rk_i2c.4 \
diff --git a/share/man/man4/man4.aarch64/enetc.4 b/share/man/man4/man4.aarch64/enetc.4
new file mode 100644
--- /dev/null
+++ b/share/man/man4/man4.aarch64/enetc.4
@@ -0,0 +1,69 @@
+.\" -
+.\" SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+.\"
+.\" Copyright (c) 2021 Alstom Group.
+.\" Copyright (c) 2021 Semihalf.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd June 11, 2021
+.Dt ENETC 4
+.Os
+.Sh NAME
+.Nm enetc
+.Nd "Freescale ENETC PCIe Gigabit Ethernet driver"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your kernel configuration file:
+.sp
+.Cd "options SOC_NXP_LS"
+.Cd "device pci"
+.Cd "device fdt"
+.Cd "device iflib"
+.Cd "device enetc"
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for the ENETC Gigabit Ethernet NIC found in the
+NXP LS1028A SoC.
+.Xr iflib 9
+is used to communicate with the rest of the kernel.
+Both physical ports and virtual interfaces connected to the internal
+switch are supported.
+.Pp
+The following hardware offloads have been implemented in this version
+of the driver:
+.Bl -bullet -compact
+.It
+Receive IP checksum validation.
+.It
+VLAN tag insertion and extraction.
+.It
+VLAN tag based packet filtering.
+.El
+.Pp
+For more information about configuring this device, refer to
+.Xr ifconfig 8 .
+.Sh SEE ALSO
+.Xr vlan 4 ,
+.Xr ifconfig 8 ,
+.Xr iflib 9
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 14.0 .
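
A minimal usage sketch, assuming the interface attaches as enetc0 (hypothetical name; address and MTU chosen for illustration): the offloads listed in enetc.4 map to the standard ifconfig(8) capability flags, e.g.

    ifconfig enetc0 rxcsum vlanhwtag vlanhwfilter
    ifconfig enetc0 inet 192.0.2.1/24 mtu 9000 up
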
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
--- a/sys/arm64/conf/GENERIC
+++ b/sys/arm64/conf/GENERIC
@@ -272,6 +272,7 @@
# PCI/PCI-X/PCIe Ethernet NICs that use iflib infrastructure
device iflib
+device enetc # NXP Gigabit NIC
device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel 10Gb Ethernet Family
device vmx # VMware VMXNET3 Ethernet
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -175,6 +175,9 @@
dev/dwc/if_dwc.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10
dev/dwc/if_dwc_if.m optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10
+dev/enetc/enetc_mdio.c optional enetc soc_nxp_ls
+dev/enetc/if_enetc.c optional enetc iflib pci fdt soc_nxp_ls
+
dev/gpio/pl061.c optional pl061 gpio
dev/gpio/pl061_acpi.c optional pl061 gpio acpi
dev/gpio/pl061_fdt.c optional pl061 gpio fdt
diff --git a/sys/dev/enetc/enetc.h b/sys/dev/enetc/enetc.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/enetc/enetc.h
@@ -0,0 +1,160 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Alstom Group.
+ * Copyright (c) 2021 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENETC_H_
+#define _ENETC_H_
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+
+#include <dev/enetc/enetc_hw.h>
+
+struct enetc_softc;
+struct enetc_rx_queue {
+ struct enetc_softc *sc;
+ uint16_t qid;
+
+ union enetc_rx_bd *ring;
+ uint64_t ring_paddr;
+
+ struct if_irq irq;
+ bool enabled;
+};
+
+struct enetc_tx_queue {
+ struct enetc_softc *sc;
+
+ union enetc_tx_bd *ring;
+ uint64_t ring_paddr;
+
+ qidx_t next_to_clean;
+ bool ring_full;
+
+ struct if_irq irq;
+};
+
+struct enetc_ctrl_queue {
+ qidx_t pidx;
+
+ struct iflib_dma_info dma;
+ struct enetc_cbd *ring;
+
+ struct if_irq irq;
+};
+
+struct enetc_softc {
+ device_t dev;
+
+ if_ctx_t ctx;
+ if_softc_ctx_t shared;
+#define tx_num_queues shared->isc_ntxqsets
+#define rx_num_queues shared->isc_nrxqsets
+#define tx_queue_size shared->isc_ntxd[0]
+#define rx_queue_size shared->isc_nrxd[0]
+
+ struct resource *regs;
+
+ device_t miibus;
+
+ struct enetc_tx_queue *tx_queues;
+ struct enetc_rx_queue *rx_queues;
+ struct enetc_ctrl_queue ctrl_queue;
+
+ /* Default RX queue configuration. */
+ uint32_t rbmr;
+ /*
+ * Hardware VLAN hash based filtering uses a 64-bit bitmap.
+ * We need to know how many VIDs hash to a given position to
+ * know when the bit can be cleared in the bitmap.
+ */
+#define VLAN_BITMAP_SIZE 64
+ uint8_t vlan_bitmap[VLAN_BITMAP_SIZE];
+
+ struct if_irq admin_irq;
+ int phy_addr;
+
+ struct ifmedia fixed_ifmedia;
+ bool fixed_link;
+};
+
+#define ENETC_RD4(sc, reg) \
+ bus_read_4((sc)->regs, reg)
+#define ENETC_WR4(sc, reg, value) \
+ bus_write_4((sc)->regs, reg, value)
+
+#define ENETC_PORT_RD8(sc, reg) \
+ bus_read_8((sc)->regs, ENETC_PORT_BASE + (reg))
+#define ENETC_PORT_RD4(sc, reg) \
+ bus_read_4((sc)->regs, ENETC_PORT_BASE + (reg))
+#define ENETC_PORT_WR4(sc, reg, value) \
+ bus_write_4((sc)->regs, ENETC_PORT_BASE + (reg), value)
+#define ENETC_PORT_RD2(sc, reg) \
+ bus_read_2((sc)->regs, ENETC_PORT_BASE + (reg))
+#define ENETC_PORT_WR2(sc, reg, value) \
+ bus_write_2((sc)->regs, ENETC_PORT_BASE + (reg), value)
+
+#define ENETC_TXQ_RD4(sc, q, reg) \
+ ENETC_RD4((sc), ENETC_BDR(TX, q, reg))
+#define ENETC_TXQ_WR4(sc, q, reg, value) \
+ ENETC_WR4((sc), ENETC_BDR(TX, q, reg), value)
+#define ENETC_RXQ_RD4(sc, q, reg) \
+ ENETC_RD4((sc), ENETC_BDR(RX, q, reg))
+#define ENETC_RXQ_WR4(sc, q, reg, value) \
+ ENETC_WR4((sc), ENETC_BDR(RX, q, reg), value)
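+
+/*
+ * Example: ENETC_TXQ_WR4(sc, 1, ENETC_TBMR, v) performs a 4-byte write at
+ * register offset 0x8000 + 1 * 0x200 + 0x0 = 0x8200 (see ENETC_BDR() in
+ * enetc_hw.h).
+ */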
+
+/* Device constants */
+
+#define ENETC_MAX_FRAME_LEN 9600
+
+#define ENETC_MAX_QUEUES 4
+
+/* Maximum supported number of descriptors per frame. */
+#define ENETC_MAX_SCATTER 15
+
+/*
+ * Up to 4096 transmit/receive descriptors are supported;
+ * their number has to be a multiple of 64.
+ */
+#define ENETC_MIN_DESC 64
+#define ENETC_MAX_DESC 4096
+#define ENETC_DEFAULT_DESC 512
+#define ENETC_DESC_ALIGN 64
+
+/* Rings have to be 128B aligned. */
+#define ENETC_RING_ALIGN 128
+
+#define ENETC_MSIX_COUNT 32
+
+#define ENETC_RX_INTR_PKT_THR 16
+
+/* Rx threshold irq timeout, 100us */
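+/* With ENETC_CLK at 400 MHz this works out to 40000 clock cycles. */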
+#define ENETC_RX_INTR_TIME_THR ((100ULL * ENETC_CLK) / 1000000ULL)
+
+#define ENETC_RX_IP_ALIGN 2
+
+#endif
diff --git a/sys/dev/enetc/enetc_hw.h b/sys/dev/enetc/enetc_hw.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/enetc/enetc_hw.h
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+#ifndef _ENETC_HW_H_
+#define _ENETC_HW_H_
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+
+#define BIT(x) (1UL << (x))
+#define GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h))))
+#define ilog2(x) (flsl(x) - 1)
+
+#define PCI_VENDOR_FREESCALE 0x1957
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF 0xe100
+#define ENETC_DEV_ID_VF 0xef00
+#define ENETC_DEV_ID_PTP 0xee02
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS 0
+
+/** SI regs, offset: 0h */
+#define ENETC_SIMR 0
+#define ENETC_SIMR_EN BIT(31)
+#define ENETC_SIMR_DRXG BIT(16)
+#define ENETC_SIMR_RSSE BIT(0)
+#define ENETC_SICTR0 0x18
+#define ENETC_SICTR1 0x1c
+#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_PSFP BIT(9)
+#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR1 0x24
+#define ENETC_SITGTGR 0x30
+#define ENETC_SIRBGCR 0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0 0x40
+#define ENETC_SICAR1 0x44
+#define ENETC_SICAR2 0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT 0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT 0x00006727
+#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0 0x80
+#define ENETC_SIPMAR1 0x84
+
+/* VF-PF Message passing */
+#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
+
+#define ENETC_PSIMSGRR 0x204
+#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8)
+
+#define ENETC_VSIMSGSR 0x204 /* RO */
+#define ENETC_VSIMSGSR_MB BIT(0)
+#define ENETC_VSIMSGSR_MS BIT(1)
+#define ENETC_VSIMSGSNDAR0 0x210
+#define ENETC_VSIMSGSNDAR1 0x214
+
+#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
+#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
+
+/* SI statistics */
+#define ENETC_SIROCT 0x300
+#define ENETC_SIRFRM 0x308
+#define ENETC_SIRUCA 0x310
+#define ENETC_SIRMCA 0x318
+#define ENETC_SITOCT 0x320
+#define ENETC_SITFRM 0x328
+#define ENETC_SITUCA 0x330
+#define ENETC_SITMCA 0x338
+#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200)
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR 0x800
+#define ENETC_SICBDRMR_EN BIT(31)
+#define ENETC_SICBDRSR 0x804 /* RO */
+#define ENETC_SICBDRBAR0 0x810
+#define ENETC_SICBDRBAR1 0x814
+#define ENETC_SICBDRPIR 0x818
+#define ENETC_SICBDRCIR 0x81c
+#define ENETC_SICBDRLENR 0x820
+
+#define ENETC_SICAPR0 0x900
+#define ENETC_SICAPR1 0x904
+
+#define ENETC_PSIIER 0xa00
+#define ENETC_PSIIER_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIIDR 0xa08
+#define ENETC_SITXIDR 0xa18
+#define ENETC_SIRXIDR 0xa28
+#define ENETC_SIMSIVR 0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR 0xe28
+
+#define ENETC_SIRFSCAPR 0x1200
+#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
+#define ENETC_SIRSSCAPR 0x1600
+#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+
+/** SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i) ((i) * 0x200)
+#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
+/* RX BDR reg offsets */
+#define ENETC_RBMR 0
+#define ENETC_RBMR_AL BIT(0)
+#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_VTE BIT(5)
+#define ENETC_RBMR_EN BIT(31)
+#define ENETC_RBSR 0x4
+#define ENETC_RBBSR 0x8
+#define ENETC_RBCIR 0xc
+#define ENETC_RBBAR0 0x10
+#define ENETC_RBBAR1 0x14
+#define ENETC_RBPIR 0x18
+#define ENETC_RBLENR 0x20
+#define ENETC_RBIER 0xa0
+#define ENETC_RBIER_RXTIE BIT(0)
+#define ENETC_RBIDR 0xa4
+#define ENETC_RBICR0 0xa8
+#define ENETC_RBICR0_ICEN BIT(31)
+#define ENETC_RBICR0_ICPT_MASK 0x1ff
+#define ENETC_RBICR0_SET_ICPT(n) ((n) & ENETC_RBICR0_ICPT_MASK)
+#define ENETC_RBICR1 0xac
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR 0
+#define ENETC_TBSR_BUSY BIT(0)
+#define ENETC_TBMR_VIH BIT(9)
+#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
+#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK)
+#define ENETC_TBMR_EN BIT(31)
+#define ENETC_TBSR 0x4
+#define ENETC_TBBAR0 0x10
+#define ENETC_TBBAR1 0x14
+#define ENETC_TBPIR 0x18
+#define ENETC_TBCIR 0x1c
+#define ENETC_TBCIR_IDX_MASK 0xffff
+#define ENETC_TBLENR 0x20
+#define ENETC_TBIER 0xa0
+#define ENETC_TBIER_TXT BIT(0)
+#define ENETC_TBIER_TXF BIT(1)
+#define ENETC_TBIDR 0xa4
+#define ENETC_TBICR0 0xa8
+#define ENETC_TBICR0_ICEN BIT(31)
+#define ENETC_TBICR0_ICPT_MASK 0xf
+#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK)
+#define ENETC_TBICR1 0xac
+
+#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE 0x10000
+#define ENETC_PMR 0x0000
+#define ENETC_PMR_SI0EN BIT(16)
+#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8)
+#define ENETC_PMR_PSPEED_10M 0
+#define ENETC_PMR_PSPEED_100M BIT(8)
+#define ENETC_PMR_PSPEED_1000M BIT(9)
+#define ENETC_PMR_PSPEED_2500M BIT(10)
+#define ENETC_PSR 0x0004 /* RO */
+#define ENETC_PSIPMR 0x0018
+#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16)
+#define ENETC_PSIPVMR 0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL 0x7
+#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR 0x0208
+#define ENETC_PCVLANR1 0x0210
+#define ENETC_PCVLANR2 0x0214
+#define ENETC_VLAN_TYPE_C BIT(0)
+#define ENETC_VLAN_TYPE_S BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val) ((uint32_t)(val) << 12)
+#define ENETC_PTXMBAR 0x0608
+#define ENETC_PCAPR0 0x0900
+#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR1 0x0904
+#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE BIT(12)
+#define ENETC_PSICFGR0_SIVIE BIT(14)
+#define ENETC_PSICFGR0_ASE BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_CBSE BIT(31)
+#define ENETC_CBS_BW_MASK GENMASK(6, 0)
+#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
+#define ENETC_PSIVLANFMR 0x1700
+#define ENETC_PSIVLANFMR_VS BIT(0)
+#define ENETC_PRFSMR 0x1800
+#define ENETC_PRFSMR_RFSE BIT(31)
+#define ENETC_PRFSCAPR 0x1804
+#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16)
+#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
+#define ENETC_PFPMR 0x1900
+#define ENETC_PFPMR_PMACE BIT(1)
+#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_EMDIO_BASE 0x1c00
+#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR 0x1f00
+#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PAR_PORT_CFG 0x3050
+#define ENETC_PAR_PORT_L4CD BIT(0)
+#define ENETC_PAR_PORT_L3CD BIT(1)
+
+#define ENETC_PM0_CMD_CFG 0x8008
+#define ENETC_PM1_CMD_CFG 0x9008
+#define ENETC_PM0_TX_EN BIT(0)
+#define ENETC_PM0_RX_EN BIT(1)
+#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_CMD_XGLP BIT(10)
+#define ENETC_PM0_CMD_TXP BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
+#define ENETC_PM0_CMD_SFD BIT(21)
+#define ENETC_PM0_MAXFRM 0x8014
+#define ENETC_SET_TX_MTU(val) ((val) << 16)
+#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
+#define ENETC_PM0_RX_FIFO 0x801c
+#define ENETC_PM0_RX_FIFO_VAL 1
+
+#define ENETC_PM_IMDIO_BASE 0x8030
+
+#define ENETC_PM0_IF_MODE 0x8300
+#define ENETC_PM0_IFM_RG BIT(2)
+#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_EN_AUTO BIT(15)
+#define ENETC_PM0_IFM_SSP_MASK GENMASK(14, 13)
+#define ENETC_PM0_IFM_SSP_1000 (2 << 13)
+#define ENETC_PM0_IFM_SSP_100 (0 << 13)
+#define ENETC_PM0_IFM_SSP_10 (1 << 13)
+#define ENETC_PM0_IFM_FULL_DPX BIT(12)
+#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0)
+#define ENETC_PM0_IFM_IFMODE_XGMII 0
+#define ENETC_PM0_IFM_IFMODE_GMII 2
+#define ENETC_PSIDCAPR 0x1b08
+#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSFCAPR 0x1b18
+#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSGCAPR 0x1b28
+#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
+#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
+#define ENETC_PFMCAPR 0x1b38
+#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
+
+/* MAC counters */
+#define ENETC_PM0_REOCT 0x8100
+#define ENETC_PM0_RALN 0x8110
+#define ENETC_PM0_RXPF 0x8118
+#define ENETC_PM0_RFRM 0x8120
+#define ENETC_PM0_RFCS 0x8128
+#define ENETC_PM0_RVLAN 0x8130
+#define ENETC_PM0_RERR 0x8138
+#define ENETC_PM0_RUCA 0x8140
+#define ENETC_PM0_RMCA 0x8148
+#define ENETC_PM0_RBCA 0x8150
+#define ENETC_PM0_RDRP 0x8158
+#define ENETC_PM0_RPKT 0x8160
+#define ENETC_PM0_RUND 0x8168
+#define ENETC_PM0_R64 0x8170
+#define ENETC_PM0_R127 0x8178
+#define ENETC_PM0_R255 0x8180
+#define ENETC_PM0_R511 0x8188
+#define ENETC_PM0_R1023 0x8190
+#define ENETC_PM0_R1522 0x8198
+#define ENETC_PM0_R1523X 0x81A0
+#define ENETC_PM0_ROVR 0x81A8
+#define ENETC_PM0_RJBR 0x81B0
+#define ENETC_PM0_RFRG 0x81B8
+#define ENETC_PM0_RCNP 0x81C0
+#define ENETC_PM0_RDRNTP 0x81C8
+#define ENETC_PM0_TEOCT 0x8200
+#define ENETC_PM0_TOCT 0x8208
+#define ENETC_PM0_TCRSE 0x8210
+#define ENETC_PM0_TXPF 0x8218
+#define ENETC_PM0_TFRM 0x8220
+#define ENETC_PM0_TFCS 0x8228
+#define ENETC_PM0_TVLAN 0x8230
+#define ENETC_PM0_TERR 0x8238
+#define ENETC_PM0_TUCA 0x8240
+#define ENETC_PM0_TMCA 0x8248
+#define ENETC_PM0_TBCA 0x8250
+#define ENETC_PM0_TPKT 0x8260
+#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T64 0x8270
+#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T255 0x8280
+#define ENETC_PM0_T511 0x8288
+#define ENETC_PM0_T1023 0x8290
+#define ENETC_PM0_T1522 0x8298
+#define ENETC_PM0_T1523X 0x82A0
+#define ENETC_PM0_TCNP 0x82C0
+#define ENETC_PM0_TDFR 0x82D0
+#define ENETC_PM0_TMCOL 0x82D8
+#define ENETC_PM0_TSCOL 0x82E0
+#define ENETC_PM0_TLCOL 0x82E8
+#define ENETC_PM0_TECOL 0x82F0
+
+/* Port counters */
+#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
+#define ENETC_PBFDSIR 0x0810
+#define ENETC_PFDMSAPR 0x0814
+#define ENETC_UFDMF 0x1680
+#define ENETC_MFDMF 0x1684
+#define ENETC_PUFDVFR 0x1780
+#define ENETC_PMFDVFR 0x1784
+#define ENETC_PBFDVFR 0x1788
+
+/** Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x0bf8
+#define ENETC_G_EIPBRR1 0x0bfc
+#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII 0x80000000
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+ struct {
+ uint64_t addr;
+ uint16_t buf_len;
+ uint16_t frm_len;
+ union {
+ struct {
+ uint8_t reserved[3];
+ uint8_t flags;
+ }; /* default layout */
+ uint32_t txstart;
+ uint32_t lstatus;
+ };
+ };
+ struct {
+ uint32_t tstamp;
+ uint16_t tpid;
+ uint16_t vid;
+ uint8_t reserved[6];
+ uint8_t e_flags;
+ uint8_t flags;
+ } ext; /* Tx BD extension */
+ struct {
+ uint32_t tstamp;
+ uint8_t reserved[10];
+ uint8_t status;
+ uint8_t flags;
+ } wb; /* writeback descriptor */
+};
+
+enum enetc_txbd_flags {
+ ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_W = BIT(2),
+ ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_TXSTART = BIT(4),
+ ENETC_TXBD_FLAGS_FI = BIT(5),
+ ENETC_TXBD_FLAGS_EX = BIT(6),
+ ENETC_TXBD_FLAGS_F = BIT(7)
+};
+#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
+#define ENETC_TXBD_FLAGS_OFFSET 24
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+ memset(txbd, 0, sizeof(*txbd));
+}
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
+
+union enetc_rx_bd {
+ struct {
+ uint64_t addr;
+ uint8_t reserved[8];
+ } w;
+ struct {
+ uint16_t inet_csum;
+ uint16_t parse_summary;
+ uint32_t rss_hash;
+ uint16_t buf_len;
+ uint16_t vlan_opt;
+ union {
+ struct {
+ uint16_t flags;
+ uint16_t error;
+ };
+ uint32_t lstatus;
+ };
+ } r;
+ struct {
+ uint32_t tstamp;
+ uint8_t reserved[12];
+ } ext;
+};
+
+#define ENETC_RXBD_PARSER_ERROR BIT(15)
+
+#define ENETC_RXBD_LSTATUS_R BIT(30)
+#define ENETC_RXBD_LSTATUS_F BIT(31)
+#define ENETC_RXBD_ERR_MASK 0xff
+#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16)
+#define ENETC_RXBD_FLAG_RSSV BIT(8)
+#define ENETC_RXBD_FLAG_VLAN BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP BIT(10)
+#define ENETC_RXBD_FLAG_TPID GENMASK(1, 0)
+
+#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */
+#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS 2
+
+#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK 0xf
+
+struct enetc_cmd_rfse {
+ uint8_t smac_h[6];
+ uint8_t smac_m[6];
+ uint8_t dmac_h[6];
+ uint8_t dmac_m[6];
+ uint32_t sip_h[4]; /* Big-endian */
+ uint32_t sip_m[4]; /* Big-endian */
+ uint32_t dip_h[4]; /* Big-endian */
+ uint32_t dip_m[4]; /* Big-endian */
+ uint16_t ethtype_h;
+ uint16_t ethtype_m;
+ uint16_t ethtype4_h;
+ uint16_t ethtype4_m;
+ uint16_t sport_h;
+ uint16_t sport_m;
+ uint16_t dport_h;
+ uint16_t dport_m;
+ uint16_t vlan_h;
+ uint16_t vlan_m;
+ uint8_t proto_h;
+ uint8_t proto_m;
+ uint16_t flags;
+ uint16_t result;
+ uint16_t mode;
+};
+
+#define ENETC_RFSE_EN BIT(15)
+#define ENETC_RFSE_MODE_BD 2
+
+#define ENETC_SI_INT_IDX 0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX 1
+
+/* Messaging */
+
+/* Command completion status */
+enum enetc_msg_cmd_status {
+ ENETC_MSG_CMD_STATUS_OK,
+ ENETC_MSG_CMD_STATUS_FAIL
+};
+
+/* VSI-PSI command message types */
+enum enetc_msg_cmd_type {
+ ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
+ ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
+ ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
+};
+
+/* VSI-PSI command action types */
+enum enetc_msg_cmd_action_type {
+ ENETC_MSG_CMD_MNG_ADD = 1,
+ ENETC_MSG_CMD_MNG_REMOVE
+};
+
+/* PSI-VSI command header format */
+struct enetc_msg_cmd_header {
+ uint16_t type; /* command class type */
+ uint16_t id; /* denotes the specific required action */
+};
+
+enum bdcr_cmd_class {
+ BDCR_CMD_UNSPEC = 0,
+ BDCR_CMD_MAC_FILTER,
+ BDCR_CMD_VLAN_FILTER,
+ BDCR_CMD_RSS,
+ BDCR_CMD_RFS,
+ BDCR_CMD_PORT_GCL,
+ BDCR_CMD_RECV_CLASSIFIER,
+ BDCR_CMD_STREAM_IDENTIFY,
+ BDCR_CMD_STREAM_FILTER,
+ BDCR_CMD_STREAM_GCL,
+ BDCR_CMD_FLOW_METER,
+ __BDCR_CMD_MAX_LEN,
+ BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+enum bdcr_cmd_rss {
+ BDCR_CMD_RSS_WRITE = 1,
+ BDCR_CMD_RSS_READ = 2,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+ uint8_t atc; /* init gate value */
+ uint8_t res[7];
+ struct {
+ uint8_t res1[4];
+ uint16_t acl_len;
+ uint8_t res2[2];
+ };
+};
+
+/* gate control list entry */
+struct gce {
+ uint32_t period;
+ uint8_t gate;
+ uint8_t res[3];
+};
+
+/* The tgs_gcl_conf address points to this data space. */
+struct tgs_gcl_data {
+ uint32_t btl;
+ uint32_t bth;
+ uint32_t ct;
+ uint32_t cte;
+ struct gce entry[];
+};
+
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+ uint32_t stream_handle; /* init gate value */
+ uint32_t iports;
+ uint8_t id_type;
+ uint8_t oui[3];
+ uint8_t res[3];
+ uint8_t en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* The streamid_conf address points to this data space. */
+struct streamid_data {
+ union {
+ uint8_t dmac[6];
+ uint8_t smac[6];
+ };
+ uint16_t vid_vidm_tg;
+};
+
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+ uint32_t stream_handle;
+ uint8_t multi;
+ uint8_t res[2];
+ uint8_t sthm;
+ /* Max Service Data Unit or Flow Meter Instance Table index.
+ * Depending on the value of FLT this represents either Max
+ * Service Data Unit (max frame size) allowed by the filter
+ * entry or is an index into the Flow Meter Instance table
+ * index identifying the policer which will be used to police
+ * it.
+ */
+ uint16_t fm_inst_table_index;
+ uint16_t msdu;
+ uint16_t sg_inst_table_index;
+ uint8_t res1[2];
+ uint32_t input_ports;
+ uint8_t res2[3];
+ uint8_t en;
+};
+
+/* class 8, command 2, Stream Filter Instance status query, short format.
+ * The command itself does not need a structure definition.
+ * Stream Filter Instance Query Statistics Response data:
+ */
+struct sfi_counter_data {
+ uint32_t matchl;
+ uint32_t matchh;
+ uint32_t msdu_dropl;
+ uint32_t msdu_droph;
+ uint32_t stream_gate_dropl;
+ uint32_t stream_gate_droph;
+ uint32_t flow_meter_dropl;
+ uint32_t flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back
+ * Short Format
+ */
+struct sgi_table {
+ uint8_t res[8];
+ uint8_t oipv;
+ uint8_t res0[2];
+ uint8_t ocgtst;
+ uint8_t res1[7];
+ uint8_t gset;
+ uint8_t oacl_len;
+ uint8_t res2[2];
+ uint8_t en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+ uint8_t aipv;
+ uint8_t res[2];
+ uint8_t agtst;
+ uint8_t res1[4];
+ union {
+ struct {
+ uint8_t res2[4];
+ uint8_t acl_len;
+ uint8_t res3[3];
+ };
+ uint8_t cct[8]; /* Config change time */
+ };
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+ uint32_t interval;
+ uint8_t msdu[3];
+ uint8_t multi;
+};
+
+/* stream control list class 9 , cmd 1 data buffer */
+struct sgcl_data {
+ uint32_t btl;
+ uint32_t bth;
+ uint32_t ct;
+ uint32_t cte;
+ struct sgce sgcl[0];
+};
+
+#define ENETC_CBDR_FMI_MR BIT(0)
+#define ENETC_CBDR_FMI_MREN BIT(1)
+#define ENETC_CBDR_FMI_DOY BIT(2)
+#define ENETC_CBDR_FMI_CM BIT(3)
+#define ENETC_CBDR_FMI_CF BIT(4)
+#define ENETC_CBDR_FMI_NDOR BIT(5)
+#define ENETC_CBDR_FMI_OALEN BIT(6)
+#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0)
+
+/* class 10: command 0/1, Flow Meter Instance Set, short Format */
+struct fmi_conf {
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+ uint8_t conf;
+ uint8_t res1;
+ uint8_t ir_fpp;
+ uint8_t res2[4];
+ uint8_t en;
+};
+
+struct enetc_cbd {
+ union{
+ struct sfi_conf sfi_conf;
+ struct sgi_table sgi_table;
+ struct fmi_conf fmi_conf;
+ struct {
+ uint32_t addr[2];
+ union {
+ uint32_t opt[4];
+ struct tgs_gcl_conf gcl_conf;
+ struct streamid_conf sid_set;
+ struct sgcl_conf sgcl_conf;
+ };
+ }; /* Long format */
+ uint32_t data[6];
+ };
+ uint16_t index;
+ uint16_t length;
+ uint8_t cmd;
+ uint8_t cls;
+ uint8_t _res;
+ uint8_t status_flags;
+};
+
+#define ENETC_CLK 400000000ULL
+
+/* port time gating control register */
+#define ENETC_QBV_PTGCR_OFFSET 0x11a00
+#define ENETC_QBV_TGE BIT(31)
+#define ENETC_QBV_TGPE BIT(30)
+
+/* Port time gating capability register */
+#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
+#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+
+/* Port time specific departure */
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
+#define ENETC_TSDE BIT(31)
+
+/* PSFP setting */
+#define ENETC_PPSFPMR 0x11b00
+#define ENETC_PPSFPMR_PSFPEN BIT(0)
+#define ENETC_PPSFPMR_VS BIT(1)
+#define ENETC_PPSFPMR_PVC BIT(2)
+#define ENETC_PPSFPMR_PVZC BIT(3)
+
+#endif
diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/enetc/if_enetc.c
@@ -0,0 +1,1468 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Alstom Group.
+ * Copyright (c) 2021 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_var.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/iflib.h>
+
+#include <dev/enetc/enetc_hw.h>
+#include <dev/enetc/enetc.h>
+#include <dev/enetc/enetc_mdio.h>
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "ifdi_if.h"
+#include "miibus_if.h"
+
+static device_register_t enetc_register;
+
+static ifdi_attach_pre_t enetc_attach_pre;
+static ifdi_attach_post_t enetc_attach_post;
+static ifdi_detach_t enetc_detach;
+
+static ifdi_tx_queues_alloc_t enetc_tx_queues_alloc;
+static ifdi_rx_queues_alloc_t enetc_rx_queues_alloc;
+static ifdi_queues_free_t enetc_queues_free;
+
+static ifdi_init_t enetc_init;
+static ifdi_stop_t enetc_stop;
+
+static ifdi_msix_intr_assign_t enetc_msix_intr_assign;
+static ifdi_tx_queue_intr_enable_t enetc_tx_queue_intr_enable;
+static ifdi_rx_queue_intr_enable_t enetc_rx_queue_intr_enable;
+static ifdi_intr_enable_t enetc_intr_enable;
+static ifdi_intr_disable_t enetc_intr_disable;
+
+static int enetc_isc_txd_encap(void*, if_pkt_info_t);
+static void enetc_isc_txd_flush(void*, uint16_t, qidx_t);
+static int enetc_isc_txd_credits_update(void*, uint16_t, bool);
+static int enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
+static int enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
+static void enetc_isc_rxd_refill(void*, if_rxd_update_t);
+static void enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);
+
+static void enetc_vlan_register(if_ctx_t, uint16_t);
+static void enetc_vlan_unregister(if_ctx_t, uint16_t);
+
+static uint64_t enetc_get_counter(if_ctx_t, ift_counter);
+static int enetc_promisc_set(if_ctx_t, int);
+static int enetc_mtu_set(if_ctx_t, uint32_t);
+static void enetc_setup_multicast(if_ctx_t);
+static void enetc_timer(if_ctx_t, uint16_t);
+static void enetc_update_admin_status(if_ctx_t);
+
+static miibus_readreg_t enetc_miibus_readreg;
+static miibus_writereg_t enetc_miibus_writereg;
+static miibus_linkchg_t enetc_miibus_linkchg;
+static miibus_statchg_t enetc_miibus_statchg;
+
+static int enetc_media_change(if_t);
+static void enetc_media_status(if_t, struct ifmediareq*);
+
+static int enetc_fixed_media_change(if_t);
+static void enetc_fixed_media_status(if_t, struct ifmediareq*);
+
+static void enetc_max_nqueues(struct enetc_softc*, int*, int*);
+static int enetc_setup_phy(struct enetc_softc*);
+
+static void enetc_get_hwaddr(struct enetc_softc*);
+static void enetc_set_hwaddr(struct enetc_softc*);
+static int enetc_setup_rss(struct enetc_softc*);
+
+static void enetc_init_hw(struct enetc_softc*);
+static void enetc_init_ctrl(struct enetc_softc*);
+static void enetc_init_tx(struct enetc_softc*);
+static void enetc_init_rx(struct enetc_softc*);
+
+static int enetc_ctrl_send(struct enetc_softc*,
+ uint16_t, uint16_t, iflib_dma_info_t);
+
+static const char enetc_driver_version[] = "1.0.0";
+
+static pci_vendor_info_t enetc_vendor_info_array[] = {
+ PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
+ "Freescale ENETC PCIe Gigabit Ethernet Controller"),
+ PVID_END
+};
+
+#define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)
+
+static device_method_t enetc_methods[] = {
+ DEVMETHOD(device_register, enetc_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
+
+ DEVMETHOD(miibus_readreg, enetc_miibus_readreg),
+ DEVMETHOD(miibus_writereg, enetc_miibus_writereg),
+ DEVMETHOD(miibus_linkchg, enetc_miibus_linkchg),
+ DEVMETHOD(miibus_statchg, enetc_miibus_statchg),
+
+ DEVMETHOD_END
+};
+
+static driver_t enetc_driver = {
+ "enetc", enetc_methods, sizeof(struct enetc_softc)
+};
+
+static devclass_t enetc_devclass;
+DRIVER_MODULE(enetc, pci, enetc_driver, enetc_devclass, NULL, NULL);
+DRIVER_MODULE(miibus, enetc, miibus_driver, miibus_devclass, NULL, NULL);
+MODULE_VERSION(enetc, 1);
+
+IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);
+
+MODULE_DEPEND(enetc, ether, 1, 1, 1);
+MODULE_DEPEND(enetc, iflib, 1, 1, 1);
+MODULE_DEPEND(enetc, miibus, 1, 1, 1);
+
+static device_method_t enetc_iflib_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, enetc_attach_pre),
+ DEVMETHOD(ifdi_attach_post, enetc_attach_post),
+ DEVMETHOD(ifdi_detach, enetc_detach),
+
+ DEVMETHOD(ifdi_init, enetc_init),
+ DEVMETHOD(ifdi_stop, enetc_stop),
+
+ DEVMETHOD(ifdi_tx_queues_alloc, enetc_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, enetc_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, enetc_queues_free),
+
+ DEVMETHOD(ifdi_msix_intr_assign, enetc_msix_intr_assign),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, enetc_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, enetc_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_intr_enable, enetc_intr_enable),
+ DEVMETHOD(ifdi_intr_disable, enetc_intr_disable),
+
+ DEVMETHOD(ifdi_vlan_register, enetc_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, enetc_vlan_unregister),
+
+ DEVMETHOD(ifdi_get_counter, enetc_get_counter),
+ DEVMETHOD(ifdi_mtu_set, enetc_mtu_set),
+ DEVMETHOD(ifdi_multi_set, enetc_setup_multicast),
+ DEVMETHOD(ifdi_promisc_set, enetc_promisc_set),
+ DEVMETHOD(ifdi_timer, enetc_timer),
+ DEVMETHOD(ifdi_update_admin_status, enetc_update_admin_status),
+
+ DEVMETHOD_END
+};
+
+static driver_t enetc_iflib_driver = {
+ "enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
+};
+
+static struct if_txrx enetc_txrx = {
+ .ift_txd_encap = enetc_isc_txd_encap,
+ .ift_txd_flush = enetc_isc_txd_flush,
+ .ift_txd_credits_update = enetc_isc_txd_credits_update,
+ .ift_rxd_available = enetc_isc_rxd_available,
+ .ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
+ .ift_rxd_refill = enetc_isc_rxd_refill,
+ .ift_rxd_flush = enetc_isc_rxd_flush
+};
+
+static struct if_shared_ctx enetc_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+
+ .isc_q_align = ENETC_RING_ALIGN,
+
+ .isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
+ .isc_tx_maxsegsize = PAGE_SIZE,
+
+ .isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
+ .isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
+ .isc_rx_nsegments = ENETC_MAX_SCATTER,
+
+ .isc_admin_intrcnt = 0,
+
+ .isc_nfl = 1,
+ .isc_nrxqs = 1,
+ .isc_ntxqs = 1,
+
+ .isc_vendor_info = enetc_vendor_info_array,
+ .isc_driver_version = enetc_driver_version,
+ .isc_driver = &enetc_iflib_driver,
+
+ .isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
+ .isc_ntxd_min = {ENETC_MIN_DESC},
+ .isc_ntxd_max = {ENETC_MAX_DESC},
+ .isc_ntxd_default = {ENETC_DEFAULT_DESC},
+ .isc_nrxd_min = {ENETC_MIN_DESC},
+ .isc_nrxd_max = {ENETC_MAX_DESC},
+ .isc_nrxd_default = {ENETC_DEFAULT_DESC}
+};
+
+static void*
+enetc_register(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (NULL);
+
+ return (&enetc_sctx_init);
+}
+
+static void
+enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
+ int *max_rx_nqueues)
+{
+ uint32_t val;
+
+ val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
+ *max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
+ *max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
+}
+
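+/*
+ * Example (hypothetical) fixed-link FDT fragment handled by
+ * enetc_setup_fixed() below; only the "speed" and "full-duplex"
+ * properties are consumed:
+ *
+ *   fixed-link {
+ *           speed = <1000>;
+ *           full-duplex;
+ *   };
+ */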
+static int
+enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
+{
+ ssize_t size;
+ int speed;
+
+ size = OF_getencprop(node, "speed", &speed, sizeof(speed));
+ if (size <= 0) {
+ device_printf(sc->dev,
+ "Device has fixed-link node without link speed specified\n");
+ return (ENXIO);
+ }
+ switch (speed) {
+ case 10:
+ speed = IFM_10_T;
+ break;
+ case 100:
+ speed = IFM_100_TX;
+ break;
+ case 1000:
+ speed = IFM_1000_T;
+ break;
+ default:
+ device_printf(sc->dev, "Unsupported link speed value of %d\n",
+ speed);
+ return (ENXIO);
+ }
+ speed |= IFM_ETHER;
+
+ if (OF_hasprop(node, "full-duplex"))
+ speed |= IFM_FDX;
+ else
+ speed |= IFM_HDX;
+
+ sc->fixed_link = true;
+
+ ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
+ enetc_fixed_media_status);
+ ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
+ ifmedia_set(&sc->fixed_ifmedia, speed);
+ sc->shared->isc_media = &sc->fixed_ifmedia;
+
+ return (0);
+}
+
+static int
+enetc_setup_phy(struct enetc_softc *sc)
+{
+ phandle_t node, fixed_link, phy_handle;
+ struct mii_data *miid;
+ int phy_addr, error;
+ ssize_t size;
+
+ node = ofw_bus_get_node(sc->dev);
+ fixed_link = ofw_bus_find_child(node, "fixed-link");
+ if (fixed_link != 0)
+ return (enetc_setup_fixed(sc, fixed_link));
+
+ size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
+ if (size <= 0) {
+ device_printf(sc->dev,
+ "Failed to acquire PHY handle from FDT.\n");
+ return (ENXIO);
+ }
+ phy_handle = OF_node_from_xref(phy_handle);
+ size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
+ if (size <= 0) {
+ device_printf(sc->dev, "Failed to obtain PHY address\n");
+ return (ENXIO);
+ }
+ error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
+ enetc_media_change, enetc_media_status,
+ BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
+ if (error != 0) {
+ device_printf(sc->dev, "mii_attach failed\n");
+ return (error);
+ }
+ miid = device_get_softc(sc->miibus);
+ sc->shared->isc_media = &miid->mii_media;
+
+ return (0);
+}
+
+static int
+enetc_attach_pre(if_ctx_t ctx)
+{
+ struct ifnet *ifp;
+ if_softc_ctx_t scctx;
+ struct enetc_softc *sc;
+ int error, rid;
+
+ sc = iflib_get_softc(ctx);
+ scctx = iflib_get_softc_ctx(ctx);
+ sc->ctx = ctx;
+ sc->dev = iflib_get_dev(ctx);
+ sc->shared = scctx;
+ ifp = iflib_get_ifp(ctx);
+
+ rid = PCIR_BAR(ENETC_BAR_REGS);
+ sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (sc->regs == NULL) {
+ device_printf(sc->dev,
+ "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
+ return (ENXIO);
+ }
+
+ error = iflib_dma_alloc_align(ctx,
+ ENETC_MIN_DESC * sizeof(struct enetc_cbd),
+ ENETC_RING_ALIGN,
+ &sc->ctrl_queue.dma,
+ 0);
+ if (error != 0) {
+ device_printf(sc->dev, "Failed to allocate control ring\n");
+ goto fail;
+ }
+ sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;
+
+ scctx->isc_txrx = &enetc_txrx;
+ scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
+ enetc_max_nqueues(sc, &scctx->isc_ntxqsets_max, &scctx->isc_nrxqsets_max);
+
+ if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
+ device_printf(sc->dev,
+ "The number of TX descriptors has to be a multiple of %d\n",
+ ENETC_DESC_ALIGN);
+ error = EINVAL;
+ goto fail;
+ }
+ if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
+ device_printf(sc->dev,
+ "The number of RX descriptors has to be a multiple of %d\n",
+ ENETC_DESC_ALIGN);
+ error = EINVAL;
+ goto fail;
+ }
+ scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
+ scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
+ scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
+ scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
+ scctx->isc_tx_csum_flags = 0;
+ scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;
+
+ error = enetc_mtu_set(ctx, ETHERMTU);
+ if (error != 0)
+ goto fail;
+
+ scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
+
+ error = enetc_setup_phy(sc);
+ if (error != 0)
+ goto fail;
+
+ enetc_get_hwaddr(sc);
+
+ return (0);
+fail:
+ enetc_detach(ctx);
+ return (error);
+}
+
+static int
+enetc_attach_post(if_ctx_t ctx)
+{
+
+ enetc_init_hw(iflib_get_softc(ctx));
+ return (0);
+}
+
+static int
+enetc_detach(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ int error = 0, i;
+
+ sc = iflib_get_softc(ctx);
+
+ for (i = 0; i < sc->rx_num_queues; i++)
+ iflib_irq_free(ctx, &sc->rx_queues[i].irq);
+
+ if (sc->miibus != NULL)
+ device_delete_child(sc->dev, sc->miibus);
+
+ if (sc->regs != NULL)
+ error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->regs), sc->regs);
+
+ if (sc->ctrl_queue.dma.idi_size != 0)
+ iflib_dma_free(&sc->ctrl_queue.dma);
+
+ return (error);
+}
+
+static int
+enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
+{
+ struct enetc_softc *sc;
+ struct enetc_tx_queue *queue;
+ int i;
+
+ sc = iflib_get_softc(ctx);
+
+ MPASS(ntxqs == 1);
+
+ sc->tx_queues = mallocarray(sc->tx_num_queues,
+ sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (sc->tx_queues == NULL) {
+ device_printf(sc->dev,
+ "Failed to allocate memory for TX queues.\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < sc->tx_num_queues; i++) {
+ queue = &sc->tx_queues[i];
+ queue->sc = sc;
+ queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
+ queue->ring_paddr = paddrs[i];
+ queue->next_to_clean = 0;
+ queue->ring_full = false;
+ }
+
+ return (0);
+}
+
+static int
+enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
+{
+ struct enetc_softc *sc;
+ struct enetc_rx_queue *queue;
+ int i;
+
+ sc = iflib_get_softc(ctx);
+ MPASS(nrxqs == 1);
+
+ sc->rx_queues = mallocarray(sc->rx_num_queues,
+ sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (sc->rx_queues == NULL) {
+ device_printf(sc->dev,
+ "Failed to allocate memory for RX queues.\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < sc->rx_num_queues; i++) {
+ queue = &sc->rx_queues[i];
+ queue->sc = sc;
+ queue->qid = i;
+ queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
+ queue->ring_paddr = paddrs[i];
+ }
+
+ return (0);
+}
+
+static void
+enetc_queues_free(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(ctx);
+
+ if (sc->tx_queues != NULL) {
+ free(sc->tx_queues, M_DEVBUF);
+ sc->tx_queues = NULL;
+ }
+ if (sc->rx_queues != NULL) {
+ free(sc->rx_queues, M_DEVBUF);
+ sc->rx_queues = NULL;
+ }
+}
+
+static void
+enetc_get_hwaddr(struct enetc_softc *sc)
+{
+ struct ether_addr hwaddr;
+ uint16_t high;
+ uint32_t low;
+
+ low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
+ high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));
+
+ memcpy(&hwaddr.octet[0], &low, 4);
+ memcpy(&hwaddr.octet[4], &high, 2);
+
+ if (ETHER_IS_BROADCAST(hwaddr.octet) ||
+ ETHER_IS_MULTICAST(hwaddr.octet) ||
+ ETHER_IS_ZERO(hwaddr.octet)) {
+ ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
+ device_printf(sc->dev,
+ "Failed to obtain MAC address, using a random one\n");
+ memcpy(&low, &hwaddr.octet[0], 4);
+ memcpy(&high, &hwaddr.octet[4], 2);
+ }
+
+ iflib_set_mac(sc->ctx, hwaddr.octet);
+}
+
+static void
+enetc_set_hwaddr(struct enetc_softc *sc)
+{
+ struct ifnet *ifp;
+ uint16_t high;
+ uint32_t low;
+ uint8_t *hwaddr;
+
+ ifp = iflib_get_ifp(sc->ctx);
+ hwaddr = (uint8_t*)if_getlladdr(ifp);
+ low = *((uint32_t*)hwaddr);
+ high = *((uint16_t*)(hwaddr+4));
+
+ ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
+ ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
+}
+
+static int
+enetc_setup_rss(struct enetc_softc *sc)
+{
+ struct iflib_dma_info dma;
+ int error, i, buckets_num = 0;
+ uint8_t *rss_table;
+ uint32_t reg;
+
+ reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
+ if (reg & ENETC_SIPCAPR0_RSS) {
+ reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
+ buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
+ }
+ if (buckets_num == 0)
+ return (ENOTSUP);
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
+ arc4rand((uint8_t *)&reg, sizeof(reg), 0);
+ ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
+ }
+
+ ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);
+
+ error = iflib_dma_alloc_align(sc->ctx,
+ buckets_num * sizeof(*rss_table),
+ ENETC_RING_ALIGN,
+ &dma,
+ 0);
+ if (error != 0) {
+ device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
+ return (error);
+ }
+ rss_table = (uint8_t *)dma.idi_vaddr;
+
+ for (i = 0; i < buckets_num; i++)
+ rss_table[i] = i % sc->rx_num_queues;
+
+ error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
+ buckets_num * sizeof(*rss_table), &dma);
+ if (error != 0)
+ device_printf(sc->dev, "Failed to setup RSS table\n");
+
+ iflib_dma_free(&dma);
+
+ return (error);
+}
+
+static int
+enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
+ iflib_dma_info_t dma)
+{
+ struct enetc_ctrl_queue *queue;
+ struct enetc_cbd *desc;
+ int timeout = 1000;
+
+ queue = &sc->ctrl_queue;
+ desc = &queue->ring[queue->pidx];
+
+ if (++queue->pidx == ENETC_MIN_DESC)
+ queue->pidx = 0;
+
+ desc->addr[0] = (uint32_t)dma->idi_paddr;
+ desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
+ desc->index = 0;
+ desc->length = (uint16_t)size;
+ desc->cmd = (uint8_t)cmd;
+ desc->cls = (uint8_t)(cmd >> 8);
+ desc->status_flags = 0;
+
+ /* Sync command packet, */
+ bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
+ /* and the control ring. */
+ bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
+ ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
+
+ while (--timeout != 0) {
+ DELAY(20);
+ if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
+ break;
+ }
+
+ if (timeout == 0)
+ return (ETIMEDOUT);
+
+ bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
+ return (0);
+}
+
+static void
+enetc_init_hw(struct enetc_softc *sc)
+{
+ uint32_t val;
+ int error;
+
+ ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+ ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
+ val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
+ ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
+ ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+ ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
+ ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);
+
+ ENETC_WR4(sc, ENETC_SICAR0,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+ ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
+ ENETC_WR4(sc, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_init_ctrl(sc);
+ error = enetc_setup_rss(sc);
+ if (error != 0)
+ ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
+ else
+ ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
+}
+
+static void
+enetc_init_ctrl(struct enetc_softc *sc)
+{
+ struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
+
+ ENETC_WR4(sc, ENETC_SICBDRBAR0,
+ (uint32_t)queue->dma.idi_paddr);
+ ENETC_WR4(sc, ENETC_SICBDRBAR1,
+ (uint32_t)(queue->dma.idi_paddr >> 32));
+ ENETC_WR4(sc, ENETC_SICBDRLENR,
+ queue->dma.idi_size / sizeof(struct enetc_cbd));
+
+ queue->pidx = 0;
+ ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
+ ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
+ ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
+}
+
+static void
+enetc_init_tx(struct enetc_softc *sc)
+{
+ struct enetc_tx_queue *queue;
+ int i;
+
+ for (i = 0; i < sc->tx_num_queues; i++) {
+ queue = &sc->tx_queues[i];
+
+ ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
+ (uint32_t)queue->ring_paddr);
+ ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
+ (uint32_t)(queue->ring_paddr >> 32));
+ ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);
+
+ /*
+ * Even though it is undocumented, resetting the TX ring
+ * indices results in a TX hang.
+ * Do the same as Linux does and simply keep them unchanged
+ * for the driver's lifetime.
+ */
+#if 0
+ ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
+ ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
+#endif
+ ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
+ }
+}
+
+static void
+enetc_init_rx(struct enetc_softc *sc)
+{
+ struct enetc_rx_queue *queue;
+ uint32_t rx_buf_size;
+ int i;
+
+ rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);
+
+ for (i = 0; i < sc->rx_num_queues; i++) {
+ queue = &sc->rx_queues[i];
+
+ ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
+ (uint32_t)queue->ring_paddr);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
+ (uint32_t)(queue->ring_paddr >> 32));
+ ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
+ queue->enabled = false;
+ }
+}
+
+static u_int
+enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
+{
+ uint64_t *bitmap = arg;
+ uint64_t address = 0;
+ uint8_t hash = 0;
+ bool bit;
+ int i, j;
+
+ bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);
+
+ /*
+ * The six bit hash is calculated by xoring every
+ * 6th bit of the address.
+ * It is then used as an index in a bitmap that is
+ * written to the device.
+ */
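+ /*
+ * For example, the broadcast address ff:ff:ff:ff:ff:ff has eight set
+ * bits in every 6-bit column, so each parity bit is zero and the
+ * address hashes to bit 0 of the filter bitmap.
+ */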
+ for (i = 0; i < 6; i++) {
+ bit = 0;
+ for (j = 0; j < 8; j++)
+ bit ^= (address & BIT(i + j*6)) != 0;
+
+ hash |= bit << i;
+ }
+
+ *bitmap |= BIT(hash);
+ return (1);
+}
+
+static void
+enetc_setup_multicast(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ struct ifnet *ifp;
+ uint64_t bitmap = 0;
+ uint8_t revid;
+
+ sc = iflib_get_softc(ctx);
+ ifp = iflib_get_ifp(ctx);
+ revid = pci_get_revid(sc->dev);
+
+ if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
+
+ /*
+ * In revision 1 of this chip the positions of the multicast and
+ * unicast hash filter registers are swapped.
+ */
+ ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
+ ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
+}
+
+static uint8_t
+enetc_hash_vid(uint16_t vid)
+{
+ uint8_t hash = 0;
+ bool bit;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ bit = (vid & BIT(i)) != 0;
+ bit ^= (vid & BIT(i + 6)) != 0;
+ hash |= bit << i;
+ }
+
+ return (hash);
+}
+
+static void
+enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
+{
+ struct enetc_softc *sc;
+ uint8_t hash;
+ uint64_t bitmap;
+
+ sc = iflib_get_softc(ctx);
+ hash = enetc_hash_vid(vid);
+
+ /* Check if the hash is already present in the bitmap. */
+ if (++sc->vlan_bitmap[hash] != 1)
+ return;
+
+ bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
+ bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
+ bitmap |= BIT(hash);
+ ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
+ ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
+}
+
+static void
+enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
+{
+ struct enetc_softc *sc;
+ uint8_t hash;
+ uint64_t bitmap;
+
+ sc = iflib_get_softc(ctx);
+ hash = enetc_hash_vid(vid);
+
+ MPASS(sc->vlan_bitmap[hash] > 0);
+ if (--sc->vlan_bitmap[hash] != 0)
+ return;
+
+ bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
+ bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
+ bitmap &= ~BIT(hash);
+ ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
+ ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
+}
+
+static void
+enetc_init(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ struct mii_data *miid;
+ struct ifnet *ifp;
+ uint16_t max_frame_length;
+ int baudrate;
+
+ sc = iflib_get_softc(ctx);
+ ifp = iflib_get_ifp(ctx);
+
+ max_frame_length = sc->shared->isc_max_frame_size;
+ MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);
+
+ /* Set max RX and TX frame lengths. */
+ ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
+ ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
+ ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);
+
+ /* Set "VLAN promiscuous" mode if filtering is disabled. */
+ if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
+ ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
+ ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
+ else
+ ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
+ ENETC_PSIPVMR_SET_VUTA(1));
+
+ sc->rbmr = ENETC_RBMR_EN | ENETC_RBMR_AL;
+
+ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
+ sc->rbmr |= ENETC_RBMR_VTE;
+
+ /* Write MAC address to hardware. */
+ enetc_set_hwaddr(sc);
+
+ enetc_init_tx(sc);
+ enetc_init_rx(sc);
+
+ if (sc->fixed_link) {
+ baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
+ iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
+ } else {
+ /*
+ * Can't return an error from this function, there is not much
+ * we can do if this fails.
+ */
+ miid = device_get_softc(sc->miibus);
+ (void)mii_mediachg(miid);
+ }
+
+ enetc_promisc_set(ctx, if_getflags(ifp));
+}
+
+static void
+enetc_stop(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ int i;
+
+ sc = iflib_get_softc(ctx);
+
+ for (i = 0; i < sc->tx_num_queues; i++)
+ ENETC_TXQ_WR4(sc, i, ENETC_TBMR, 0);
+
+ for (i = 0; i < sc->rx_num_queues; i++)
+ ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
+}
+
+static int
+enetc_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct enetc_softc *sc;
+ struct enetc_rx_queue *rx_queue;
+ struct enetc_tx_queue *tx_queue;
+ int vector = 0, i, error;
+ char irq_name[16];
+
+ sc = iflib_get_softc(ctx);
+
+ MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
+ MPASS(sc->rx_num_queues == sc->tx_num_queues);
+
+ for (i = 0; i < sc->rx_num_queues; i++, vector++) {
+ rx_queue = &sc->rx_queues[i];
+ snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
+ error = iflib_irq_alloc_generic(ctx,
+ &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
+ NULL, rx_queue, i, irq_name);
+ if (error != 0)
+ goto fail;
+
+ ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
+ ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
+ ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
+ }
+ vector = 0;
+ for (i = 0; i < sc->tx_num_queues; i++, vector++) {
+ tx_queue = &sc->tx_queues[i];
+ snprintf(irq_name, sizeof(irq_name), "txq%d", i);
+ iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
+ IFLIB_INTR_TX, tx_queue, i, irq_name);
+
+ ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
+ }
+
+ return (0);
+fail:
+ for (i = 0; i < sc->rx_num_queues; i++) {
+ rx_queue = &sc->rx_queues[i];
+ iflib_irq_free(ctx, &rx_queue->irq);
+ }
+ return (error);
+}
+
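+/* iflib per-queue interrupt re-enable callbacks. */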
+static int
+enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(ctx);
+ ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
+ return (0);
+}
+
+static int
+enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(ctx);
+ ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
+ return (0);
+}
+
+static void
+enetc_intr_enable(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ int i;
+
+ sc = iflib_get_softc(ctx);
+
+ for (i = 0; i < sc->rx_num_queues; i++)
+ ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
+
+ for (i = 0; i < sc->tx_num_queues; i++)
+ ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
+}
+
+static void
+enetc_intr_disable(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ int i;
+
+ sc = iflib_get_softc(ctx);
+
+ for (i = 0; i < sc->rx_num_queues; i++)
+ ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);
+
+ for (i = 0; i < sc->tx_num_queues; i++)
+ ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
+}
+
+static int
+enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
+{
+ struct enetc_softc *sc = data;
+ struct enetc_tx_queue *queue;
+ union enetc_tx_bd *desc;
+ bus_dma_segment_t *segs;
+ qidx_t pidx, queue_len;
+ qidx_t i = 0;
+
+ queue = &sc->tx_queues[ipi->ipi_qsidx];
+ segs = ipi->ipi_segs;
+ pidx = ipi->ipi_pidx;
+ queue_len = sc->tx_queue_size;
+
+ /*
+ * First descriptor is special. We use it to set frame
+ * related information and offloads, e.g. VLAN tag.
+ */
+ desc = &queue->ring[pidx];
+ bzero(desc, sizeof(*desc));
+ desc->frm_len = ipi->ipi_len;
+ desc->addr = segs[i].ds_addr;
+ desc->buf_len = segs[i].ds_len;
+ if (ipi->ipi_flags & IPI_TX_INTR)
+ desc->flags = ENETC_TXBD_FLAGS_FI;
+
+ i++;
+ if (++pidx == queue_len)
+ pidx = 0;
+
+ if (ipi->ipi_mflags & M_VLANTAG) {
+ /* VLAN tag is inserted in a separate descriptor. */
+ desc->flags |= ENETC_TXBD_FLAGS_EX;
+ desc = &queue->ring[pidx];
+ bzero(desc, sizeof(*desc));
+ desc->ext.vid = ipi->ipi_vtag;
+ desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ if (++pidx == queue_len)
+ pidx = 0;
+ }
+
+ /* Now add remaining descriptors. */
+ for (;i < ipi->ipi_nsegs; i++) {
+ desc = &queue->ring[pidx];
+ bzero(desc, sizeof(*desc));
+ desc->addr = segs[i].ds_addr;
+ desc->buf_len = segs[i].ds_len;
+
+ if (++pidx == queue_len)
+ pidx = 0;
+ }
+
+ desc->flags |= ENETC_TXBD_FLAGS_F;
+ ipi->ipi_new_pidx = pidx;
+ if (pidx == queue->next_to_clean)
+ queue->ring_full = true;
+
+ return (0);
+}
+
+static void
+enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
+{
+ struct enetc_softc *sc = data;
+
+ ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
+}
+
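+/*
+ * Report whether any TX descriptors were completed by hardware and,
+ * when "clear" is set, reclaim them by advancing next_to_clean.
+ */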
+static int
+enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
+{
+ struct enetc_softc *sc = data;
+ struct enetc_tx_queue *queue;
+ qidx_t next_to_clean, next_to_process;
+ int clean_count;
+
+ queue = &sc->tx_queues[qid];
+ next_to_process =
+ ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
+ next_to_clean = queue->next_to_clean;
+
+ if (next_to_clean == next_to_process && !queue->ring_full)
+ return (0);
+
+ if (!clear)
+ return (1);
+
+ clean_count = next_to_process - next_to_clean;
+ if (clean_count <= 0)
+ clean_count += sc->tx_queue_size;
+
+ queue->next_to_clean = next_to_process;
+ queue->ring_full = false;
+
+ return (clean_count);
+}
+
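+/* Count completed frames available on the RX ring, up to "budget". */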
+static int
+enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
+{
+ struct enetc_softc *sc = data;
+ struct enetc_rx_queue *queue;
+ qidx_t hw_pidx, queue_len;
+ union enetc_rx_bd *desc;
+ int count = 0;
+
+ queue = &sc->rx_queues[qid];
+ desc = &queue->ring[pidx];
+ queue_len = sc->rx_queue_size;
+
+ if (desc->r.lstatus == 0)
+ return (0);
+
+ if (budget == 1)
+ return (1);
+
+ hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
+ while (pidx != hw_pidx && count < budget) {
+ desc = &queue->ring[pidx];
+ if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
+ count++;
+
+ if (++pidx == queue_len)
+ pidx = 0;
+ }
+
+ return (count);
+}
+
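+/*
+ * Collect the descriptor chain of a received frame and fill in its
+ * length, fragments, RSS hash, IP checksum status and VLAN tag.
+ */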
+static int
+enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
+{
+ struct enetc_softc *sc = data;
+ struct enetc_rx_queue *queue;
+ union enetc_rx_bd *desc;
+ uint16_t buf_len, pkt_size = 0;
+ qidx_t cidx, queue_len;
+ uint32_t status;
+ int i;
+
+ cidx = ri->iri_cidx;
+ queue = &sc->rx_queues[ri->iri_qsidx];
+ desc = &queue->ring[cidx];
+ status = desc->r.lstatus;
+ queue_len = sc->rx_queue_size;
+
+ /*
+ * Ready bit will be set only when all descriptors
+ * in the chain have been processed.
+ */
+ if ((status & ENETC_RXBD_LSTATUS_R) == 0)
+ return (EAGAIN);
+
+ /* Pass RSS hash. */
+ if (status & ENETC_RXBD_FLAG_RSSV) {
+ ri->iri_flowid = desc->r.rss_hash;
+ ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
+ }
+
+ /* Pass IP checksum status. */
+ ri->iri_csum_flags = CSUM_IP_CHECKED;
+ if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
+ ri->iri_csum_flags |= CSUM_IP_VALID;
+
+ /* Pass extracted VLAN tag. */
+ if (status & ENETC_RXBD_FLAG_VLAN) {
+ ri->iri_vtag = desc->r.vlan_opt;
+ ri->iri_flags = M_VLANTAG;
+ }
+
+ for (i = 0; i < ENETC_MAX_SCATTER; i++) {
+ buf_len = desc->r.buf_len;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = buf_len;
+ pkt_size += buf_len;
+ if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
+ break;
+
+ if (++cidx == queue_len)
+ cidx = 0;
+
+ desc = &queue->ring[cidx];
+ }
+ ri->iri_nfrags = i + 1;
+ ri->iri_len = pkt_size + ENETC_RX_IP_ALIGN;
+ ri->iri_pad = ENETC_RX_IP_ALIGN;
+
+ MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
+ if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
+ return (EBADMSG);
+
+ return (0);
+}
+
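+/*
+ * Post fresh buffer addresses to the RX ring.  The ring is enabled
+ * lazily, once enough descriptors have been prepared.
+ */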
+static void
+enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
+{
+ struct enetc_softc *sc = data;
+ struct enetc_rx_queue *queue;
+ union enetc_rx_bd *desc;
+ qidx_t pidx, queue_len;
+ uint64_t *paddrs;
+ int i, count;
+
+ queue = &sc->rx_queues[iru->iru_qsidx];
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
+ queue_len = sc->rx_queue_size;
+
+ for (i = 0; i < count; i++) {
+ desc = &queue->ring[pidx];
+ bzero(desc, sizeof(*desc));
+
+ desc->w.addr = paddrs[i];
+ if (++pidx == queue_len)
+ pidx = 0;
+ }
+ /*
+ * After the queue is enabled the NIC will prefetch the first
+ * 8 descriptors. It probably assumes that the RX ring is fully
+ * refilled when cidx == pidx.
+ * Enable the queue only if we have enough descriptors ready on the ring.
+ */
+ if (!queue->enabled && pidx >= 8) {
+ ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
+ queue->enabled = true;
+ }
+}
+
+static void
+enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
+{
+ struct enetc_softc *sc = data;
+
+ ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
+}
+
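+/*
+ * Read RX/TX error counters from hardware; fall back to the generic
+ * software counters for everything else.
+ */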
+static uint64_t
+enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct enetc_softc *sc;
+ struct ifnet *ifp;
+
+ sc = iflib_get_softc(ctx);
+ ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IERRORS:
+ return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
+ case IFCOUNTER_OERRORS:
+ return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
+static int
+enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct enetc_softc *sc = iflib_get_softc(ctx);
+ uint32_t max_frame_size;
+
+ max_frame_size = mtu +
+ ETHER_HDR_LEN +
+ ETHER_CRC_LEN +
+ sizeof(struct ether_vlan_header);
+
+ if (max_frame_size > ENETC_MAX_FRAME_LEN)
+ return (EINVAL);
+
+ sc->shared->isc_max_frame_size = max_frame_size;
+
+ return (0);
+}
+
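+/* Update unicast and multicast promiscuous mode from interface flags. */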
+static int
+enetc_promisc_set(if_ctx_t ctx, int flags)
+{
+ struct enetc_softc *sc;
+ uint32_t reg = 0;
+
+ sc = iflib_get_softc(ctx);
+
+ if (flags & IFF_PROMISC)
+ reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+ else if (flags & IFF_ALLMULTI)
+ reg = ENETC_PSIPMR_SET_MP(0);
+
+ ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);
+
+ return (0);
+}
+
+static void
+enetc_timer(if_ctx_t ctx, uint16_t qid)
+{
+ /*
+ * Poll PHY status. Do this only for qid 0 to save
+ * some cycles.
+ */
+ if (qid == 0)
+ iflib_admin_intr_deferred(ctx);
+}
+
+static void
+enetc_update_admin_status(if_ctx_t ctx)
+{
+ struct enetc_softc *sc;
+ struct mii_data *miid;
+
+ sc = iflib_get_softc(ctx);
+
+ if (!sc->fixed_link) {
+ miid = device_get_softc(sc->miibus);
+ mii_tick(miid);
+ }
+}
+
+static int
+enetc_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(device_get_softc(dev));
+ return (enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
+ phy, reg));
+}
+
+static int
+enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(device_get_softc(dev));
+ return (enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
+ phy, reg, data));
+}
+
+static void
+enetc_miibus_linkchg(device_t dev)
+{
+
+ enetc_miibus_statchg(dev);
+}
+
+static void
+enetc_miibus_statchg(device_t dev)
+{
+ struct enetc_softc *sc;
+ struct mii_data *miid;
+ int link_state, baudrate;
+
+ sc = iflib_get_softc(device_get_softc(dev));
+ miid = device_get_softc(sc->miibus);
+
+ baudrate = ifmedia_baudrate(miid->mii_media_active);
+ if (miid->mii_media_status & IFM_AVALID) {
+ if (miid->mii_media_status & IFM_ACTIVE)
+ link_state = LINK_STATE_UP;
+ else
+ link_state = LINK_STATE_DOWN;
+ } else {
+ link_state = LINK_STATE_UNKNOWN;
+ }
+
+ iflib_link_state_change(sc->ctx, link_state, baudrate);
+}
+
+static int
+enetc_media_change(if_t ifp)
+{
+ struct enetc_softc *sc;
+ struct mii_data *miid;
+
+ sc = iflib_get_softc(ifp->if_softc);
+ miid = device_get_softc(sc->miibus);
+
+ mii_mediachg(miid);
+ return (0);
+}
+
+static void
+enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
+{
+ struct enetc_softc *sc;
+ struct mii_data *miid;
+
+ sc = iflib_get_softc(ifp->if_softc);
+ miid = device_get_softc(sc->miibus);
+
+ mii_pollstat(miid);
+
+ ifmr->ifm_active = miid->mii_media_active;
+ ifmr->ifm_status = miid->mii_media_status;
+}
+
+static int
+enetc_fixed_media_change(if_t ifp)
+{
+
+ if_printf(ifp, "Can't change media in fixed-link mode.\n");
+ return (0);
+}
+
+static void
+enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
+{
+ struct enetc_softc *sc;
+
+ sc = iflib_get_softc(ifp->if_softc);
+
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
+}
