D32321.diff (45 KB)
diff --git a/sys/arm64/arm64/cmn600.c b/sys/arm64/arm64/cmn600.c
new file mode 100644
--- /dev/null
+++ b/sys/arm64/arm64/cmn600.c
@@ -0,0 +1,834 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Arm CoreLink CMN-600 Coherent Mesh Network Driver */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <machine/cmn600_reg.h>
+
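+/*
+ * Bus-space accessors for the device's register window (sc_res[0]).
+ * FLD() extracts a register field named by its _MASK/_SHIFT macro pair.
+ */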
+#define RD4(sc, r) bus_read_4((sc)->sc_res[0], (r))
+#define RD8(sc, r) bus_read_8((sc)->sc_res[0], (r))
+#define WR4(sc, r, v) bus_write_4((sc)->sc_res[0], (r), (v))
+#define WR8(sc, r, v) bus_write_8((sc)->sc_res[0], (r), (v))
+#define FLD(v, n) (((v) & n ## _MASK) >> n ## _SHIFT)
+
+static char *cmn600_ids[] = {
+ "ARMHC600",
+ NULL
+};
+
+static struct resource_spec cmn600_res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+struct cmn600_node;
+
+typedef uint64_t (*nd_read_8_t)(struct cmn600_node *, uint32_t);
+typedef uint32_t (*nd_read_4_t)(struct cmn600_node *, uint32_t);
+typedef void (*nd_write_8_t)(struct cmn600_node *, uint32_t, uint64_t);
+typedef void (*nd_write_4_t)(struct cmn600_node *, uint32_t, uint32_t);
+
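+/*
+ * One node of the CMN-600 configuration tree: its register block offset,
+ * type, IDs, mesh coordinates and child pointers, plus accessor callbacks.
+ */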
+struct cmn600_node {
+ struct cmn600_softc *sc;
+ off_t nd_offset;
+ int nd_type;
+ uint16_t nd_id;
+ uint16_t nd_logical_id;
+ uint8_t nd_x, nd_y, nd_port, nd_sub;
+ uint16_t nd_child_count;
+ uint32_t nd_paired;
+ struct cmn600_node *nd_parent;
+ nd_read_8_t nd_read8;
+ nd_read_4_t nd_read4;
+ nd_write_8_t nd_write8;
+ nd_write_4_t nd_write4;
+ struct cmn600_node **nd_children;
+};
+
+struct cmn600_softc {
+ device_t sc_dev;
+ int sc_unit;
+ int sc_domain;
+ int sc_longid;
+ int sc_mesh_x;
+ int sc_mesh_y;
+ struct resource *sc_res[3];
+ void *sc_ih;
+ int sc_r2;
+ int sc_rev;
+ struct cmn600_node *sc_rootnode;
+ struct cmn600_node *sc_dtcnode;
+ struct cmn600_node *sc_dvmnode;
+ struct cmn600_node *sc_xpnodes[64];
+ int (*sc_pmu_ih)(struct trapframe *tf, int unit, int i);
+};
+
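+/*
+ * Per-unit registry exported to the hwpmc(4) CMN-600 class through
+ * cmn600_pmc_nunits()/cmn600_pmc_getunit().
+ */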
+static struct cmn600_pmc cmn600_pmcs[CMN600_UNIT_MAX];
+static int cmn600_npmcs = 0;
+
+static int cmn600_acpi_detach(device_t dev);
+static int cmn600_intr(void *arg);
+
+static void
+cmn600_pmc_register(int unit, void *arg, int domain)
+{
+
+ if (unit >= CMN600_UNIT_MAX) {
+ /* TODO */
+ return;
+ }
+
+ cmn600_pmcs[unit].arg = arg;
+ cmn600_pmcs[unit].domain = domain;
+ cmn600_npmcs++;
+}
+
+static void
+cmn600_pmc_unregister(int unit)
+{
+
+ cmn600_pmcs[unit].arg = NULL;
+ cmn600_npmcs--;
+}
+
+int
+cmn600_pmc_nunits(void)
+{
+
+ return (cmn600_npmcs);
+}
+
+int
+cmn600_pmc_getunit(int unit, void **arg, int *domain)
+{
+
+ if (unit >= cmn600_npmcs)
+ return (EINVAL);
+ if (cmn600_pmcs[unit].arg == NULL)
+ return (EINVAL);
+ *arg = cmn600_pmcs[unit].arg;
+ *domain = cmn600_pmcs[unit].domain;
+ return (0);
+}
+
+int
+pmu_cmn600_rev(void *arg)
+{
+ struct cmn600_softc *sc;
+
+ sc = (struct cmn600_softc *)arg;
+ switch (sc->sc_rev) {
+ case 0x0:
+ return (0x100);
+ case 0x1:
+ return (0x101);
+ case 0x2:
+ return (0x102);
+ case 0x3:
+ return (0x103);
+ case 0x4:
+ return (0x200);
+ case 0x5:
+ return (0x300);
+ case 0x6:
+ return (0x301);
+ }
+ return (0x302); /* Unknown revision. */
+}
+
+static uint64_t
+cmn600_node_read8(struct cmn600_node *nd, uint32_t reg)
+{
+
+ return (RD8(nd->sc, nd->nd_offset + reg));
+}
+
+static void
+cmn600_node_write8(struct cmn600_node *nd, uint32_t reg, uint64_t val)
+{
+
+ WR8(nd->sc, nd->nd_offset + reg, val);
+}
+
+static uint32_t
+cmn600_node_read4(struct cmn600_node *nd, uint32_t reg)
+{
+
+ return (RD4(nd->sc, nd->nd_offset + reg));
+}
+
+static void
+cmn600_node_write4(struct cmn600_node *nd, uint32_t reg, uint32_t val)
+{
+
+ WR4(nd->sc, nd->nd_offset + reg, val);
+}
+
+static const char *
+cmn600_node_type_str(int type)
+{
+
+#define NAME_OF(t, n) case NODE_TYPE_ ## t: return n
+ switch (type) {
+ NAME_OF(INVALID, "<invalid node>");
+ NAME_OF(DVM, "DVM");
+ NAME_OF(CFG, "CFG");
+ NAME_OF(DTC, "DTC");
+ NAME_OF(HN_I, "HN-I");
+ NAME_OF(HN_F, "HN-F");
+ NAME_OF(XP, "XP");
+ NAME_OF(SBSX, "SBSX");
+ NAME_OF(RN_I, "RN-I");
+ NAME_OF(RN_D, "RN-D");
+ NAME_OF(RN_SAM, "RN-SAM");
+ NAME_OF(CXRA, "CXRA");
+ NAME_OF(CXHA, "CXHA");
+ NAME_OF(CXLA, "CXLA");
+ default:
+ return "<unknown node>";
+ }
+#undef NAME_OF
+}
+
+static const char *
+cmn600_xpport_dev_type_str(uint8_t type)
+{
+
+#define NAME_OF(t, n) case POR_MXP_PX_INFO_DEV_TYPE_ ## t: return n
+ switch (type) {
+ NAME_OF(RN_I, "RN-I");
+ NAME_OF(RN_D, "RN-D");
+ NAME_OF(RN_F_CHIB, "RN-F CHIB");
+ NAME_OF(RN_F_CHIB_ESAM, "RN-F CHIB ESAM");
+ NAME_OF(RN_F_CHIA, "RN-F CHIA");
+ NAME_OF(RN_F_CHIA_ESAM, "RN-F CHIA ESAM");
+ NAME_OF(HN_T, "HN-T");
+ NAME_OF(HN_I, "HN-I");
+ NAME_OF(HN_D, "HN-D");
+ NAME_OF(SN_F, "SN-F");
+ NAME_OF(SBSX, "SBSX");
+ NAME_OF(HN_F, "HN-F");
+ NAME_OF(CXHA, "CXHA");
+ NAME_OF(CXRA, "CXRA");
+ NAME_OF(CXRH, "CXRH");
+ default:
+ return "<unknown>";
+ }
+#undef NAME_OF
+}
+
+static void
+cmn600_dump_node(struct cmn600_node *node, int lvl)
+{
+ int i;
+
+ for (i = 0; i < lvl; i++) printf(" ");
+ printf("%s [%dx%d:%d:%d] id: 0x%x @0x%lx Logical Id: 0x%x",
+ cmn600_node_type_str(node->nd_type), node->nd_x, node->nd_y,
+ node->nd_port, node->nd_sub, node->nd_id, node->nd_offset,
+ node->nd_logical_id);
+ if (node->nd_child_count > 0)
+ printf(", Children: %d", node->nd_child_count);
+ printf("\n");
+ if (node->nd_type == NODE_TYPE_XP)
+ printf("\tPort 0: %s\n\tPort 1: %s\n",
+ cmn600_xpport_dev_type_str(node->nd_read4(node,
+ POR_MXP_P0_INFO) & 0x1f),
+ cmn600_xpport_dev_type_str(node->nd_read4(node,
+ POR_MXP_P1_INFO) & 0x1f));
+}
+
+static void
+cmn600_dump_node_recursive(struct cmn600_node *node, int lvl)
+{
+ int i;
+
+ cmn600_dump_node(node, lvl);
+ for (i = 0; i < node->nd_child_count; i++) {
+ cmn600_dump_node_recursive(node->nd_children[i], lvl + 1);
+ }
+}
+
+static void
+cmn600_dump_nodes_tree(struct cmn600_softc *sc)
+{
+
+ device_printf(sc->sc_dev, " nodes:\n");
+ cmn600_dump_node_recursive(sc->sc_rootnode, 0);
+}
+
+static int
+cmn600_sysctl_dump_nodes(SYSCTL_HANDLER_ARGS)
+{
+ struct cmn600_softc *sc;
+ uint32_t val;
+ int err;
+
+ sc = (struct cmn600_softc *)arg1;
+ val = 0;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+
+ if (err)
+ return (err);
+
+ if (val != 0)
+ cmn600_dump_nodes_tree(sc);
+
+ return (0);
+}
+
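+/*
+ * Recursively discover the node tree starting from the root (CFG) node,
+ * recording the mesh dimensions and caching the DTC, DVM and XP nodes.
+ */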
+static struct cmn600_node *
+cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
+ struct cmn600_node *parent, int lvl)
+{
+ struct cmn600_node *node;
+ off_t child_offset;
+ uint64_t val;
+ int i;
+
+ node = malloc(sizeof(struct cmn600_node), M_DEVBUF, M_WAITOK);
+ if (node == NULL)
+ return (NULL);
+
+ node->sc = sc;
+ node->nd_offset = node_offset;
+ node->nd_parent = parent;
+ node->nd_read4 = cmn600_node_read4;
+ node->nd_read8 = cmn600_node_read8;
+ node->nd_write4 = cmn600_node_write4;
+ node->nd_write8 = cmn600_node_write8;
+
+ val = node->nd_read8(node, POR_CFGM_NODE_INFO);
+ node->nd_type = FLD(val, POR_CFGM_NODE_INFO_NODE_TYPE);
+ node->nd_id = FLD(val, POR_CFGM_NODE_INFO_NODE_ID);
+ node->nd_logical_id = FLD(val, POR_CFGM_NODE_INFO_LOGICAL_ID);
+
+ val = node->nd_read8(node, POR_CFGM_CHILD_INFO);
+ node->nd_child_count = FLD(val, POR_CFGM_CHILD_INFO_CHILD_COUNT);
+ child_offset = FLD(val, POR_CFGM_CHILD_INFO_CHILD_PTR_OFFSET);
+
+ if (parent == NULL) {
+ /* Find the XP node with ID 8. It has to be the last one in a row. */
+ for (i = 0; i < node->nd_child_count; i++) {
+ val = node->nd_read8(node, child_offset + (i * 8));
+ val &= POR_CFGM_CHILD_POINTER_BASE_MASK;
+ val = RD8(sc, val + POR_CFGM_NODE_INFO);
+
+ if (FLD(val, POR_CFGM_NODE_INFO_NODE_ID) != 8)
+ continue;
+
+ sc->sc_mesh_x = FLD(val, POR_CFGM_NODE_INFO_LOGICAL_ID);
+ sc->sc_mesh_y = node->nd_child_count / sc->sc_mesh_x;
+ if (bootverbose)
+ printf("Mesh width X/Y %d/%d\n", sc->sc_mesh_x,
+ sc->sc_mesh_y);
+
+ if ((sc->sc_mesh_x > 4) || (sc->sc_mesh_y > 4))
+ sc->sc_longid = 1;
+ break;
+ }
+
+ val = node->nd_read8(node, POR_INFO_GLOBAL);
+ sc->sc_r2 = (val & POR_INFO_GLOBAL_R2_ENABLE) ? 1 : 0;
+ val = node->nd_read4(node, POR_CFGM_PERIPH_ID_2_PERIPH_ID_3);
+ sc->sc_rev = FLD(val, POR_CFGM_PERIPH_ID_2_REV);
+ if (bootverbose)
+ printf(" Rev: %d, R2_ENABLE = %s\n", sc->sc_rev,
+ sc->sc_r2 ? "true" : "false");
+ }
+ node->nd_sub = FLD(node->nd_id, NODE_ID_SUB);
+ node->nd_port = FLD(node->nd_id, NODE_ID_PORT);
+ node->nd_paired = 0;
+ if (sc->sc_longid == 1) {
+ node->nd_x = FLD(node->nd_id, NODE_ID_X3B);
+ node->nd_y = FLD(node->nd_id, NODE_ID_Y3B);
+ } else {
+ node->nd_x = FLD(node->nd_id, NODE_ID_X2B);
+ node->nd_y = FLD(node->nd_id, NODE_ID_Y2B);
+ }
+
+ if (bootverbose) {
+ cmn600_dump_node(node, lvl);
+ }
+
+ node->nd_children = (struct cmn600_node **)mallocarray(
+ node->nd_child_count, sizeof(struct cmn600_node *), M_DEVBUF,
+ M_WAITOK);
+ if (node->nd_children == NULL)
+ goto FAIL;
+ for (i = 0; i < node->nd_child_count; i++) {
+ val = node->nd_read8(node, child_offset + (i * 8));
+ node->nd_children[i] = cmn600_create_node(sc, val &
+ POR_CFGM_CHILD_POINTER_BASE_MASK, node, lvl + 1);
+ }
+ switch (node->nd_type) {
+ case NODE_TYPE_DTC:
+ sc->sc_dtcnode = node;
+ break;
+ case NODE_TYPE_DVM:
+ sc->sc_dvmnode = node;
+ break;
+ case NODE_TYPE_XP:
+ sc->sc_xpnodes[node->nd_id >> NODE_ID_X2B_SHIFT] = node;
+ break;
+ default:
+ break;
+ }
+ return (node);
+FAIL:
+ free(node, M_DEVBUF);
+ return (NULL);
+}
+
+static void
+cmn600_destroy_node(struct cmn600_node *node)
+{
+ int i;
+
+ for (i = 0; i < node->nd_child_count; i++) {
+ if (node->nd_children[i] == NULL)
+ continue;
+ cmn600_destroy_node(node->nd_children[i]);
+ }
+ free(node->nd_children, M_DEVBUF);
+ free(node, M_DEVBUF);
+}
+
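+/* Resolve a (node_id, node_type) pair to a previously discovered node. */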
+static int
+cmn600_find_node(struct cmn600_softc *sc, int node_id, int type,
+ struct cmn600_node **node)
+{
+ struct cmn600_node *xp, *child;
+ uint8_t xp_xy;
+ int i;
+
+ switch (type) {
+ case NODE_TYPE_INVALID:
+ return (ENXIO);
+ case NODE_TYPE_CFG:
+ *node = sc->sc_rootnode;
+ return (0);
+ case NODE_TYPE_DTC:
+ *node = sc->sc_dtcnode;
+ return (0);
+ case NODE_TYPE_DVM:
+ *node = sc->sc_dvmnode;
+ return (0);
+ default:
+ break;
+ }
+
+ xp_xy = node_id >> NODE_ID_X2B_SHIFT;
+ if (xp_xy >= 64)
+ return (ENXIO);
+ if (sc->sc_xpnodes[xp_xy] == NULL)
+ return (ENOENT);
+
+ switch (type) {
+ case NODE_TYPE_XP:
+ *node = sc->sc_xpnodes[xp_xy];
+ return (0);
+ default:
+ xp = sc->sc_xpnodes[xp_xy];
+ for (i = 0; i < xp->nd_child_count; i++) {
+ child = xp->nd_children[i];
+ if (child->nd_id == node_id && child->nd_type == type) {
+ *node = child;
+ return (0);
+ }
+ }
+ }
+ return (ENOENT);
+}
+
+int
+pmu_cmn600_alloc_localpmc(void *arg, int nodeid, int node_type, int *counter)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ uint32_t new, old;
+ int i, ret;
+
+ sc = (struct cmn600_softc *)arg;
+ switch (node_type) {
+ case NODE_TYPE_CXLA:
+ break;
+ default:
+ node_type = NODE_TYPE_XP;
+ /* The parent XP node always has zero port and device bits. */
+ nodeid &= ~0x07;
+ }
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ for (i = 0; i < 4; i++) {
+ new = old = node->nd_paired;
+ if (old == 0xf)
+ return (EBUSY);
+ if ((old & (1 << i)) != 0)
+ continue;
+ new |= 1 << i;
+ if (atomic_cmpset_32(&node->nd_paired, old, new) != 0)
+ break;
+ }
+ *counter = i;
+ return (0);
+}
+
+int
+pmu_cmn600_free_localpmc(void *arg, int nodeid, int node_type, int counter)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ uint32_t new, old;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ switch (node_type) {
+ case NODE_TYPE_CXLA:
+ break;
+ default:
+ node_type = NODE_TYPE_XP;
+ }
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+
+ do {
+ new = old = node->nd_paired;
+ new &= ~(1 << counter);
+ } while (atomic_cmpset_32(&node->nd_paired, old, new) == 0);
+ return (0);
+}
+
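+/*
+ * Register accessors used by the hwpmc(4) CMN-600 class driver; reads
+ * return UINT32_MAX/UINT64_MAX when the requested node cannot be found.
+ */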
+uint32_t
+pmu_cmn600_rd4(void *arg, int nodeid, int node_type, off_t reg)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (UINT32_MAX);
+ return (cmn600_node_read4(node, reg));
+}
+
+int
+pmu_cmn600_wr4(void *arg, int nodeid, int node_type, off_t reg, uint32_t val)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ cmn600_node_write4(node, reg, val);
+ return (0);
+}
+
+uint64_t
+pmu_cmn600_rd8(void *arg, int nodeid, int node_type, off_t reg)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (UINT64_MAX);
+ return (cmn600_node_read8(node, reg));
+}
+
+int
+pmu_cmn600_wr8(void *arg, int nodeid, int node_type, off_t reg, uint64_t val)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ cmn600_node_write8(node, reg, val);
+ return (0);
+}
+
+int
+pmu_cmn600_set8(void *arg, int nodeid, int node_type, off_t reg, uint64_t val)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ cmn600_node_write8(node, reg, cmn600_node_read8(node, reg) | val);
+ return (0);
+}
+
+int
+pmu_cmn600_clr8(void *arg, int nodeid, int node_type, off_t reg, uint64_t val)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ cmn600_node_write8(node, reg, cmn600_node_read8(node, reg) & ~val);
+ return (0);
+}
+
+int
+pmu_cmn600_md8(void *arg, int nodeid, int node_type, off_t reg, uint64_t mask,
+ uint64_t val)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ int ret;
+
+ sc = (struct cmn600_softc *)arg;
+ ret = cmn600_find_node(sc, nodeid, node_type, &node);
+ if (ret != 0)
+ return (ret);
+ cmn600_node_write8(node, reg, (cmn600_node_read8(node, reg) & ~mask) |
+ val);
+ return (0);
+}
+
+static int
+cmn600_acpi_probe(device_t dev)
+{
+ int err;
+
+ err = ACPI_ID_PROBE(device_get_parent(dev), dev, cmn600_ids, NULL);
+ if (err <= 0)
+ device_set_desc(dev, "Arm CoreLink CMN-600 Coherent Mesh Network");
+
+ return (err);
+}
+
+static int
+cmn600_acpi_attach(device_t dev)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *child;
+ struct cmn600_softc *sc;
+ int cpu, domain, i, u;
+ const char *dname;
+ rman_res_t count, periph_base, rootnode_base;
+ struct cmn600_node *node;
+
+ dname = device_get_name(dev);
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ u = device_get_unit(dev);
+ sc->sc_unit = u;
+ domain = 0;
+
+ if ((resource_int_value(dname, u, "domain", &domain) == 0 ||
+ bus_get_domain(dev, &domain) == 0) && domain < MAXMEMDOM) {
+ sc->sc_domain = domain;
+ }
+ if (domain == -1) /* NUMA not supported. Use single domain. */
+ domain = 0;
+ sc->sc_domain = domain;
+ device_printf(dev, "domain=%d\n", sc->sc_domain);
+
+ cpu = CPU_FFS(&cpuset_domain[domain]) - 1;
+
+ i = bus_alloc_resources(dev, cmn600_res_spec, sc->sc_res);
+ if (i != 0) {
+ device_printf(dev, "cannot allocate resources for device (%d)\n",
+ i);
+ return (i);
+ }
+
+ bus_get_resource(dev, cmn600_res_spec[0].type, cmn600_res_spec[0].rid,
+ &periph_base, &count);
+ bus_get_resource(dev, cmn600_res_spec[1].type, cmn600_res_spec[1].rid,
+ &rootnode_base, &count);
+ rootnode_base -= periph_base;
+ if (bootverbose)
+ printf("ROOTNODE at %lx x %lx\n", rootnode_base, count);
+
+ sc->sc_rootnode = cmn600_create_node(sc, rootnode_base, NULL, 0);
+ ctx = device_get_sysctl_ctx(sc->sc_dev);
+
+ child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dump_nodes", CTLTYPE_INT |
+ CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, cmn600_sysctl_dump_nodes,
+ "U", "Dump CMN-600 nodes tree");
+
+ node = sc->sc_dtcnode;
+ if (node == NULL)
+ return (ENXIO);
+
+ cmn600_pmc_register(sc->sc_unit, (void *)sc, domain);
+
+ node->nd_write8(node, POR_DT_PMCR, 0);
+ node->nd_write8(node, POR_DT_PMOVSR_CLR, POR_DT_PMOVSR_ALL);
+ node->nd_write8(node, POR_DT_PMCR, POR_DT_PMCR_OVFL_INTR_EN);
+ node->nd_write8(node, POR_DT_DTC_CTL, POR_DT_DTC_CTL_DT_EN);
+
+ if (bus_setup_intr(dev, sc->sc_res[2], INTR_TYPE_MISC | INTR_MPSAFE,
+ cmn600_intr, NULL, sc, &sc->sc_ih)) {
+ bus_release_resources(dev, cmn600_res_spec, sc->sc_res);
+ device_printf(dev, "cannot setup interrupt handler\n");
+ cmn600_acpi_detach(dev);
+ return (ENXIO);
+ }
+ if (bus_bind_intr(dev, sc->sc_res[2], cpu)) {
+ bus_teardown_intr(dev, sc->sc_res[2], sc->sc_ih);
+ bus_release_resources(dev, cmn600_res_spec, sc->sc_res);
+ device_printf(dev, "cannot setup interrupt handler\n");
+ cmn600_acpi_detach(dev);
+ return (ENXIO);
+ }
+ return (0);
+}
+
+static int
+cmn600_acpi_detach(device_t dev)
+{
+ struct cmn600_softc *sc;
+ struct cmn600_node *node;
+
+ sc = device_get_softc(dev);
+ if (sc->sc_res[2] != NULL) {
+ bus_teardown_intr(dev, sc->sc_res[2], sc->sc_ih);
+ }
+
+ node = sc->sc_dtcnode;
+ node->nd_write4(node, POR_DT_DTC_CTL,
+ node->nd_read4(node, POR_DT_DTC_CTL) & ~POR_DT_DTC_CTL_DT_EN);
+ node->nd_write8(node, POR_DT_PMOVSR_CLR, POR_DT_PMOVSR_ALL);
+
+ cmn600_pmc_unregister(sc->sc_unit);
+ cmn600_destroy_node(sc->sc_rootnode);
+ bus_release_resources(dev, cmn600_res_spec, sc->sc_res);
+
+ return (0);
+}
+
+int
+cmn600_pmu_intr_cb(void *arg, int (*handler)(struct trapframe *tf, int unit,
+ int i))
+{
+ struct cmn600_softc *sc;
+
+ sc = (struct cmn600_softc *) arg;
+ sc->sc_pmu_ih = handler;
+ return (0);
+}
+
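+/*
+ * DTC overflow interrupt filter: acknowledge the overflow status bits and
+ * forward each overflowed event counter to the registered PMU callback.
+ */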
+static int
+cmn600_intr(void *arg)
+{
+ struct cmn600_node *node;
+ struct cmn600_softc *sc;
+ struct trapframe *tf;
+ uint64_t mask, ready, val;
+ int i;
+
+ tf = PCPU_GET(curthread)->td_intr_frame;
+ sc = (struct cmn600_softc *) arg;
+ node = sc->sc_dtcnode;
+ val = node->nd_read8(node, POR_DT_PMOVSR);
+ if (val & POR_DT_PMOVSR_CYCLE_COUNTER)
+ node->nd_write8(node, POR_DT_PMOVSR_CLR,
+ POR_DT_PMOVSR_CYCLE_COUNTER);
+ if (val & POR_DT_PMOVSR_EVENT_COUNTERS) {
+ for (ready = 0, i = 0; i < 8; i++) {
+ mask = 1 << i;
+ if ((val & mask) == 0)
+ continue;
+ if (sc->sc_pmu_ih != NULL)
+ sc->sc_pmu_ih(tf, sc->sc_unit, i);
+ ready |= mask;
+ }
+ node->nd_write8(node, POR_DT_PMOVSR_CLR, ready);
+ }
+
+ return (FILTER_HANDLED);
+}
+
+static device_method_t cmn600_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, cmn600_acpi_probe),
+ DEVMETHOD(device_attach, cmn600_acpi_attach),
+ DEVMETHOD(device_detach, cmn600_acpi_detach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+static driver_t cmn600_acpi_driver = {
+ "cmn600",
+ cmn600_acpi_methods,
+ sizeof(struct cmn600_softc),
+};
+static devclass_t cmn600_acpi_devclass;
+
+DRIVER_MODULE(cmn600, acpi, cmn600_acpi_driver, cmn600_acpi_devclass, 0, 0);
+MODULE_VERSION(cmn600, 1);
diff --git a/sys/arm64/include/cmn600_reg.h b/sys/arm64/include/cmn600_reg.h
--- a/sys/arm64/include/cmn600_reg.h
+++ b/sys/arm64/include/cmn600_reg.h
@@ -75,6 +75,19 @@
#define POR_CFGM_NODE_INFO_NODE_TYPE_MASK 0xffff
#define POR_CFGM_NODE_INFO_NODE_TYPE_SHIFT 0
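+/*
+ * Node ID layout: device/sub number in bits [1:0], port select in bit 2,
+ * then the X and Y mesh coordinates (2 bits each on meshes up to 4x4,
+ * 3 bits each on larger meshes).
+ */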
+#define NODE_ID_SUB_MASK 0x3
+#define NODE_ID_SUB_SHIFT 0
+#define NODE_ID_PORT_MASK 0x4
+#define NODE_ID_PORT_SHIFT 2
+#define NODE_ID_X2B_MASK (0x3 << 3)
+#define NODE_ID_X2B_SHIFT 3
+#define NODE_ID_Y2B_MASK (0x3 << 5)
+#define NODE_ID_Y2B_SHIFT 5
+#define NODE_ID_X3B_MASK (0x7 << 3)
+#define NODE_ID_X3B_SHIFT 3
+#define NODE_ID_Y3B_MASK (0x7 << 6)
+#define NODE_ID_Y3B_SHIFT 6
+
#define NODE_TYPE_INVALID 0x000
#define NODE_TYPE_DVM 0x001
#define NODE_TYPE_CFG 0x002
@@ -265,6 +278,8 @@
#define POR_DT_PMEVCNTEF 0x2020 /* rw */
#define POR_DT_PMEVCNTGH 0x2030 /* rw */
#define POR_DT_PMEVCNT(x) (0x2000 + ((x) * 0x10))
+#define POR_DT_PMEVCNT_EVENCNT_SHIFT 0
+#define POR_DT_PMEVCNT_ODDCNT_SHIFT 32
#define POR_DT_PMCCNTR 0x2040 /* rw */
#define POR_DT_PMEVCNTSRAB 0x2050 /* rw */
#define POR_DT_PMEVCNTSRCD 0x2060 /* rw */
@@ -512,6 +527,7 @@
#define POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT 32
#define POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH 8
#define POR_DTM_PMEVCNT 0x2220 /* rw */
+#define POR_DTM_PMEVCNT_CNTR_WIDTH 16
#define POR_DTM_PMEVCNTSR 0x2240 /* rw */
/* RN-D registers */
diff --git a/sys/dev/hwpmc/hwpmc_cmn600.c b/sys/dev/hwpmc/hwpmc_cmn600.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_cmn600.c
@@ -0,0 +1,830 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2003-2008 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * Copyright (c) 2021 ARM Ltd
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Arm CoreLink CMN-600 Coherent Mesh Network PMU Driver */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/cmn600_reg.h>
+
+struct cmn600_descr {
+ struct pmc_descr pd_descr; /* "base class" */
+ void *pd_rw_arg; /* Argument to use with read/write */
+ struct pmc *pd_pmc;
+ struct pmc_hw *pd_phw;
+ uint32_t pd_nodeid;
+ int32_t pd_node_type;
+ int pd_local_counter;
+};
+
+static struct cmn600_descr **cmn600_pmcdesc;
+
+static struct cmn600_pmc cmn600_pmcs[CMN600_UNIT_MAX];
+static int cmn600_units = 0;
+
+static inline struct cmn600_descr *
+cmn600desc(int ri)
+{
+
+ return (cmn600_pmcdesc[ri]);
+}
+
+static inline int
+class_ri2unit(int ri)
+{
+
+ return (ri / CMN600_COUNTERS_N);
+}
+
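+/*
+ * A logical counter pairs a 16-bit XP-local counter with a DTC global
+ * counter; EVENCNTR/ODDCNTR pick the even or odd 32-bit half of the 64-bit
+ * global counter register so it can be combined with the XP value below.
+ */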
+#define EVENCNTR(x) (((x) >> POR_DT_PMEVCNT_EVENCNT_SHIFT) << \
+ POR_DTM_PMEVCNT_CNTR_WIDTH)
+#define ODDCNTR(x) (((x) >> POR_DT_PMEVCNT_ODDCNT_SHIFT) << \
+ POR_DTM_PMEVCNT_CNTR_WIDTH)
+
+static uint64_t
+cmn600_pmu_readcntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
+ u_int width)
+{
+ uint64_t dtcval, xpval;
+
+ KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
+ " Max: 3", __LINE__, xpcntr));
+ KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
+ " big. Max: 7", __LINE__, dtccntr));
+
+ dtcval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_DTC,
+ POR_DT_PMEVCNT(dtccntr >> 1));
+ if (width == 4) {
+ dtcval = (dtccntr & 1) ? ODDCNTR(dtcval) : EVENCNTR(dtcval);
+ dtcval &= 0xffffffff0000UL;
+ } else
+ dtcval <<= POR_DTM_PMEVCNT_CNTR_WIDTH;
+
+ xpval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT);
+ xpval >>= xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
+ xpval &= 0xffffUL;
+ return (dtcval | xpval);
+}
+
+static void
+cmn600_pmu_writecntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
+ u_int width, uint64_t val)
+{
+ int shift;
+
+ KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
+ " Max: 3", __LINE__, xpcntr));
+ KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
+ " big. Max: 7", __LINE__, dtccntr));
+
+ if (width == 4) {
+ shift = (dtccntr & 1) ? POR_DT_PMEVCNT_ODDCNT_SHIFT :
+ POR_DT_PMEVCNT_EVENCNT_SHIFT;
+ pmu_cmn600_md8(arg, nodeid, NODE_TYPE_DTC,
+ POR_DT_PMEVCNT(dtccntr >> 1), 0xffffffffUL << shift,
+ ((val >> POR_DTM_PMEVCNT_CNTR_WIDTH) & 0xffffffff) << shift);
+ } else
+ pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_DTC,
+ POR_DT_PMEVCNT(dtccntr & ~0x1), val >>
+ POR_DTM_PMEVCNT_CNTR_WIDTH);
+
+ shift = xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
+ val &= 0xffffUL;
+ pmu_cmn600_md8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT,
+ 0xffffUL << shift, val << shift);
+}
+
+#undef EVENCNTR
+#undef ODDCNTR
+
+/*
+ * read a pmc register
+ */
+static int
+cmn600_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ int counter, local_counter, nodeid;
+ struct cmn600_descr *desc;
+ struct pmc *pm;
+ void *arg;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ counter = ri % CMN600_COUNTERS_N;
+ desc = cmn600desc(ri);
+ pm = desc->pd_phw->phw_pmc;
+ arg = desc->pd_rw_arg;
+ nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
+ local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
+
+ KASSERT(pm != NULL,
+ ("[cmn600,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ *v = cmn600_pmu_readcntr(arg, nodeid, local_counter, counter, 4);
+ PMCDBG3(MDP, REA, 2, "%s id=%d -> %jd", __func__, ri, *v);
+
+ return (0);
+}
+
+/*
+ * Write a pmc register.
+ */
+static int
+cmn600_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ int counter, local_counter, nodeid;
+ struct cmn600_descr *desc;
+ struct pmc *pm;
+ void *arg;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ counter = ri % CMN600_COUNTERS_N;
+ desc = cmn600desc(ri);
+ pm = desc->pd_phw->phw_pmc;
+ arg = desc->pd_rw_arg;
+ nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
+ local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
+
+ KASSERT(pm != NULL,
+ ("[cmn600,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
+ cpu, ri));
+
+ PMCDBG4(MDP, WRI, 1, "%s cpu=%d ri=%d v=%jx", __func__, cpu, ri, v);
+
+ cmn600_pmu_writecntr(arg, nodeid, local_counter, counter, 4, v);
+ return (0);
+}
+
+/*
+ * configure hardware pmc according to the configuration recorded in
+ * pmc 'pm'.
+ */
+static int
+cmn600_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG4(MDP, CFG, 1, "%s cpu=%d ri=%d pm=%p", __func__, cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ phw = cmn600desc(ri)->pd_phw;
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[cmn600,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+ return (0);
+}
+
+/*
+ * Retrieve a configured PMC pointer from hardware state.
+ */
+static int
+cmn600_get_config(int cpu, int ri, struct pmc **ppm)
+{
+
+ *ppm = cmn600desc(ri)->pd_phw->phw_pmc;
+
+ return (0);
+}
+
+#define CASE_DN_VER_EVT(n, id) case PMC_EV_CMN600_PMU_ ## n: { *event = id; \
+ return (0); }
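+/*
+ * Map a hwpmc event code onto the target node type and the event number
+ * programmed into that node's event-select register; DVM node event IDs
+ * differ between parts older and newer than r2 (rev 0x200).
+ */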
+static int
+cmn600_map_ev2event(int ev, int rev, int *node_type, uint8_t *event)
+{
+ if (ev < PMC_EV_CMN600_PMU_dn_rxreq_dvmop ||
+ ev > PMC_EV_CMN600_PMU_rni_rdb_ord)
+ return (EINVAL);
+ if (ev <= PMC_EV_CMN600_PMU_dn_rxreq_trk_full) {
+ *node_type = NODE_TYPE_DVM;
+ if (rev < 0x200) {
+ switch (ev) {
+ CASE_DN_VER_EVT(dn_rxreq_dvmop, 1);
+ CASE_DN_VER_EVT(dn_rxreq_dvmsync, 2);
+ CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 3);
+ CASE_DN_VER_EVT(dn_rxreq_retried, 4);
+ CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 5);
+ }
+ } else {
+ switch (ev) {
+ CASE_DN_VER_EVT(dn_rxreq_tlbi_dvmop, 0x01);
+ CASE_DN_VER_EVT(dn_rxreq_bpi_dvmop, 0x02);
+ CASE_DN_VER_EVT(dn_rxreq_pici_dvmop, 0x03);
+ CASE_DN_VER_EVT(dn_rxreq_vivi_dvmop, 0x04);
+ CASE_DN_VER_EVT(dn_rxreq_dvmsync, 0x05);
+ CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 0x06);
+ CASE_DN_VER_EVT(dn_rxreq_dvmop_other_filtered, 0x07);
+ CASE_DN_VER_EVT(dn_rxreq_retried, 0x08);
+ CASE_DN_VER_EVT(dn_rxreq_snp_sent, 0x09);
+ CASE_DN_VER_EVT(dn_rxreq_snp_stalled, 0x0a);
+ CASE_DN_VER_EVT(dn_rxreq_trk_full, 0x0b);
+ CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 0x0c);
+ }
+ }
+ return (EINVAL);
+ } else if (ev <= PMC_EV_CMN600_PMU_hnf_snp_fwded) {
+ *node_type = NODE_TYPE_HN_F;
+ *event = ev - PMC_EV_CMN600_PMU_hnf_cache_miss;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_hni_pcie_serialization) {
+ *node_type = NODE_TYPE_HN_I;
+ *event = ev - PMC_EV_CMN600_PMU_hni_rrt_rd_occ_cnt_ovfl;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_xp_partial_dat_flit) {
+ *node_type = NODE_TYPE_XP;
+ *event = ev - PMC_EV_CMN600_PMU_xp_txflit_valid;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_sbsx_txrsp_stall) {
+ *node_type = NODE_TYPE_SBSX;
+ *event = ev - PMC_EV_CMN600_PMU_sbsx_rd_req;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_rnd_rdb_ord) {
+ *node_type = NODE_TYPE_RN_D;
+ *event = ev - PMC_EV_CMN600_PMU_rnd_s0_rdata_beats;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_rni_rdb_ord) {
+ *node_type = NODE_TYPE_RN_I;
+ *event = ev - PMC_EV_CMN600_PMU_rni_s0_rdata_beats;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_cxha_snphaz_occ) {
+ *node_type = NODE_TYPE_CXHA;
+ *event = ev - PMC_EV_CMN600_PMU_cxha_rddatbyp;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_cxra_ext_dat_stall) {
+ *node_type = NODE_TYPE_CXRA;
+ *event = ev - PMC_EV_CMN600_PMU_cxra_req_trk_occ;
+ return (0);
+ } else if (ev <= PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp) {
+ *node_type = NODE_TYPE_CXLA;
+ *event = ev - PMC_EV_CMN600_PMU_cxla_rx_tlp_link0;
+ return (0);
+ }
+ return (EINVAL);
+}
+
+/*
+ * Check if a given allocation is feasible.
+ */
+
+static int
+cmn600_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ struct cmn600_descr *desc;
+ const struct pmc_descr *pd;
+ uint64_t caps __unused;
+ int local_counter, node_type;
+ enum pmc_event pe;
+ void *arg;
+ uint8_t e;
+ int err;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ desc = cmn600desc(ri);
+ arg = desc->pd_rw_arg;
+ pd = &desc->pd_descr;
+ if (cmn600_pmcs[class_ri2unit(ri)].domain != pcpu_find(cpu)->pc_domain)
+ return (EINVAL);
+
+ /* check class match */
+ if (pd->pd_class != a->pm_class)
+ return (EINVAL);
+
+ caps = pm->pm_caps;
+
+ PMCDBG3(MDP, ALL, 1, "%s ri=%d caps=0x%jx", __func__, ri, (uintmax_t)caps);
+
+ pe = a->pm_ev;
+ err = cmn600_map_ev2event(pe, pmu_cmn600_rev(arg), &node_type, &e);
+ if (err != 0)
+ return (err);
+ err = pmu_cmn600_alloc_localpmc(arg,
+ a->pm_md.pm_cmn600.pma_cmn600_nodeid, node_type, &local_counter);
+ if (err != 0)
+ return (err);
+
+ pm->pm_md.pm_cmn600.pm_cmn600_config =
+ a->pm_md.pm_cmn600.pma_cmn600_config;
+ pm->pm_md.pm_cmn600.pm_cmn600_occupancy =
+ a->pm_md.pm_cmn600.pma_cmn600_occupancy;
+ desc->pd_nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid =
+ a->pm_md.pm_cmn600.pma_cmn600_nodeid;
+ desc->pd_node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type =
+ node_type;
+ pm->pm_md.pm_cmn600.pm_cmn600_event = e;
+ desc->pd_local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter =
+ local_counter;
+
+ PMCDBG3(MDP, ALL, 2, "%s ri=%d -> event=0x%x", __func__, ri, e);
+
+ return (0);
+}
+
+/* Release machine dependent state associated with a PMC. */
+
+static int
+cmn600_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct cmn600_descr *desc;
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ int err;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ desc = cmn600desc(ri);
+ phw = desc->pd_phw;
+ pm = phw->phw_pmc;
+ err = pmu_cmn600_free_localpmc(desc->pd_rw_arg, desc->pd_nodeid,
+ desc->pd_node_type, desc->pd_local_counter);
+ if (err != 0)
+ return (err);
+
+ KASSERT(pm == NULL, ("[cmn600,%d] PHW pmc %p non-NULL", __LINE__, pm));
+
+ return (0);
+}
+
+static inline uint64_t
+cmn600_encode_source(int node_type, int counter, int port, int sub)
+{
+
+ /* Calculate pmevcnt0_input_sel based on list in Table 3-794. */
+ if (node_type == NODE_TYPE_XP)
+ return (0x4 | counter);
+
+ return (((port + 1) << 4) | (sub << 2) | counter);
+}
+
+/*
+ * start a PMC.
+ */
+
+static int
+cmn600_start_pmc(int cpu, int ri)
+{
+ int counter, local_counter, node_type, shift;
+ uint64_t config, occupancy, source, xp_pmucfg;
+ struct cmn600_descr *desc;
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ uint8_t event, port, sub;
+ uint16_t nodeid;
+ void *arg;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ counter = ri % CMN600_COUNTERS_N;
+ desc = cmn600desc(ri);
+ phw = desc->pd_phw;
+ pm = phw->phw_pmc;
+ arg = desc->pd_rw_arg;
+
+ KASSERT(pm != NULL,
+ ("[cmn600,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
+ cpu, ri));
+
+ PMCDBG3(MDP, STA, 1, "%s cpu=%d ri=%d", __func__, cpu, ri);
+
+ config = pm->pm_md.pm_cmn600.pm_cmn600_config;
+ occupancy = pm->pm_md.pm_cmn600.pm_cmn600_occupancy;
+ node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type;
+ event = pm->pm_md.pm_cmn600.pm_cmn600_event;
+ nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
+ local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
+ port = (nodeid >> 2) & 1;
+ sub = nodeid & 3;
+
+ switch (node_type) {
+ case NODE_TYPE_DVM:
+ case NODE_TYPE_HN_F:
+ case NODE_TYPE_CXHA:
+ case NODE_TYPE_CXRA:
+ pmu_cmn600_md8(arg, nodeid, node_type,
+ CMN600_COMMON_PMU_EVENT_SEL,
+ CMN600_COMMON_PMU_EVENT_SEL_OCC_MASK,
+ occupancy << CMN600_COMMON_PMU_EVENT_SEL_OCC_SHIFT);
+ break;
+ case NODE_TYPE_XP:
+ /* Set PC and Interface.*/
+ event |= config;
+ }
+
+ /*
+ * 5.5.1 Set up PMU counters
+ * 1. Ensure that the NIDEN input is asserted. HW side.
+ */
+ /* 2. Select event of target node for one of four outputs. */
+ pmu_cmn600_md8(arg, nodeid, node_type, CMN600_COMMON_PMU_EVENT_SEL,
+ 0xff << (local_counter * 8),
+ event << (local_counter * 8));
+
+ xp_pmucfg = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP,
+ POR_DTM_PMU_CONFIG);
+ /*
+ * 3. configure XP to connect one of four target node outputs to local
+ * counter.
+ */
+ source = cmn600_encode_source(node_type, local_counter, port, sub);
+ shift = (local_counter * POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH) +
+ POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT;
+ xp_pmucfg &= ~(0xffUL << shift);
+ xp_pmucfg |= source << shift;
+
+ /* 4. Pair with global counters A, B, C, ..., H. */
+ shift = (local_counter * 4) + 16;
+ xp_pmucfg &= ~(0xfUL << shift);
+ xp_pmucfg |= counter << shift;
+ /* Enable pairing.*/
+ xp_pmucfg |= 1 << (local_counter + 4);
+
+ /* 5. Combine local counters 0 with 1, 2 with 3 or all four. */
+ xp_pmucfg &= ~0xeUL;
+
+ /* 6. Enable XP's PMU function. */
+ xp_pmucfg |= POR_DTM_PMU_CONFIG_PMU_EN;
+ pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMU_CONFIG, xp_pmucfg);
+ if (node_type == NODE_TYPE_CXLA)
+ pmu_cmn600_set8(arg, nodeid, NODE_TYPE_CXLA,
+ POR_CXG_RA_CFG_CTL, EN_CXLA_PMUCMD_PROP);
+
+ /* 7. Enable DTM. */
+ pmu_cmn600_set8(arg, nodeid, NODE_TYPE_XP, POR_DTM_CONTROL,
+ POR_DTM_CONTROL_DTM_ENABLE);
+
+ /* 8. Reset grouping of global counters. Use 32 bits. */
+ pmu_cmn600_clr8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
+ POR_DT_PMCR_CNTCFG_MASK);
+
+ /* 9. Enable DTC. */
+ pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_DTC_CTL,
+ POR_DT_DTC_CTL_DT_EN);
+
+ /* 10. Enable Overflow Interrupt. */
+ pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
+ POR_DT_PMCR_OVFL_INTR_EN);
+
+ /* 11. Run PMC. */
+ pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
+ POR_DT_PMCR_PMU_EN);
+
+ PMCDBG2(MDP, STA, 2, "%s xp_pmucfg=0x%jx", __func__, (uintmax_t)xp_pmucfg);
+
+ return (0);
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+cmn600_stop_pmc(int cpu, int ri)
+{
+ struct cmn600_descr *desc;
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ int local_counter;
+ uint64_t val;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ desc = cmn600desc(ri);
+ phw = desc->pd_phw;
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[cmn600,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
+ cpu, ri));
+
+ PMCDBG2(MDP, STO, 1, "%s ri=%d", __func__, ri);
+
+ /* Disable pairing. */
+ local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
+ pmu_cmn600_clr8(desc->pd_rw_arg, pm->pm_md.pm_cmn600.pm_cmn600_nodeid,
+ NODE_TYPE_XP, POR_DTM_PMU_CONFIG, (1 << (local_counter + 4)));
+
+ /* Shutdown XP's DTM function if no paired counters. */
+ val = pmu_cmn600_rd8(desc->pd_rw_arg,
+ pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
+ POR_DTM_PMU_CONFIG);
+ if ((val & 0xf0) == 0)
+ pmu_cmn600_clr8(desc->pd_rw_arg,
+ pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
+ POR_DTM_PMU_CONFIG, POR_DTM_CONTROL_DTM_ENABLE);
+
+ return (0);
+}
+
+/*
+ * describe a PMC
+ */
+static int
+cmn600_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ struct pmc_hw *phw;
+ size_t copied;
+ int error;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
+ ri));
+
+ phw = cmn600desc(ri)->pd_phw;
+
+ if ((error = copystr(cmn600desc(ri)->pd_descr.pd_name,
+ pi->pm_name, PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = cmn600desc(ri)->pd_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * processor dependent initialization.
+ */
+
+static int
+cmn600_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, n, npmc;
+ struct pmc_hw *phw;
+ struct pmc_cpu *pc;
+ int mdep_class;
+
+ mdep_class = PMC_MDEP_CLASS_INDEX_CMN600;
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG1(MDP, INI, 1, "cmn600-init cpu=%d", cpu);
+
+ /*
+ * Set the content of the hardware descriptors to a known
+ * state and initialize pointers in the MI per-cpu descriptor.
+ */
+
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[mdep_class].pcd_ri;
+ npmc = md->pmd_classdep[mdep_class].pcd_num;
+
+ for (n = 0; n < npmc; n++) {
+ phw = cmn600desc(n)->pd_phw;
+ phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
+ PMC_PHW_INDEX_TO_STATE(n);
+ /* Set enabled only if unit present. */
+ if (cmn600_pmcs[class_ri2unit(n)].arg != NULL)
+ phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+ return (0);
+}
+
+/*
+ * processor dependent cleanup prior to the KLD
+ * being unloaded
+ */
+
+static int
+cmn600_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+
+ return (0);
+}
+
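+/*
+ * Per-counter overflow callback registered with the CMN-600 driver's
+ * interrupt filter; translates (unit, counter) into a row index and lets
+ * hwpmc process the sample for sampling-mode PMCs.
+ */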
+static int
+cmn600_pmu_intr(struct trapframe *tf, int unit, int i)
+{
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ int error, cpu, ri;
+
+ ri = i + unit * CMN600_COUNTERS_N;
+ cpu = curcpu;
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[cmn600,%d] CPU %d out of range", __LINE__, cpu));
+ pc = pmc_pcpu[cpu];
+ KASSERT(pc != NULL, ("pc != NULL"));
+
+ phw = cmn600desc(ri)->pd_phw;
+ KASSERT(phw != NULL, ("phw != NULL"));
+ pm = phw->phw_pmc;
+ if (pm == NULL)
+ return (0);
+
+ if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ /* Always CPU0. */
+ pm->pm_pcpu_state[0].pps_overflowcnt += 1;
+ return (0);
+ }
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ return (0);
+
+ error = pmc_process_interrupt(PMC_HR, pm, tf);
+ if (error)
+ cmn600_stop_pmc(cpu, ri);
+
+ /* Reload sampling count */
+ cmn600_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+
+ return (0);
+}
+
+/*
+ * Initialize ourselves.
+ */
+static int
+cmn600_init_pmc_units(void)
+{
+ int i;
+
+ if (cmn600_units > 0) { /* Already initialized. */
+ return (0);
+ }
+
+ cmn600_units = cmn600_pmc_nunits();
+ if (cmn600_units == 0)
+ return (ENOENT);
+
+ for (i = 0; i < cmn600_units; i++) {
+ if (cmn600_pmc_getunit(i, &cmn600_pmcs[i].arg,
+ &cmn600_pmcs[i].domain) != 0)
+ cmn600_pmcs[i].arg = NULL;
+ }
+ return (0);
+}
+
+int
+pmc_cmn600_nclasses(void)
+{
+
+ if (cmn600_pmc_nunits() > 0)
+ return (1);
+ return (0);
+}
+
+int
+pmc_cmn600_initialize(struct pmc_mdep *md)
+{
+ struct pmc_classdep *pcd;
+ int i, npmc, unit;
+
+ cmn600_init_pmc_units();
+ KASSERT(md != NULL, ("[cmn600,%d] md is NULL", __LINE__));
+ KASSERT(cmn600_units < CMN600_UNIT_MAX,
+ ("[cmn600,%d] cmn600_units too big", __LINE__));
+
+ PMCDBG0(MDP,INI,1, "cmn600-initialize");
+
+ npmc = CMN600_COUNTERS_N * cmn600_units;
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];
+
+ pcd->pcd_caps = PMC_CAP_SYSTEM | PMC_CAP_READ |
+ PMC_CAP_WRITE | PMC_CAP_QUALIFIER | PMC_CAP_INTERRUPT |
+ PMC_CAP_DOMWIDE;
+ pcd->pcd_class = PMC_CLASS_CMN600_PMU;
+ pcd->pcd_num = npmc;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 48;
+
+ pcd->pcd_allocate_pmc = cmn600_allocate_pmc;
+ pcd->pcd_config_pmc = cmn600_config_pmc;
+ pcd->pcd_describe = cmn600_describe;
+ pcd->pcd_get_config = cmn600_get_config;
+ pcd->pcd_get_msr = NULL;
+ pcd->pcd_pcpu_fini = cmn600_pcpu_fini;
+ pcd->pcd_pcpu_init = cmn600_pcpu_init;
+ pcd->pcd_read_pmc = cmn600_read_pmc;
+ pcd->pcd_release_pmc = cmn600_release_pmc;
+ pcd->pcd_start_pmc = cmn600_start_pmc;
+ pcd->pcd_stop_pmc = cmn600_stop_pmc;
+ pcd->pcd_write_pmc = cmn600_write_pmc;
+
+ md->pmd_npmc += npmc;
+ cmn600_pmcdesc = malloc(sizeof(struct cmn600_descr *) * npmc *
+ CMN600_PMU_DEFAULT_UNITS_N, M_PMC, M_WAITOK|M_ZERO);
+ for (i = 0; i < npmc; i++) {
+ cmn600_pmcdesc[i] = malloc(sizeof(struct cmn600_descr), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ unit = i / CMN600_COUNTERS_N;
+ KASSERT(unit >= 0, ("unit >= 0"));
+ KASSERT(cmn600_pmcs[unit].arg != NULL, ("arg != NULL"));
+
+ cmn600_pmcdesc[i]->pd_rw_arg = cmn600_pmcs[unit].arg;
+ cmn600_pmcdesc[i]->pd_descr.pd_class =
+ PMC_CLASS_CMN600_PMU;
+ cmn600_pmcdesc[i]->pd_descr.pd_caps = pcd->pcd_caps;
+ cmn600_pmcdesc[i]->pd_phw = (struct pmc_hw *)malloc(
+ sizeof(struct pmc_hw), M_PMC, M_WAITOK|M_ZERO);
+ snprintf(cmn600_pmcdesc[i]->pd_descr.pd_name, 63,
+ "CMN600_%d", i);
+ cmn600_pmu_intr_cb(cmn600_pmcs[unit].arg, cmn600_pmu_intr);
+ }
+
+ return (0);
+}
+
+void
+pmc_cmn600_finalize(struct pmc_mdep *md)
+{
+ struct pmc_classdep *pcd;
+ int i, npmc;
+
+ KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600].pcd_class ==
+ PMC_CLASS_CMN600_PMU, ("[cmn600,%d] pmc class mismatch",
+ __LINE__));
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];
+
+ npmc = pcd->pcd_num;
+ for (i = 0; i < npmc; i++) {
+ free(cmn600_pmcdesc[i]->pd_phw, M_PMC);
+ free(cmn600_pmcdesc[i], M_PMC);
+ }
+ free(cmn600_pmcdesc, M_PMC);
+ cmn600_pmcdesc = NULL;
+}
+
+MODULE_DEPEND(pmc, cmn600, 1, 1, 1);
diff --git a/sys/modules/hwpmc/Makefile b/sys/modules/hwpmc/Makefile
--- a/sys/modules/hwpmc/Makefile
+++ b/sys/modules/hwpmc/Makefile
@@ -12,6 +12,7 @@
.if ${MACHINE_CPUARCH} == "aarch64"
SRCS+= hwpmc_arm64.c hwpmc_arm64_md.c
+SRCS+= cmn600.c hwpmc_cmn600.c
SRCS+= hwpmc_dmc620.c pmu_dmc620.c
.endif
Attached to D32321: Add support of ARM CMN-600 controller, PMU access functions only. Add support of PMU counters of ARM CMN-600 controller.