D42792.diff

diff --git a/sys/compat/linuxkpi/common/include/linux/interrupt.h b/sys/compat/linuxkpi/common/include/linux/interrupt.h
--- a/sys/compat/linuxkpi/common/include/linux/interrupt.h
+++ b/sys/compat/linuxkpi/common/include/linux/interrupt.h
@@ -40,7 +40,7 @@
typedef irqreturn_t (*irq_handler_t)(int, void *);
-#define IRQF_SHARED RF_SHAREABLE
+#define IRQF_SHARED 0x0004 /* Historically */
#define IRQF_NOBALANCING 0
#define IRQ_DISABLE_UNLAZY 0
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
--- a/sys/compat/linuxkpi/common/include/linux/pci.h
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -42,7 +42,6 @@
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/pciio.h>
-#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
@@ -306,10 +305,16 @@
struct pci_msi_desc pci;
};
+struct msix_entry {
+ int entry;
+ int vector;
+};
+
/*
* If we find drivers accessing this from multiple KPIs we may have to
* refcount objects of this structure.
*/
+struct resource;
struct pci_mmio_region {
TAILQ_ENTRY(pci_mmio_region) next;
struct resource *res;
@@ -346,32 +351,30 @@
TAILQ_HEAD(, pci_mmio_region) mmio;
};
-/* We need some meta-struct to keep track of these for devres. */
-struct pci_devres {
- bool enable_io;
- /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
- uint8_t region_mask;
- struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
-};
-struct pcim_iomap_devres {
- void *mmio_table[PCIR_MAX_BAR_0 + 1];
- struct resource *res_table[PCIR_MAX_BAR_0 + 1];
-};
-
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name);
int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
unsigned int flags);
bool pci_device_is_present(struct pci_dev *pdev);
+int linuxkpi_pcim_enable_device(struct pci_dev *pdev);
+void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
+void *linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size);
+void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
+int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
+ const char *name);
+int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
+void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
+void linuxkpi_pci_release_regions(struct pci_dev *pdev);
+int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
+ int nreq);
+
/* Internal helper function(s). */
struct pci_dev *lkpinew_pci_dev(device_t);
-struct pci_devres *lkpi_pci_devres_get_alloc(struct pci_dev *pdev);
void lkpi_pci_devres_release(struct device *, void *);
-struct resource *_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size);
-struct pcim_iomap_devres *lkpi_pcim_iomap_devres_find(struct pci_dev *pdev);
-void lkpi_pcim_iomap_table_release(struct device *, void *);
struct pci_dev *lkpi_pci_get_device(uint16_t, uint16_t, struct pci_dev *);
struct msi_desc *lkpi_pci_msi_desc_alloc(int);
+struct device *lkpi_pci_find_irq_dev(unsigned int irq);
+int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec);
static inline bool
dev_is_pci(struct device *dev)
@@ -395,56 +398,6 @@
return (SYS_RES_MEMORY);
}
-struct resource_list_entry *linux_pci_reserve_bar(struct pci_dev *pdev,
- struct resource_list *rl, int type, int rid);
-
-static inline struct resource_list_entry *
-linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
-{
- struct pci_devinfo *dinfo;
- struct resource_list *rl;
- struct resource_list_entry *rle;
-
- dinfo = device_get_ivars(pdev->dev.bsddev);
- rl = &dinfo->resources;
- rle = resource_list_find(rl, type, rid);
- /* Reserve resources for this BAR if needed. */
- if (rle == NULL && reserve_bar)
- rle = linux_pci_reserve_bar(pdev, rl, type, rid);
- return (rle);
-}
-
-static inline struct resource_list_entry *
-linux_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
-{
- int type;
-
- type = pci_resource_type(pdev, bar);
- if (type < 0)
- return (NULL);
- bar = PCIR_BAR(bar);
- return (linux_pci_get_rle(pdev, type, bar, reserve));
-}
-
-static inline struct device *
-linux_pci_find_irq_dev(unsigned int irq)
-{
- struct pci_dev *pdev;
- struct device *found;
-
- found = NULL;
- spin_lock(&pci_lock);
- list_for_each_entry(pdev, &pci_devices, links) {
- if (irq == pdev->dev.irq ||
- (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
- found = &pdev->dev;
- break;
- }
- }
- spin_unlock(&pci_lock);
- return (found);
-}
-
/*
* All drivers just seem to want to inspect the type not flags.
*/
@@ -578,73 +531,10 @@
return (pdev->bus->self);
}
-static inline struct pci_devres *
-lkpi_pci_devres_find(struct pci_dev *pdev)
-{
-
- if (!pdev->managed)
- return (NULL);
-
- return (lkpi_pci_devres_get_alloc(pdev));
-}
-
-static inline void
-pci_release_region(struct pci_dev *pdev, int bar)
-{
- struct resource_list_entry *rle;
- struct pci_devres *dr;
- struct pci_mmio_region *mmio, *p;
-
- if ((rle = linux_pci_get_bar(pdev, bar, false)) == NULL)
- return;
-
- /*
- * As we implicitly track the requests we also need to clear them on
- * release. Do clear before resource release.
- */
- dr = lkpi_pci_devres_find(pdev);
- if (dr != NULL) {
- KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
- " region_table res %p != rel->res %p\n", __func__, pdev,
- bar, dr->region_table[bar], rle->res));
- dr->region_table[bar] = NULL;
- dr->region_mask &= ~(1 << bar);
- }
-
- TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
- if (rle->res != (void *)rman_get_bushandle(mmio->res))
- continue;
- TAILQ_REMOVE(&pdev->mmio, mmio, next);
- free(mmio, M_DEVBUF);
- }
-
- bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
-}
-
-static inline void
-pci_release_regions(struct pci_dev *pdev)
-{
- int i;
-
- for (i = 0; i <= PCIR_MAX_BAR_0; i++)
- pci_release_region(pdev, i);
-}
-
-static inline int
-pci_request_regions(struct pci_dev *pdev, const char *res_name)
-{
- int error;
- int i;
-
- for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
- error = pci_request_region(pdev, i, res_name);
- if (error && error != -ENODEV) {
- pci_release_regions(pdev);
- return (error);
- }
- }
- return (0);
-}
+#define pci_release_region(pdev, bar) linuxkpi_pci_release_region(pdev, bar)
+#define pci_release_regions(pdev) linuxkpi_pci_release_regions(pdev)
+#define pci_request_regions(pdev, res_name) \
+ linuxkpi_pci_request_regions(pdev, res_name)
static inline void
lkpi_pci_disable_msix(struct pci_dev *pdev)
@@ -655,7 +545,7 @@
/*
* The MSIX IRQ numbers associated with this PCI device are no
* longer valid and might be re-assigned. Make sure
- * linux_pci_find_irq_dev() does no longer see them by
+ * lkpi_pci_find_irq_dev() does no longer see them by
* resetting their references to zero:
*/
pdev->dev.irq_start = 0;
@@ -813,11 +703,6 @@
#define pci_register_driver(pdrv) linux_pci_register_driver(pdrv)
#define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv)
-struct msix_entry {
- int entry;
- int vector;
-};
-
/*
* Enable msix, positive errors indicate actual number of available
* vectors. Negative errors are failures.
@@ -825,42 +710,7 @@
* NB: define added to prevent this definition of pci_enable_msix from
* clashing with the native FreeBSD version.
*/
-#define pci_enable_msix(...) \
- linux_pci_enable_msix(__VA_ARGS__)
-
-static inline int
-pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
-{
- struct resource_list_entry *rle;
- int error;
- int avail;
- int i;
-
- avail = pci_msix_count(pdev->dev.bsddev);
- if (avail < nreq) {
- if (avail == 0)
- return -EINVAL;
- return avail;
- }
- avail = nreq;
- if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
- return error;
- /*
- * Handle case where "pci_alloc_msix()" may allocate less
- * interrupts than available and return with no error:
- */
- if (avail < nreq) {
- pci_release_msi(pdev->dev.bsddev);
- return avail;
- }
- rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
- pdev->dev.irq_start = rle->start;
- pdev->dev.irq_end = rle->start + avail;
- for (i = 0; i < nreq; i++)
- entries[i].vector = pdev->dev.irq_start + i;
- pdev->msix_enabled = true;
- return (0);
-}
+#define pci_enable_msix(...) linuxkpi_pci_enable_msix(__VA_ARGS__)
#define pci_enable_msix_range(...) \
linux_pci_enable_msix_range(__VA_ARGS__)
@@ -891,38 +741,6 @@
#define pci_enable_msi(pdev) \
linux_pci_enable_msi(pdev)
-static inline int
-_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
-{
- struct resource_list_entry *rle;
- int error;
- int nvec;
-
- if (maxvec < minvec)
- return (-EINVAL);
-
- nvec = pci_msi_count(pdev->dev.bsddev);
- if (nvec < 1 || nvec < minvec)
- return (-ENOSPC);
-
- nvec = min(nvec, maxvec);
- if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
- return error;
-
- /* Native PCI might only ever ask for 32 vectors. */
- if (nvec < minvec) {
- pci_release_msi(pdev->dev.bsddev);
- return (-ENOSPC);
- }
-
- rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
- pdev->dev.irq_start = rle->start;
- pdev->dev.irq_end = rle->start + nvec;
- pdev->irq = rle->start;
- pdev->msi_enabled = true;
- return (0);
-}
-
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
@@ -946,35 +764,9 @@
{
}
-static inline void *
-pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
-{
- struct resource *res;
-
- res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
- if (res == NULL)
- return (NULL);
- /* This is a FreeBSD extension so we can use bus_*(). */
- if (pdev->want_iomap_res)
- return (res);
- return ((void *)rman_get_bushandle(res));
-}
-
-static inline void
-pci_iounmap(struct pci_dev *pdev, void *res)
-{
- struct pci_mmio_region *mmio, *p;
-
- TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
- if (res != (void *)rman_get_bushandle(mmio->res))
- continue;
- bus_release_resource(pdev->dev.bsddev,
- mmio->type, mmio->rid, mmio->res);
- TAILQ_REMOVE(&pdev->mmio, mmio, next);
- free(mmio, M_DEVBUF);
- return;
- }
-}
+#define pci_iomap(pdev, mmio_bar, mmio_size) \
+ linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
+#define pci_iounmap(pdev, res) linuxkpi_pci_iounmap(pdev, res)
static inline void
lkpi_pci_save_state(struct pci_dev *pdev)
@@ -1534,97 +1326,10 @@
/* -------------------------------------------------------------------------- */
-static inline int
-pcim_enable_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
- int error;
-
- /* Here we cannot run through the pdev->managed check. */
- dr = lkpi_pci_devres_get_alloc(pdev);
- if (dr == NULL)
- return (-ENOMEM);
-
- /* If resources were enabled before do not do it again. */
- if (dr->enable_io)
- return (0);
-
- error = pci_enable_device(pdev);
- if (error == 0)
- dr->enable_io = true;
-
- /* This device is not managed. */
- pdev->managed = true;
-
- return (error);
-}
-
-static inline void __iomem **
-pcim_iomap_table(struct pci_dev *pdev)
-{
- struct pcim_iomap_devres *dr;
-
- dr = lkpi_pcim_iomap_devres_find(pdev);
- if (dr == NULL)
- return (NULL);
-
- /*
- * If the driver has manually set a flag to be able to request the
- * resource to use bus_read/write_<n>, return the shadow table.
- */
- if (pdev->want_iomap_res)
- return ((void **)dr->res_table);
-
- /* This is the Linux default. */
- return (dr->mmio_table);
-}
-
-static inline int
-pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
-{
- struct pcim_iomap_devres *dr;
- void *res;
- uint32_t mappings;
- int bar;
-
- dr = lkpi_pcim_iomap_devres_find(pdev);
- if (dr == NULL)
- return (-ENOMEM);
-
- /* Now iomap all the requested (by "mask") ones. */
- for (bar = mappings = 0; mappings != mask; bar++) {
- if ((mask & (1 << bar)) == 0)
- continue;
-
- /* Request double is not allowed. */
- if (dr->mmio_table[bar] != NULL) {
- device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
- __func__, bar, dr->mmio_table[bar]);
- goto err;
- }
-
- res = _lkpi_pci_iomap(pdev, bar, 0);
- if (res == NULL)
- goto err;
- dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
- dr->res_table[bar] = res;
-
- mappings |= (1 << bar);
- }
-
- return (0);
-err:
- for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
- if ((mappings & (1 << bar)) != 0) {
- res = dr->mmio_table[bar];
- if (res == NULL)
- continue;
- pci_iounmap(pdev, res);
- }
- }
-
- return (-EINVAL);
-}
+#define pcim_enable_device(pdev) linuxkpi_pcim_enable_device(pdev)
+#define pcim_iomap_table(pdev) linuxkpi_pcim_iomap_table(pdev)
+#define pcim_iomap_regions(pdev, mask, name) \
+ linuxkpi_pcim_iomap_regions(pdev, mask, name)
static inline int
pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)
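
Illustrative sketch, not part of this revision: with the macro conversions above, a LinuxKPI consumer keeps calling the familiar Linux-style helpers, which now expand to the out-of-line linuxkpi_pci_*() implementations. A minimal, hypothetical probe fragment (driver name "mydrv" and a memory BAR 0 are assumptions of the example):

/*
 * Hypothetical consumer code, not from the tree: the calls below resolve
 * through the pci_request_regions()/pci_iomap()/pci_iounmap() macros
 * introduced in linux/pci.h above.
 */
#include <linux/pci.h>

static int
mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int error;

	error = pci_enable_device(pdev);
	if (error != 0)
		return (error);

	/* Claim all BARs; expands to linuxkpi_pci_request_regions(). */
	error = pci_request_regions(pdev, "mydrv");
	if (error != 0)
		return (error);

	/* Map BAR 0; expands to linuxkpi_pci_iomap(). */
	regs = pci_iomap(pdev, 0, 0);
	if (regs == NULL) {
		pci_release_regions(pdev);
		return (-ENOMEM);
	}

	/* ... device setup via readl()/writel() on regs ... */

	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	return (0);
}
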
diff --git a/sys/compat/linuxkpi/common/src/linux_interrupt.c b/sys/compat/linuxkpi/common/src/linux_interrupt.c
--- a/sys/compat/linuxkpi/common/src/linux_interrupt.c
+++ b/sys/compat/linuxkpi/common/src/linux_interrupt.c
@@ -117,17 +117,20 @@
struct resource *res;
struct irq_ent *irqe;
struct device *dev;
+ unsigned resflags;
int error;
int rid;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return -ENXIO;
if (xdev != NULL && xdev != dev)
return -ENXIO;
rid = lkpi_irq_rid(dev, irq);
- res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid,
- flags | RF_ACTIVE);
+ resflags = RF_ACTIVE;
+ if ((flags & IRQF_SHARED) != 0)
+ resflags |= RF_SHAREABLE;
+ res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, resflags);
if (res == NULL)
return (-ENXIO);
if (xdev != NULL)
@@ -167,7 +170,7 @@
struct irq_ent *irqe;
struct device *dev;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return -EINVAL;
irqe = lkpi_irq_ent(dev, irq);
@@ -183,7 +186,7 @@
struct irq_ent *irqe;
struct device *dev;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
irqe = lkpi_irq_ent(dev, irq);
@@ -200,7 +203,7 @@
struct irq_ent *irqe;
struct device *dev;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return (-ENOENT);
@@ -217,7 +220,7 @@
struct irq_ent *irqe;
struct device *dev;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
irqe = lkpi_irq_ent(dev, irq);
@@ -233,7 +236,7 @@
struct device *dev;
struct irq_ent *irqe;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
if (xdev != dev)
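
Illustrative sketch, not part of this revision: after the change above, a driver that registers a shared handler through the LinuxKPI still passes IRQF_SHARED; the request path now translates that into RF_SHAREABLE on the bus_alloc_resource_any() call instead of feeding the Linux flag value to rman directly. Hypothetical fragment:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t
mydrv_intr(int irq, void *arg)
{
	/* ... acknowledge and service the device interrupt ... */
	return (IRQ_HANDLED);
}

static int
mydrv_setup_irq(struct pci_dev *pdev, void *softc)
{
	/* IRQF_SHARED is mapped to RF_SHAREABLE inside the LinuxKPI. */
	return (request_irq(pdev->irq, mydrv_intr, IRQF_SHARED, "mydrv", softc));
}
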
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -41,11 +41,14 @@
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
+#include <sys/rman.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
#include <machine/stdarg.h>
#include <dev/pci/pcivar.h>
@@ -95,6 +98,7 @@
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
+static void lkpi_pcim_iomap_table_release(struct device *, void *);
static device_method_t pci_methods[] = {
DEVMETHOD(device_probe, linux_pci_probe),
@@ -118,6 +122,18 @@
"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};
+/* We need some meta-struct to keep track of these for devres. */
+struct pci_devres {
+ bool enable_io;
+ /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
+ uint8_t region_mask;
+ struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
+};
+struct pcim_iomap_devres {
+ void *mmio_table[PCIR_MAX_BAR_0 + 1];
+ struct resource *res_table[PCIR_MAX_BAR_0 + 1];
+};
+
struct linux_dma_priv {
uint64_t dma_mask;
bus_dma_tag_t dmat;
@@ -434,6 +450,41 @@
return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
+static struct resource_list_entry *
+linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
+ int type, int rid)
+{
+ device_t dev;
+ struct resource *res;
+
+ KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
+ ("trying to reserve non-BAR type %d", type));
+
+ dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
+ device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
+ res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
+ 1, 1, 0);
+ if (res == NULL)
+ return (NULL);
+ return (resource_list_find(rl, type, rid));
+}
+
+static struct resource_list_entry *
+linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
+{
+ struct pci_devinfo *dinfo;
+ struct resource_list *rl;
+ struct resource_list_entry *rle;
+
+ dinfo = device_get_ivars(pdev->dev.bsddev);
+ rl = &dinfo->resources;
+ rle = resource_list_find(rl, type, rid);
+ /* Reserve resources for this BAR if needed. */
+ if (rle == NULL && reserve_bar)
+ rle = linux_pci_reserve_bar(pdev, rl, type, rid);
+ return (rle);
+}
+
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
const struct pci_device_id *id, struct pci_dev *pdev)
@@ -542,7 +593,7 @@
return (0);
}
-struct pci_devres *
+static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
struct pci_devres *dr;
@@ -558,6 +609,15 @@
return (dr);
}
+static struct pci_devres *
+lkpi_pci_devres_find(struct pci_dev *pdev)
+{
+ if (!pdev->managed)
+ return (NULL);
+
+ return (lkpi_pci_devres_get_alloc(pdev));
+}
+
void
lkpi_pci_devres_release(struct device *dev, void *p)
{
@@ -586,7 +646,32 @@
}
}
-struct pcim_iomap_devres *
+int
+linuxkpi_pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int error;
+
+ /* Here we cannot run through the pdev->managed check. */
+ dr = lkpi_pci_devres_get_alloc(pdev);
+ if (dr == NULL)
+ return (-ENOMEM);
+
+ /* If resources were enabled before do not do it again. */
+ if (dr->enable_io)
+ return (0);
+
+ error = pci_enable_device(pdev);
+ if (error == 0)
+ dr->enable_io = true;
+
+ /* This device is not managed. */
+ pdev->managed = true;
+
+ return (error);
+}
+
+static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr;
@@ -606,7 +691,144 @@
return (dr);
}
+void __iomem **
+linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr;
+
+ dr = lkpi_pcim_iomap_devres_find(pdev);
+ if (dr == NULL)
+ return (NULL);
+
+ /*
+ * If the driver has manually set a flag to be able to request the
+ * resource to use bus_read/write_<n>, return the shadow table.
+ */
+ if (pdev->want_iomap_res)
+ return ((void **)dr->res_table);
+
+ /* This is the Linux default. */
+ return (dr->mmio_table);
+}
+
+static struct resource *
+_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
+{
+ struct pci_mmio_region *mmio, *p;
+ int type;
+
+ type = pci_resource_type(pdev, bar);
+ if (type < 0) {
+ device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
+ __func__, bar, type);
+ return (NULL);
+ }
+
+ /*
+ * Check for duplicate mappings.
+ * This can happen if a driver calls pci_request_region() first.
+ */
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
+ return (mmio->res);
+ }
+ }
+
+ mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
+ mmio->rid = PCIR_BAR(bar);
+ mmio->type = type;
+ mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
+ &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
+ if (mmio->res == NULL) {
+ device_printf(pdev->dev.bsddev, "%s: failed to alloc "
+ "bar %d type %d rid %d\n",
+ __func__, bar, type, PCIR_BAR(bar));
+ free(mmio, M_DEVBUF);
+ return (NULL);
+ }
+ TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
+
+ return (mmio->res);
+}
+
+void *
+linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
+{
+ struct resource *res;
+
+ res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
+ if (res == NULL)
+ return (NULL);
+ /* This is a FreeBSD extension so we can use bus_*(). */
+ if (pdev->want_iomap_res)
+ return (res);
+ return ((void *)rman_get_bushandle(res));
+}
+
void
+linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
+{
+ struct pci_mmio_region *mmio, *p;
+
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (res != (void *)rman_get_bushandle(mmio->res))
+ continue;
+ bus_release_resource(pdev->dev.bsddev,
+ mmio->type, mmio->rid, mmio->res);
+ TAILQ_REMOVE(&pdev->mmio, mmio, next);
+ free(mmio, M_DEVBUF);
+ return;
+ }
+}
+
+int
+linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
+{
+ struct pcim_iomap_devres *dr;
+ void *res;
+ uint32_t mappings;
+ int bar;
+
+ dr = lkpi_pcim_iomap_devres_find(pdev);
+ if (dr == NULL)
+ return (-ENOMEM);
+
+ /* Now iomap all the requested (by "mask") ones. */
+ for (bar = mappings = 0; mappings != mask; bar++) {
+ if ((mask & (1 << bar)) == 0)
+ continue;
+
+ /* Request double is not allowed. */
+ if (dr->mmio_table[bar] != NULL) {
+ device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
+ __func__, bar, dr->mmio_table[bar]);
+ goto err;
+ }
+
+ res = _lkpi_pci_iomap(pdev, bar, 0);
+ if (res == NULL)
+ goto err;
+ dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
+ dr->res_table[bar] = res;
+
+ mappings |= (1 << bar);
+ }
+
+ return (0);
+err:
+ for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
+ if ((mappings & (1 << bar)) != 0) {
+ res = dr->mmio_table[bar];
+ if (res == NULL)
+ continue;
+ pci_iounmap(pdev, res);
+ }
+ }
+
+ return (-EINVAL);
+}
+
+static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
struct pcim_iomap_devres *dr;
@@ -758,23 +980,35 @@
return (_linux_pci_register_driver(pdrv, dc));
}
-struct resource_list_entry *
-linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
- int type, int rid)
+static struct resource_list_entry *
+lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
- device_t dev;
- struct resource *res;
-
- KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
- ("trying to reserve non-BAR type %d", type));
+ int type;
- dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
- device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
- res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
- 1, 1, 0);
- if (res == NULL)
+ type = pci_resource_type(pdev, bar);
+ if (type < 0)
return (NULL);
- return (resource_list_find(rl, type, rid));
+ bar = PCIR_BAR(bar);
+ return (linux_pci_get_rle(pdev, type, bar, reserve));
+}
+
+struct device *
+lkpi_pci_find_irq_dev(unsigned int irq)
+{
+ struct pci_dev *pdev;
+ struct device *found;
+
+ found = NULL;
+ spin_lock(&pci_lock);
+ list_for_each_entry(pdev, &pci_devices, links) {
+ if (irq == pdev->dev.irq ||
+ (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
+ found = &pdev->dev;
+ break;
+ }
+ }
+ spin_unlock(&pci_lock);
+ return (found);
}
unsigned long
@@ -785,7 +1019,7 @@
device_t dev;
int error;
- if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
+ if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
return (0);
dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
@@ -804,7 +1038,7 @@
{
struct resource_list_entry *rle;
- if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
+ if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
return (0);
return (rle->count);
}
@@ -853,44 +1087,62 @@
return (0);
}
-struct resource *
-_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
+int
+linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
- struct pci_mmio_region *mmio, *p;
- int type;
+ int error;
+ int i;
- type = pci_resource_type(pdev, bar);
- if (type < 0) {
- device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
- __func__, bar, type);
- return (NULL);
+ for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
+ error = pci_request_region(pdev, i, res_name);
+ if (error && error != -ENODEV) {
+ pci_release_regions(pdev);
+ return (error);
+ }
}
+ return (0);
+}
+
+void
+linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
+{
+ struct resource_list_entry *rle;
+ struct pci_devres *dr;
+ struct pci_mmio_region *mmio, *p;
+
+ if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
+ return;
/*
- * Check for duplicate mappings.
- * This can happen if a driver calls pci_request_region() first.
+ * As we implicitly track the requests we also need to clear them on
+ * release. Do clear before resource release.
*/
- TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
- if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
- return (mmio->res);
- }
+ dr = lkpi_pci_devres_find(pdev);
+ if (dr != NULL) {
+ KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
+ " region_table res %p != rel->res %p\n", __func__, pdev,
+ bar, dr->region_table[bar], rle->res));
+ dr->region_table[bar] = NULL;
+ dr->region_mask &= ~(1 << bar);
}
- mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
- mmio->rid = PCIR_BAR(bar);
- mmio->type = type;
- mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
- &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
- if (mmio->res == NULL) {
- device_printf(pdev->dev.bsddev, "%s: failed to alloc "
- "bar %d type %d rid %d\n",
- __func__, bar, type, PCIR_BAR(bar));
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (rle->res != (void *)rman_get_bushandle(mmio->res))
+ continue;
+ TAILQ_REMOVE(&pdev->mmio, mmio, next);
free(mmio, M_DEVBUF);
- return (NULL);
}
- TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
- return (mmio->res);
+ bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
+}
+
+void
+linuxkpi_pci_release_regions(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i <= PCIR_MAX_BAR_0; i++)
+ pci_release_region(pdev, i);
}
int
@@ -938,6 +1190,73 @@
bus_topo_unlock();
}
+int
+linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
+ int nreq)
+{
+ struct resource_list_entry *rle;
+ int error;
+ int avail;
+ int i;
+
+ avail = pci_msix_count(pdev->dev.bsddev);
+ if (avail < nreq) {
+ if (avail == 0)
+ return -EINVAL;
+ return avail;
+ }
+ avail = nreq;
+ if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
+ return error;
+ /*
+ * Handle case where "pci_alloc_msix()" may allocate less
+ * interrupts than available and return with no error:
+ */
+ if (avail < nreq) {
+ pci_release_msi(pdev->dev.bsddev);
+ return avail;
+ }
+ rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
+ pdev->dev.irq_start = rle->start;
+ pdev->dev.irq_end = rle->start + avail;
+ for (i = 0; i < nreq; i++)
+ entries[i].vector = pdev->dev.irq_start + i;
+ pdev->msix_enabled = true;
+ return (0);
+}
+
+int
+_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
+{
+ struct resource_list_entry *rle;
+ int error;
+ int nvec;
+
+ if (maxvec < minvec)
+ return (-EINVAL);
+
+ nvec = pci_msi_count(pdev->dev.bsddev);
+ if (nvec < 1 || nvec < minvec)
+ return (-ENOSPC);
+
+ nvec = min(nvec, maxvec);
+ if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
+ return error;
+
+ /* Native PCI might only ever ask for 32 vectors. */
+ if (nvec < minvec) {
+ pci_release_msi(pdev->dev.bsddev);
+ return (-ENOSPC);
+ }
+
+ rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
+ pdev->dev.irq_start = rle->start;
+ pdev->dev.irq_end = rle->start + nvec;
+ pdev->irq = rle->start;
+ pdev->msi_enabled = true;
+ return (0);
+}
+
int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
unsigned int flags)
@@ -986,7 +1305,7 @@
struct pcicfg_msi *msi;
int vec;
- dev = linux_pci_find_irq_dev(irq);
+ dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return (NULL);
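
Illustrative sketch, not part of this revision: struct msix_entry and the MSI-X enable path now live out of line; a hypothetical driver requesting a fixed number of vectors (MYDRV_NVEC is an assumption of the example) keeps using the Linux-style call:

#include <linux/pci.h>

#define	MYDRV_NVEC	4	/* assumed vector count, example only */

static int
mydrv_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
	int i, rc;

	for (i = 0; i < MYDRV_NVEC; i++)
		entries[i].entry = i;

	/*
	 * Expands to linuxkpi_pci_enable_msix(): 0 on success, a positive
	 * value is the number of vectors actually available, negative
	 * values are errors.
	 */
	rc = pci_enable_msix(pdev, entries, MYDRV_NVEC);
	if (rc != 0)
		return (rc);

	/* entries[i].vector now holds IRQ numbers usable with request_irq(). */
	return (0);
}
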
diff --git a/sys/contrib/dev/iwlwifi/pcie/trans.c b/sys/contrib/dev/iwlwifi/pcie/trans.c
--- a/sys/contrib/dev/iwlwifi/pcie/trans.c
+++ b/sys/contrib/dev/iwlwifi/pcie/trans.c
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <linux/seq_file.h>
#if defined(__FreeBSD__)
+#include <sys/rman.h>
#include <linux/delay.h>
#endif
diff --git a/sys/contrib/dev/rtw88/pci.c b/sys/contrib/dev/rtw88/pci.c
--- a/sys/contrib/dev/rtw88/pci.c
+++ b/sys/contrib/dev/rtw88/pci.c
@@ -17,6 +17,7 @@
#include "ps.h"
#include "debug.h"
#if defined(__FreeBSD__)
+#include <sys/rman.h>
#include <linux/pm.h>
#endif
diff --git a/sys/dev/irdma/irdma_main.h b/sys/dev/irdma/irdma_main.h
--- a/sys/dev/irdma/irdma_main.h
+++ b/sys/dev/irdma/irdma_main.h
@@ -39,6 +39,7 @@
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
+#include <sys/rman.h>
#include <sys/socket.h>
#include <netinet/if_ether.h>
#include <linux/slab.h>
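
Illustrative sketch, not part of this revision: because linux/pci.h no longer pulls in <sys/rman.h>, FreeBSD-specific driver code that uses the want_iomap_res extension together with the bus_read/write_<n>() accessors now includes that header itself, as the iwlwifi, rtw88 and irdma hunks above do. Hypothetical fragment (assumes the usual bus headers such as <sys/bus.h> and <machine/bus.h> are already in scope, as they are in those drivers):

#if defined(__FreeBSD__)
#include <sys/rman.h>
#endif
#include <linux/pci.h>

static struct resource *mydrv_res;	/* BAR 0 mapping, example only */

static int
mydrv_map_bar0(struct pci_dev *pdev)
{
	/* FreeBSD-only extension: have pci_iomap() return the resource. */
	pdev->want_iomap_res = true;
	mydrv_res = pci_iomap(pdev, 0, 0);
	return (mydrv_res == NULL ? -ENOMEM : 0);
}

static uint32_t
mydrv_read_reg(bus_size_t off)
{
	return (bus_read_4(mydrv_res, off));
}
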
