D36549: kmem_malloc/free: Use void * instead of vm_offset_t for kernel pointers.

D36549.diff (45 KB)
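The change across this diff is mechanical but wide: kmem_malloc(), kmem_alloc_attr(), kmem_alloc_contig(), their *_domainset() variants, and kmem_free() now return and accept void * instead of vm_offset_t, so callers drop the casts on both the allocate and the free side. A minimal before/after sketch of a typical call site (struct mybuf and bufsize are illustrative names, not from the diff):

	struct mybuf *buf;

	/* Before: the allocator returned vm_offset_t, forcing casts. */
	buf = (struct mybuf *)kmem_malloc(bufsize, M_WAITOK | M_ZERO);
	kmem_free((vm_offset_t)buf, bufsize);

	/* After: void * converts implicitly to and from object pointers. */
	buf = kmem_malloc(bufsize, M_WAITOK | M_ZERO);
	kmem_free(buf, bufsize);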
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -423,17 +423,17 @@
domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
/* allocate and set up an idle stack data page */
- bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
+ bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
M_WAITOK | M_ZERO);
- doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
+ doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
M_WAITOK | M_ZERO);
- mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
+ mce_stack = kmem_malloc(MCE_STACK_SIZE,
M_WAITOK | M_ZERO);
- nmi_stack = (char *)kmem_malloc_domainset(
+ nmi_stack = kmem_malloc_domainset(
DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
- dbg_stack = (char *)kmem_malloc_domainset(
+ dbg_stack = kmem_malloc_domainset(
DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
- dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
+ dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
DPCPU_SIZE, M_WAITOK | M_ZERO);
bootpcpu = &__pcpu[cpu];
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2390,7 +2390,7 @@
*/
s = (vm_size_t)pv_npg * sizeof(struct md_page);
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -418,8 +418,7 @@
*/
pcb = td->td_pcb;
if (pcb->pcb_tssp == NULL) {
- tssp = (struct amd64tss *)kmem_malloc(ctob(IOPAGES + 1),
- M_WAITOK);
+ tssp = kmem_malloc(ctob(IOPAGES + 1), M_WAITOK);
pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
ctob(IOPAGES + 1), false);
iomap = (char *)&tssp[1];
@@ -523,8 +522,8 @@
mtx_unlock(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
- sva = kmem_malloc(sz, M_WAITOK | M_ZERO);
- new_ldt->ldt_base = (caddr_t)sva;
+ new_ldt->ldt_base = kmem_malloc(sz, M_WAITOK | M_ZERO);
+ sva = (uintptr_t)new_ldt->ldt_base;
pmap_pti_add_kva(sva, sva + sz, false);
new_ldt->ldt_refcnt = 1;
sldt.ssd_base = sva;
@@ -539,7 +538,7 @@
pldt = mdp->md_ldt;
if (pldt != NULL && !force) {
pmap_pti_remove_kva(sva, sva + sz);
- kmem_free(sva, sz);
+ kmem_free(new_ldt->ldt_base, sz);
free(new_ldt, M_SUBPROC);
return (pldt);
}
@@ -592,7 +591,7 @@
sva = (vm_offset_t)pldt->ldt_base;
sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
pmap_pti_remove_kva(sva, sva + sz);
- kmem_free(sva, sz);
+ kmem_free(pldt->ldt_base, sz);
free(pldt, M_SUBPROC);
}
}
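The LDT hunks above illustrate the idiom for consumers that still need a numeric address: pmap_pti_add_kva() and pmap_pti_remove_kva() keep taking vm_offset_t, so the address is now derived from the pointer instead of the pointer from the address. A hedged sketch of the resulting shape (sz is illustrative):

	void *base;
	vm_offset_t sva;

	base = kmem_malloc(sz, M_WAITOK | M_ZERO);
	sva = (uintptr_t)base;		/* numeric view for the KVA-based KPI */
	pmap_pti_add_kva(sva, sva + sz, false);
	/* ... use the memory ... */
	pmap_pti_remove_kva(sva, sva + sz);
	kmem_free(base, sz);		/* free takes the pointer, not the address */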
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -373,7 +373,7 @@
if (pcb->pcb_tssp != NULL) {
pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
(vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
- kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
+ kmem_free(pcb->pcb_tssp, ctob(IOPAGES + 1));
pcb->pcb_tssp = NULL;
}
}
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -776,10 +776,10 @@
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
dmat->alignment <= PAGE_SIZE &&
(dmat->boundary % PAGE_SIZE) == 0) {
- *vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
+ *vaddr = kmem_alloc_attr(dmat->maxsize, mflags, 0,
dmat->lowaddr, memattr);
} else {
- *vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+ *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0,
dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
}
if (*vaddr == NULL) {
@@ -822,7 +822,7 @@
!exclusion_bounce(dmat))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free((vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(vaddr, dmat->maxsize);
dmat->map_count--;
if (map->flags & DMAMAP_COHERENT)
diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -115,7 +115,7 @@
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
- dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
dcache_wbinv_poc_all();
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1780,7 +1780,7 @@
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
@@ -2213,7 +2213,7 @@
*/
if (pmap->pm_pt1 == NULL) {
- pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1,
+ pmap->pm_pt1 = kmem_alloc_contig(NB_IN_PT1,
M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr);
if (pmap->pm_pt1 == NULL)
return (0);
@@ -2229,7 +2229,7 @@
* be used no matter which process is current. Its mapping
* in PT2MAP can be used only for current process.
*/
- pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(NB_IN_PT2TAB,
+ pmap->pm_pt2tab = kmem_alloc_attr(NB_IN_PT2TAB,
M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr);
if (pmap->pm_pt2tab == NULL) {
/*
@@ -2237,7 +2237,7 @@
* UMA_ZONE_NOFREE flag, it's important to leave
* no allocation in pmap if initialization failed.
*/
- kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1);
+ kmem_free(pmap->pm_pt1, NB_IN_PT1);
pmap->pm_pt1 = NULL;
return (0);
}
diff --git a/sys/arm/freescale/imx/imx6_sdma.c b/sys/arm/freescale/imx/imx6_sdma.c
--- a/sys/arm/freescale/imx/imx6_sdma.c
+++ b/sys/arm/freescale/imx/imx6_sdma.c
@@ -185,7 +185,7 @@
chn = i;
/* Allocate area for buffer descriptors */
- channel->bd = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
+ channel->bd = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
return (chn);
@@ -202,7 +202,7 @@
channel = &sc->channel[chn];
channel->in_use = 0;
- kmem_free((vm_offset_t)channel->bd, PAGE_SIZE);
+ kmem_free(channel->bd, PAGE_SIZE);
return (0);
}
@@ -396,7 +396,7 @@
sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) + \
sizeof(struct sdma_context_data);
- sc->ccb = (void *)kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
+ sc->ccb = kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
VM_MEMATTR_UNCACHEABLE);
sc->ccb_phys = vtophys(sc->ccb);
@@ -415,7 +415,7 @@
/* Channel 0 is used for booting firmware */
chn = 0;
- sc->bd0 = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
+ sc->bd0 = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
0, VM_MEMATTR_UNCACHEABLE);
bd0 = sc->bd0;
sc->ccb[chn].base_bd_ptr = vtophys(bd0);
diff --git a/sys/arm/nvidia/drm2/tegra_dc.c b/sys/arm/nvidia/drm2/tegra_dc.c
--- a/sys/arm/nvidia/drm2/tegra_dc.c
+++ b/sys/arm/nvidia/drm2/tegra_dc.c
@@ -1231,7 +1231,8 @@
sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4,
M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0,
VM_MEMATTR_WRITE_COMBINING);
- sc->tegra_crtc.cursor_pbase = vtophys(sc->tegra_crtc.cursor_vbase);
+ sc->tegra_crtc.cursor_pbase =
+ vtophys((uintptr_t)sc->tegra_crtc.cursor_vbase);
return (0);
}
diff --git a/sys/arm/nvidia/drm2/tegra_drm.h b/sys/arm/nvidia/drm2/tegra_drm.h
--- a/sys/arm/nvidia/drm2/tegra_drm.h
+++ b/sys/arm/nvidia/drm2/tegra_drm.h
@@ -64,7 +64,7 @@
device_t dev;
int nvidia_head;
vm_paddr_t cursor_pbase; /* Cursor buffer */
- vm_offset_t cursor_vbase;
+ void *cursor_vbase;
};
struct tegra_drm_encoder {
diff --git a/sys/arm/nvidia/tegra_pcie.c b/sys/arm/nvidia/tegra_pcie.c
--- a/sys/arm/nvidia/tegra_pcie.c
+++ b/sys/arm/nvidia/tegra_pcie.c
@@ -1382,7 +1382,7 @@
sc = device_get_softc(dev);
- sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
+ sc->msi_page = (uintptr_t)kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
/* MSI BAR */
diff --git a/sys/arm/nvidia/tegra_xhci.c b/sys/arm/nvidia/tegra_xhci.c
--- a/sys/arm/nvidia/tegra_xhci.c
+++ b/sys/arm/nvidia/tegra_xhci.c
@@ -289,7 +289,7 @@
struct intr_config_hook irq_hook;
bool xhci_inited;
- vm_offset_t fw_vaddr;
+ void *fw_vaddr;
vm_size_t fw_size;
};
@@ -744,7 +744,7 @@
const struct firmware *fw;
const struct tegra_xusb_fw_hdr *fw_hdr;
vm_paddr_t fw_paddr, fw_base;
- vm_offset_t fw_vaddr;
+ void *fw_vaddr;
vm_size_t fw_size;
uint32_t code_tags, code_size;
struct clocktime fw_clock;
@@ -775,9 +775,9 @@
fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0,
VM_MEMATTR_UNCACHEABLE);
- fw_paddr = vtophys(fw_vaddr);
+ fw_paddr = vtophys((uintptr_t)fw_vaddr);
fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
- memcpy((void *)fw_vaddr, fw->data, fw_size);
+ memcpy(fw_vaddr, fw->data, fw_size);
firmware_put(fw, FIRMWARE_UNLOAD);
sc->fw_vaddr = fw_vaddr;
@@ -947,7 +947,7 @@
xhci_uninit(xsc);
if (sc->irq_hdl_mbox != NULL)
bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
- if (sc->fw_vaddr != 0)
+ if (sc->fw_vaddr != NULL)
kmem_free(sc->fw_vaddr, sc->fw_size);
LOCK_DESTROY(sc);
return (0);
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -567,11 +567,11 @@
dmat->alloc_alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
- *vaddr = (void *)kmem_alloc_attr(dmat->alloc_size, mflags,
+ *vaddr = kmem_alloc_attr(dmat->alloc_size, mflags,
0ul, dmat->common.lowaddr, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
} else {
- *vaddr = (void *)kmem_alloc_contig(dmat->alloc_size, mflags,
+ *vaddr = kmem_alloc_contig(dmat->alloc_size, mflags,
0ul, dmat->common.lowaddr, dmat->alloc_alignment != 0 ?
dmat->alloc_alignment : 1ul, dmat->common.boundary, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
@@ -608,7 +608,7 @@
if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
else
- kmem_free((vm_offset_t)vaddr, dmat->alloc_size);
+ kmem_free(vaddr, dmat->alloc_size);
free(map, M_DEVBUF);
dmat->map_count--;
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -317,7 +317,7 @@
for (cpu = 1; cpu < mp_ncpus; cpu++) {
if (bootstacks[cpu] != NULL)
- kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
+ kmem_free(bootstacks[cpu], PAGE_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
@@ -495,7 +495,6 @@
start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
{
struct pcpu *pcpup;
- vm_offset_t pcpu_mem;
vm_size_t size;
vm_paddr_t pa;
int err, naps;
@@ -511,18 +510,16 @@
KASSERT(cpuid < MAXCPU, ("Too many CPUs"));
size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
- pcpu_mem = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
+ pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
M_WAITOK | M_ZERO);
- pmap_disable_promotion(pcpu_mem, size);
-
- pcpup = (struct pcpu *)pcpu_mem;
+ pmap_disable_promotion((vm_offset_t)pcpup, size);
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
dpcpu[cpuid - 1] = (void *)(pcpup + 1);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
- bootstacks[cpuid] = (void *)kmem_malloc_domainset(
+ bootstacks[cpuid] = kmem_malloc_domainset(
DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
@@ -544,8 +541,8 @@
pcpu_destroy(pcpup);
dpcpu[cpuid - 1] = NULL;
- kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
- kmem_free(pcpu_mem, size);
+ kmem_free(bootstacks[cpuid], PAGE_SIZE);
+ kmem_free(pcpup, size);
bootstacks[cpuid] = NULL;
mp_ncpus--;
return (false);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1247,7 +1247,7 @@
* bit_alloc().
*/
set->asid_set_size = 1 << set->asid_bits;
- set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
+ set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size),
M_WAITOK | M_ZERO);
for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
bit_set(set->asid_set, i);
@@ -1326,7 +1326,7 @@
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
--- a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
+++ b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -165,7 +165,7 @@
{
linux_dma_unmap(dev, dma_addr, size);
- kmem_free((vm_offset_t)cpu_addr, size);
+ kmem_free(cpu_addr, size);
}
static inline dma_addr_t
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -170,7 +170,7 @@
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
size_t size = ((size_t)PAGE_SIZE) << order;
- vm_offset_t addr;
+ void *addr;
if ((flags & GFP_DMA32) == 0) {
addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
@@ -178,7 +178,7 @@
addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
}
- return (addr);
+ return ((vm_offset_t)addr);
}
void
@@ -186,7 +186,7 @@
{
size_t size = ((size_t)PAGE_SIZE) << order;
- kmem_free(addr, size);
+ kmem_free((void *)addr, size);
}
static int
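When a consumer's own KPI is contractually vm_offset_t, as LinuxKPI's linux_alloc_kmem()/linux_free_kmem() pair is here, the diff converts once at the boundary rather than propagating the new type outward. A generic sketch of that shim pattern (the my_legacy_* names are illustrative):

	vm_offset_t
	my_legacy_alloc(vm_size_t size, int flags)
	{
		void *p;

		p = kmem_malloc(size, flags);
		return ((vm_offset_t)p);	/* a NULL result becomes 0 */
	}

	void
	my_legacy_free(vm_offset_t addr, vm_size_t size)
	{
		kmem_free((void *)addr, size);
	}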
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -1124,13 +1124,13 @@
align = PAGE_SIZE << get_order(size);
/* Always zero the allocation. */
flag |= M_ZERO;
- mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
+ mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
align, 0, VM_MEMATTR_DEFAULT);
if (mem != NULL) {
*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
priv->dmat_coherent);
if (*dma_handle == 0) {
- kmem_free((vm_offset_t)mem, size);
+ kmem_free(mem, size);
mem = NULL;
}
} else {
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -153,9 +153,8 @@
return 0;
gatt->ag_entries = entries;
- gatt->ag_virtual = (void *)kmem_alloc_contig(entries *
- sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0,
- VM_MEMATTR_WRITE_COMBINING);
+ gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
+ M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_virtual) {
if (bootverbose)
device_printf(dev, "contiguous allocation failed\n");
@@ -170,8 +169,7 @@
void
agp_free_gatt(struct agp_gatt *gatt)
{
- kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
free(gatt, M_AGP);
}
diff --git a/sys/dev/agp/agp_amd.c b/sys/dev/agp/agp_amd.c
--- a/sys/dev/agp/agp_amd.c
+++ b/sys/dev/agp/agp_amd.c
@@ -101,7 +101,7 @@
* directory.
*/
gatt->ag_entries = entries;
- gatt->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+ gatt->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_virtual) {
if (bootverbose)
@@ -113,14 +113,13 @@
/*
* Allocate the page directory.
*/
- gatt->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
+ gatt->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_vdir) {
if (bootverbose)
device_printf(dev,
"failed to allocate page directory\n");
- kmem_free((vm_offset_t)gatt->ag_virtual, entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_virtual, entries * sizeof(uint32_t));
free(gatt, M_AGP);
return 0;
}
@@ -168,9 +167,8 @@
static void
agp_amd_free_gatt(struct agp_amd_gatt *gatt)
{
- kmem_free((vm_offset_t)gatt->ag_vdir, AGP_PAGE_SIZE);
- kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_vdir, AGP_PAGE_SIZE);
+ kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
free(gatt, M_AGP);
}
diff --git a/sys/dev/agp/agp_ati.c b/sys/dev/agp/agp_ati.c
--- a/sys/dev/agp/agp_ati.c
+++ b/sys/dev/agp/agp_ati.c
@@ -132,7 +132,7 @@
/* Alloc the GATT -- pointers to pages of AGP memory */
sc->ag_entries = entries;
- sc->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+ sc->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (sc->ag_virtual == NULL) {
if (bootverbose)
@@ -141,13 +141,12 @@
}
/* Alloc the page directory -- pointers to each page of the GATT */
- sc->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
+ sc->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (sc->ag_vdir == NULL) {
if (bootverbose)
device_printf(dev, "pagedir allocation failed\n");
- kmem_free((vm_offset_t)sc->ag_virtual, entries *
- sizeof(u_int32_t));
+ kmem_free(sc->ag_virtual, entries * sizeof(uint32_t));
return ENOMEM;
}
sc->ag_pdir = vtophys((vm_offset_t)sc->ag_vdir);
@@ -263,9 +262,8 @@
temp = pci_read_config(dev, apsize_reg, 4);
pci_write_config(dev, apsize_reg, temp & ~1, 4);
- kmem_free((vm_offset_t)sc->ag_vdir, AGP_PAGE_SIZE);
- kmem_free((vm_offset_t)sc->ag_virtual, sc->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(sc->ag_vdir, AGP_PAGE_SIZE);
+ kmem_free(sc->ag_virtual, sc->ag_entries * sizeof(uint32_t));
bus_release_resource(dev, SYS_RES_MEMORY, ATI_GART_MMADDR, sc->regs);
agp_free_res(dev);
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -1189,7 +1189,7 @@
sc->dcache_size = 0;
/* According to the specs the gatt on the i810 must be 64k. */
- sc->gatt->ag_virtual = (void *)kmem_alloc_contig(64 * 1024, M_NOWAIT |
+ sc->gatt->ag_virtual = kmem_alloc_contig(64 * 1024, M_NOWAIT |
M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
if (sc->gatt->ag_virtual == NULL) {
if (bootverbose)
@@ -1329,7 +1329,7 @@
sc = device_get_softc(dev);
bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0);
- kmem_free((vm_offset_t)sc->gatt->ag_virtual, 64 * 1024);
+ kmem_free(sc->gatt->ag_virtual, 64 * 1024);
}
static void
diff --git a/sys/dev/amd_ecc_inject/ecc_inject.c b/sys/dev/amd_ecc_inject/ecc_inject.c
--- a/sys/dev/amd_ecc_inject/ecc_inject.c
+++ b/sys/dev/amd_ecc_inject/ecc_inject.c
@@ -177,7 +177,7 @@
static void
ecc_ei_inject(int count)
{
- vm_offset_t memory;
+ void *memory;
int injected;
KASSERT((quadrant & ~QUADRANT_MASK) == 0,
@@ -191,7 +191,7 @@
VM_MEMATTR_UNCACHEABLE);
for (injected = 0; injected < count; injected++) {
- ecc_ei_inject_one((void*)memory, PAGE_SIZE);
+ ecc_ei_inject_one(memory, PAGE_SIZE);
if (delay_ms != 0 && injected != count - 1)
pause_sbt("ecc_ei_inject", delay_ms * SBT_1MS, 0, 0);
}
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -497,7 +497,7 @@
* Scatter-gather memory.
*/
struct drm_sg_mem {
- vm_offset_t vaddr;
+ void *vaddr;
vm_paddr_t *busaddr;
vm_pindex_t pages;
};
diff --git a/sys/dev/drm2/drm_bufs.c b/sys/dev/drm2/drm_bufs.c
--- a/sys/dev/drm2/drm_bufs.c
+++ b/sys/dev/drm2/drm_bufs.c
@@ -392,8 +392,8 @@
free(map, DRM_MEM_MAPS);
return -EINVAL;
}
- map->handle = (void *)(dev->sg->vaddr + offset);
- map->offset += dev->sg->vaddr;
+ map->handle = (char *)dev->sg->vaddr + offset;
+ map->offset += (uintptr_t)dev->sg->vaddr;
break;
case _DRM_CONSISTENT:
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
diff --git a/sys/dev/drm2/drm_scatter.c b/sys/dev/drm2/drm_scatter.c
--- a/sys/dev/drm2/drm_scatter.c
+++ b/sys/dev/drm2/drm_scatter.c
@@ -35,7 +35,7 @@
#define DEBUG_SCATTER 0
-static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
+static inline void *drm_vmalloc_dma(vm_size_t size)
{
return kmem_alloc_attr(size, M_NOWAIT | M_ZERO, 0,
BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
@@ -46,7 +46,7 @@
if (entry == NULL)
return;
- if (entry->vaddr != 0)
+ if (entry->vaddr != NULL)
kmem_free(entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
@@ -83,7 +83,7 @@
}
entry->vaddr = drm_vmalloc_dma(size);
- if (entry->vaddr == 0) {
+ if (entry->vaddr == NULL) {
free(entry->busaddr, DRM_MEM_DRIVER);
free(entry, DRM_MEM_DRIVER);
return -ENOMEM;
@@ -91,14 +91,14 @@
for (pindex = 0; pindex < entry->pages; pindex++) {
entry->busaddr[pindex] =
- vtophys(entry->vaddr + IDX_TO_OFF(pindex));
+ vtophys((uintptr_t)entry->vaddr + IDX_TO_OFF(pindex));
}
- request->handle = entry->vaddr;
+ request->handle = (uintptr_t)entry->vaddr;
dev->sg = entry;
- DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
+ DRM_DEBUG("allocated %ju pages @ %p, contents=%08lx\n",
entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
return 0;
@@ -125,10 +125,10 @@
entry = dev->sg;
dev->sg = NULL;
- if (!entry || entry->vaddr != request->handle)
+ if (!entry || (uintptr_t)entry->vaddr != request->handle)
return -EINVAL;
- DRM_DEBUG("free 0x%zx\n", entry->vaddr);
+ DRM_DEBUG("free %p\n", entry->vaddr);
drm_sg_cleanup(entry);
diff --git a/sys/dev/hyperv/vmbus/hyperv.c b/sys/dev/hyperv/vmbus/hyperv.c
--- a/sys/dev/hyperv/vmbus/hyperv.c
+++ b/sys/dev/hyperv/vmbus/hyperv.c
@@ -268,7 +268,7 @@
static void
hypercall_memfree(void)
{
- kmem_free((vm_offset_t)hypercall_context.hc_addr, PAGE_SIZE);
+ kmem_free(hypercall_context.hc_addr, PAGE_SIZE);
hypercall_context.hc_addr = NULL;
}
@@ -286,8 +286,7 @@
* the NX bit.
* - Assume kmem_malloc() returns properly aligned memory.
*/
- hypercall_context.hc_addr = (void *)kmem_malloc(PAGE_SIZE, M_EXEC |
- M_WAITOK);
+ hypercall_context.hc_addr = kmem_malloc(PAGE_SIZE, M_EXEC | M_WAITOK);
hypercall_context.hc_paddr = vtophys(hypercall_context.hc_addr);
/* Get the 'reserved' bits, which requires preservation. */
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -519,7 +519,7 @@
DOMAINSET_PREF(tag->common.domain), mflags);
map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
} else {
- *vaddr = (void *)kmem_alloc_attr_domainset(
+ *vaddr = kmem_alloc_attr_domainset(
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
mflags, 0ul, BUS_SPACE_MAXADDR, attr);
map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
@@ -547,7 +547,7 @@
} else {
KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
("iommu_bus_dmamem_free for non alloced map %p", map));
- kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
+ kmem_free(vaddr, tag->common.maxsize);
map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
diff --git a/sys/dev/kvm_clock/kvm_clock.c b/sys/dev/kvm_clock/kvm_clock.c
--- a/sys/dev/kvm_clock/kvm_clock.c
+++ b/sys/dev/kvm_clock/kvm_clock.c
@@ -148,7 +148,7 @@
(regs[0] & KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) != 0;
/* Set up 'struct pvclock_vcpu_time_info' page(s): */
- sc->timeinfos = (struct pvclock_vcpu_time_info *)kmem_malloc(mp_ncpus *
+ sc->timeinfos = kmem_malloc(mp_ncpus *
sizeof(struct pvclock_vcpu_time_info), M_WAITOK | M_ZERO);
kvm_clock_system_time_enable(sc);
diff --git a/sys/dev/liquidio/lio_network.h b/sys/dev/liquidio/lio_network.h
--- a/sys/dev/liquidio/lio_network.h
+++ b/sys/dev/liquidio/lio_network.h
@@ -198,7 +198,7 @@
void *mem;
align = PAGE_SIZE << lio_get_order(size);
- mem = (void *)kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
+ mem = kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
VM_MEMATTR_DEFAULT);
if (mem != NULL)
*dma_handle = vtophys(mem);
@@ -212,7 +212,7 @@
lio_dma_free(size_t size, void *cpu_addr)
{
- kmem_free((vm_offset_t)cpu_addr, size);
+ kmem_free(cpu_addr, size);
}
static inline uint64_t
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
--- a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
@@ -410,6 +410,7 @@
struct mlx5_fw_update *fu;
struct firmware fake_fw;
struct mlx5_eeprom_get *eeprom_info;
+ void *fw_data;
int error;
error = 0;
@@ -461,21 +462,21 @@
error = mlx5_dbsf_to_core(devaddr, &mdev);
if (error != 0)
break;
- bzero(&fake_fw, sizeof(fake_fw));
- fake_fw.name = "umlx_fw_up";
- fake_fw.datasize = fu->img_fw_data_len;
- fake_fw.version = 1;
- fake_fw.data = (void *)kmem_malloc(fu->img_fw_data_len,
- M_WAITOK);
+ fw_data = kmem_malloc(fu->img_fw_data_len, M_WAITOK);
if (fake_fw.data == NULL) {
error = ENOMEM;
break;
}
- error = copyin(fu->img_fw_data, __DECONST(void *, fake_fw.data),
- fu->img_fw_data_len);
- if (error == 0)
+ error = copyin(fu->img_fw_data, fw_data, fu->img_fw_data_len);
+ if (error == 0) {
+ bzero(&fake_fw, sizeof(fake_fw));
+ fake_fw.name = "umlx_fw_up";
+ fake_fw.datasize = fu->img_fw_data_len;
+ fake_fw.version = 1;
+ fake_fw.data = fw_data;
error = -mlx5_firmware_flash(mdev, &fake_fw);
- kmem_free((vm_offset_t)fake_fw.data, fu->img_fw_data_len);
+ }
+ kmem_free(fw_data, fu->img_fw_data_len);
break;
case MLX5_FW_RESET:
if ((fflag & FWRITE) == 0) {
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -393,9 +393,9 @@
apic_id = cpu_apic_ids[cpu];
/* allocate and set up a boot stack data page */
- bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
+ bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
M_WAITOK | M_ZERO);
- dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1050,7 +1050,7 @@
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -494,7 +494,7 @@
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
- kmem_free((vm_offset_t)addr, size);
+ kmem_free(addr, size);
malloc_type_freed(type, round_page(size));
}
@@ -588,17 +588,15 @@
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
int flags DEBUG_REDZONE_ARG_DEF)
{
- vm_offset_t kva;
- caddr_t va;
+ void *va;
size = roundup(size, PAGE_SIZE);
- kva = kmem_malloc_domainset(policy, size, flags);
- if (kva != 0) {
+ va = kmem_malloc_domainset(policy, size, flags);
+ if (va != NULL) {
/* The low bit is unused for slab pointers. */
- vsetzoneslab(kva, NULL, (void *)((size << 1) | 1));
+ vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
uma_total_inc(size);
}
- va = (caddr_t)kva;
malloc_type_allocated(mtp, va == NULL ? 0 : size);
if (__predict_false(va == NULL)) {
KASSERT((flags & M_WAITOK) == 0,
@@ -607,7 +605,7 @@
#ifdef DEBUG_REDZONE
va = redzone_setup(va, osize);
#endif
- kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
+ kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
}
return (va);
}
@@ -616,7 +614,7 @@
free_large(void *addr, size_t size)
{
- kmem_free((vm_offset_t)addr, size);
+ kmem_free(addr, size);
uma_total_dec(size);
}
diff --git a/sys/kern/subr_busdma_bufalloc.c b/sys/kern/subr_busdma_bufalloc.c
--- a/sys/kern/subr_busdma_bufalloc.c
+++ b/sys/kern/subr_busdma_bufalloc.c
@@ -158,7 +158,7 @@
/* Inform UMA that this allocator uses kernel_arena/object. */
*pflag = UMA_SLAB_KERNEL;
- return ((void *)kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
+ return (kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
wait, 0, BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
#else
panic("VM_MEMATTR_UNCACHEABLE unavailable");
@@ -169,5 +169,5 @@
busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag)
{
- kmem_free((vm_offset_t)item, size);
+ kmem_free(item, size);
}
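With kmem_* speaking void *, the bufalloc routines above now match UMA's import/release typedefs without any casts. A sketch of a custom allocator pair in the post-patch style (the mydma_* names are illustrative; the signatures follow uma_alloc/uma_free in sys/vm/uma.h):

	static void *
	mydma_alloc(uma_zone_t zone, vm_size_t size, int domain, uint8_t *pflag,
	    int wait)
	{
		/* Inform UMA that this memory comes from the kernel arena. */
		*pflag = UMA_SLAB_KERNEL;
		return (kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
		    wait, 0, BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
	}

	static void
	mydma_free(void *item, vm_size_t size, uint8_t pflag)
	{
		kmem_free(item, size);
	}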
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -3686,7 +3686,7 @@
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -483,7 +483,7 @@
* multi-seg allocations yet though.
* XXX Certain AGP hardware does.
*/
- *vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
+ *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
dmat->boundary, attr);
(*mapp)->contigalloc = 1;
@@ -511,7 +511,7 @@
if (!map->contigalloc)
free(vaddr, M_DEVBUF);
else
- kmem_free((vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(vaddr, dmat->maxsize);
bus_dmamap_destroy(dmat, map);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -176,7 +176,7 @@
void *dpcpu;
pc = &__pcpu[cpu.cr_cpuid];
- dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
+ dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
DPCPU_SIZE, M_WAITOK | M_ZERO);
pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
dpcpu_init(dpcpu, cpu.cr_cpuid);
diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c
--- a/sys/riscv/riscv/busdma_bounce.c
+++ b/sys/riscv/riscv/busdma_bounce.c
@@ -454,11 +454,11 @@
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
- *vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
+ *vaddr = kmem_alloc_attr(dmat->common.maxsize, mflags,
0ul, dmat->common.lowaddr, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
} else {
- *vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
+ *vaddr = kmem_alloc_contig(dmat->common.maxsize, mflags,
0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
dmat->common.alignment : 1ul, dmat->common.boundary, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
@@ -495,7 +495,7 @@
if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
else
- kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
+ kmem_free(vaddr, dmat->common.maxsize);
free(map, M_DEVBUF);
dmat->map_count--;
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
diff --git a/sys/riscv/riscv/mp_machdep.c b/sys/riscv/riscv/mp_machdep.c
--- a/sys/riscv/riscv/mp_machdep.c
+++ b/sys/riscv/riscv/mp_machdep.c
@@ -311,7 +311,7 @@
for (cpu = 1; cpu <= mp_maxid; cpu++) {
if (bootstacks[cpu] != NULL)
- kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
+ kmem_free(bootstacks[cpu], PAGE_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
@@ -472,10 +472,10 @@
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_hart = hart;
- dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
- bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+ bootstacks[cpuid] = kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -790,7 +790,7 @@
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1947,7 +1947,7 @@
void *p; /* Returned page */
*pflag = UMA_SLAB_KERNEL;
- p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
+ p = kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
return (p);
}
@@ -2104,7 +2104,7 @@
KASSERT((flags & UMA_SLAB_KERNEL) != 0,
("UMA: page_free used with invalid flags %x", flags));
- kmem_free((vm_offset_t)mem, size);
+ kmem_free(mem, size);
}
/*
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -57,20 +57,20 @@
void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
/* These operate on virtual addresses backed by memory. */
-vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
+void *kmem_alloc_attr(vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
+void *kmem_alloc_contig(vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);
-vm_offset_t kmem_malloc(vm_size_t size, int flags);
-vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_malloc(vm_size_t size, int flags);
+void *kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
int flags);
-void kmem_free(vm_offset_t addr, vm_size_t size);
+void kmem_free(void *addr, vm_size_t size);
/* This provides memory for previously allocated address space. */
int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
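One behavioral note falls out of the prototypes above: allocation failure is now reported as NULL rather than 0. Only M_NOWAIT callers can observe it, since M_WAITOK requests sleep until they can be satisfied. A minimal sketch of the failure check under the new signatures (sz is illustrative):

	void *p;

	p = kmem_alloc_contig(sz, M_NOWAIT | M_ZERO, 0, BUS_SPACE_MAXADDR,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	if (p == NULL)
		return (ENOMEM);
	/* ... */
	kmem_free(p, sz);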
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -150,8 +150,7 @@
void
vm_ksubmap_init(struct kva_md_info *kmi)
{
- vm_offset_t firstaddr;
- caddr_t v;
+ caddr_t firstaddr, v;
vm_size_t size = 0;
long physmem_est;
vm_offset_t minaddr;
@@ -170,9 +169,9 @@
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
- firstaddr = 0;
+ firstaddr = NULL;
again:
- v = (caddr_t)firstaddr;
+ v = firstaddr;
/*
* Discount the physical memory larger than the size of kernel_map
@@ -186,7 +185,7 @@
/*
* End of first pass, size has been calculated so allocate memory
*/
- if (firstaddr == 0) {
+ if (firstaddr == NULL) {
size = (vm_size_t)v;
#ifdef VM_FREELIST_DMA32
/*
@@ -195,10 +194,10 @@
*/
firstaddr = kmem_alloc_attr(size, M_ZERO | M_NOWAIT,
(vm_paddr_t)1 << 32, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
- if (firstaddr == 0)
+ if (firstaddr == NULL)
#endif
firstaddr = kmem_malloc(size, M_ZERO | M_WAITOK);
- if (firstaddr == 0)
+ if (firstaddr == NULL)
panic("startup: no room for tables");
goto again;
}
@@ -206,15 +205,15 @@
/*
* End of second pass, addresses have been assigned
*/
- if ((vm_size_t)((char *)v - firstaddr) != size)
+ if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
/*
* Allocate the clean map to hold all of I/O virtual memory.
*/
size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * maxphys;
- kmi->clean_sva = firstaddr = kva_alloc(size);
- kmi->clean_eva = firstaddr + size;
+ kmi->clean_sva = kva_alloc(size);
+ kmi->clean_eva = kmi->clean_sva + size;
/*
* Allocate the buffer arena.
@@ -223,11 +222,10 @@
* avoids lock contention at the expense of some fragmentation.
*/
size = (long)nbuf * BKVASIZE;
- kmi->buffer_sva = firstaddr;
+ kmi->buffer_sva = kmi->clean_sva;
kmi->buffer_eva = kmi->buffer_sva + size;
vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
PAGE_SIZE, (mp_ncpus > 4) ? BKVASIZE * 8 : 0, M_WAITOK);
- firstaddr += size;
/*
* And optionally transient bio space.
@@ -235,11 +233,8 @@
if (bio_transient_maxcnt != 0) {
size = (long)bio_transient_maxcnt * maxphys;
vmem_init(transient_arena, "transient arena",
- firstaddr, size, PAGE_SIZE, 0, M_WAITOK);
- firstaddr += size;
+ kmi->buffer_eva, size, PAGE_SIZE, 0, M_WAITOK);
}
- if (firstaddr != kmi->clean_eva)
- panic("Clean map calculation incorrect");
/*
* Allocate the pageable submaps. We may cache an exec map entry per
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -229,7 +229,7 @@
* necessarily physically contiguous. If M_ZERO is specified through the
* given flags, then the pages are zeroed before they are mapped.
*/
-static vm_offset_t
+static void *
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
@@ -270,10 +270,10 @@
}
VM_OBJECT_WUNLOCK(object);
kmem_alloc_san(addr, size, asize, flags);
- return (addr);
+ return ((void *)addr);
}
-vm_offset_t
+void *
kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
vm_memattr_t memattr)
{
@@ -282,19 +282,19 @@
high, memattr));
}
-vm_offset_t
+void *
kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
{
struct vm_domainset_iter di;
- vm_offset_t addr;
+ void *addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
memattr);
- if (addr != 0)
+ if (addr != NULL)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
@@ -309,7 +309,7 @@
* through the given flags, then the pages are zeroed before they are
* mapped.
*/
-static vm_offset_t
+static void *
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
@@ -326,7 +326,7 @@
asize = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
- return (0);
+ return (NULL);
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
npages = atop(asize);
@@ -336,7 +336,7 @@
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
vmem_free(vmem, addr, asize);
- return (0);
+ return (NULL);
}
KASSERT(vm_page_domain(m) == domain,
("kmem_alloc_contig_domain: Domain mismatch %d != %d",
@@ -353,10 +353,10 @@
}
VM_OBJECT_WUNLOCK(object);
kmem_alloc_san(addr, size, asize, flags);
- return (addr);
+ return ((void *)addr);
}
-vm_offset_t
+void *
kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
@@ -365,20 +365,20 @@
high, alignment, boundary, memattr));
}
-vm_offset_t
+void *
kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
struct vm_domainset_iter di;
- vm_offset_t addr;
+ void *addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, memattr);
- if (addr != 0)
+ if (addr != NULL)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
@@ -423,7 +423,7 @@
*
* Allocate wired-down pages in the kernel's address space.
*/
-static vm_offset_t
+static void *
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *arena;
@@ -445,27 +445,27 @@
return (0);
}
kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
- return (addr);
+ return ((void *)addr);
}
-vm_offset_t
+void *
kmem_malloc(vm_size_t size, int flags)
{
return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags));
}
-vm_offset_t
+void *
kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
- vm_offset_t addr;
+ void *addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_malloc_domain(domain, size, flags);
- if (addr != 0)
+ if (addr != NULL)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
@@ -631,15 +631,15 @@
* original allocation.
*/
void
-kmem_free(vm_offset_t addr, vm_size_t size)
+kmem_free(void *addr, vm_size_t size)
{
struct vmem *arena;
size = round_page(size);
- kasan_mark((void *)addr, size, size, 0);
- arena = _kmem_unback(kernel_object, addr, size);
+ kasan_mark(addr, size, size, 0);
+ arena = _kmem_unback(kernel_object, (uintptr_t)addr, size);
if (arena != NULL)
- vmem_free(arena, addr, size);
+ vmem_free(arena, (uintptr_t)addr, size);
}
/*
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -158,7 +158,7 @@
/* QI */
int qi_enabled;
- vm_offset_t inv_queue;
+ char *inv_queue;
vm_size_t inv_queue_size;
uint32_t inv_queue_avail;
uint32_t inv_queue_tail;
diff --git a/sys/x86/iommu/intel_intrmap.c b/sys/x86/iommu/intel_intrmap.c
--- a/sys/x86/iommu/intel_intrmap.c
+++ b/sys/x86/iommu/intel_intrmap.c
@@ -342,9 +342,9 @@
return (0);
}
unit->irte_cnt = clp2(num_io_irqs);
- unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(
- unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
- dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
+ unit->irt = kmem_alloc_contig(unit->irte_cnt * sizeof(dmar_irte_t),
+ M_ZERO | M_WAITOK, 0, dmar_high, PAGE_SIZE, 0,
+ DMAR_IS_COHERENT(unit) ?
VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
if (unit->irt == NULL)
return (ENOMEM);
@@ -378,7 +378,6 @@
dmar_disable_ir(unit);
dmar_qi_invalidate_iec_glob(unit);
vmem_destroy(unit->irtids);
- kmem_free((vm_offset_t)unit->irt, unit->irte_cnt *
- sizeof(dmar_irte_t));
+ kmem_free(unit->irt, unit->irte_cnt * sizeof(dmar_irte_t));
}
}
diff --git a/sys/x86/iommu/intel_qi.c b/sys/x86/iommu/intel_qi.c
--- a/sys/x86/iommu/intel_qi.c
+++ b/sys/x86/iommu/intel_qi.c
@@ -510,7 +510,7 @@
DMAR_LOCK(unit);
dmar_write8(unit, DMAR_IQT_REG, 0);
- iqa = pmap_kextract(unit->inv_queue);
+ iqa = pmap_kextract((uintptr_t)unit->inv_queue);
iqa |= qi_sz;
dmar_write8(unit, DMAR_IQA_REG, iqa);
dmar_enable_qi(unit);
@@ -552,7 +552,7 @@
DMAR_UNLOCK(unit);
kmem_free(unit->inv_queue, unit->inv_queue_size);
- unit->inv_queue = 0;
+ unit->inv_queue = NULL;
unit->inv_queue_size = 0;
unit->qi_enabled = 0;
}
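Note that intel_dmar.h converts inv_queue to char * rather than void *: the invalidation-queue code addresses the queue with byte offsets, and pointer arithmetic is defined only on object-pointer types. A sketch of the kind of arithmetic that motivates the choice (illustrative, not the driver's exact code):

	char *inv_queue;
	uint32_t tail;
	uint64_t desc[2];

	/* Byte-offset arithmetic is legal on char *, not on void *. */
	memcpy(inv_queue + tail, desc, sizeof(desc));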
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -449,12 +449,12 @@
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
- *vaddr = (void *)kmem_alloc_attr_domainset(
+ *vaddr = kmem_alloc_attr_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
mflags, 0ul, dmat->common.lowaddr, attr);
dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
} else {
- *vaddr = (void *)kmem_alloc_contig_domainset(
+ *vaddr = kmem_alloc_contig_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
mflags, 0ul, dmat->common.lowaddr,
dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
@@ -490,7 +490,7 @@
if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
else
- kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
+ kmem_free(vaddr, dmat->common.maxsize);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
dmat->bounce_flags);
}
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -1154,8 +1154,7 @@
smp_no_rendezvous_barrier, NULL);
for (cpu = 1; cpu < mp_ncpus; cpu++) {
- kmem_free((vm_offset_t)bootstacks[cpu], kstack_pages *
- PAGE_SIZE);
+ kmem_free(bootstacks[cpu], kstack_pages * PAGE_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
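Taken together, the post-patch surface is small and regular. A self-contained sketch of idiomatic use, assuming only headers this diff already touches plus machine/bus.h for the address bound (all myring_* names are illustrative):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/malloc.h>
	#include <machine/bus.h>
	#include <vm/vm.h>
	#include <vm/vm_extern.h>

	#define	MYRING_SIZE	(4 * PAGE_SIZE)

	static void *myring;		/* would have been vm_offset_t before */

	static int
	myring_attach(void)
	{
		/* Wired, zeroed, physically contiguous memory below 4 GB. */
		myring = kmem_alloc_contig(MYRING_SIZE, M_NOWAIT | M_ZERO, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
		return (myring == NULL ? ENOMEM : 0);
	}

	static void
	myring_detach(void)
	{
		if (myring != NULL) {
			kmem_free(myring, MYRING_SIZE);
			myring = NULL;
		}
	}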