
D33685.id100692.diff

Index: sys/arm/arm/busdma_machdep.c
===================================================================
--- sys/arm/arm/busdma_machdep.c
+++ sys/arm/arm/busdma_machdep.c
@@ -319,7 +319,7 @@
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
- return (addr & (dmat->alignment - 1));
+ return (!vm_page_align_ok(addr, dmat->alignment));
}
/*
@@ -1008,18 +1008,13 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -1033,8 +1028,8 @@
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
Index: sys/arm64/arm64/busdma_bounce.c
===================================================================
--- sys/arm64/arm64/busdma_bounce.c
+++ sys/arm64/arm64/busdma_bounce.c
@@ -198,7 +198,7 @@
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
- return ((addr & (dmat->common.alignment - 1)) != 0);
+ return (!vm_page_align_ok(addr, dmat->common.alignment));
}
static bool
@@ -617,7 +617,7 @@
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->alloc_alignment - 1)) {
+ } else if (!vm_page_align_ok(vtophys(*vaddr), dmat->alloc_alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@@ -768,18 +768,13 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -793,8 +788,8 @@
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
Index: sys/dev/iommu/busdma_iommu.c
===================================================================
--- sys/dev/iommu/busdma_iommu.c
+++ sys/dev/iommu/busdma_iommu.c
@@ -619,8 +619,8 @@
if (buflen1 > tag->common.maxsegsz)
buflen1 = tag->common.maxsegsz;
- KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
- == 0,
+ KASSERT(vm_page_align_ok(entry->start + offset,
+ tag->common.alignment),
("alignment failed: ctx %p start 0x%jx offset %x "
"align 0x%jx", ctx, (uintmax_t)entry->start, offset,
(uintmax_t)tag->common.alignment));
@@ -631,7 +631,7 @@
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)tag->common.lowaddr,
(uintmax_t)tag->common.highaddr));
- KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
+ KASSERT(vm_page_bound_ok(entry->start + offset, buflen1,
tag->common.boundary),
("boundary failed: ctx %p start 0x%jx end 0x%jx "
"boundary 0x%jx", ctx, (uintmax_t)entry->start,
Index: sys/dev/iommu/iommu.h
===================================================================
--- sys/dev/iommu/iommu.h
+++ sys/dev/iommu/iommu.h
@@ -148,16 +148,6 @@
#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
-static inline bool
-iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
- iommu_gaddr_t boundary)
-{
-
- if (boundary == 0)
- return (true);
- return (start + size <= ((start + boundary) & ~(boundary - 1)));
-}
-
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
Index: sys/dev/iommu/iommu_gas.c
===================================================================
--- sys/dev/iommu/iommu_gas.c
+++ sys/dev/iommu/iommu_gas.c
@@ -314,7 +314,7 @@
return (false);
/* No boundary crossing. */
- if (iommu_test_boundary(a->entry->start + a->offset, a->size,
+ if (vm_page_bound_ok(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
Index: sys/mips/mips/busdma_machdep.c
===================================================================
--- sys/mips/mips/busdma_machdep.c
+++ sys/mips/mips/busdma_machdep.c
@@ -269,7 +269,7 @@
do {
if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
- || ((paddr & (dmat->alignment - 1)) != 0))
+ || !vm_page_align_ok(paddr, dmat->alignment))
&& (dmat->filter == NULL
|| (*dmat->filter)(dmat->filterarg, paddr) != 0))
retval = 1;
@@ -873,18 +873,14 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
+
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
@@ -893,8 +889,8 @@
if (seg >= 0 &&
curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->boundary)) {
segs[seg].ds_len += sgsize;
} else {
if (++seg >= dmat->nsegments)
Index: sys/powerpc/powerpc/busdma_machdep.c
===================================================================
--- sys/powerpc/powerpc/busdma_machdep.c
+++ sys/powerpc/powerpc/busdma_machdep.c
@@ -173,7 +173,7 @@
paddr > dmat->lowaddr && paddr <= dmat->highaddr)
retval = 1;
if (dmat->filter == NULL &&
- (paddr & (dmat->alignment - 1)) != 0)
+ !vm_page_align_ok(paddr, dmat->alignment))
retval = 1;
if (dmat->filter != NULL &&
(*dmat->filter)(dmat->filterarg, paddr) != 0)
@@ -564,7 +564,7 @@
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
+ } else if (!vm_page_align_ok(vtophys(*vaddr), dmat->alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -689,18 +689,13 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -714,8 +709,8 @@
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
Index: sys/riscv/riscv/busdma_bounce.c
===================================================================
--- sys/riscv/riscv/busdma_bounce.c
+++ sys/riscv/riscv/busdma_bounce.c
@@ -505,7 +505,7 @@
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+ } else if (!vm_page_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@@ -637,18 +637,13 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -662,8 +657,8 @@
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
Index: sys/riscv/riscv/busdma_machdep.c
===================================================================
--- sys/riscv/riscv/busdma_machdep.c
+++ sys/riscv/riscv/busdma_machdep.c
@@ -102,7 +102,7 @@
retval = 0;
do {
if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
- ((paddr & (tc->alignment - 1)) != 0)) &&
+ !vm_page_align_ok(paddr, tc->alignment)) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -2032,10 +2032,8 @@
*/
if (alignment == 0)
pmap_align_superpage(object, offset, addr, length);
- else if ((*addr & (alignment - 1)) != 0) {
- *addr &= ~(alignment - 1);
- *addr += alignment;
- }
+ else
+ *addr = roundup2(*addr, alignment);
aligned_addr = *addr;
if (aligned_addr == free_addr) {
/*
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -1017,5 +1017,24 @@
#endif
}
+static inline bool
+vm_page_align_ok(vm_paddr_t pa, u_long alignment)
+{
+ return ((pa & (alignment - 1)) == 0);
+}
+
+static inline bool
+vm_page_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
+{
+ return (((pa ^ (pa + size - 1)) & -boundary) == 0);
+}
+
+static inline bool
+vm_page_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
+ vm_paddr_t boundary)
+{
+ return (vm_page_align_ok(pa, alignment) &&
+ vm_page_bound_ok(pa, size, boundary));
+}
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2656,12 +2656,11 @@
if (m + npages > m_end)
break;
pa = VM_PAGE_TO_PHYS(m);
- if ((pa & (alignment - 1)) != 0) {
+ if (!vm_page_align_ok(pa, alignment)) {
m_inc = atop(roundup2(pa, alignment) - pa);
continue;
}
- if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
- boundary) != 0) {
+ if (!vm_page_bound_ok(pa, ptoa(npages), boundary)) {
m_inc = atop(roundup2(pa, boundary) - pa);
continue;
}
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -1466,8 +1466,8 @@
pa = VM_PAGE_TO_PHYS(m_ret);
pa_end = pa + size;
if (pa >= low && pa_end <= high &&
- (pa & (alignment - 1)) == 0 &&
- rounddown2(pa ^ (pa_end - 1), boundary) == 0)
+ vm_page_addr_ok(pa, size,
+ alignment, boundary))
goto done;
}
}
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -656,10 +656,8 @@
* possible size satisfy the alignment and boundary requirements?
*/
pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
- if ((pa & (alignment - 1)) != 0)
- return (NULL);
size = npages << PAGE_SHIFT;
- if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
+ if (!vm_page_addr_ok(pa, size, alignment, boundary))
return (NULL);
/*
@@ -682,8 +680,7 @@
m = &rv->pages[index];
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + size > high ||
- (pa & (alignment - 1)) != 0 ||
- ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
+ !vm_page_addr_ok(pa, size, alignment, boundary))
goto out;
/* Handle vm_page_rename(m, new_object, ...). */
for (i = 0; i < npages; i++)
@@ -1333,7 +1330,7 @@
* doesn't include a boundary-multiple within it. Otherwise,
* no boundary-constrained allocation is possible.
*/
- if (size > boundary && boundary > 0)
+ if (!vm_page_bound_ok(0, size, boundary))
return (NULL);
marker = &vm_rvd[domain].marker;
queue = &vm_rvd[domain].partpop;
@@ -1360,7 +1357,7 @@
/* This entire reservation is too high; go to next. */
continue;
}
- if ((pa & (alignment - 1)) != 0) {
+ if (!vm_page_align_ok(pa, alignment)) {
/* This entire reservation is unaligned; go to next. */
continue;
}
@@ -1397,12 +1394,10 @@
vm_reserv_unlock(rv);
m_ret = &rv->pages[posn];
pa = VM_PAGE_TO_PHYS(m_ret);
- KASSERT((pa & (alignment - 1)) == 0,
- ("%s: adjusted address does not align to %lx",
- __func__, alignment));
- KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
- ("%s: adjusted address spans boundary to %jx",
- __func__, (uintmax_t)boundary));
+ KASSERT(vm_page_addr_ok(pa, size, alignment, boundary),
+ ("%s: adjusted address not aligned/bounded to "
+ "%lx/%jx",
+ __func__, alignment, (uintmax_t)boundary));
return (m_ret);
}
vm_reserv_domain_lock(domain);
Index: sys/x86/x86/busdma_bounce.c
===================================================================
--- sys/x86/x86/busdma_bounce.c
+++ sys/x86/x86/busdma_bounce.c
@@ -502,7 +502,7 @@
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+ } else if (!vm_page_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -645,8 +645,9 @@
sg_len = roundup2(sg_len,
dmat->common.alignment);
sg_len = MIN(sg_len, max_sgsize);
- KASSERT((sg_len & (dmat->common.alignment - 1))
- == 0, ("Segment size is not aligned"));
+ KASSERT(vm_page_align_ok(sg_len,
+ dmat->common.alignment),
+ ("Segment size is not aligned"));
map->pagesneeded++;
}
if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
@@ -691,7 +692,6 @@
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
KASSERT(curaddr <= BUS_SPACE_MAXADDR,
@@ -704,12 +704,8 @@
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_page_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -723,8 +719,8 @@
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_page_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
@@ -908,7 +904,8 @@
bus_dma_run_filter(&dmat->common, paddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
sgsize = MIN(sgsize, max_sgsize);
- KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+ KASSERT(vm_page_align_ok(sgsize,
+ dmat->common.alignment),
("Segment size is not aligned"));
/*
* Check if two pages of the user provided buffer
Index: sys/x86/x86/busdma_machdep.c
===================================================================
--- sys/x86/x86/busdma_machdep.c
+++ sys/x86/x86/busdma_machdep.c
@@ -107,7 +107,7 @@
do {
if ((paddr >= BUS_SPACE_MAXADDR ||
(paddr > tc->lowaddr && paddr <= tc->highaddr) ||
- (paddr & (tc->alignment - 1)) != 0) &&
+ !vm_page_align_ok(paddr, tc->alignment)) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;
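
For reference, here is a minimal userspace sketch (not part of this patch) that exercises the same bit tests as the vm_page_align_ok() and vm_page_bound_ok() inlines added to sys/vm/vm_page.h above. The includes and the uint64_t typedef are stand-ins so the snippet compiles outside the kernel.

    /*
     * Standalone illustration of the address checkers; uint64_t stands in
     * for the kernel's vm_paddr_t.  Assumption: compiled as ordinary C.
     */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t vm_paddr_t;

    static inline bool
    vm_page_align_ok(vm_paddr_t pa, unsigned long alignment)
    {
            /* Power-of-two alignment test: the low bits must be clear. */
            return ((pa & (alignment - 1)) == 0);
    }

    static inline bool
    vm_page_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
    {
            /*
             * XOR of the first and last byte address keeps only the bits
             * that differ; masking with -boundary asks whether any bit at
             * or above the boundary changed, i.e. whether the range
             * crosses a boundary multiple.  A boundary of 0 yields a zero
             * mask, meaning "no restriction".
             */
            return (((pa ^ (pa + size - 1)) & -boundary) == 0);
    }

    int
    main(void)
    {
            assert(vm_page_align_ok(0x1000, 0x1000));
            assert(!vm_page_align_ok(0x1004, 0x1000));

            assert(vm_page_bound_ok(0x0f00, 0x100, 0x1000));  /* stays below 0x1000 */
            assert(!vm_page_bound_ok(0x0f00, 0x200, 0x1000)); /* crosses 0x1000 */
            assert(vm_page_bound_ok(0x12345, 0x100000, 0));   /* boundary 0: always ok */
            return (0);
    }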
