D48894.diff

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5026,13 +5026,13 @@
npdpg = howmany(size, NBPDP);
npde = size / NBPDR;
- dummypa = vm_phys_early_alloc(PAGE_SIZE, -1);
+ dummypa = vm_phys_early_alloc(PAGE_SIZE);
pagezero((void *)PHYS_TO_DMAP(dummypa));
- dummypt = vm_phys_early_alloc(PAGE_SIZE, -1);
+ dummypt = vm_phys_early_alloc(PAGE_SIZE);
pagezero((void *)PHYS_TO_DMAP(dummypt));
- dummypd = vm_phys_early_alloc(PAGE_SIZE * npdpg, -1);
+ dummypd = vm_phys_early_alloc(PAGE_SIZE * npdpg);
for (i = 0; i < npdpg; i++)
pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));
@@ -5091,7 +5091,8 @@
domain = vm_phys_domain(ptoa(pfn));
pdpe = pmap_pdpe(kernel_pmap, va);
if ((*pdpe & X86_PG_V) == 0) {
- pa = vm_phys_early_alloc(PAGE_SIZE, domain);
+ pa = vm_phys_early_alloc_ex(PAGE_SIZE, PAGE_SIZE, -1,
+ domain, 0);
dump_add_page(pa);
pagezero((void *)PHYS_TO_DMAP(pa));
*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -3442,7 +3442,7 @@
/* Short-circuit single-domain systems. */
if (vm_ndomains == 1) {
size = round_page(pages * sizeof(struct vm_page));
- pa = vm_phys_early_alloc(size, 0);
+ pa = vm_phys_early_alloc(size);
vm_page_base = moea64_map(&vm_page_base,
pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
vm_page_array_size = pages;
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -6438,7 +6438,7 @@
start = VM_MIN_KERNEL_ADDRESS;
end = start + pages * sizeof(struct vm_page);
- pa = vm_phys_early_alloc(end - start, -1);
+ pa = vm_phys_early_alloc(end - start);
start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
#ifdef notyet
@@ -6448,7 +6448,8 @@
domain = vm_phys_domain(ptoa(pfn));
l2e = pmap_pml2e(kernel_pmap, va);
if ((be64toh(*l2e) & PG_V) == 0) {
- pa = vm_phys_early_alloc(PAGE_SIZE, domain);
+ pa = vm_phys_early_alloc_ex(PAGE_SIZE, PAGE_SIZE, -1,
+ domain, 0);
dump_add_page(pa);
pagezero(PHYS_TO_DMAP(pa));
pde_store(l2e, (pml2_entry_t)pa);
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -92,7 +92,7 @@
int __result_use_check vm_phys_early_alloc_ex_err(size_t alloc_size,
vm_paddr_t alignment, int chunk_start_idx, int domain, u_int flags,
vm_paddr_t *ppa);
-vm_paddr_t vm_phys_early_alloc(size_t alloc_size, int domain);
+vm_paddr_t vm_phys_early_alloc(size_t alloc_size);
void vm_phys_early_startup(void);
int vm_phys_avail_largest(void);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -2371,17 +2371,17 @@
/*
* Simpler wrapper for vm_phys_early_alloc_ex().
*
- * Can't request a specific alignment, chunk, nor pass flags. In particular, it
- * will panic on failure.
+ * Can't request a specific alignment, chunk, or domain, nor pass flags. In
+ * particular, it will panic on failure.
*
* CAUTION: Contrary to the previous vm_phys_early_alloc() implementation, it
* only aligns the requested memory on PAGE_SIZE, regardless of 'alloc_size'.
* If a greater alignment is needed, use vm_phys_early_alloc_ex() instead.
*/
vm_paddr_t
-vm_phys_early_alloc(size_t alloc_size, int domain)
+vm_phys_early_alloc(size_t alloc_size)
{
- return (vm_phys_early_alloc_ex(alloc_size, PAGE_SIZE, -1, domain, 0));
+ return (vm_phys_early_alloc_ex(alloc_size, PAGE_SIZE, -1, -1, 0));
}

void

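For illustration, here is a minimal sketch of how a boot-time caller looks after this change. The vm_phys_early_alloc_ex() argument order (size, alignment, chunk start index, domain, flags) is inferred from the vm_phys_early_alloc_ex_err() prototype and the converted pmap call sites above; example_bootstrap_page() is a hypothetical caller, not code from the patch, and it compiles only in a kernel environment:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_phys.h>

/*
 * Hypothetical early-boot caller (illustration only, not from the patch):
 * allocates one physical page, optionally from a specific NUMA domain.
 */
static vm_paddr_t
example_bootstrap_page(int domain)
{
	vm_paddr_t pa;

	if (domain < 0) {
		/*
		 * No placement preference: the simplified wrapper now
		 * takes only the size, aligns on PAGE_SIZE regardless of
		 * 'alloc_size', and panics on failure (per the comment in
		 * vm_phys.c above).
		 */
		pa = vm_phys_early_alloc(PAGE_SIZE);
	} else {
		/*
		 * Domain-aware callers use the extended interface, as the
		 * converted call sites above do: size, alignment, chunk
		 * start index (-1 for any chunk), domain, flags.
		 */
		pa = vm_phys_early_alloc_ex(PAGE_SIZE, PAGE_SIZE, -1,
		    domain, 0);
	}
	return (pa);
}

A caller that must tolerate allocation failure rather than panic can instead use vm_phys_early_alloc_ex_err(), which takes the same five arguments plus a vm_paddr_t out-parameter and returns an error code; its result is marked __result_use_check, so it cannot be silently ignored.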