D36673.diff
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4997,13 +4997,21 @@
 	vm_page_t nkpg;
 	pd_entry_t *pde, newpdir;
 	pdp_entry_t *pdpe;
+	vm_offset_t end;
 
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 
 	/*
-	 * Return if "addr" is within the range of kernel page table pages
-	 * that were preallocated during pmap bootstrap.  Moreover, leave
-	 * "kernel_vm_end" and the kernel page table as they were.
+	 * The kernel map covers two distinct regions of KVA: that used
+	 * for dynamic kernel memory allocations, and the uppermost 2GB
+	 * of the virtual address space.  The latter is used to map the
+	 * kernel and loadable kernel modules.  This scheme enables the
+	 * use of a special code generation model for kernel code which
+	 * takes advantage of compact addressing modes in machine code.
+	 *
+	 * Both regions grow upwards; to avoid wasting memory, the gap
+	 * in between is unmapped.  If "addr" is above "KERNBASE", the
+	 * kernel's region is grown, otherwise the kmem region is grown.
 	 *
 	 * The correctness of this action is based on the following
 	 * argument: vm_map_insert() allocates contiguous ranges of the
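
A minimal standalone sketch of the region-selection rule described in the new comment (KERNBASE_DEMO, NBPDR_DEMO, nkpt_demo, and the sample addresses are illustrative stand-ins, not the kernel's definitions):

/*
 * Not FreeBSD code: a userland sketch of the region-selection rule.
 * Addresses above KERNBASE belong to the kernel/module region, whose
 * mapped end is KERNBASE + nkpt * NBPDR; everything else belongs to
 * the kmem region, whose mapped end is kernel_vm_end.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	NBPDR_DEMO	(2ULL * 1024 * 1024)	/* 2MB per PDE */
#define	KERNBASE_DEMO	0xffffffff80000000ULL	/* top 2GB of KVA */

static uint64_t kernel_vm_end_demo = 0xfffffe0000400000ULL; /* sample */
static uint64_t nkpt_demo = 32;		/* sample preallocated PTP count */

static uint64_t
mapped_end(uint64_t addr)
{
	if (addr > KERNBASE_DEMO)	/* same test as "KERNBASE < addr" */
		return (KERNBASE_DEMO + nkpt_demo * NBPDR_DEMO);
	return (kernel_vm_end_demo);
}

int
main(void)
{
	printf("kmem region end:   0x%016" PRIx64 "\n",
	    mapped_end(0xfffffe0000000000ULL));
	printf("kernel region end: 0x%016" PRIx64 "\n",
	    mapped_end(0xffffffff80400000ULL));
	return (0);
}
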
@@ -5015,22 +5023,32 @@
 	 * any new kernel page table pages between "kernel_vm_end" and
 	 * "KERNBASE".
 	 */
-	if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
-		return;
+	if (KERNBASE < addr) {
+		end = KERNBASE + nkpt * NBPDR;
+		if (end == 0)
+			return;
+	} else {
+		end = kernel_vm_end;
+	}
 
 	addr = roundup2(addr, NBPDR);
 	if (addr - 1 >= vm_map_max(kernel_map))
 		addr = vm_map_max(kernel_map);
-	if (kernel_vm_end < addr)
-		kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
-	if (kernel_vm_end < addr)
-		kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
-	while (kernel_vm_end < addr) {
-		pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
+	if (addr <= end) {
+		/*
+		 * The grown region is already mapped, so there is
+		 * nothing to do.
+		 */
+		return;
+	}
+
+	kasan_shadow_map(end, addr - end);
+	kmsan_shadow_map(end, addr - end);
+	while (end < addr) {
+		pdpe = pmap_pdpe(kernel_pmap, end);
 		if ((*pdpe & X86_PG_V) == 0) {
-			/* We need a new PDP entry */
 			nkpg = pmap_alloc_pt_page(kernel_pmap,
-			    kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED |
+			    pmap_pdpe_pindex(end), VM_ALLOC_WIRED |
 			    VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
@@ -5039,31 +5057,35 @@
 			    X86_PG_A | X86_PG_M);
 			continue; /* try again */
 		}
-		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
+		pde = pmap_pdpe_to_pde(pdpe, end);
 		if ((*pde & X86_PG_V) != 0) {
-			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
-				kernel_vm_end = vm_map_max(kernel_map);
+			end = (end + NBPDR) & ~PDRMASK;
+			if (end - 1 >= vm_map_max(kernel_map)) {
+				end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
 		}
 
-		nkpg = pmap_alloc_pt_page(kernel_pmap,
-		    pmap_pde_pindex(kernel_vm_end), VM_ALLOC_WIRED |
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
+		nkpg = pmap_alloc_pt_page(kernel_pmap, pmap_pde_pindex(end),
+		    VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
 		pde_store(pde, newpdir);
 
-		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
-			kernel_vm_end = vm_map_max(kernel_map);
+		end = (end + NBPDR) & ~PDRMASK;
+		if (end - 1 >= vm_map_max(kernel_map)) {
+			end = vm_map_max(kernel_map);
 			break;
 		}
 	}
+
+	if (end <= KERNBASE)
+		kernel_vm_end = end;
+	else
+		nkpt = howmany(end - KERNBASE, NBPDR);
 }
 
 /***************************************************
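
The new epilogue records growth in the marker appropriate to each region: kernel_vm_end tracks the end of the kmem region directly, while the kernel/module region's extent is kept as a count of 2MB page table pages (nkpt) above KERNBASE. A sketch of that bookkeeping, with howmany() from sys/param.h spelled out and demo values standing in for kernel state:

/*
 * Not FreeBSD code: a sketch of the final bookkeeping.  The kmem
 * region is tracked by an end address (kernel_vm_end); the kernel/
 * module region is tracked as a count of 2MB page table pages (nkpt)
 * measured from KERNBASE.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	KERNBASE_DEMO	0xffffffff80000000ULL
#define	NBPDR_DEMO	(2ULL * 1024 * 1024)

/* Round-up division, as howmany() is defined in sys/param.h. */
#define	howmany_demo(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	uint64_t kernel_vm_end_demo = 0, nkpt_demo = 0;
	uint64_t end;

	/* Growth stopped below KERNBASE: advance kernel_vm_end. */
	end = 0xfffffe0000800000ULL;
	if (end <= KERNBASE_DEMO)
		kernel_vm_end_demo = end;
	printf("kernel_vm_end = 0x%016" PRIx64 "\n", kernel_vm_end_demo);

	/* Growth in the kernel region: update the PTP count instead. */
	end = KERNBASE_DEMO + 36 * NBPDR_DEMO;
	if (end > KERNBASE_DEMO)
		nkpt_demo = howmany_demo(end - KERNBASE_DEMO, NBPDR_DEMO);
	printf("nkpt = %" PRIu64 "\n", nkpt_demo);	/* prints 36 */
	return (0);
}
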
