D39828.id121390.diff

diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -108,7 +108,7 @@
if (*l0 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l0 = mphys | L0_TABLE;
+ *l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
} else {
mphys = PTE_TO_PHYS(*l0);
}
@@ -119,7 +119,7 @@
if (*l1 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l1 = mphys | L1_TABLE;
+ *l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
} else {
mphys = PTE_TO_PHYS(*l1);
}
@@ -130,7 +130,7 @@
if (*l2 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l2 = mphys | L2_TABLE;
+ *l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
} else {
mphys = PTE_TO_PHYS(*l2);
}
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -962,7 +962,7 @@
l1_pa = pmap_early_vtophys((vm_offset_t)state->l1);
MPASS((l1_pa & Ln_TABLE_MASK) == 0);
MPASS(pagetable_l0_ttbr1[l0_slot] == 0);
- pmap_store(&pagetable_l0_ttbr1[l0_slot], l1_pa |
+ pmap_store(&pagetable_l0_ttbr1[l0_slot], PHYS_TO_PTE(l1_pa) |
TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0 | L0_TABLE);
}
KASSERT(state->l1 != NULL, ("%s: NULL l1", __func__));
@@ -1010,8 +1010,8 @@
l2_pa = pmap_early_vtophys((vm_offset_t)state->l2);
MPASS((l2_pa & Ln_TABLE_MASK) == 0);
MPASS(state->l1[l1_slot] == 0);
- pmap_store(&state->l1[l1_slot], l2_pa | state->table_attrs |
- L1_TABLE);
+ pmap_store(&state->l1[l1_slot], PHYS_TO_PTE(l2_pa) |
+ state->table_attrs | L1_TABLE);
}
KASSERT(state->l2 != NULL, ("%s: NULL l2", __func__));
}
@@ -1054,8 +1054,8 @@
l3_pa = pmap_early_vtophys((vm_offset_t)state->l3);
MPASS((l3_pa & Ln_TABLE_MASK) == 0);
MPASS(state->l2[l2_slot] == 0);
- pmap_store(&state->l2[l2_slot], l3_pa | state->table_attrs |
- L2_TABLE);
+ pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(l3_pa) |
+ state->table_attrs | L2_TABLE);
}
KASSERT(state->l3 != NULL, ("%s: NULL l3", __func__));
}
@@ -1088,9 +1088,9 @@
l2_slot = pmap_l2_index(state->va);
MPASS((state->pa & L2_OFFSET) == 0);
MPASS(state->l2[l2_slot] == 0);
- pmap_store(&state->l2[l2_slot], state->pa | ATTR_DEFAULT |
- ATTR_S1_XN | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
- L2_BLOCK);
+ pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
+ ATTR_DEFAULT | ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
@@ -1123,9 +1123,9 @@
l3_slot = pmap_l3_index(state->va);
MPASS((state->pa & L3_OFFSET) == 0);
MPASS(state->l3[l3_slot] == 0);
- pmap_store(&state->l3[l3_slot], state->pa | ATTR_DEFAULT |
- ATTR_S1_XN | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
- L3_PAGE);
+ pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
+ ATTR_DEFAULT | ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L3_PAGE);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
@@ -1163,9 +1163,9 @@
MPASS((bs_state.pa & L1_OFFSET) == 0);
pmap_store(
&bs_state.l1[pmap_l1_index(bs_state.va)],
- bs_state.pa | ATTR_DEFAULT | ATTR_S1_XN |
+ PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
- L1_BLOCK);
+ ATTR_S1_XN | L1_BLOCK);
}
MPASS(bs_state.pa <= physmap[i + 1]);
@@ -1241,7 +1241,7 @@
continue;
}
- pmap_store(l2, pa | PMAP_SAN_PTE_BITS | L2_BLOCK);
+ pmap_store(l2, PHYS_TO_PTE(pa) | PMAP_SAN_PTE_BITS | L2_BLOCK);
}
/*
@@ -1998,7 +1998,7 @@
KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
pte = pmap_l2_to_l3(pde, va);
- pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
+ pmap_load_store(pte, PHYS_TO_PTE(pa) | attr);
va += PAGE_SIZE;
pa += PAGE_SIZE;
@@ -2080,7 +2080,7 @@
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
pd_entry_t *pde;
- pt_entry_t *pte, pa;
+ pt_entry_t *pte, pa, attr;
vm_offset_t va;
vm_page_t m;
int i, lvl;
@@ -2094,11 +2094,11 @@
("pmap_qenter: Invalid level %d", lvl));
m = ma[i];
- pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
- ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+ pa = VM_PAGE_TO_PHYS(m);
+ attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
pte = pmap_l2_to_l3(pde, va);
- pmap_load_store(pte, pa);
+ pmap_load_store(pte, PHYS_TO_PTE(pa) | attr);
va += L3_SIZE;
}
@@ -2393,7 +2393,7 @@
l0p = &pmap->pm_l0[l0index];
KASSERT((pmap_load(l0p) & ATTR_DESCR_VALID) == 0,
("%s: L0 entry %#lx is valid", __func__, pmap_load(l0p)));
- l0e = VM_PAGE_TO_PHYS(m) | L0_TABLE;
+ l0e = PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L0_TABLE;
/*
* Mark all kernel memory as not accessible from userspace
@@ -2433,7 +2433,7 @@
l1 = &l1[ptepindex & Ln_ADDR_MASK];
KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
- pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+ pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE);
} else {
vm_pindex_t l0index, l1index;
pd_entry_t *l0, *l1, *l2;
@@ -2477,7 +2477,7 @@
l2 = &l2[ptepindex & Ln_ADDR_MASK];
KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
- pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+ pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L2_TABLE);
}
pmap_resident_count_inc(pmap, 1);
@@ -2712,7 +2712,7 @@
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
paddr = VM_PAGE_TO_PHYS(nkpg);
- pmap_store(l1, paddr | L1_TABLE);
+ pmap_store(l1, PHYS_TO_PTE(paddr) | L1_TABLE);
continue; /* try again */
}
l2 = pmap_l1_to_l2(l1, kernel_vm_end);
@@ -2733,7 +2733,7 @@
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
paddr = VM_PAGE_TO_PHYS(nkpg);
- pmap_store(l2, paddr | L2_TABLE);
+ pmap_store(l2, PHYS_TO_PTE(paddr) | L2_TABLE);
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
@@ -3381,7 +3381,7 @@
panic("pmap_remove_kernel_l2: Missing pt page");
ml3pa = VM_PAGE_TO_PHYS(ml3);
- newl2 = ml3pa | L2_TABLE;
+ newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE;
/*
* If this page table page was unmapped by a promotion, then it
@@ -4387,7 +4387,7 @@
if ((m->oflags & VPO_UNMANAGED) == 0)
VM_PAGE_OBJECT_BUSY_ASSERT(m);
pa = VM_PAGE_TO_PHYS(m);
- new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | L3_PAGE);
+ new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
new_l3 |= pmap_pte_prot(pmap, prot);
@@ -4701,7 +4701,7 @@
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
- new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
+ new_l2 = (pd_entry_t)(PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
L2_BLOCK);
if ((m->oflags & VPO_UNMANAGED) == 0) {
@@ -5043,7 +5043,7 @@
pmap_resident_count_inc(pmap, 1);
pa = VM_PAGE_TO_PHYS(m);
- l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
+ l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -6467,7 +6467,7 @@
/* Insert L2_BLOCK */
l2 = pmap_l1_to_l2(pde, va);
pmap_load_store(l2,
- pa | ATTR_DEFAULT | ATTR_S1_XN |
+ PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
va += L2_SIZE;
@@ -7804,7 +7804,8 @@
if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) {
MPASS(first);
block = pmap_san_enter_bootstrap_alloc_l2();
- pmap_store(&l2[slot], pmap_early_vtophys(block) |
+ pmap_store(&l2[slot],
+ PHYS_TO_PTE(pmap_early_vtophys(block)) |
PMAP_SAN_PTE_BITS | L2_BLOCK);
dmb(ishst);
}
@@ -7817,17 +7818,18 @@
MPASS(l1 != NULL);
if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
m = pmap_san_enter_alloc_l3();
- pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+ pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE);
}
l2 = pmap_l1_to_l2(l1, va);
if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
m = pmap_san_enter_alloc_l2();
if (m != NULL) {
- pmap_store(l2, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS |
- L2_BLOCK);
+ pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
+ PMAP_SAN_PTE_BITS | L2_BLOCK);
} else {
m = pmap_san_enter_alloc_l3();
- pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+ pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
+ L2_TABLE);
}
dmb(ishst);
}
@@ -7837,7 +7839,8 @@
if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
return;
m = pmap_san_enter_alloc_l3();
- pmap_store(l3, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
+ pmap_store(l3, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
+ PMAP_SAN_PTE_BITS | L3_PAGE);
dmb(ishst);
}
#endif /* KASAN */
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -56,6 +56,8 @@
#define BASE_ADDR(x) ((x) & BASE_MASK)
#define PTE_TO_PHYS(pte) BASE_ADDR(pte)
+/* Convert a phys addr to the output address field of a PTE */
+#define PHYS_TO_PTE(pa) (pa)
/* Bits 58:55 are reserved for software */
#define ATTR_SW_UNUSED1 (1UL << 58)
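
Not part of the patch: a minimal standalone C sketch of how the new PHYS_TO_PTE macro pairs with the existing PTE_TO_PHYS when a leaf entry is composed, mirroring the pmap_store()/pmap_load_store() call sites above. The BASE_MASK, descriptor, and attribute values below are hypothetical stand-ins, not the real arm64 definitions; in this revision PHYS_TO_PTE is an identity conversion, so the round trip only strips the attribute and descriptor bits.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;

/* Simplified stand-ins for the arm64 header definitions (assumed values). */
#define	BASE_MASK		0x0000fffffffff000UL	/* output address field */
#define	PTE_TO_PHYS(pte)	((pte) & BASE_MASK)	/* extract the physical address */
#define	PHYS_TO_PTE(pa)		(pa)			/* identity conversion, as in this diff */
#define	L3_PAGE_EXAMPLE		0x3UL			/* hypothetical descriptor bits */
#define	ATTR_XN_EXAMPLE		(1UL << 54)		/* hypothetical attribute bit */

int
main(void)
{
	uint64_t pa = 0x40001000UL;

	/* Compose a leaf entry the way the patched call sites do. */
	pt_entry_t pte = PHYS_TO_PTE(pa) | ATTR_XN_EXAMPLE | L3_PAGE_EXAMPLE;

	/* PTE_TO_PHYS masks the attribute/descriptor bits back off. */
	printf("pa %#lx -> pte %#lx -> pa %#lx\n", (unsigned long)pa,
	    (unsigned long)pte, (unsigned long)PTE_TO_PHYS(pte));
	return (0);
}

With PHYS_TO_PTE defined as an identity macro, the change is behavior-neutral; it presumably centralizes the physical-address-to-PTE conversion so that a later revision can redefine it in one place, though that motivation is not stated in this diff.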
