D9433.id24713.diff
Index: sys/contrib/ncsw/Peripherals/FM/fm.h
===================================================================
--- sys/contrib/ncsw/Peripherals/FM/fm.h
+++ sys/contrib/ncsw/Peripherals/FM/fm.h
@@ -637,8 +637,8 @@
bool lowEndRestriction;
#endif /* FM_LOW_END_RESTRICTION */
uint32_t exceptions;
- int irq;
- int errIrq;
+ uintptr_t irq;
+ uintptr_t errIrq;
bool ramsEccEnable;
bool explicitEnable;
bool internalCall;
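
Reviewer sketch (not from the patch): the point of widening irq/errIrq is, as far as I can tell, that the FreeBSD attachment code stores a pointer-sized interrupt handle in these fields (that part is my assumption); an int would truncate such a value on LP64. A minimal standalone illustration of the truncation hazard, with a hypothetical handle value:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t handle = 0xc000000000001000UL; /* hypothetical token */
        int narrow = (int)handle;                /* truncated on LP64 */
        uintptr_t wide = handle;                 /* preserved */

        printf("narrow=0x%x wide=0x%lx\n", narrow, (unsigned long)wide);
        return (0);
    }
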
Index: sys/powerpc/booke/booke_machdep.c
===================================================================
--- sys/powerpc/booke/booke_machdep.c
+++ sys/powerpc/booke/booke_machdep.c
@@ -215,7 +215,7 @@
ivor_setup(void)
{
- mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & 0xffff0000);
+ mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & ~0xffffUL);
SET_TRAP(SPR_IVOR0, int_critical_input);
SET_TRAP(SPR_IVOR1, int_machine_check);
@@ -248,6 +248,11 @@
SET_TRAP(SPR_IVOR32, int_vec);
break;
}
+
+#ifdef __powerpc64__
+ /* Set 64-bit interrupt mode. */
+ mtspr(SPR_EPCR, mfspr(SPR_EPCR) | EPCR_ICM);
+#endif
}
static int
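
Reviewer sketch (not from the patch): two things happen in ivor_setup() above. First, 0xffff0000 has type unsigned int, so on powerpc64 the old AND also cleared bits 63..32 of the vector base; ~0xffffUL clears only the low 16 bits that IVPR ignores. Second, the EPCR[ICM] write makes exceptions arrive in 64-bit computation mode, matching the PSL_CM bit the kernel otherwise runs with. A standalone LP64 demo of the mask fix:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t base = 0xc000000000002000UL; /* hypothetical vector base */

        /* old: the 32-bit constant zero-extends and wipes bits 63..32 */
        printf("old: 0x%lx\n", (unsigned long)(base & 0xffff0000));
        /* new: only the low 16 bits are cleared */
        printf("new: 0x%lx\n", (unsigned long)(base & ~0xffffUL));
        return (0);
    }
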
@@ -288,6 +293,10 @@
void *mdp;
vm_offset_t dtbp, end;
+#if 0
+ li %r5, 2
+ stw %r5, 0(%r7)
+#endif
end = (uintptr_t)_end;
dtbp = (vm_offset_t)NULL;
@@ -296,6 +305,7 @@
bzero(__sbss_start, __sbss_end - __sbss_start);
bzero(__bss_start, _end - __bss_start);
tlb1_init();
+ early_putc('a');
/*
* Handle the various ways we can get loaded and started:
@@ -351,7 +361,7 @@
}
#define RES_GRANULE 32
-extern uint32_t tlb0_miss_locks[];
+extern uintptr_t tlb0_miss_locks[];
/* Initialise a struct pcpu. */
void
@@ -361,8 +371,8 @@
pcpu->pc_tid_next = TID_MIN;
#ifdef SMP
- uint32_t *ptr;
- int words_per_gran = RES_GRANULE / sizeof(uint32_t);
+ uintptr_t *ptr;
+ int words_per_gran = RES_GRANULE / sizeof(uintptr_t);
ptr = &tlb0_miss_locks[cpuid * words_per_gran];
pcpu->pc_booke_tlb_lock = ptr;
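
Reviewer sketch (not from the patch): each CPU owns a fixed RES_GRANULE-byte slice of tlb0_miss_locks, so doubling the lock word to 8 bytes halves words_per_gran from 8 to 4; the pointer arithmetic is otherwise unchanged. Reduced model:

    #include <stdint.h>

    #define RES_GRANULE 32

    uintptr_t *
    cpu_lock_words(uintptr_t locks[], int cpuid)
    {
        /* 4 words per granule on LP64, 8 on ILP32 */
        int words_per_gran = RES_GRANULE / sizeof(uintptr_t);

        return (&locks[cpuid * words_per_gran]);
    }
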
Index: sys/powerpc/booke/locore.S
===================================================================
--- sys/powerpc/booke/locore.S
+++ sys/powerpc/booke/locore.S
@@ -41,6 +41,35 @@
#define TMPSTACKSZ 16384
+#ifdef __powerpc64__
+#define GET_TOCBASE(r) \
+ mfspr r, SPR_SPRG8
+#define TOC_RESTORE nop
+#define CMPI cmpdi
+#define CMPL cmpld
+#define LOAD ld
+#define LOADX ldarx
+#define STORE std
+#define STOREX stdcx.
+#define STU stdu
+#define CALLSIZE 48
+#define REDZONE 288
+#define THREAD_REG %r13
+#else
+#define GET_TOCBASE(r)
+#define TOC_RESTORE
+#define CMPI cmpwi
+#define CMPL cmplw
+#define LOAD lwz
+#define LOADX lwarx
+#define STOREX stwcx.
+#define STORE stw
+#define STU stwu
+#define CALLSIZE 8
+#define REDZONE 0
+#define THREAD_REG %r2
+#endif
+
.text
.globl btext
btext:
@@ -101,6 +130,9 @@
* Initial cleanup
*/
li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */
+#ifdef __powerpc64__
+ oris %r3, %r3, PSL_CM@h
+#endif
mtmsr %r3
isync
@@ -200,18 +232,19 @@
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
- lis %r3, KERNBASE@h
- ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
-#ifdef SMP
+ LOAD_ADDR(%r3, KERNBASE)
ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
-#endif
mtspr SPR_MAS2, %r3
isync
/* Discover phys load address */
bl 3f
3: mflr %r4 /* Use current address */
+#ifdef __powerpc64__
+ clrrdi %r4, %r4, 26 /* 64MB alignment mask */
+#else
rlwinm %r4, %r4, 0, 0, 5 /* 64MB alignment mask */
+#endif
ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
mtspr SPR_MAS3, %r4 /* Set RPN and protection */
isync
@@ -224,11 +257,19 @@
/* Switch to the above TLB1[1] mapping */
bl 4f
4: mflr %r4
- rlwinm %r4, %r4, 0, 8, 31 /* Current offset from kernel load address */
+#ifdef __powerpc64__
+ clrldi %r4, %r4, 38
+ clrrdi %r3, %r3, 12
+#else
+ rlwinm %r4, %r4, 0, 6, 31 /* Current offset from kernel load address */
rlwinm %r3, %r3, 0, 0, 19
+#endif
add %r4, %r4, %r3 /* Convert to kernel virtual address */
addi %r4, %r4, (5f - 4b)
li %r3, PSL_DE /* Note AS=0 */
+#ifdef __powerpc64__
+ oris %r3, %r3, PSL_CM@h
+#endif
mtspr SPR_SRR0, %r4
mtspr SPR_SRR1, %r3
rfi
@@ -242,6 +283,33 @@
done_mapping:
+#ifdef __powerpc64__
+ /* Set up the TOC pointer */
+ b 0f
+ .align 3
+0: nop
+ bl 1f
+ .llong __tocbase + 0x8000 - .
+1: mflr %r2
+ ld %r1,0(%r2)
+ add %r2,%r1,%r2
+ mtspr SPR_SPRG8, %r2
+
+ /* Get load offset */
+ ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
+ subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
+
+ /* Set up the stack pointer */
+ ld %r1,TOC_REF(tmpstack)(%r2)
+ addi %r1,%r1,TMPSTACKSZ-96
+ add %r1,%r1,%r31
+ bl 1f
+ .llong _DYNAMIC-.
+1: mflr %r3
+ ld %r4,0(%r3)
+ add %r3,%r4,%r3
+ mr %r4,%r31
+#else
/*
* Setup a temporary stack
*/
@@ -265,12 +333,15 @@
add %r4,%r4,%r5
lwz %r4,4(%r4) /* got[0] is _DYNAMIC link addr */
subf %r4,%r4,%r3 /* subtract to calculate relocbase */
- bl elf_reloc_self
+#endif
+ bl CNAME(elf_reloc_self)
+ TOC_RESTORE
/*
* Initialise exception vector offsets
*/
- bl ivor_setup
+ bl CNAME(ivor_setup)
+ TOC_RESTORE
/*
* Set up arguments and jump to system initialization code
@@ -279,15 +350,17 @@
mr %r4, %r31
/* Prepare core */
- bl booke_init
+ bl CNAME(booke_init)
+ TOC_RESTORE
/* Switch to thread0.td_kstack now */
mr %r1, %r3
li %r3, 0
- stw %r3, 0(%r1)
+ STORE %r3, 0(%r1)
 /* Machine independent part, does not return */
- bl mi_startup
+ bl CNAME(mi_startup)
+ TOC_RESTORE
/* NOT REACHED */
5: b 5b
@@ -393,8 +466,7 @@
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
- lis %r3, KERNBASE@h
- ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
+ LOAD_ADDR(%r3, KERNBASE)
ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
mtspr SPR_MAS2, %r3
isync
@@ -426,7 +498,11 @@
rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
add %r3, %r3, %r5 /* Make this virtual address */
addi %r3, %r3, (7f - 6b)
+#ifdef __powerpc64__
+ lis %r4, PSL_CM@h /* Note AS=0 */
+#else
li %r4, 0 /* Note AS=0 */
+#endif
mtspr SPR_SRR0, %r3
mtspr SPR_SRR1, %r4
rfi
@@ -458,7 +534,8 @@
/*
* Initialise exception vector offsets
*/
- bl ivor_setup
+ bl CNAME(ivor_setup)
+ TOC_RESTORE
/*
* Assign our pcpu instance
@@ -471,13 +548,16 @@
lwz %r3, 0(%r3)
mtsprg0 %r3
- bl pmap_bootstrap_ap
+ bl CNAME(pmap_bootstrap_ap)
+ TOC_RESTORE
- bl cpudep_ap_bootstrap
+ bl CNAME(cpudep_ap_bootstrap)
+ TOC_RESTORE
/* Switch to the idle thread's kstack */
mr %r1, %r3
- bl machdep_ap_bootstrap
+ bl CNAME(machdep_ap_bootstrap)
+ TOC_RESTORE
/* NOT REACHED */
6: b 6b
@@ -594,7 +674,6 @@
* r3-r5 scratched
*/
tlb1_inval_all_but_current:
- mr %r6, %r3
mfspr %r3, SPR_TLB1CFG /* Get number of entries */
andi. %r3, %r3, TLBCFG_NENTRY_MASK@l
li %r4, 0 /* Start from Entry 0 */
@@ -863,15 +942,23 @@
/************************************************************************/
.data
.align 3
+#ifdef __powerpc64__
+GLOBAL(__startkernel)
+ .llong begin
+GLOBAL(__endkernel)
+ .llong end
+#else
GLOBAL(__startkernel)
.long begin
GLOBAL(__endkernel)
.long end
+#endif
.align 4
tmpstack:
.space TMPSTACKSZ
tmpstackbound:
.space 10240 /* XXX: this really should not be necessary */
+TOC_ENTRY(tmpstack)
/*
* Compiled KERNBASE locations
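
Reviewer sketch (not from the patch): the TOC setup at done_mapping uses the position-independent constant trick that also appears in the kernel_pmap_store and _DYNAMIC loads: `bl 1f` leaves the address of the inline `.llong` literal in LR, the literal holds the link-time delta to the target symbol, and literal address plus delta yields the symbol's run-time address at any load offset. A C model with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t sym_linked = 0x100000UL;     /* hypothetical link-time addr */
        uintptr_t literal_linked = 0x2000UL;   /* addr of the .llong literal */
        intptr_t delta = (intptr_t)(sym_linked - literal_linked);

        uintptr_t load_offset = 0x40000000UL;  /* hypothetical relocation */
        uintptr_t literal_run = literal_linked + load_offset;

        /* recovers sym at run time with no absolute relocation */
        printf("0x%lx\n", (unsigned long)(literal_run + delta));
        return (0);
    }
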
Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c
+++ sys/powerpc/booke/pmap.c
@@ -34,18 +34,42 @@
* Kernel and user threads run within one common virtual address space
* defined by AS=0.
*
+ * 32-bit pmap:
* Virtual address space layout:
* -----------------------------
- * 0x0000_0000 - 0xafff_ffff : user process
- * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
+ * 0x0000_0000 - 0x7fff_ffff : user process
+ * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
* 0xc000_0000 - 0xc0ff_ffff : kernel reserved
* 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
- * 0xc100_0000 - 0xfeef_ffff : KVA
+ * 0xc100_0000 - 0xffff_ffff : KVA
* 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
* 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
* 0xc200_4000 - 0xc200_8fff : guard page + kstack0
* 0xc200_9000 - 0xfeef_ffff : actual free KVA space
- * 0xfef0_0000 - 0xffff_ffff : I/O devices region
+ *
+ * 64-bit pmap:
+ * Virtual address space layout:
+ * -----------------------------
+ * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
+ * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
+ * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
+ * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
+ * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
+ * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
+ * endkernel - msgbufp-1 : flat device tree
+ * msgbufp - ptbl_bufs-1 : message buffer
+ * ptbl_bufs - kernel_pdir-1 : kernel page tables
+ * kernel_pdir - kernel_pp2d-1 : kernel page directory
+ * kernel_pp2d - . : kernel pointers to page directory
+ * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
+ * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
+ * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
+ * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
+ * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
+ * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
+ * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
+ * 0xf000_0000_0000_0000 - +Maxmem : physmem map
+ * - 0xffff_ffff_ffff_ffff : device direct map
*/
#include <sys/cdefs.h>
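
Reviewer sketch (not from the patch): the 64-bit layout above is keyed off the top nibble of the address. A coarse classifier matching the comment block (region names abbreviate the comment text):

    #include <stdint.h>
    #include <stdio.h>

    static const char *
    region(uint64_t va)
    {
        uint64_t top = va >> 60;

        if (top <= 0x8)
            return ("user text/data/heap/maps");
        if (top <= 0xa)
            return ("user mmio");
        if (top == 0xb)
            return ("user stack");
        if (top == 0xc)
            return ("kernel");
        if (top == 0xd)
            return ("coprocessor");
        if (top == 0xe)
            return ("kernel mmio");
        return ("direct map");
    }

    int
    main(void)
    {
        printf("%s\n", region(0xc000000000002000ULL)); /* kernel */
        return (0);
    }
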
@@ -97,6 +121,7 @@
#include "mmu_if.h"
#define SPARSE_MAPDEV
+#define DEBUG
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
@@ -144,6 +169,9 @@
unsigned int kptbl_min; /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls; /* Number of KVA ptbls. */
+#ifdef __powerpc64__
+unsigned int kernel_pdirs;
+#endif
/*
* If user pmap is processed with mmu_booke_remove and the resident count
@@ -152,7 +180,9 @@
#define PMAP_REMOVE_DONE(pmap) \
((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
+#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
extern int elf32_nxstack;
+#endif
/**************************************************************************/
/* TLB and TID handling */
@@ -175,7 +205,6 @@
#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
#define TLB1_ENTRIES (tlb1_entries)
-#define TLB1_MAXENTRIES 64
static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
@@ -219,17 +248,25 @@
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
+#ifdef __powerpc64__
+static pte_t *ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir,
+ unsigned int pdir_idx, boolean_t nosleep);
+static void ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx);
+static void ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx);
+static int ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va);
+#else
static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
+#endif
static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
-static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
+static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr,
- vm_offset_t pdir);
+ vm_offset_t pdir);
static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
@@ -239,7 +276,11 @@
static void booke_pmap_init_qpages(void);
/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
+#ifdef __powerpc64__
+#define PTBL_BUFS (16UL * 16 * 16)
+#else
#define PTBL_BUFS (128 * 16)
+#endif
struct ptbl_buf {
TAILQ_ENTRY(ptbl_buf) link; /* list link */
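
Reviewer sketch (not from the patch): given the pte.h geometry later in this diff (PTBL_PAGES == PDIR_PAGES == 1 with 4K pages), ptbl_init() hands each buf a MAX(PTBL_PAGES, PDIR_PAGES)-page KVA slot, so 16*16*16 bufs size the ptbl_buf_pool_vabase window at 16MB. Arithmetic check:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL              /* assumed 4K pages */
    #define PTBL_BUFS  (16UL * 16 * 16)
    #define SLOT_PAGES 1UL                 /* MAX(PTBL_PAGES, PDIR_PAGES) */

    int
    main(void)
    {
        /* prints "pool = 16 MB" */
        printf("pool = %lu MB\n", (PTBL_BUFS * SLOT_PAGES * PAGE_SIZE) >> 20);
        return (0);
    }
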
@@ -503,6 +544,362 @@
/* Page table related */
/**************************************************************************/
+#ifdef __powerpc64__
+/* Initialize pool of kva ptbl buffers. */
+static void
+ptbl_init(void)
+{
+ int i;
+
+ mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
+ TAILQ_INIT(&ptbl_buf_freelist);
+
+ for (i = 0; i < PTBL_BUFS; i++) {
+ ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
+ i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
+ TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
+ }
+}
+
+/* Get a ptbl_buf from the freelist. */
+static struct ptbl_buf *
+ptbl_buf_alloc(void)
+{
+ struct ptbl_buf *buf;
+
+ mtx_lock(&ptbl_buf_freelist_lock);
+ buf = TAILQ_FIRST(&ptbl_buf_freelist);
+ if (buf != NULL)
+ TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
+ mtx_unlock(&ptbl_buf_freelist_lock);
+
+ return (buf);
+}
+
+/* Return ptbl buf to free pool. */
+static void
+ptbl_buf_free(struct ptbl_buf *buf)
+{
+ mtx_lock(&ptbl_buf_freelist_lock);
+ TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
+ mtx_unlock(&ptbl_buf_freelist_lock);
+}
+
+/*
+ * Search the pmap's list of allocated ptbl bufs and free the one backing this ptbl.
+ */
+static void
+ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
+{
+ struct ptbl_buf *pbuf;
+
+ TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
+ if (pbuf->kva == (vm_offset_t) ptbl) {
+ /* Remove from pmap ptbl buf list. */
+ TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
+
+ /* Free corresponding ptbl buf. */
+ ptbl_buf_free(pbuf);
+
+ break;
+ }
+ }
+}
+
+/* Get a pointer to a PTE in a page table. */
+static __inline pte_t *
+pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ pte_t **pdir;
+ pte_t *ptbl;
+
+ KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
+
+ pdir = pmap->pm_pp2d[PP2D_IDX(va)];
+ if (!pdir)
+ return NULL;
+ ptbl = pdir[PDIR_IDX(va)];
+ return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
+}
+
+/*
+ * Search the pmap's list of allocated pdir bufs and free the one backing this pdir.
+ */
+static void
+ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
+{
+ struct ptbl_buf *pbuf;
+
+ TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
+ if (pbuf->kva == (vm_offset_t) pdir) {
+ /* Remove from pmap ptbl buf list. */
+ TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
+
+ /* Free corresponding pdir buf. */
+ ptbl_buf_free(pbuf);
+
+ break;
+ }
+ }
+}
+/* Free pdir pages and invalidate pdir entry. */
+static void
+pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
+{
+ pte_t **pdir;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i;
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+
+ KASSERT((pdir != NULL), ("pdir_free: null pdir"));
+
+ pmap->pm_pp2d[pp2d_idx] = NULL;
+
+ for (i = 0; i < PDIR_PAGES; i++) {
+ va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, va);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_free_zero(m);
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ pmap_kremove(va);
+ }
+
+ ptbl_free_pmap_pdir(mmu, pmap, pdir);
+}
+
+/*
+ * Decrement pdir pages hold count and attempt to free pdir pages. Called
+ * when removing directory entry from pdir.
+ *
+ * Return 1 if pdir pages were freed.
+ */
+static int
+pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
+{
+ pte_t **pdir;
+ vm_paddr_t pa;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("pdir_unhold: unholding kernel pdir!"));
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+
+ KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS), ("pdir_unhold: non kva pdir"));
+
+ /* decrement hold count */
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) pdir + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count--;
+ }
+
+ /*
+ * Free pdir pages if there are no dir entries in this pdir.
+ * wire_count has the same value for all ptbl pages, so check the
+ * last page.
+ */
+ if (m->wire_count == 0) {
+ pdir_free(mmu, pmap, pp2d_idx);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Increment hold count for pdir pages. This routine is used when a new ptbl
+ * entry is being inserted into pdir.
+ */
+static void
+pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
+{
+ vm_paddr_t pa;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("pdir_hold: holding kernel pdir!"));
+
+ KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
+
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) pdir + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count++;
+ }
+}
+
+/* Allocate page table. */
+static pte_t *
+ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, boolean_t nosleep)
+{
+ vm_page_t mtbl [PTBL_PAGES];
+ vm_page_t m;
+ struct ptbl_buf *pbuf;
+ unsigned int pidx;
+ pte_t *ptbl;
+ int i, j;
+ int req;
+
+ KASSERT((pdir[pdir_idx] == NULL),
+ ("%s: valid ptbl entry exists!", __func__));
+
+ pbuf = ptbl_buf_alloc();
+ if (pbuf == NULL)
+ panic("%s: couldn't alloc kernel virtual memory", __func__);
+
+ ptbl = (pte_t *) pbuf->kva;
+
+ /* Allocate ptbl pages, this will sleep! */
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pidx = (PTBL_PAGES * pdir_idx) + i;
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
+ while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(&pvh_global_lock);
+ if (nosleep) {
+ ptbl_free_pmap_ptbl(pmap, ptbl);
+ for (j = 0; j < i; j++)
+ vm_page_free(mtbl[j]);
+ atomic_subtract_int(&vm_cnt.v_wire_count, i);
+ return (NULL);
+ }
+ VM_WAIT;
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ }
+ mtbl[i] = m;
+ }
+
+ /* Mapin allocated pages into kernel_pmap. */
+ mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
+ /* Zero whole ptbl. */
+ bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
+
+ /* Add pbuf to the pmap ptbl bufs list. */
+ TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
+
+ return (ptbl);
+}
+
+/* Free ptbl pages and invalidate pdir entry. */
+static void
+ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
+{
+ pte_t *ptbl;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i;
+
+ ptbl = pdir[pdir_idx];
+
+ KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
+
+ pdir[pdir_idx] = NULL;
+
+ for (i = 0; i < PTBL_PAGES; i++) {
+ va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, va);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_free_zero(m);
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ pmap_kremove(va);
+ }
+
+ ptbl_free_pmap_ptbl(pmap, ptbl);
+}
+
+/*
+ * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
+ * when removing pte entry from ptbl.
+ *
+ * Return 1 if ptbl pages were freed.
+ */
+static int
+ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ pte_t *ptbl;
+ vm_paddr_t pa;
+ vm_page_t m;
+ u_int pp2d_idx;
+ pte_t **pdir;
+ u_int pdir_idx;
+ int i;
+
+ pp2d_idx = PP2D_IDX(va);
+ pdir_idx = PDIR_IDX(va);
+
+ KASSERT((pmap != kernel_pmap),
+ ("ptbl_unhold: unholding kernel ptbl!"));
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+ ptbl = pdir[pdir_idx];
+
+ KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS), ("ptbl_unhold: non kva ptbl"));
+
+ /* decrement hold count */
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) ptbl + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count--;
+ }
+
+ /*
+ * Free ptbl pages if there are no pte entries in this ptbl.
+ * wire_count has the same value for all ptbl pages, so check the
+ * last page.
+ */
+ if (m->wire_count == 0) {
+ /* A pair of indirect entries might point to this ptbl page */
+#if 0
+ tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
+ TLB_SIZE_1M, MAS6_SIND);
+ tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
+ TLB_SIZE_1M, MAS6_SIND);
+#endif
+ ptbl_free(mmu, pmap, pdir, pdir_idx);
+ pdir_unhold(mmu, pmap, pp2d_idx);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Increment hold count for ptbl pages. This routine is used when new pte
+ * entry is being inserted into ptbl.
+ */
+static void
+ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
+{
+ vm_paddr_t pa;
+ pte_t *ptbl;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("ptbl_hold: holding kernel ptbl!"));
+
+ ptbl = pdir[pdir_idx];
+
+ KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
+
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) ptbl + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count++;
+ }
+}
+#else
+
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
@@ -763,6 +1160,7 @@
m->wire_count++;
}
}
+#endif
/* Allocate pv_entry structure. */
pv_entry_t
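
Reviewer sketch (not from the patch): the hold()/unhold() pairs above keep an identical wire_count on every page backing a table, which is why the release paths decrement all pages but test only the last one. Reduced model, with plain ints standing in for vm_page wire counts:

    /* Plain ints stand in for the vm_page wire counts of a table's pages. */
    static int
    unhold_model(int wire_count[], int npages)
    {
        int i;

        for (i = 0; i < npages; i++)
            wire_count[i]--;
        /* every page carries the same count, so the last one suffices */
        return (wire_count[npages - 1] == 0);  /* 1 => free the table */
    }
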
@@ -843,6 +1241,228 @@
//debugf("pv_remove: e\n");
}
+#ifdef __powerpc64__
+/*
+ * Clean pte entry, try to free page table page if requested.
+ *
+ * Return 1 if ptbl pages were freed, otherwise return 0.
+ */
+static int
+pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
+{
+ vm_page_t m;
+ pte_t *pte;
+
+ pte = pte_find(mmu, pmap, va);
+ KASSERT(pte != NULL, ("%s: NULL pte", __func__));
+
+ if (!PTE_ISVALID(pte))
+ return (0);
+
+ /* Get vm_page_t for mapped pte. */
+ m = PHYS_TO_VM_PAGE(PTE_PA(pte));
+
+ if (PTE_ISWIRED(pte))
+ pmap->pm_stats.wired_count--;
+
+ /* Handle managed entry. */
+ if (PTE_ISMANAGED(pte)) {
+
+ /* Handle modified pages. */
+ if (PTE_ISMODIFIED(pte))
+ vm_page_dirty(m);
+
+ /* Referenced pages. */
+ if (PTE_ISREFERENCED(pte))
+ vm_page_aflag_set(m, PGA_REFERENCED);
+
+ /* Remove pv_entry from pv_list. */
+ pv_remove(pmap, va, m);
+ }
+ mtx_lock_spin(&tlbivax_mutex);
+ tlb_miss_lock();
+
+ tlb0_flush_entry(va);
+ *pte = 0;
+
+ tlb_miss_unlock();
+ mtx_unlock_spin(&tlbivax_mutex);
+
+ pmap->pm_stats.resident_count--;
+
+ if (flags & PTBL_UNHOLD) {
+ return (ptbl_unhold(mmu, pmap, va));
+ }
+ return (0);
+}
+
+/*
+ * Allocate a page of pointers to page directories; do not preallocate
+ * the page tables.
+ */
+static pte_t **
+pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
+{
+ vm_page_t mtbl [PDIR_PAGES];
+ vm_page_t m;
+ struct ptbl_buf *pbuf;
+ pte_t **pdir;
+ unsigned int pidx;
+ int i;
+ int req;
+
+ pbuf = ptbl_buf_alloc();
+
+ if (pbuf == NULL)
+ panic("%s: couldn't alloc kernel virtual memory", __func__);
+
+ /* Allocate pdir pages, this will sleep! */
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pidx = (PDIR_PAGES * pp2d_idx) + i;
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
+ while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
+ PMAP_UNLOCK(pmap);
+ VM_WAIT;
+ PMAP_LOCK(pmap);
+ }
+ mtbl[i] = m;
+ }
+
+ /* Mapin allocated pages into kernel_pmap. */
+ pdir = (pte_t **) pbuf->kva;
+ pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
+
+ /* Zero whole pdir. */
+ bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
+
+ /* Add pdir to the pmap pdir bufs list. */
+ TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
+
+ return pdir;
+}
+
+void db_trace_self(void);
+/*
+ * Insert PTE for a given page and virtual address.
+ */
+static int
+pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+ boolean_t nosleep)
+{
+ unsigned int pp2d_idx = PP2D_IDX(va);
+ unsigned int pdir_idx = PDIR_IDX(va);
+ unsigned int ptbl_idx = PTBL_IDX(va);
+ pte_t *ptbl, *pte;
+ pte_t **pdir;
+
+ /* Get the page directory pointer. */
+ pdir = pmap->pm_pp2d[pp2d_idx];
+ if (pdir == NULL)
+ pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
+
+ /* Get the page table pointer. */
+ ptbl = pdir[pdir_idx];
+
+ if (ptbl == NULL) {
+ /* Allocate page table pages. */
+ ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
+ if (ptbl == NULL) {
+ KASSERT(nosleep, ("nosleep and NULL ptbl"));
+ return (ENOMEM);
+ }
+ } else {
+ //printf("ptbl(%p) = %p\n", va, ptbl);
+ /*
+ * Check if there is valid mapping for requested va, if there
+ * is, remove it.
+ */
+ pte = &pdir[pdir_idx][ptbl_idx];
+ if (PTE_ISVALID(pte)) {
+ pte_remove(mmu, pmap, va, PTBL_HOLD);
+ } else {
+ /*
+ * pte is not used, increment hold count for ptbl
+ * pages.
+ */
+ if (pmap != kernel_pmap)
+ ptbl_hold(mmu, pmap, pdir, pdir_idx);
+ }
+ }
+
+ if (pdir[pdir_idx] == NULL) {
+ if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
+ pdir_hold(mmu, pmap, pdir);
+ pdir[pdir_idx] = ptbl;
+ }
+ if (pmap->pm_pp2d[pp2d_idx] == NULL)
+ pmap->pm_pp2d[pp2d_idx] = pdir;
+
+ /*
+ * Insert pv_entry into pv_list for mapped page if part of managed
+ * memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ flags |= PTE_MANAGED;
+
+ /* Create and insert pv entry. */
+ pv_insert(pmap, va, m);
+ }
+ pmap->pm_stats.resident_count++;
+ pte = &pdir[pdir_idx][ptbl_idx];
+ *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
+ *pte |= (PTE_VALID | flags);
+
+ return (0);
+}
+
+/* Return the pa for the given pmap/va. */
+static vm_paddr_t
+pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ vm_paddr_t pa = 0;
+ pte_t *pte;
+
+ pte = pte_find(mmu, pmap, va);
+ if ((pte != NULL) && PTE_ISVALID(pte))
+ pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
+ return (pa);
+}
+
+
+/* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
+static void
+kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
+{
+ int i, j;
+ vm_offset_t va;
+ pte_t *pte;
+
+ va = addr;
+ /* Initialize kernel pdir */
+ for (i = 0; i < kernel_pdirs; i++) {
+ kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
+ (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
+ for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES)); j < PDIR_NENTRIES; j++) {
+ kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
+ (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
+ (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
+ }
+ }
+
+ /*
+ * Fill in PTEs covering kernel code and data. They are not required
+ * for address translation, as this area is covered by static TLB1
+ * entries, but for pte_vatopa() to work correctly with kernel area
+ * addresses.
+ */
+ for (va = addr; va < data_end; va += PAGE_SIZE) {
+ pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
+ *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
+ *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
+ PTE_VALID | PTE_PS_4KB;
+ }
+}
+#else
/*
* Clean pte entry, try to free page table page if requested.
*
@@ -1045,6 +1665,7 @@
PTE_VALID | PTE_PS_4KB;
}
}
+#endif
/**************************************************************************/
/* PMAP related */
@@ -1071,7 +1692,9 @@
/* Set interesting system properties */
hw_direct_map = 0;
+#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
elf32_nxstack = 1;
+#endif
/* Initialize invalidation mutex */
mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
@@ -1102,16 +1725,26 @@
/* Allocate space for the message buffer. */
msgbufp = (struct msgbuf *)data_end;
data_end += msgbufsize;
+#ifdef __powerpc64__
+ debugf(" msgbufp at 0x%16lx end = 0x%16lx\n", (uint64_t)msgbufp,
+ data_end);
+#else
debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
data_end);
+#endif
data_end = round_page(data_end);
/* Allocate space for ptbl_bufs. */
ptbl_bufs = (struct ptbl_buf *)data_end;
data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
+#ifdef __powerpc64__
+ debugf(" ptbl_bufs at 0x%16lx end = 0x%16lx\n", (uint64_t)ptbl_bufs,
+ data_end);
+#else
debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
data_end);
+#endif
data_end = round_page(data_end);
@@ -1119,17 +1752,33 @@
kernel_pdir = data_end;
kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
PDIR_SIZE);
+#ifdef __powerpc64__
+ kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
+ data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
+#endif
data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
debugf(" kernel ptbls: %d\n", kernel_ptbls);
+#ifdef __powerpc64__
+ debugf(" kernel pdir at 0x%016x end = 0x%016x\n", kernel_pdir, data_end);
+#else
debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
+#endif
+#ifdef __powerpc64__
+ debugf(" data_end: 0x%016x\n", data_end);
+#else
debugf(" data_end: 0x%08x\n", data_end);
+#endif
if (data_end - kernstart > kernsize) {
kernsize += tlb1_mapin_region(kernstart + kernsize,
kernload + kernsize, (data_end - kernstart) - kernsize);
}
data_end = kernstart + kernsize;
+#ifdef __powerpc64__
+ debugf(" updated data_end: 0x%016x\n", data_end);
+#else
debugf(" updated data_end: 0x%08x\n", data_end);
+#endif
/*
* Clear the structures - note we can only do it safely after the
@@ -1138,7 +1787,13 @@
*/
dpcpu_init(dpcpu, 0);
memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE);
+#ifdef __powerpc64__
+ memset((void *)kernel_pdir, 0,
+ kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
+ kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
+#else
memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
+#endif
/*******************************************************/
/* Set the start and end of kva. */
@@ -1308,14 +1963,16 @@
/* Initialize (statically allocated) kernel pmap. */
/*******************************************************/
PMAP_LOCK_INIT(kernel_pmap);
+#ifndef __powerpc64__
kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
+#endif
+#ifdef __powerpc64__
+ debugf("kernel_pmap = 0x%16lx\n", (uint64_t)kernel_pmap);
+#else
debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
- debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
- debugf("kernel pdir range: 0x%08x - 0x%08x\n",
- kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
-
- kernel_pte_alloc(data_end, kernstart, kernel_pdir);
+#endif
+ kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
for (i = 0; i < MAXCPU; i++) {
kernel_pmap->pm_tid[i] = TID_KERNEL;
@@ -1343,7 +2000,11 @@
debugf("kstack_sz = 0x%08x\n", kstack0_sz);
debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
kstack0_phys, kstack0_phys + kstack0_sz);
+#ifdef __powerpc64__
+ debugf("kstack0 at 0x%016x - 0x%016x\n", kstack0, kstack0 + kstack0_sz);
+#else
debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
+#endif
virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
for (i = 0; i < kstack_pages; i++) {
@@ -1354,8 +2015,13 @@
pmap_bootstrapped = 1;
+#ifdef __powerpc64__
+ debugf("virtual_avail = %016lx\n", virtual_avail);
+ debugf("virtual_end = %016lx\n", virtual_end);
+#else
debugf("virtual_avail = %08x\n", virtual_avail);
debugf("virtual_end = %08x\n", virtual_end);
+#endif
debugf("mmu_booke_bootstrap: exit\n");
}
@@ -1543,6 +2209,7 @@
flags |= PTE_PS_4KB;
pte = pte_find(mmu, kernel_pmap, va);
+ KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@@ -1633,7 +2300,12 @@
pmap->pm_tid[i] = TID_NONE;
CPU_ZERO(&kernel_pmap->pm_active);
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+#ifdef __powerpc64__
+ bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
+ TAILQ_INIT(&pmap->pm_pdir_list);
+#else
bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
+#endif
TAILQ_INIT(&pmap->pm_ptbl_list);
}
@@ -2841,10 +3513,18 @@
do {
tmpva = tlb1_map_base;
va = roundup(tlb1_map_base, 1 << flsl(size));
+#ifdef __powerpc64__
+ } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
+#else
} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
+#endif
+#else
+#ifdef __powerpc64__
+ va = atomic_fetchadd_long(&tlb1_map_base, size);
#else
va = atomic_fetchadd_int(&tlb1_map_base, size);
#endif
+#endif
res = (void *)va;
do {
@@ -2855,7 +3535,7 @@
} while (va % sz != 0);
}
if (bootverbose)
- printf("Wiring VA=%x to PA=%jx (size=%x)\n",
+ printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
va, (uintmax_t)pa, sz);
tlb1_set_entry(va, pa, sz,
_TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma));
@@ -3403,6 +4083,8 @@
tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
+ tlb1_set_entry(0xbffffffffffff000, 0xffe11c000, PAGE_SIZE, _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
+
/* Setup TLB miss defaults */
set_mas4_defaults();
}
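
Reviewer sketch (not from the patch): with tlb1_map_base now vm_offset_t wide, the lock-free KVA carve-out needs the long-sized cmpset on powerpc64, and `1 << flsl(size)` supplies a power-of-two alignment at least as large as the mapping so a single TLB1 entry can cover it. (The fixed tlb1_set_entry(0xbffffffffffff000, 0xffe11c000, ...) in the last hunk looks like a development-time console window, pairing with the early_putc() call in booke_machdep.c.) Userland model, with a GCC builtin standing in for atomic_cmpset_long:

    #include <stddef.h>
    #include <stdint.h>
    #include <strings.h>   /* flsl() (BSD) */

    #define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1)) /* y: power of 2 */

    static uintptr_t
    carve(uintptr_t *map_base, size_t size)
    {
        uintptr_t tmpva, va;

        do {
            tmpva = *map_base;
            va = roundup2(tmpva, (uintptr_t)1 << flsl(size));
        } while (!__sync_bool_compare_and_swap(map_base, tmpva, va + size));
        return (va);
    }
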
Index: sys/powerpc/booke/trap_subr.S
===================================================================
--- sys/powerpc/booke/trap_subr.S
+++ sys/powerpc/booke/trap_subr.S
@@ -84,7 +84,11 @@
#define RES_GRANULE 32
#define RES_LOCK 0 /* offset to the 'lock' word */
+#ifdef __powerpc64__
+#define RES_RECURSE 8 /* offset to the 'recurse' word */
+#else
#define RES_RECURSE 4 /* offset to the 'recurse' word */
+#endif
/*
* Standard interrupt prolog
@@ -114,16 +118,16 @@
#define STANDARD_PROLOG(sprg_sp, savearea, isrr0, isrr1) \
mtspr sprg_sp, %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- stw %r30, (savearea+CPUSAVE_R30)(%r1); \
- stw %r31, (savearea+CPUSAVE_R31)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
mfdear %r30; \
mfesr %r31; \
- stw %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
- stw %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
mfspr %r31, isrr1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \
isync; \
mfspr %r1, sprg_sp; /* Restore SP */ \
mfcr %r30; /* Save CR */ \
@@ -131,26 +135,26 @@
mtcr %r31; /* MSR at interrupt time */ \
bf 17, 1f; \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
+ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
1:
#define STANDARD_CRIT_PROLOG(sprg_sp, savearea, isrr0, isrr1) \
mtspr sprg_sp, %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- stw %r30, (savearea+CPUSAVE_R30)(%r1); \
- stw %r31, (savearea+CPUSAVE_R31)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
mfdear %r30; \
mfesr %r31; \
- stw %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
- stw %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
mfspr %r31, isrr1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \
mfspr %r30, SPR_SRR0; \
mfspr %r31, SPR_SRR1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0+8)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1+8)(%r1); \
+ STORE %r30, (savearea+BOOKE_CRITSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+BOOKE_CRITSAVE_SRR1)(%r1); \
isync; \
mfspr %r1, sprg_sp; /* Restore SP */ \
mfcr %r30; /* Save CR */ \
@@ -158,7 +162,7 @@
mtcr %r31; /* MSR at interrupt time */ \
bf 17, 1f; \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
+ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
1:
/*
@@ -185,42 +189,77 @@
* enough i.e. when kstack crosses page boundary and both pages are
* untranslated)
*/
+#ifdef __powerpc64__
+#define SAVE_REGS(r) \
+ std %r3, FRAME_3+CALLSIZE(r); \
+ std %r4, FRAME_4+CALLSIZE(r); \
+ std %r5, FRAME_5+CALLSIZE(r); \
+ std %r6, FRAME_6+CALLSIZE(r); \
+ std %r7, FRAME_7+CALLSIZE(r); \
+ std %r8, FRAME_8+CALLSIZE(r); \
+ std %r9, FRAME_9+CALLSIZE(r); \
+ std %r10, FRAME_10+CALLSIZE(r); \
+ std %r11, FRAME_11+CALLSIZE(r); \
+ std %r12, FRAME_12+CALLSIZE(r); \
+ std %r13, FRAME_13+CALLSIZE(r); \
+ std %r14, FRAME_14+CALLSIZE(r); \
+ std %r15, FRAME_15+CALLSIZE(r); \
+ std %r16, FRAME_16+CALLSIZE(r); \
+ std %r17, FRAME_17+CALLSIZE(r); \
+ std %r18, FRAME_18+CALLSIZE(r); \
+ std %r19, FRAME_19+CALLSIZE(r); \
+ std %r20, FRAME_20+CALLSIZE(r); \
+ std %r21, FRAME_21+CALLSIZE(r); \
+ std %r22, FRAME_22+CALLSIZE(r); \
+ std %r23, FRAME_23+CALLSIZE(r); \
+ std %r24, FRAME_24+CALLSIZE(r); \
+ std %r25, FRAME_25+CALLSIZE(r); \
+ std %r26, FRAME_26+CALLSIZE(r); \
+ std %r27, FRAME_27+CALLSIZE(r); \
+ std %r28, FRAME_28+CALLSIZE(r); \
+ std %r29, FRAME_29+CALLSIZE(r); \
+ std %r30, FRAME_30+CALLSIZE(r); \
+ std %r31, FRAME_31+CALLSIZE(r)
+#else
+#define SAVE_REGS(r) \
+ stmw %r3, FRAME_3+CALLSIZE(r)
+#endif
#define FRAME_SETUP(sprg_sp, savearea, exc) \
mfspr %r31, sprg_sp; /* get saved SP */ \
/* establish a new stack frame and put everything on it */ \
- stwu %r31, -FRAMELEN(%r1); \
- stw %r0, FRAME_0+8(%r1); /* save r0 in the trapframe */ \
- stw %r31, FRAME_1+8(%r1); /* save SP " " */ \
- stw %r2, FRAME_2+8(%r1); /* save r2 " " */ \
+ STU %r31, -(FRAMELEN+REDZONE)(%r1); \
+ STORE %r0, FRAME_0+CALLSIZE(%r1); /* save r0 in the trapframe */ \
+ STORE %r31, FRAME_1+CALLSIZE(%r1); /* save SP " " */ \
+ STORE %r2, FRAME_2+CALLSIZE(%r1); /* save r2 " " */ \
mflr %r31; \
- stw %r31, FRAME_LR+8(%r1); /* save LR " " */ \
- stw %r30, FRAME_CR+8(%r1); /* save CR " " */ \
+ STORE %r31, FRAME_LR+CALLSIZE(%r1); /* save LR " " */ \
+ STORE %r30, FRAME_CR+CALLSIZE(%r1); /* save CR " " */ \
GET_CPUINFO(%r2); \
- lwz %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
- lwz %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
+ LOAD %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
+ LOAD %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
/* save R3-31 */ \
- stmw %r3, FRAME_3+8(%r1) ; \
+ SAVE_REGS(%r1); \
/* save DEAR, ESR */ \
- lwz %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \
- lwz %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \
- stw %r28, FRAME_BOOKE_DEAR+8(%r1); \
- stw %r29, FRAME_BOOKE_ESR+8(%r1); \
+ LOAD %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \
+ LOAD %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \
+ STORE %r28, FRAME_BOOKE_DEAR+CALLSIZE(%r1); \
+ STORE %r29, FRAME_BOOKE_ESR+CALLSIZE(%r1); \
/* save XER, CTR, exc number */ \
mfxer %r3; \
mfctr %r4; \
- stw %r3, FRAME_XER+8(%r1); \
- stw %r4, FRAME_CTR+8(%r1); \
+ STORE %r3, FRAME_XER+CALLSIZE(%r1); \
+ STORE %r4, FRAME_CTR+CALLSIZE(%r1); \
li %r5, exc; \
- stw %r5, FRAME_EXC+8(%r1); \
+ STORE %r5, FRAME_EXC+CALLSIZE(%r1); \
/* save DBCR0 */ \
mfspr %r3, SPR_DBCR0; \
- stw %r3, FRAME_BOOKE_DBCR0+8(%r1); \
+ STORE %r3, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \
/* save xSSR0-1 */ \
- lwz %r30, (savearea+CPUSAVE_SRR0)(%r2); \
- lwz %r31, (savearea+CPUSAVE_SRR1)(%r2); \
- stw %r30, FRAME_SRR0+8(%r1); \
- stw %r31, FRAME_SRR1+8(%r1); \
- lwz %r2,PC_CURTHREAD(%r2) /* set curthread pointer */
+ LOAD %r30, (savearea+CPUSAVE_SRR0)(%r2); \
+ LOAD %r31, (savearea+CPUSAVE_SRR1)(%r2); \
+ STORE %r30, FRAME_SRR0+CALLSIZE(%r1); \
+ STORE %r31, FRAME_SRR1+CALLSIZE(%r1); \
+ LOAD THREAD_REG, PC_CURTHREAD(%r2); \
/*
*
@@ -232,26 +271,26 @@
*/
#define FRAME_LEAVE(isrr0, isrr1) \
/* restore CTR, XER, LR, CR */ \
- lwz %r4, FRAME_CTR+8(%r1); \
- lwz %r5, FRAME_XER+8(%r1); \
- lwz %r6, FRAME_LR+8(%r1); \
- lwz %r7, FRAME_CR+8(%r1); \
+ LOAD %r4, FRAME_CTR+CALLSIZE(%r1); \
+ LOAD %r5, FRAME_XER+CALLSIZE(%r1); \
+ LOAD %r6, FRAME_LR+CALLSIZE(%r1); \
+ LOAD %r7, FRAME_CR+CALLSIZE(%r1); \
mtctr %r4; \
mtxer %r5; \
mtlr %r6; \
mtcr %r7; \
/* restore DBCR0 */ \
- lwz %r4, FRAME_BOOKE_DBCR0+8(%r1); \
+ LOAD %r4, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \
mtspr SPR_DBCR0, %r4; \
/* restore xSRR0-1 */ \
- lwz %r30, FRAME_SRR0+8(%r1); \
- lwz %r31, FRAME_SRR1+8(%r1); \
+ LOAD %r30, FRAME_SRR0+CALLSIZE(%r1); \
+ LOAD %r31, FRAME_SRR1+CALLSIZE(%r1); \
mtspr isrr0, %r30; \
mtspr isrr1, %r31; \
/* restore R2-31, SP */ \
- lmw %r2, FRAME_2+8(%r1) ; \
- lwz %r0, FRAME_0+8(%r1); \
- lwz %r1, FRAME_1+8(%r1); \
+ lmw %r2, FRAME_2+CALLSIZE(%r1) ; \
+ LOAD %r0, FRAME_0+CALLSIZE(%r1); \
+ LOAD %r1, FRAME_1+CALLSIZE(%r1); \
isync
/*
@@ -264,33 +303,70 @@
* miss within the TLB prolog itself!
* - TLBSAVE is always translated
*/
+#ifdef __powerpc64__
+#define TLB_SAVE_REGS(br) \
+ std %r20, (TLBSAVE_BOOKE_R20)(br); \
+ std %r21, (TLBSAVE_BOOKE_R21)(br); \
+ std %r22, (TLBSAVE_BOOKE_R22)(br); \
+ std %r23, (TLBSAVE_BOOKE_R23)(br); \
+ std %r24, (TLBSAVE_BOOKE_R24)(br); \
+ std %r25, (TLBSAVE_BOOKE_R25)(br); \
+ std %r26, (TLBSAVE_BOOKE_R26)(br); \
+ std %r27, (TLBSAVE_BOOKE_R27)(br); \
+ std %r28, (TLBSAVE_BOOKE_R28)(br); \
+ std %r29, (TLBSAVE_BOOKE_R29)(br); \
+ std %r30, (TLBSAVE_BOOKE_R30)(br); \
+ std %r31, (TLBSAVE_BOOKE_R31)(br);
+#define TLB_RESTORE_REGS(br) \
+ ld %r20, (TLBSAVE_BOOKE_R20)(br); \
+ ld %r21, (TLBSAVE_BOOKE_R21)(br); \
+ ld %r22, (TLBSAVE_BOOKE_R22)(br); \
+ ld %r23, (TLBSAVE_BOOKE_R23)(br); \
+ ld %r24, (TLBSAVE_BOOKE_R24)(br); \
+ ld %r25, (TLBSAVE_BOOKE_R25)(br); \
+ ld %r26, (TLBSAVE_BOOKE_R26)(br); \
+ ld %r27, (TLBSAVE_BOOKE_R27)(br); \
+ ld %r28, (TLBSAVE_BOOKE_R28)(br); \
+ ld %r29, (TLBSAVE_BOOKE_R29)(br); \
+ ld %r30, (TLBSAVE_BOOKE_R30)(br); \
+ ld %r31, (TLBSAVE_BOOKE_R31)(br);
+#define TLB_NEST(outr,inr) \
+ rlwinm outr, inr, 7, 23, 24; /* 8 x TLBSAVE_LEN */
+#else
+#define TLB_SAVE_REGS(br) \
+ stmw %r20, TLBSAVE_BOOKE_R20(br)
+#define TLB_RESTORE_REGS(br) \
+ lmw %r20, TLBSAVE_BOOKE_R20(br)
+#define TLB_NEST(outr,inr) \
+ rlwinm outr, inr, 6, 23, 25; /* 4 x TLBSAVE_LEN */
+#endif
#define TLB_PROLOG \
mtsprg4 %r1; /* Save SP */ \
mtsprg5 %r28; \
mtsprg6 %r29; \
/* calculate TLB nesting level and TLBSAVE instance address */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
- rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
+ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ TLB_NEST(%r29,%r28); \
addi %r28, %r28, 1; \
- stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
add %r1, %r1, %r29; /* current TLBSAVE ptr */ \
\
/* save R20-31 */ \
mfsprg5 %r28; \
mfsprg6 %r29; \
- stmw %r20, (TLBSAVE_BOOKE_R20)(%r1); \
+ TLB_SAVE_REGS(%r1); \
/* save LR, CR */ \
mflr %r30; \
mfcr %r31; \
- stw %r30, (TLBSAVE_BOOKE_LR)(%r1); \
- stw %r31, (TLBSAVE_BOOKE_CR)(%r1); \
+ STORE %r30, (TLBSAVE_BOOKE_LR)(%r1); \
+ STORE %r31, (TLBSAVE_BOOKE_CR)(%r1); \
/* save SRR0-1 */ \
mfsrr0 %r30; /* execution addr at interrupt time */ \
mfsrr1 %r31; /* MSR at interrupt time*/ \
- stw %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \
- stw %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \
+ STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \
+ STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \
isync; \
mfsprg4 %r1
@@ -303,43 +379,43 @@
mtsprg4 %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
/* calculate TLB nesting level and TLBSAVE instance addr */ \
- lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
subi %r28, %r28, 1; \
- stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
- rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
+ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ TLB_NEST(%r29,%r28); \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
add %r1, %r1, %r29; \
\
/* restore LR, CR */ \
- lwz %r30, (TLBSAVE_BOOKE_LR)(%r1); \
- lwz %r31, (TLBSAVE_BOOKE_CR)(%r1); \
+ LOAD %r30, (TLBSAVE_BOOKE_LR)(%r1); \
+ LOAD %r31, (TLBSAVE_BOOKE_CR)(%r1); \
mtlr %r30; \
mtcr %r31; \
/* restore SRR0-1 */ \
- lwz %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \
- lwz %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \
+ LOAD %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \
+ LOAD %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \
mtsrr0 %r30; \
mtsrr1 %r31; \
/* restore R20-31 */ \
- lmw %r20, (TLBSAVE_BOOKE_R20)(%r1); \
+ TLB_RESTORE_REGS(%r1); \
mfsprg4 %r1
#ifdef SMP
#define TLB_LOCK \
GET_CPUINFO(%r20); \
- lwz %r21, PC_CURTHREAD(%r20); \
- lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
+ LOAD %r21, PC_CURTHREAD(%r20); \
+ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
-1: lwarx %r23, 0, %r22; \
- cmpwi %r23, TLB_UNLOCKED; \
+1: LOADX %r23, 0, %r22; \
+ CMPI %r23, TLB_UNLOCKED; \
beq 2f; \
\
/* check if this is recursion */ \
- cmplw cr0, %r21, %r23; \
+ CMPL cr0, %r21, %r23; \
bne- 1b; \
\
2: /* try to acquire lock */ \
- stwcx. %r21, 0, %r22; \
+ STOREX %r21, 0, %r22; \
bne- 1b; \
\
/* got it, update recursion counter */ \
@@ -351,22 +427,22 @@
#define TLB_UNLOCK \
GET_CPUINFO(%r20); \
- lwz %r21, PC_CURTHREAD(%r20); \
- lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
+ LOAD %r21, PC_CURTHREAD(%r20); \
+ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
/* update recursion counter */ \
lwz %r23, RES_RECURSE(%r22); \
subi %r23, %r23, 1; \
stw %r23, RES_RECURSE(%r22); \
\
- cmpwi %r23, 0; \
+ cmplwi %r23, 0; \
bne 1f; \
isync; \
msync; \
\
/* release the lock */ \
li %r23, TLB_UNLOCKED; \
- stw %r23, 0(%r22); \
+ STORE %r23, 0(%r22); \
1: isync; \
msync
#else
@@ -407,8 +483,10 @@
INTERRUPT(int_critical_input)
STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1)
FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_CRIT)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1)
rfci
@@ -419,8 +497,10 @@
INTERRUPT(int_machine_check)
STANDARD_PROLOG(SPR_SPRG3, PC_BOOKE_MCHKSAVE, SPR_MCSRR0, SPR_MCSRR1)
FRAME_SETUP(SPR_SPRG3, PC_BOOKE_MCHKSAVE, EXC_MCHK)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
FRAME_LEAVE(SPR_MCSRR0, SPR_MCSRR1)
rfmci
@@ -449,8 +529,10 @@
INTERRUPT(int_external_input)
STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_EXI)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b clear_we
@@ -487,8 +569,10 @@
INTERRUPT(int_decrementer)
STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_DECR)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b clear_we
@@ -535,8 +619,10 @@
INTERRUPT(int_performance_counter)
STANDARD_PROLOG(SPR_SPRG3, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG3, PC_TEMPSAVE, EXC_PERF)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b trapexit
#endif
@@ -574,9 +660,8 @@
mfspr %r27, SPR_MAS2
/* Check faulting address. */
- lis %r21, VM_MAXUSER_ADDRESS@h
- ori %r21, %r21, VM_MAXUSER_ADDRESS@l
- cmplw cr0, %r31, %r21
+ LOAD_ADDR(%r21, VM_MAXUSER_ADDRESS)
+ CMPL cr0, %r31, %r21
blt search_user_pmap
/* If it's kernel address, allow only supervisor mode misses. */
@@ -587,9 +672,13 @@
search_kernel_pmap:
/* Load r26 with kernel_pmap address */
bl 1f
+#ifdef __powerpc64__
+ .llong kernel_pmap_store-.
+#else
.long kernel_pmap_store-.
+#endif
1: mflr %r21
- lwz %r26, 0(%r21)
+ LOAD %r26, 0(%r21)
add %r26, %r21, %r26 /* kernel_pmap_store in r26 */
/* Force kernel tid, set TID to 0 in MAS1. */
@@ -600,7 +689,7 @@
/* This may result in nested tlb miss. */
bl pte_lookup /* returns PTE address in R25 */
- cmpwi %r25, 0 /* pte found? */
+ CMPI %r25, 0 /* pte found? */
beq search_failed
/* Finish up, write TLB entry. */
@@ -614,7 +703,7 @@
search_user_pmap:
/* Load r26 with current user space process pmap */
GET_CPUINFO(%r26)
- lwz %r26, PC_CURPMAP(%r26)
+ LOAD %r26, PC_CURPMAP(%r26)
b tlb_miss_handle
@@ -657,9 +746,35 @@
*
****************************************************************************/
pte_lookup:
- cmpwi %r26, 0
+ CMPI %r26, 0
beq 1f /* fail quickly if pmap is invalid */
+#ifdef __powerpc64__
+ rldicl %r21, %r31, (64 - PP2D_L_L), (64 - PP2D_L_NUM) /* pp2d offset */
+ rldicl %r25, %r31, (64 - PP2D_H_L), (64 - PP2D_H_NUM)
+ rldimi %r21, %r25, PP2D_L_NUM, (64 - (PP2D_L_NUM + PP2D_H_NUM))
+ slwi %r21, %r21, PP2D_ENTRY_SHIFT /* multiply by pp2d entry size */
+ addi %r25, %r26, PM_PP2D /* pmap pm_pp2d[] address */
+ add %r25, %r25, %r21 /* offset within pm_pp2d[] table */
+ ld %r25, 0(%r25) /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] */
+
+ cmpdi %r25, 0
+ beq 1f
+
+#if PAGE_SIZE < 65536
+ rldicl %r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM) /* pdir offset */
+ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */
+ add %r25, %r25, %r21 /* offset within pdir table */
+ ld %r25, 0(%r25) /* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */
+
+ cmpdi %r25, 0
+ beq 1f
+#endif
+
+ rldicl %r21, %r31, (64 - PTBL_L), (64 - PTBL_NUM) /* ptbl offset */
+ slwi %r21, %r21, PTBL_ENTRY_SHIFT /* multiply by pte entry size */
+
+#else
srwi %r21, %r31, PDIR_SHIFT /* pdir offset */
slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */
@@ -669,8 +784,8 @@
* Get ptbl address, i.e. pmap->pm_pdir[pdir_idx]
* This load may cause a Data TLB miss for non-kernel pmap!
*/
- lwz %r25, 0(%r25)
- cmpwi %r25, 0
+ LOAD %r25, 0(%r25)
+ CMPI %r25, 0
beq 2f
lis %r21, PTBL_MASK@h
@@ -679,6 +794,7 @@
/* ptbl offset, multiply by ptbl entry size */
srwi %r21, %r21, (PTBL_SHIFT - PTBL_ENTRY_SHIFT)
+#endif
add %r25, %r25, %r21 /* address of pte entry */
/*
@@ -730,12 +846,19 @@
rlwimi %r27, %r21, 13, 27, 30 /* insert WIMG bits from pte */
/* Setup MAS3 value in r23. */
- lwz %r23, PTE_RPN(%r25) /* get pte->rpn */
+ LOAD %r23, PTE_RPN(%r25) /* get pte->rpn */
+#ifdef __powerpc64__
+ rldicr %r22, %r23, 52, 51 /* extract MAS3 portion of RPN */
+ rldicl %r23, %r23, 20, 54 /* extract MAS7 portion of RPN */
+
+ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */
+#else
rlwinm %r22, %r23, 20, 0, 11 /* extract MAS3 portion of RPN */
rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */
rlwimi %r22, %r21, 20, 12, 19 /* insert lower 8 RPN bits to MAS3 */
rlwinm %r23, %r23, 20, 24, 31 /* MAS7 portion of RPN */
+#endif
/* Load MAS registers. */
mtspr SPR_MAS0, %r29
@@ -795,34 +918,41 @@
STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1)
FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG)
GET_CPUINFO(%r3)
- lwz %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3)
+ LOAD %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3)
bl 0f
+#ifdef __powerpc64__
+ .llong interrupt_vector_base-.
+ .llong interrupt_vector_top-.
+#else
.long interrupt_vector_base-.
.long interrupt_vector_top-.
+#endif
0: mflr %r5
- lwz %r4,0(%r5) /* interrupt_vector_base in r4 */
+ LOAD %r4,0(%r5) /* interrupt_vector_base in r4 */
add %r4,%r4,%r5
- cmplw cr0, %r3, %r4
+ CMPL cr0, %r3, %r4
blt 1f
- lwz %r4,4(%r5) /* interrupt_vector_top in r4 */
+ LOAD %r4,4(%r5) /* interrupt_vector_top in r4 */
add %r4,%r4,%r5
addi %r4,%r4,4
- cmplw cr0, %r3, %r4
+ CMPL cr0, %r3, %r4
bge 1f
/* Disable single-stepping for the interrupt handlers. */
- lwz %r3, FRAME_SRR1+8(%r1);
+ LOAD %r3, FRAME_SRR1+CALLSIZE(%r1);
rlwinm %r3, %r3, 0, 23, 21
- stw %r3, FRAME_SRR1+8(%r1);
+ STORE %r3, FRAME_SRR1+CALLSIZE(%r1);
/* Restore srr0 and srr1 as they could have been clobbered. */
GET_CPUINFO(%r4)
- lwz %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0+8)(%r4);
+ LOAD %r3, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR0)(%r4);
mtspr SPR_SRR0, %r3
- lwz %r4, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR1+8)(%r4);
+ LOAD %r4, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR1)(%r4);
mtspr SPR_SRR1, %r4
b 9f
1:
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(trap)
+ TOC_RESTORE
/*
* Handle ASTs, needed for proper support of single-stepping.
* We actually need to return to the process with an rfi.
@@ -838,8 +968,10 @@
****************************************************************************/
trap_common:
/* Call C trap dispatcher */
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(trap)
+ TOC_RESTORE
.globl CNAME(trapexit) /* exported for db_backtrace use */
CNAME(trapexit):
@@ -847,12 +979,12 @@
wrteei 0
/* Test AST pending - makes sense for user process only */
- lwz %r5, FRAME_SRR1+8(%r1)
+ LOAD %r5, FRAME_SRR1+CALLSIZE(%r1)
mtcr %r5
bf 17, 1f
GET_CPUINFO(%r3)
- lwz %r4, PC_CURTHREAD(%r3)
+ LOAD %r4, PC_CURTHREAD(%r3)
lwz %r4, TD_FLAGS(%r4)
lis %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@h
ori %r5, %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@l
@@ -862,8 +994,10 @@
/* re-enable interrupts before calling ast() */
wrteei 1
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(ast)
+ TOC_RESTORE
.globl CNAME(asttrapexit) /* db_backtrace code sentinel #2 */
CNAME(asttrapexit):
b trapexit /* test ast ret value ? */
@@ -876,8 +1010,8 @@
/*
* Deliberate entry to dbtrap
*/
- .globl CNAME(breakpoint)
-CNAME(breakpoint):
+ /* .globl CNAME(breakpoint)*/
+ASENTRY_NOPROF(breakpoint)
mtsprg1 %r1
mfmsr %r3
mtsrr1 %r3
@@ -885,21 +1019,21 @@
mtmsr %r3 /* disable interrupts */
isync
GET_CPUINFO(%r3)
- stw %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3)
mflr %r31
mtsrr0 %r31
mfdear %r30
mfesr %r31
- stw %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3)
mfsrr0 %r30
mfsrr1 %r31
- stw %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3)
isync
mfcr %r30
@@ -910,8 +1044,10 @@
dbtrap:
FRAME_SETUP(SPR_SPRG1, PC_DBSAVE, EXC_DEBUG)
/* Call C trap code: */
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(db_trap_glue)
+ TOC_RESTORE
or. %r3, %r3, %r3
bne dbleave
/* This wasn't for KDB, so switch to real trap: */
@@ -923,19 +1059,19 @@
#endif /* KDB */
clear_we:
- lwz %r3, (FRAME_SRR1+8)(%r1)
+ LOAD %r3, (FRAME_SRR1+CALLSIZE)(%r1)
rlwinm %r3, %r3, 0, 14, 12
- stw %r3, (FRAME_SRR1+8)(%r1)
+ STORE %r3, (FRAME_SRR1+CALLSIZE)(%r1)
b trapexit
#ifdef SMP
ENTRY(tlb_lock)
GET_CPUINFO(%r5)
- lwz %r5, PC_CURTHREAD(%r5)
-1: lwarx %r4, 0, %r3
- cmpwi %r4, TLB_UNLOCKED
+ LOAD %r5, PC_CURTHREAD(%r5)
+1: LOADX %r4, 0, %r3
+ CMPI %r4, TLB_UNLOCKED
bne 1b
- stwcx. %r5, 0, %r3
+ STOREX %r5, 0, %r3
bne- 1b
isync
msync
@@ -945,7 +1081,7 @@
isync
msync
li %r4, TLB_UNLOCKED
- stw %r4, 0(%r3)
+ STORE %r4, 0(%r3)
isync
msync
blr
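
Reviewer sketch (not from the patch): the STORE/LOAD/CMPI/STU width macros do most of the work in this file; the only size-dependent arithmetic is TLB_NEST, which converts the nesting level into a byte offset into pc_booke_tlbsave. One save instance is BOOKE_TLB_SAVELEN (16) registers, 64 bytes on 32-bit and 128 bytes on 64-bit, hence shift 6 versus 7 (the rlwinm mask still clamps the level to the supported nesting depth). CALLSIZE (48) is the 64-bit ELF stack-frame header the trapframe sits above, and REDZONE (288) steps the new frame past the save area the 64-bit ABI permits below an interrupted SP. C model of TLB_NEST:

    /* Byte offset of one TLB-miss save instance: level * 16 regs * reg size. */
    static unsigned int
    tlb_nest_offset(unsigned int level, int is64)
    {
        return (level << (is64 ? 7 : 6));
    }
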
Index: sys/powerpc/include/asm.h
===================================================================
--- sys/powerpc/include/asm.h
+++ sys/powerpc/include/asm.h
@@ -128,6 +128,13 @@
.long 0; \
.byte 0,0,0,0,0,0,0,0; \
END_SIZE(name)
+
+#define LOAD_ADDR(reg, var) \
+ lis reg, var@highest; \
+ ori reg, reg, var@higher; \
+ rldicr reg, reg, 32, 31; \
+ oris reg, reg, var@h; \
+ ori reg, reg, var@l;
#else /* !__powerpc64__ */
#define _ENTRY(name) \
.text; \
@@ -136,6 +143,10 @@
.type name,@function; \
name:
#define _END(name)
+
+#define LOAD_ADDR(reg, var) \
+ lis reg, var@ha; \
+ addi reg, reg, var@l;
#endif /* __powerpc64__ */
#if defined(PROF) || (defined(_KERNEL) && defined(GPROF))
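
Reviewer sketch (not from the patch): the 64-bit LOAD_ADDR assembles an arbitrary constant from four 16-bit relocations; the 32-bit form pairs @ha with addi because @ha pre-compensates for the sign of the low half. A C model of the five-instruction 64-bit sequence:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    load_addr_model(uint64_t var)
    {
        uint64_t reg;

        reg = ((var >> 48) & 0xffff) << 16;    /* lis    reg, var@highest */
        reg |= (var >> 32) & 0xffff;           /* ori    reg, reg, var@higher */
        reg <<= 32;                            /* rldicr reg, reg, 32, 31 */
        reg |= ((var >> 16) & 0xffff) << 16;   /* oris   reg, reg, var@h */
        reg |= var & 0xffff;                   /* ori    reg, reg, var@l */
        return (reg);
    }

    int
    main(void)
    {
        printf("%d\n", load_addr_model(0xc000000000002000ULL) ==
            0xc000000000002000ULL);            /* prints 1 */
        return (0);
    }
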
Index: sys/powerpc/include/pcpu.h
===================================================================
--- sys/powerpc/include/pcpu.h
+++ sys/powerpc/include/pcpu.h
@@ -80,15 +80,20 @@
#define BOOKE_TLB_SAVELEN 16
#define BOOKE_TLBSAVE_LEN (BOOKE_TLB_SAVELEN * BOOKE_TLB_MAXNEST)
+#ifdef __powerpc64__
+#define BOOKE_PCPU_PAD 773
+#else
+#define BOOKE_PCPU_PAD 173
+#endif
#define PCPU_MD_BOOKE_FIELDS \
register_t pc_booke_critsave[BOOKE_CRITSAVE_LEN]; \
register_t pc_booke_mchksave[CPUSAVE_LEN]; \
register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \
register_t pc_booke_tlb_level; \
vm_offset_t pc_qmap_addr; \
- uint32_t *pc_booke_tlb_lock; \
+ uintptr_t *pc_booke_tlb_lock; \
int pc_tid_next; \
- char __pad[173]
+ char __pad[BOOKE_PCPU_PAD]
/* Definitions for register offsets within the exception tmp save areas */
#define CPUSAVE_R27 0 /* where r27 gets saved */
@@ -102,6 +107,8 @@
#define CPUSAVE_BOOKE_ESR 6 /* where SPR_ESR gets saved */
#define CPUSAVE_SRR0 7 /* where SRR0 gets saved */
#define CPUSAVE_SRR1 8 /* where SRR1 gets saved */
+#define BOOKE_CRITSAVE_SRR0 9 /* where real SRR0 gets saved (critical) */
+#define BOOKE_CRITSAVE_SRR1 10 /* where real SRR1 gets saved (critical) */
/* Book-E TLBSAVE is more elaborate */
#define TLBSAVE_BOOKE_LR 0
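
Reviewer sketch (not from the patch): the save areas are arrays of register_t, and the CPUSAVE_* indices are scaled to byte offsets for assembly outside this diff (my understanding; the scaling is not shown here), so the old `CPUSAVE_SRR0+8` byte fix-up only landed on slots 9/10 while registers were 4 bytes wide. The named BOOKE_CRITSAVE_* slots stay correct at either width:

    #include <stdint.h>

    typedef uintptr_t md_register_t;   /* stand-in for the kernel register_t */

    #define CPUSAVE_SRR0            7  /* CSRR0 captured by the prolog */
    #define CPUSAVE_SRR1            8  /* CSRR1 */
    #define BOOKE_CRITSAVE_SRR0     9  /* SRR0 live at interrupt time */
    #define BOOKE_CRITSAVE_SRR1     10 /* SRR1 live at interrupt time */

    /* Named slots index the save area correctly at any register width. */
    static md_register_t *
    critsave_live_srr0(md_register_t save[])
    {
        return (&save[BOOKE_CRITSAVE_SRR0]);
    }
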
Index: sys/powerpc/include/pmap.h
===================================================================
--- sys/powerpc/include/pmap.h
+++ sys/powerpc/include/pmap.h
@@ -188,8 +188,16 @@
tlbtid_t pm_tid[MAXCPU]; /* TID to identify this pmap entries in TLB */
cpuset_t pm_active; /* active on cpus */
+#ifdef __powerpc64__
+ /* Page table directory, array of pointers to page directories. */
+ pte_t **pm_pp2d[PP2D_NENTRIES];
+
+ /* List of allocated pdir bufs (pdir kva regions). */
+ TAILQ_HEAD(, ptbl_buf) pm_pdir_list;
+#else
/* Page table directory, array of pointers to page tables. */
pte_t *pm_pdir[PDIR_NENTRIES];
+#endif
/* List of allocated ptbl bufs (ptbl kva regions). */
TAILQ_HEAD(, ptbl_buf) pm_ptbl_list;
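
Reviewer sketch (not from the patch): pm_pp2d turns the 64-bit pmap into a three-level tree. A reduced model of the walk that pte_find() performs over it, with the index macros copied from the pte.h hunk below (4K pages assumed; kernel types replaced by stand-ins):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t pte_t;            /* stand-ins for the kernel types */
    typedef uint64_t vm_offset_t;

    /* Index macros as pte.h defines them for 4K pages (assumed). */
    #define PP2D_IDX(va)  ((((va) >> 50) & 0xc00) | (((va) >> 30) & 0x3ff))
    #define PDIR_IDX(va)  (((va) >> 21) & 0x1ff)
    #define PTBL_IDX(va)  (((va) >> 12) & 0x1ff)

    /* Walk pp2d -> pdir -> ptbl, giving up at the first absent level,
     * exactly as the 64-bit pte_find() in pmap.c does. */
    static pte_t *
    walk(pte_t ***pm_pp2d, vm_offset_t va)
    {
        pte_t **pdir;
        pte_t *ptbl;

        pdir = pm_pp2d[PP2D_IDX(va)];
        if (pdir == NULL)
            return (NULL);
        ptbl = pdir[PDIR_IDX(va)];
        if (ptbl == NULL)
            return (NULL);
        return (&ptbl[PTBL_IDX(va)]);
    }
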
Index: sys/powerpc/include/psl.h
===================================================================
--- sys/powerpc/include/psl.h
+++ sys/powerpc/include/psl.h
@@ -50,6 +50,10 @@
#define PSL_PMM 0x00000004UL /* performance monitor mark */
/* Machine State Register - Book-E cores */
+#ifdef __powerpc64__
+#define PSL_CM 0x80000000UL /* Computation Mode (64-bit) */
+#endif
+
#define PSL_UCLE 0x04000000UL /* User mode cache lock enable */
#define PSL_WE 0x00040000UL /* Wait state enable */
#define PSL_CE 0x00020000UL /* Critical interrupt enable */
@@ -86,7 +90,11 @@
#if defined(BOOKE_E500)
/* Initial kernel MSR, use IS=1 ad DS=1. */
#define PSL_KERNSET_INIT (PSL_IS | PSL_DS)
+#ifdef __powerpc64__
+#define PSL_KERNSET (PSL_CM | PSL_CE | PSL_ME | PSL_EE)
+#else
#define PSL_KERNSET (PSL_CE | PSL_ME | PSL_EE)
+#endif
#define PSL_SRR1_MASK 0x00000000UL /* No mask on Book-E */
#elif defined(BOOKE_PPC4XX)
#define PSL_KERNSET (PSL_CE | PSL_ME | PSL_EE | PSL_FP)
Index: sys/powerpc/include/pte.h
===================================================================
--- sys/powerpc/include/pte.h
+++ sys/powerpc/include/pte.h
@@ -162,6 +162,83 @@
#include <machine/tlb.h>
+#ifdef __powerpc64__
+
+#include <machine/tlb.h>
+
+/*
+ * The virtual address is:
+ *
+ * 4K page size
+ * +-----+-----+-----+-------+-------------+-------------+----------------+
+ * | - |p2d#h| - | p2d#l | dir# | pte# | off in 4K page |
+ * +-----+-----+-----+-------+-------------+-------------+----------------+
+ * 63 62 61 60 59 40 39 30 29 ^ 21 20 ^ 12 11 0
+ * | |
+ * index in 1 page of pointers
+ *
+ * 1st level - pointers to page table directory (pp2d)
+ *
+ * pp2d consists of PP2D_NENTRIES entries, each being a pointer to a
+ * second-level entity, i.e. a page table directory (pdir).
+ */
+#define HARDWARE_WALKER
+#define PP2D_H_H 61
+#define PP2D_H_L 60
+#define PP2D_L_H 39
+#define PP2D_L_L 30 /* >30 would work with no page table pool */
+#ifndef LOCORE
+#define PP2D_SIZE (1UL << PP2D_L_L) /* va range mapped by pp2d */
+#else
+#define PP2D_SIZE (1 << PP2D_L_L) /* va range mapped by pp2d */
+#endif
+#define PP2D_L_SHIFT PP2D_L_L
+#define PP2D_L_NUM (PP2D_L_H-PP2D_L_L+1)
+#define PP2D_L_MASK ((1<<PP2D_L_NUM)-1)
+#define PP2D_H_SHIFT (PP2D_H_L-PP2D_L_NUM)
+#define PP2D_H_NUM (PP2D_H_H-PP2D_H_L+1)
+#define PP2D_H_MASK (((1<<PP2D_H_NUM)-1)<<PP2D_L_NUM)
+#define PP2D_IDX(va) ((((va) >> PP2D_H_SHIFT) & PP2D_H_MASK) | (((va) >> PP2D_L_SHIFT) & PP2D_L_MASK))
+#define PP2D_NENTRIES (1<<(PP2D_L_NUM+PP2D_H_NUM))
+#define PP2D_ENTRY_SHIFT 3 /* log2 (sizeof(struct pte_entry **)) */
+
+/*
+ * 2nd level - page table directory (pdir)
+ *
+ * pdir consists of PDIR_NENTRIES entries, each being a pointer to a
+ * third-level entity, i.e. the actual page table (ptbl).
+ */
+#define PDIR_H (PP2D_L_L-1)
+#define PDIR_L 21
+#define PDIR_NUM (PDIR_H-PDIR_L+1)
+#define PDIR_SIZE (1 << PDIR_L) /* va range mapped by pdir */
+#define PDIR_MASK ((1<<PDIR_NUM)-1)
+#define PDIR_SHIFT PDIR_L
+#define PDIR_NENTRIES (1<<PDIR_NUM)
+#define PDIR_IDX(va) (((va) >> PDIR_SHIFT) & PDIR_MASK)
+#define PDIR_ENTRY_SHIFT 3 /* log2 (sizeof(struct pte_entry *)) */
+#define PDIR_PAGES ((PDIR_NENTRIES * (1<<PDIR_ENTRY_SHIFT)) / PAGE_SIZE)
+
+/*
+ * 3rd level - page table (ptbl)
+ *
+ * A page table holds PTBL_NENTRIES page table entries. Each page
+ * table entry (pte) is 64 bits wide and defines the mapping
+ * for a single page.
+ */
+#define PTBL_H (PDIR_L-1)
+#define PTBL_L PAGE_SHIFT
+#define PTBL_NUM (PTBL_H-PTBL_L+1)
+#define PTBL_MASK ((1<<PTBL_NUM)-1)
+#define PTBL_SHIFT PTBL_L
+#define PTBL_SIZE PAGE_SIZE /* va range mapped by ptbl entry */
+#define PTBL_NENTRIES (1<<PTBL_NUM)
+#define PTBL_IDX(va) (((va) >> PTBL_SHIFT) & PTBL_MASK)
+#define PTBL_ENTRY_SHIFT 3 /* log2 (sizeof (struct pte_entry)) */
+#define PTBL_PAGES ((PTBL_NENTRIES * (1<<PTBL_ENTRY_SHIFT)) / PAGE_SIZE)
+
+#define KERNEL_LINEAR_MAX 0xc000000040000000
+#else
/*
* 1st level - page table directory (pdir)
*
@@ -197,6 +274,8 @@
#define PTBL_PAGES 2
#define PTBL_ENTRY_SHIFT 3 /* entry size is 2^3 = 8 bytes */
+#endif
+
/*
* Flags for pte_remove() routine.
*/
@@ -268,6 +347,29 @@
#define PTE_MANAGED 0x00000002 /* Managed */
#define PTE_REFERENCED 0x00040000 /* Referenced */
+/*
+ * Page Table Entry definitions and macros.
+ *
+ * We use the hardware page table entry format:
+ *
+ * 63 24 23 19 18 17 14 13 12 11 8 7 6 5 4 3 2 1 0
+ * ---------------------------------------------------------------
+ * ARPN(12:51) WIMGE R U0:U3 SW0 C PSIZE UX SX UW SW UR SR SW1 V
+ * ---------------------------------------------------------------
+ */
+
+/* PTE fields. */
+#define PTE_TSIZE_SHIFT (63-54)
+#define PTE_TSIZE_MASK 0x7
+#define PTE_TSIZE_SHIFT_DIRECT (63-55)
+#define PTE_TSIZE_MASK_DIRECT 0xf
+#define PTE_PS_DIRECT(ps) ((ps) << PTE_TSIZE_SHIFT_DIRECT) /* Direct Entry Page Size */
+#define PTE_PS(ps) ((ps) << PTE_TSIZE_SHIFT) /* Page Size */
+
+/* Macro argument must be of pte_t type. */
+#define PTE_TSIZE(pte) (int)((*(pte) >> PTE_TSIZE_SHIFT) & PTE_TSIZE_MASK)
+#define PTE_TSIZE_DIRECT(pte) (int)((*(pte) >> PTE_TSIZE_SHIFT_DIRECT) & PTE_TSIZE_MASK_DIRECT)
+
/* Macro argument must of pte_t type. */
#define PTE_ARPN_SHIFT 12
#define PTE_FLAGS_MASK 0x00ffffff
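
Plugging in the constants above: PP2D_L covers va bits 39-30 (10 bits) and PP2D_H covers bits 61-60 (2 bits, landing above the low field in the index), so pp2d has 4096 entries, while pdir and ptbl each have 512. A small standalone check of the index arithmetic (the masks below are the expanded values of the macros above; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define PP2D_IDX(va) ((((va) >> 50) & 0xc00) | (((va) >> 30) & 0x3ff))
#define PDIR_IDX(va) (((va) >> 21) & 0x1ff)
#define PTBL_IDX(va) (((va) >> 12) & 0x1ff)

int
main(void)
{
	/* KERNBASE + 1 GB + 2 MB + 4 KB: one step in each field. */
	uint64_t va = 0xc000000040201000UL;

	printf("pp2d %ju pdir %ju ptbl %ju off %#jx\n",
	    (uintmax_t)PP2D_IDX(va), (uintmax_t)PDIR_IDX(va),
	    (uintmax_t)PTBL_IDX(va), (uintmax_t)(va & 0xfff));
	/* prints: pp2d 1 pdir 1 ptbl 1 off 0 */
	return (0);
}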
Index: sys/powerpc/include/spr.h
===================================================================
--- sys/powerpc/include/spr.h
+++ sys/powerpc/include/spr.h
@@ -192,6 +192,18 @@
#define FSL_E5500 0x8024
#define FSL_E6500 0x8040
+#define SPR_EPCR 0x133
+#define EPCR_EXTGS 0x80000000
+#define EPCR_DTLBGS 0x40000000
+#define EPCR_ITLBGS 0x20000000
+#define EPCR_DSIGS 0x10000000
+#define EPCR_ISIGS 0x08000000
+#define EPCR_DUVGS 0x04000000
+#define EPCR_ICM 0x02000000
+#define EPCR_GICMGS 0x01000000
+#define EPCR_DGTMI 0x00800000
+#define EPCR_DMIUH 0x00400000
+#define EPCR_PMGS 0x00200000
#define SPR_SPEFSCR 0x200 /* ..8 Signal Processing Engine FSCR. */
#define SPR_IBAT0U 0x210 /* .68 Instruction BAT Reg 0 Upper */
#define SPR_IBAT0U 0x210 /* .6. Instruction BAT Reg 0 Upper */
@@ -259,6 +271,7 @@
#define SPR_DBAT6L 0x23d /* .6. Data BAT Reg 6 Lower */
#define SPR_DBAT7U 0x23e /* .6. Data BAT Reg 7 Upper */
#define SPR_DBAT7L 0x23f /* .6. Data BAT Reg 7 Lower */
+#define SPR_SPRG8 0x25c /* ..8 SPR General 8 */
#define SPR_MI_CTR 0x310 /* ..8 IMMU control */
#define Mx_CTR_GPM 0x80000000 /* Group Protection Mode */
#define Mx_CTR_PPM 0x40000000 /* Page Protection Mode */
Index: sys/powerpc/include/tlb.h
===================================================================
--- sys/powerpc/include/tlb.h
+++ sys/powerpc/include/tlb.h
@@ -65,7 +65,11 @@
#define TLB_SIZE_1G 10
#define TLB_SIZE_4G 11
+#ifdef __powerpc64__
+#define MAS2_EPN_MASK 0xFFFFFFFFFFFFF000UL
+#else
#define MAS2_EPN_MASK 0xFFFFF000
+#endif
#define MAS2_EPN_SHIFT 12
#define MAS2_X0 0x00000040
#define MAS2_X1 0x00000020
@@ -137,7 +141,11 @@
vm_offset_t virt;
vm_size_t size;
uint32_t mas1;
+#ifdef __powerpc64__
+ uint64_t mas2;
+#else
uint32_t mas2;
+#endif
uint32_t mas3;
uint32_t mas7;
} tlb_entry_t;
@@ -217,8 +225,8 @@
struct pmap;
-void tlb_lock(uint32_t *);
-void tlb_unlock(uint32_t *);
+void tlb_lock(uintptr_t *);
+void tlb_unlock(uintptr_t *);
void tlb1_ap_prep(void);
int tlb1_set_entry(vm_offset_t, vm_paddr_t, vm_size_t, uint32_t);
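
With the widened mask and mas2 field, the effective page number can now carry a full 64-bit VA. A sketch of how a mas2 value is composed (mas2_for is illustrative; MAS2_EPN_MASK is mirrored from above, and MAS2_M = 0x00000004 is the memory-coherence (M) attribute):

#include <stdint.h>

#define MAS2_EPN_MASK 0xFFFFFFFFFFFFF000UL /* mirrored from above */
#define MAS2_M 0x00000004 /* memory coherence required */

/* EPN occupies bits 63-12; the WIMGE attribute bits sit in bits 4-0. */
static uint64_t
mas2_for(uint64_t va, uint32_t wimge)
{
	return ((va & MAS2_EPN_MASK) | wimge);
}

For example, mas2_for(va, MAS2_M) yields the EPN-plus-M encoding written to SPR_MAS2 for a coherent mapping.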
Index: sys/powerpc/include/vmparam.h
===================================================================
--- sys/powerpc/include/vmparam.h
+++ sys/powerpc/include/vmparam.h
@@ -69,7 +69,11 @@
#if !defined(LOCORE)
#ifdef __powerpc64__
#define VM_MIN_ADDRESS (0x0000000000000000UL)
+#ifdef AIM
#define VM_MAXUSER_ADDRESS (0xfffffffffffff000UL)
+#else
+#define VM_MAXUSER_ADDRESS (0x7ffffffffffff000UL)
+#endif
#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
#else
#define VM_MIN_ADDRESS ((vm_offset_t)0)
@@ -78,23 +82,29 @@
#endif
#define SHAREDPAGE (VM_MAXUSER_ADDRESS - PAGE_SIZE)
#else /* LOCORE */
-#if !defined(__powerpc64__) && defined(BOOKE)
+#ifdef BOOKE
#define VM_MIN_ADDRESS 0
+#ifdef __powerpc64__
+#define VM_MAXUSER_ADDRESS 0x7ffffffffffff000
+#else
#define VM_MAXUSER_ADDRESS 0x7ffff000
#endif
+#endif
#endif /* LOCORE */
#define FREEBSD32_SHAREDPAGE (VM_MAXUSER_ADDRESS32 - PAGE_SIZE)
#define FREEBSD32_USRSTACK FREEBSD32_SHAREDPAGE
-#ifdef AIM
-#define KERNBASE 0x00100000UL /* start of kernel virtual */
-
#ifdef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS 0xc000000000000000UL
#define VM_MAX_KERNEL_ADDRESS 0xc0000001c7ffffffUL
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
-#else
+#endif
+
+#ifdef AIM
+#define KERNBASE 0x00100000UL /* start of kernel virtual */
+
+#ifndef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
#define VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH -1)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
@@ -108,11 +118,19 @@
#else /* Book-E */
+#ifdef __powerpc64__
+#ifndef LOCORE
+#define KERNBASE 0xc000000000000000UL /* start of kernel virtual */
+#else
+#define KERNBASE 0xc000000000000000 /* start of kernel virtual */
+#endif
+#else
#define KERNBASE 0xc0000000 /* start of kernel virtual */
#define VM_MIN_KERNEL_ADDRESS KERNBASE
#define VM_MAX_KERNEL_ADDRESS 0xffffefff
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
+#endif
#endif /* AIM/E500 */
Index: sys/powerpc/mpc85xx/platform_mpc85xx.c
===================================================================
--- sys/powerpc/mpc85xx/platform_mpc85xx.c
+++ sys/powerpc/mpc85xx/platform_mpc85xx.c
@@ -553,3 +553,23 @@
return (0);
}
+
+#if defined(EARLY_PRINTF)
+static void mpc85xx_early_putc(int c);
+static void
+mpc85xx_early_putc(int c)
+{
+ int limit;
+ static vm_offset_t ccsr = 0xbffffffffffff000;
+
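+ /*
+ * Presumably a 16550-compatible UART window at ccsr + 0x500: poll
+ * its LSR (+5) for THRE (0x20) before writing THR (+0), then wait
+ * for TEMT (0x40) so the byte drains.
+ */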
+ limit = 250000;
+ while ((*(volatile uint8_t *)(ccsr + 0x505) & 0x20) == 0 && --limit)
+ DELAY(4);
+ *(volatile uint8_t *)(ccsr + 0x500) = c;
+ limit = 250000;
+ while ((*(volatile uint8_t *)(ccsr + 0x505) & 0x40) == 0 && --limit)
+ DELAY(4);
+}
+
+early_putc_t *early_putc = mpc85xx_early_putc;
+#endif
Index: sys/powerpc/powerpc/genassym.c
===================================================================
--- sys/powerpc/powerpc/genassym.c
+++ sys/powerpc/powerpc/genassym.c
@@ -82,6 +82,8 @@
ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*sizeof(register_t));
+ASSYM(BOOKE_CRITSAVE_SRR0, BOOKE_CRITSAVE_SRR0*sizeof(register_t));
+ASSYM(BOOKE_CRITSAVE_SRR1, BOOKE_CRITSAVE_SRR1*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*sizeof(register_t));
@@ -117,7 +119,11 @@
ASSYM(USER_SR, USER_SR);
#endif
#elif defined(BOOKE)
+#ifdef __powerpc64__
+ASSYM(PM_PP2D, offsetof(struct pmap, pm_pp2d));
+#else
ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir));
+#endif
/*
* With pte_t being a bitfield struct, these fields cannot be addressed via
* offsetof().
@@ -216,6 +222,7 @@
ASSYM(KERNBASE, KERNBASE);
ASSYM(MAXCOMLEN, MAXCOMLEN);
+ASSYM(PSL_CM, PSL_CM);
ASSYM(PSL_DE, PSL_DE);
ASSYM(PSL_DS, PSL_DS);
ASSYM(PSL_IS, PSL_IS);