Some evidence about the PowerMac G5 multiprocessor boot hang ups with the modern VM_MAX_KERNEL_ADDRESS value [found yet more staging info]
Mark Millard
marklmi at yahoo.com
Sat Feb 16 20:49:36 UTC 2019
[I added to moea64_cpu_bootstrap_native to see
more staging information.]
On 2019-Feb-16, at 12:07, Mark Millard <marklmi at yahoo.com> wrote:
> [I needed to allow more time after the 2 resets before
> having CPU 0 look at the memory. It was reporting
> older values instead of my added writes. The odd
> non-zero value was from before the activity of interest.]
>
> I start with the new result found, then give supporting
> material.
>
> I've now seen hangs with:
>
> *(unsigned long*)0xc0000000000000f0 = 0x10
>
> for CPU 3. So the following completed:
>
> void
> cpudep_ap_early_bootstrap(void)
> {
> #ifndef __powerpc64__
> register_t reg;
> #endif
>
> switch (mfpvr() >> 16) {
> case IBM970:
> case IBM970FX:
> case IBM970MP:
> /* Restore HID4 and HID5, which are necessary for the MMU */
>
> #ifdef __powerpc64__
> mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
> mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
> #else
> __asm __volatile("ld %0, 16(%2); sync; isync; \
> mtspr %1, %0; sync; isync;"
> : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
> __asm __volatile("ld %0, 24(%2); sync; isync; \
> mtspr %1, %0; sync; isync;"
> : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
> #endif
> powerpc_sync();
> break;
> case IBMPOWER8:
> case IBMPOWER8E:
> case IBMPOWER9:
> #ifdef __powerpc64__
> if (mfmsr() & PSL_HV) {
> isync();
> /*
> * Direct interrupts to SRR instead of HSRR and
> * reset LPCR otherwise
> */
> mtspr(SPR_LPID, 0);
> isync();
>
> mtspr(SPR_LPCR, lpcr);
> isync();
> }
> #endif
> break;
> }
>
> __asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
> powerpc_sync();
>
> *(unsigned long*)0xc0000000000000f0 = 0x10; // HACK!!!
> powerpc_sync(); // HACK!!!
> }
>
> but the following (and later) did not complete:
>
> void
> pmap_cpu_bootstrap(int ap)
> {
> /*
> * No KTR here because our console probably doesn't work yet
> */
>
> return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
>
> *(unsigned long*)0xc0000000000000f0 = 0x20; // HACK!!!
> powerpc_sync(); // HACK!!!
> }
>
>
> . . .
The below additions to moea64_cpu_bootstrap_native
lead to:
*(unsigned long*)0xc0000000000000f0 = 0x25
which indicates that moea64_cpu_bootstrap_native
got to its end but pmap_cpu_bootstrap (the caller
via MMU_CPU_BOOTSTRAP) did not record its:
*(unsigned long*)0xc0000000000000f0 = 0x20;
from after the call. moea64_cpu_bootstrap_native
(and MMU_CPU_BOOTSTRAP) seems to have trouble
returning to pmap_cpu_bootstrap.
The below // HACK!!! lines are what I added:
/*
 * Per-CPU MMU bootstrap for the native 64-bit MOEA pmap: disables address
 * translation, installs the kernel SLB entries, points the hardware at the
 * kernel page table via SDR1, and flushes the TLB.
 *
 * The "// HACK!!!" stores are debugging breadcrumbs added for this
 * investigation: each stage writes a distinct marker (0x21..0x25) to a fixed
 * kernel address so another CPU can later see how far this AP progressed
 * before hanging.  They are not part of the normal function.
 */
static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
int i = 0;
#ifdef __powerpc64__
struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
#endif
/*
 * Initialize segment registers and MMU
 */
/* Clear PSL_DR/PSL_IR: run in real mode while the MMU is reprogrammed. */
mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);
*(unsigned long*)0xc0000000000000f0 = 0x21; // HACK!!!
powerpc_sync(); // HACK!!!
/*
 * Install kernel SLB entries
 */
#ifdef __powerpc64__
/* Invalidate the whole SLB, then explicitly invalidate entry 0 (slbia
 * may leave it). */
__asm __volatile ("slbia");
__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
"r"(0));
*(unsigned long*)0xc0000000000000f0 = 0x22; // HACK!!!
powerpc_sync(); // HACK!!!
/* Reload every valid entry from this CPU's saved SLB cache. */
for (i = 0; i < n_slbs; i++) {
if (!(slb[i].slbe & SLBE_VALID))
continue;
__asm __volatile ("slbmte %0, %1" ::
"r"(slb[i].slbv), "r"(slb[i].slbe));
}
*(unsigned long*)0xc0000000000000f0 = 0x23; // HACK!!!
powerpc_sync(); // HACK!!!
#else
/* 32-bit: load the 16 segment registers from the kernel pmap instead. */
for (i = 0; i < 16; i++)
mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif
/*
 * Install page table
 */
/* SDR1 = physical base of the PTEG table | encoded table size
 * (log2-derived from moea64_pteg_mask). */
__asm __volatile ("ptesync; mtsdr1 %0; isync"
:: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
| (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
*(unsigned long*)0xc0000000000000f0 = 0x24; // HACK!!!
powerpc_sync(); // HACK!!!
/* Flush stale TLB entries now that the new page table is live. */
tlbia();
*(unsigned long*)0xc0000000000000f0 = 0x25; // HACK!!!
powerpc_sync(); // HACK!!!
}
===
Mark Millard
marklmi at yahoo.com
( dsl-only.net went
away in early 2018-Mar)
More information about the freebsd-ppc
mailing list