svn commit: r215341 - in projects/sbruno_64cpus: lib/libmemstat
sys/amd64/amd64 sys/amd64/include sys/cddl/dev/dtrace/amd64
sys/dev/hwpmc sys/geom/eli sys/i386/i386 sys/i386/include
sys/kern sys/sy...
Sean Bruno
sbruno at FreeBSD.org
Mon Nov 15 17:40:49 UTC 2010
Author: sbruno
Date: Mon Nov 15 17:40:48 2010
New Revision: 215341
URL: http://svn.freebsd.org/changeset/base/215341
Log:
Initial patches to support 64 CPUs.
*note that this dies in memory allocation/locking at this time
*note that this does not raise MAXCPU yet
*note that this does raise MEMSTAT_MAXCPU for libmemstat
Obtained from: peter@ via Yahoo! Inc
Modified:
projects/sbruno_64cpus/lib/libmemstat/memstat.h
projects/sbruno_64cpus/sys/amd64/amd64/cpu_switch.S
projects/sbruno_64cpus/sys/amd64/amd64/intr_machdep.c
projects/sbruno_64cpus/sys/amd64/amd64/mp_machdep.c
projects/sbruno_64cpus/sys/amd64/amd64/pmap.c
projects/sbruno_64cpus/sys/amd64/amd64/vm_machdep.c
projects/sbruno_64cpus/sys/amd64/include/_types.h
projects/sbruno_64cpus/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
projects/sbruno_64cpus/sys/dev/hwpmc/hwpmc_mod.c
projects/sbruno_64cpus/sys/geom/eli/g_eli.c
projects/sbruno_64cpus/sys/i386/i386/mp_machdep.c
projects/sbruno_64cpus/sys/i386/i386/vm_machdep.c
projects/sbruno_64cpus/sys/i386/include/_types.h
projects/sbruno_64cpus/sys/kern/kern_ktr.c
projects/sbruno_64cpus/sys/kern/kern_pmc.c
projects/sbruno_64cpus/sys/kern/sched_4bsd.c
projects/sbruno_64cpus/sys/kern/sched_ule.c
projects/sbruno_64cpus/sys/kern/subr_pcpu.c
projects/sbruno_64cpus/sys/kern/subr_smp.c
projects/sbruno_64cpus/sys/sys/smp.h
projects/sbruno_64cpus/sys/sys/systm.h
projects/sbruno_64cpus/sys/x86/x86/local_apic.c
projects/sbruno_64cpus/sys/x86/x86/mca.c
projects/sbruno_64cpus/sys/x86/x86/mptable.c
Modified: projects/sbruno_64cpus/lib/libmemstat/memstat.h
==============================================================================
--- projects/sbruno_64cpus/lib/libmemstat/memstat.h Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/lib/libmemstat/memstat.h Mon Nov 15 17:40:48 2010 (r215341)
@@ -33,7 +33,7 @@
* Number of CPU slots in library-internal data structures. This should be
* at least the value of MAXCPU from param.h.
*/
-#define MEMSTAT_MAXCPU 32
+#define MEMSTAT_MAXCPU 64
/*
* Amount of caller data to maintain for each caller data slot. Applications
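[Editor's note: the header comment requires MEMSTAT_MAXCPU to be at least
MAXCPU. A compile-time guard could catch a mismatch wherever the kernel
headers are in scope; the sketch below is illustrative only and is not part
of this diff.]

	#include <sys/param.h>	/* MAXCPU */

	#define MEMSTAT_MAXCPU	64
	/* A negative array size forces a compile error if the slot count lags. */
	typedef char memstat_maxcpu_check[(MEMSTAT_MAXCPU >= MAXCPU) ? 1 : -1];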
Modified: projects/sbruno_64cpus/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/amd64/cpu_switch.S Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/amd64/cpu_switch.S Mon Nov 15 17:40:48 2010 (r215341)
@@ -74,7 +74,7 @@ ENTRY(cpu_throw)
jz 1f
/* release bit from old pm_active */
movq PCPU(CURPMAP),%rdx
- LK btrl %eax,PM_ACTIVE(%rdx) /* clear old */
+ LK btrq %rax,VM_PMAP+PM_ACTIVE(%rdx) /* clear old */
1:
movq TD_PCB(%rsi),%r8 /* newtd->td_proc */
movq PCB_CR3(%r8),%rdx
@@ -138,14 +138,14 @@ swinact:
movl PCPU(CPUID), %eax
/* Release bit from old pmap->pm_active */
movq PCPU(CURPMAP),%rcx
- LK btrl %eax,PM_ACTIVE(%rcx) /* clear old */
+ LK btrq %rax,VM_PMAP+PM_ACTIVE(%rcx) /* clear old */
SETLK %rdx, TD_LOCK(%rdi) /* Release the old thread */
swact:
/* Set bit in new pmap->pm_active */
movq TD_PROC(%rsi),%rdx /* newproc */
movq P_VMSPACE(%rdx), %rdx
addq $VM_PMAP,%rdx
- LK btsl %eax,PM_ACTIVE(%rdx) /* set new */
+ LK btsq %rax,VM_PMAP+PM_ACTIVE(%rdx) /* set new */
movq %rdx,PCPU(CURPMAP)
sw1:
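[Editor's note: the btrl/btsl instructions with a 32-bit index manipulate the
old 32-bit pm_active word, while btrq/btsq with a 64-bit index cover the
widened mask. A rough C rendering of the bit-clear side (a sketch, not part
of this diff; assumes pm_active has been widened to u_long and cpu < 64):]

	#include <machine/atomic.h>

	/*
	 * Atomically clear this CPU's bit in the 64-bit pm_active mask;
	 * the LK-prefixed btrq above is roughly the hand-written form of this.
	 */
	static __inline void
	pmap_release_active(volatile u_long *pm_active, u_int cpu)
	{
		atomic_clear_long(pm_active, 1ul << cpu);
	}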
Modified: projects/sbruno_64cpus/sys/amd64/amd64/intr_machdep.c
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/amd64/intr_machdep.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/amd64/intr_machdep.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -444,7 +444,7 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
*/
/* The BSP is always a valid target. */
-static cpumask_t intr_cpus = (1 << 0);
+static cpumask_t intr_cpus = cputomask(0);
static int current_cpu;
/*
@@ -466,7 +466,7 @@ intr_next_cpu(void)
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
- } while (!(intr_cpus & (1 << current_cpu)));
+ } while (!(intr_cpus & cputomask(current_cpu)));
mtx_unlock_spin(&icu_lock);
return (apic_id);
}
@@ -497,7 +497,7 @@ intr_add_cpu(u_int cpu)
printf("INTR: Adding local APIC %d as a target\n",
cpu_apic_ids[cpu]);
- intr_cpus |= (1 << cpu);
+ intr_cpus |= cputomask(cpu);
}
/*
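[Editor's note: the recurring change throughout this commit replaces the
int-width expression (1 << cpu), which is undefined for cpu >= 32 and at best
truncates the mask, with cputomask(), which performs the shift in the 64-bit
cpumask_t. A standalone illustration (assumptions: userland, uint64_t
standing in for cpumask_t):]

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cpumask_t;
	#define cputomask(cpu)	((cpumask_t)1 << (cpu))

	int
	main(void)
	{
		int cpu = 40;

		/* (1 << 40) on a 32-bit int would be undefined behavior. */
		printf("cputomask(%d) = 0x%jx\n", cpu, (uintmax_t)cputomask(cpu));
		return (0);
	}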
Modified: projects/sbruno_64cpus/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/amd64/mp_machdep.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/amd64/mp_machdep.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -933,7 +933,7 @@ start_all_aps(void)
panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
}
- all_cpus |= (1 << cpu); /* record AP in CPU map */
+ all_cpus |= cputomask(cpu); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
@@ -1091,27 +1091,16 @@ smp_tlb_shootdown(u_int vector, vm_offse
static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
- int ncpu, othercpus;
+ int cpu, ncpu, othercpus;
othercpus = mp_ncpus - 1;
if (mask == (cpumask_t)-1) {
- ncpu = othercpus;
- if (ncpu < 1)
+ if (othercpus < 1)
return;
} else {
mask &= ~PCPU_GET(cpumask);
if (mask == 0)
return;
- ncpu = bitcount32(mask);
- if (ncpu > othercpus) {
- /* XXX this should be a panic offence */
- printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
- ncpu, othercpus);
- ncpu = othercpus;
- }
- /* XXX should be a panic, implied by mask == 0 above */
- if (ncpu < 1)
- return;
}
if (!(read_rflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
@@ -1119,10 +1108,18 @@ smp_targeted_tlb_shootdown(cpumask_t mas
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
- if (mask == (cpumask_t)-1)
+ if (mask == (cpumask_t)-1) {
+ ncpu = othercpus;
ipi_all_but_self(vector);
- else
- ipi_selected(mask, vector);
+ } else {
+ ncpu = 0;
+ while ((cpu = ffsl(mask)) != 0) {
+ cpu--;
+ mask &= ~cputomask(cpu);
+ lapic_ipi_vectored(vector, cpu_apic_ids[cpu]);
+ ncpu++;
+ }
+ }
while (smp_tlb_wait < ncpu)
ia32_pause();
mtx_unlock_spin(&smp_ipi_mtx);
@@ -1285,12 +1282,12 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ atomic_set_long(&ipi_nmi_pending, cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
- while ((cpu = ffs(cpus)) != 0) {
+ while ((cpu = ffsl(cpus)) != 0) {
cpu--;
- cpus &= ~(1 << cpu);
+ cpus &= ~(cputomask(cpu));
ipi_send_cpu(cpu, ipi);
}
}
@@ -1308,7 +1305,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+ atomic_set_long(&ipi_nmi_pending, cputomask(cpu));
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@@ -1332,7 +1329,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+ atomic_set_long(&ipi_nmi_pending, PCPU_GET(other_cpus));
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1353,7 +1350,7 @@ ipi_nmi_handler()
if ((ipi_nmi_pending & cpumask) == 0)
return (1);
- atomic_clear_int(&ipi_nmi_pending, cpumask);
+ atomic_clear_long(&ipi_nmi_pending, cpumask);
cpustop_handler();
return (0);
}
@@ -1374,14 +1371,14 @@ cpustop_handler(void)
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@@ -1408,7 +1405,7 @@ cpususpend_handler(void)
if (savectx(susppcbs[cpu])) {
wbinvd();
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
} else {
PCPU_SET(switchtime, 0);
PCPU_SET(switchticks, ticks);
@@ -1418,8 +1415,8 @@ cpususpend_handler(void)
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
@@ -1451,7 +1448,7 @@ sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
int error;
mask = hlt_cpus_mask;
- error = sysctl_handle_int(oidp, &mask, 0, req);
+ error = sysctl_handle_long(oidp, &mask, 0, req);
if (error || !req->newptr)
return (error);
@@ -1465,12 +1462,12 @@ sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
mask |= hyperthreading_cpus_mask;
if ((mask & all_cpus) == all_cpus)
- mask &= ~(1<<0);
+ mask &= ~cputomask(0);
hlt_cpus_mask = mask;
return (error);
}
-SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
- 0, 0, sysctl_hlt_cpus, "IU",
+SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_LONG|CTLFLAG_RW,
+ 0, 0, sysctl_hlt_cpus, "LU",
"Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
static int
@@ -1492,7 +1489,7 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
hlt_cpus_mask |= hyperthreading_cpus_mask;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ hlt_cpus_mask &= ~cputomask(0);
hlt_logical_cpus = disable;
return (error);
@@ -1530,7 +1527,7 @@ sysctl_hyperthreading_allowed(SYSCTL_HAN
hlt_logical_cpus = 0;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ hlt_cpus_mask &= ~cputomask(0);
hyperthreading_allowed = allowed;
return (error);
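[Editor's note: the IPI and TLB-shootdown loops above all share one pattern:
ffsl() finds the lowest set bit (1-based), that bit is cleared from the mask,
and the CPU is acted on. A self-contained sketch of the walk (userland LP64
assumed, unsigned long standing in for cpumask_t):]

	#include <strings.h>	/* ffsl() */

	typedef unsigned long cpumask_t;
	#define cputomask(cpu)	((cpumask_t)1 << (cpu))

	static void
	foreach_cpu_in_mask(cpumask_t mask, void (*fn)(int))
	{
		int cpu;

		while ((cpu = ffsl(mask)) != 0) {
			cpu--;				/* ffsl() is 1-based */
			mask &= ~cputomask(cpu);	/* peel off this bit */
			fn(cpu);
		}
	}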
Modified: projects/sbruno_64cpus/sys/amd64/amd64/pmap.c
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/amd64/pmap.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/amd64/pmap.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -573,7 +573,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
kernel_pmap->pm_root = NULL;
- kernel_pmap->pm_active = -1; /* don't allow deactivation */
+ kernel_pmap->pm_active = (cpumask_t)-1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
/*
@@ -5061,8 +5061,8 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
#ifdef SMP
- atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
- atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
+ atomic_clear_long(&oldpmap->pm_active, PCPU_GET(cpumask));
+ atomic_set_long(&pmap->pm_active, PCPU_GET(cpumask));
#else
oldpmap->pm_active &= ~PCPU_GET(cpumask);
pmap->pm_active |= PCPU_GET(cpumask);
Modified: projects/sbruno_64cpus/sys/amd64/amd64/vm_machdep.c
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/amd64/vm_machdep.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/amd64/vm_machdep.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -544,7 +544,7 @@ cpu_reset()
printf("cpu_reset: Restarting BSP\n");
/* Restart CPU #0. */
- atomic_store_rel_int(&started_cpus, 1 << 0);
+ atomic_store_rel_long(&started_cpus, cputomask(0));
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Modified: projects/sbruno_64cpus/sys/amd64/include/_types.h
==============================================================================
--- projects/sbruno_64cpus/sys/amd64/include/_types.h Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/amd64/include/_types.h Mon Nov 15 17:40:48 2010 (r215341)
@@ -61,7 +61,7 @@ typedef unsigned long __uint64_t;
* Standard type definitions.
*/
typedef __int32_t __clock_t; /* clock()... */
-typedef unsigned int __cpumask_t;
+typedef unsigned long __cpumask_t;
typedef __int64_t __critical_t;
typedef double __double_t;
typedef float __float_t;
Modified: projects/sbruno_64cpus/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
==============================================================================
--- projects/sbruno_64cpus/sys/cddl/dev/dtrace/amd64/dtrace_subr.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/cddl/dev/dtrace/amd64/dtrace_subr.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -120,14 +120,14 @@ dtrace_xcall(processorid_t cpu, dtrace_x
if (cpu == DTRACE_CPUALL)
cpus = all_cpus;
else
- cpus = (cpumask_t) (1 << cpu);
+ cpus = cputomask(cpu);
/* If the current CPU is in the set, call the function directly: */
- if ((cpus & (1 << curcpu)) != 0) {
+ if ((cpus & cputomask(curcpu)) != 0) {
(*func)(arg);
/* Mask the current CPU from the set */
- cpus &= ~(1 << curcpu);
+ cpus &= ~cputomask(curcpu);
}
/* If there are any CPUs in the set, cross-call to those CPUs */
Modified: projects/sbruno_64cpus/sys/dev/hwpmc/hwpmc_mod.c
==============================================================================
--- projects/sbruno_64cpus/sys/dev/hwpmc/hwpmc_mod.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/dev/hwpmc/hwpmc_mod.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -1991,7 +1991,7 @@ pmc_hook_handler(struct thread *td, int
* had already processed the interrupt). We don't
* lose the interrupt sample.
*/
- atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
+ atomic_clear_long(&pmc_cpumask, cputomask(PCPU_GET(cpuid)));
pmc_process_samples(PCPU_GET(cpuid));
break;
@@ -4083,7 +4083,7 @@ pmc_process_interrupt(int cpu, struct pm
done:
/* mark CPU as needing processing */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
return (error);
}
@@ -4193,7 +4193,7 @@ pmc_process_samples(int cpu)
break;
if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
/* Need a rescan at a later time. */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
break;
}
@@ -4782,7 +4782,7 @@ pmc_cleanup(void)
PMCDBG(MOD,INI,0, "%s", "cleanup");
/* switch off sampling */
- atomic_store_rel_int(&pmc_cpumask, 0);
+ atomic_store_rel_long(&pmc_cpumask, 0);
pmc_intr = NULL;
sx_xlock(&pmc_sx);
Modified: projects/sbruno_64cpus/sys/geom/eli/g_eli.c
==============================================================================
--- projects/sbruno_64cpus/sys/geom/eli/g_eli.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/geom/eli/g_eli.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -688,7 +688,7 @@ static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
- return ((hlt_cpus_mask & (1 << cpu)) != 0);
+ return ((hlt_cpus_mask & cputomask(cpu)) != 0);
#else
return (0);
#endif
Modified: projects/sbruno_64cpus/sys/i386/i386/mp_machdep.c
==============================================================================
--- projects/sbruno_64cpus/sys/i386/i386/mp_machdep.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/i386/i386/mp_machdep.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -1388,7 +1388,7 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ atomic_set_long(&ipi_nmi_pending, cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
@@ -1411,7 +1411,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+ atomic_set_long(&ipi_nmi_pending, cputomask(cpu));
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@@ -1435,7 +1435,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+ atomic_set_long(&ipi_nmi_pending, PCPU_GET(other_cpus));
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}
@@ -1476,14 +1476,14 @@ cpustop_handler(void)
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
Modified: projects/sbruno_64cpus/sys/i386/i386/vm_machdep.c
==============================================================================
--- projects/sbruno_64cpus/sys/i386/i386/vm_machdep.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/i386/i386/vm_machdep.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -614,7 +614,7 @@ cpu_reset()
/* Restart CPU #0. */
/* XXX: restart_cpus(1 << 0); */
- atomic_store_rel_int(&started_cpus, (1 << 0));
+ atomic_store_rel_long(&started_cpus, cputomask(0));
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Modified: projects/sbruno_64cpus/sys/i386/include/_types.h
==============================================================================
--- projects/sbruno_64cpus/sys/i386/include/_types.h Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/i386/include/_types.h Mon Nov 15 17:40:48 2010 (r215341)
@@ -74,7 +74,7 @@ typedef unsigned long long __uint64_t;
* Standard type definitions.
*/
typedef unsigned long __clock_t; /* clock()... */
-typedef unsigned int __cpumask_t;
+typedef unsigned long __cpumask_t;
typedef __int32_t __critical_t;
typedef long double __double_t;
typedef long double __float_t;
Modified: projects/sbruno_64cpus/sys/kern/kern_ktr.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/kern_ktr.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/kern_ktr.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -82,7 +82,7 @@ __FBSDID("$FreeBSD$");
SYSCTL_NODE(_debug, OID_AUTO, ktr, CTLFLAG_RD, 0, "KTR options");
-int ktr_cpumask = KTR_CPUMASK;
+cpumask_t ktr_cpumask = KTR_CPUMASK;
TUNABLE_INT("debug.ktr.cpumask", &ktr_cpumask);
SYSCTL_INT(_debug_ktr, OID_AUTO, cpumask, CTLFLAG_RW,
&ktr_cpumask, 0, "Bitmask of CPUs on which KTR logging is enabled");
@@ -211,7 +211,7 @@ ktr_tracepoint(u_int mask, const char *f
if ((ktr_mask & mask) == 0)
return;
cpu = KTR_CPU;
- if (((1 << cpu) & ktr_cpumask) == 0)
+ if ((cputomask(cpu) & ktr_cpumask) == 0)
return;
#if defined(KTR_VERBOSE) || defined(KTR_ALQ)
td = curthread;
Modified: projects/sbruno_64cpus/sys/kern/kern_pmc.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/kern_pmc.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/kern_pmc.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include <sys/types.h>
+#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
@@ -110,7 +111,7 @@ pmc_cpu_is_active(int cpu)
{
#ifdef SMP
return (pmc_cpu_is_present(cpu) &&
- (hlt_cpus_mask & (1 << cpu)) == 0);
+ (hlt_cpus_mask & cputomask(cpu)) == 0);
#else
return (1);
#endif
@@ -137,7 +138,7 @@ int
pmc_cpu_is_primary(int cpu)
{
#ifdef SMP
- return ((logical_cpus_mask & (1 << cpu)) == 0);
+ return ((logical_cpus_mask & cputomask(cpu)) == 0);
#else
return (1);
#endif
Modified: projects/sbruno_64cpus/sys/kern/sched_4bsd.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/sched_4bsd.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/sched_4bsd.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -1097,7 +1097,7 @@ forward_wakeup(int cpunum)
me = PCPU_GET(cpumask);
/* Don't bother if we should be doing it ourself. */
- if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+ if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == cputomask(cpunum)))
return (0);
dontuse = me | stopped_cpus | hlt_cpus_mask;
@@ -1119,7 +1119,7 @@ forward_wakeup(int cpunum)
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
if (map != map3) {
- printf("map (%02X) != map3 (%02X)\n", map,
+ printf("map (%02lX) != map3 (%02lX)\n", map,
map3);
map = map3;
}
@@ -1131,7 +1131,7 @@ forward_wakeup(int cpunum)
/* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
- map &= (1 << cpunum);
+ map &= cputomask(cpunum);
} else {
/* Try choose an idle die. */
if (forward_wakeup_use_htt) {
Modified: projects/sbruno_64cpus/sys/kern/sched_ule.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/sched_ule.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/sched_ule.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -549,7 +549,7 @@ struct cpu_search {
#define CPUSET_FOREACH(cpu, mask) \
for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++) \
- if ((mask) & 1 << (cpu))
+ if ((mask) & cputomask(cpu))
static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
struct cpu_search *high, const int match);
@@ -2645,11 +2645,11 @@ sysctl_kern_sched_topology_spec_internal
sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
"", 1 + indent / 2, cg->cg_level);
- sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%x\">", indent, "",
+ sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%lx\">", indent, "",
cg->cg_count, cg->cg_mask);
first = TRUE;
for (i = 0; i < MAXCPU; i++) {
- if ((cg->cg_mask & (1 << i)) != 0) {
+ if ((cg->cg_mask & cputomask(i)) != 0) {
if (!first)
sbuf_printf(sb, ", ");
else
Modified: projects/sbruno_64cpus/sys/kern/subr_pcpu.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/subr_pcpu.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/subr_pcpu.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -88,7 +88,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid,
KASSERT(cpuid >= 0 && cpuid < MAXCPU,
("pcpu_init: invalid cpuid %d", cpuid));
pcpu->pc_cpuid = cpuid;
- pcpu->pc_cpumask = 1 << cpuid;
+ pcpu->pc_cpumask = cputomask(cpuid);
cpuid_to_pcpu[cpuid] = pcpu;
SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
Modified: projects/sbruno_64cpus/sys/kern/subr_smp.c
==============================================================================
--- projects/sbruno_64cpus/sys/kern/subr_smp.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/kern/subr_smp.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -290,7 +290,7 @@ restart_cpus(cpumask_t map)
CTR1(KTR_SMP, "restart_cpus(%x)", map);
/* signal other cpus to restart */
- atomic_store_rel_int(&started_cpus, map);
+ atomic_store_rel_long(&started_cpus, map);
/* wait for each to clear its bit */
while ((stopped_cpus & map) != 0)
@@ -368,11 +368,11 @@ smp_rendezvous_cpus(cpumask_t map,
}
CPU_FOREACH(i) {
- if (((1 << i) & map) != 0)
+ if ((cputomask(i) & map) != 0)
ncpus++;
}
if (ncpus == 0)
- panic("ncpus is 0 with map=0x%x", map);
+ panic("ncpus is 0 with map=0x%lx", map);
/* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
@@ -388,10 +388,10 @@ smp_rendezvous_cpus(cpumask_t map,
atomic_store_rel_int(&smp_rv_waiters[0], 0);
/* signal other processors, which will enter the IPI with interrupts off */
- ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+ ipi_selected(map & ~cputomask(curcpu), IPI_RENDEZVOUS);
/* Check if the current CPU is in the map */
- if ((map & (1 << curcpu)) != 0)
+ if ((map & cputomask(curcpu)) != 0)
smp_rendezvous_action();
if (teardown_func == smp_no_rendevous_barrier)
@@ -463,7 +463,7 @@ smp_topo(void)
panic("Built bad topology at %p. CPU count %d != %d",
top, top->cg_count, mp_ncpus);
if (top->cg_mask != all_cpus)
- panic("Built bad topology at %p. CPU mask 0x%X != 0x%X",
+ panic("Built bad topology at %p. CPU mask 0x%lX != 0x%lX",
top, top->cg_mask, all_cpus);
return (top);
}
@@ -476,7 +476,7 @@ smp_topo_none(void)
top = &group[0];
top->cg_parent = NULL;
top->cg_child = NULL;
- top->cg_mask = ~0U >> (32 - mp_ncpus);
+ top->cg_mask = ~0UL >> (MAXCPU - mp_ncpus);
top->cg_count = mp_ncpus;
top->cg_children = 0;
top->cg_level = CG_SHARE_NONE;
@@ -493,7 +493,7 @@ smp_topo_addleaf(struct cpu_group *paren
int i;
for (mask = 0, i = 0; i < count; i++, start++)
- mask |= (1 << start);
+ mask |= cputomask(start);
child->cg_parent = parent;
child->cg_child = NULL;
child->cg_children = 0;
@@ -504,7 +504,7 @@ smp_topo_addleaf(struct cpu_group *paren
parent->cg_children++;
for (; parent != NULL; parent = parent->cg_parent) {
if ((parent->cg_mask & child->cg_mask) != 0)
- panic("Duplicate children in %p. mask 0x%X child 0x%X",
+ panic("Duplicate children in %p. mask 0x%lX child 0x%lX",
parent, parent->cg_mask, child->cg_mask);
parent->cg_mask |= child->cg_mask;
parent->cg_count += child->cg_count;
@@ -570,7 +570,7 @@ smp_topo_find(struct cpu_group *top, int
int children;
int i;
- mask = (1 << cpu);
+ mask = cputomask(cpu);
cg = top;
for (;;) {
if ((cg->cg_mask & mask) == 0)
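[Editor's note: the smp_topo_none() change builds the all-present-CPUs mask
by shifting the all-ones value right by the number of absent CPU slots. A
sketch of the idiom, assuming a 64-bit mask and 1 <= mp_ncpus <= MAXCPU
(a shift by the full type width would be undefined):]

	typedef unsigned long cpumask_t;	/* 64 bits after this change */
	#define MAXCPU	64

	static cpumask_t
	low_cpus_mask(int mp_ncpus)
	{
		/* mp_ncpus = 4 -> 0xf; mp_ncpus = 64 -> ~0UL */
		return (~0UL >> (MAXCPU - mp_ncpus));
	}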
Modified: projects/sbruno_64cpus/sys/sys/smp.h
==============================================================================
--- projects/sbruno_64cpus/sys/sys/smp.h Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/sys/smp.h Mon Nov 15 17:40:48 2010 (r215341)
@@ -90,7 +90,8 @@ extern cpumask_t all_cpus;
* time, thus permitting us to configure sparse maps of cpuid-dependent
* (per-CPU) structures.
*/
-#define CPU_ABSENT(x_cpu) ((all_cpus & (1 << (x_cpu))) == 0)
+#include <sys/systm.h>
+#define CPU_ABSENT(x_cpu) ((all_cpus & (cputomask(x_cpu))) == 0)
/*
* Macros to iterate over non-absent CPUs. CPU_FOREACH() takes an
@@ -102,7 +103,7 @@ extern cpumask_t all_cpus;
*/
#define CPU_FOREACH(i) \
for ((i) = 0; (i) <= mp_maxid; (i)++) \
- if (!CPU_ABSENT((i)))
+ if (!CPU_ABSENT(i))
static __inline int
cpu_first(void)
Modified: projects/sbruno_64cpus/sys/sys/systm.h
==============================================================================
--- projects/sbruno_64cpus/sys/sys/systm.h Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/sys/systm.h Mon Nov 15 17:40:48 2010 (r215341)
@@ -426,4 +426,6 @@ bitcount32(uint32_t x)
return (x);
}
+#define cputomask(_cpu) ((__cpumask_t)1 << (_cpu))
+
#endif /* !_SYS_SYSTM_H_ */
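[Editor's note: the placement of the cast in cputomask() is load-bearing: the
constant must be widened before the shift, not after. A two-line contrast,
illustrative only:]

	typedef unsigned long __cpumask_t;

	/* Broken: shifts a 32-bit int first, undefined for _cpu >= 31. */
	#define cputomask_bad(_cpu)	((__cpumask_t)(1 << (_cpu)))
	/* Correct: widens to the 64-bit type, then shifts. */
	#define cputomask(_cpu)		((__cpumask_t)1 << (_cpu))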
Modified: projects/sbruno_64cpus/sys/x86/x86/local_apic.c
==============================================================================
--- projects/sbruno_64cpus/sys/x86/x86/local_apic.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/x86/x86/local_apic.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -809,7 +809,7 @@ lapic_handle_timer(struct trapframe *fra
* and unlike other schedulers it actually schedules threads to
* those CPUs.
*/
- if ((hlt_cpus_mask & (1 << PCPU_GET(cpuid))) != 0)
+ if ((hlt_cpus_mask & PCPU_GET(cpumask)) != 0)
return;
#endif
Modified: projects/sbruno_64cpus/sys/x86/x86/mca.c
==============================================================================
--- projects/sbruno_64cpus/sys/x86/x86/mca.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/x86/x86/mca.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -539,7 +539,7 @@ mca_scan(enum scan_mode mode)
* For a CMCI, only check banks this CPU is
* responsible for.
*/
- if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
+ if (mode == CMCI && !(PCPU_GET(cmci_mask) & cputomask(i)))
continue;
#endif
@@ -558,7 +558,7 @@ mca_scan(enum scan_mode mode)
* If this is a bank this CPU monitors via CMCI,
* update the threshold.
*/
- if (PCPU_GET(cmci_mask) & 1 << i)
+ if (PCPU_GET(cmci_mask) & cputomask(i))
cmci_update(mode, i, valid, &rec);
#endif
}
@@ -734,7 +734,7 @@ cmci_monitor(int i)
wrmsr(MSR_MC_CTL2(i), ctl);
/* Mark this bank as monitored. */
- PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
+ PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | cputomask(i));
}
/*
@@ -750,7 +750,7 @@ cmci_resume(int i)
KASSERT(i < cmc_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));
/* Ignore banks not monitored by this CPU. */
- if (!(PCPU_GET(cmci_mask) & 1 << i))
+ if (!(PCPU_GET(cmci_mask) & cputomask(i)))
return;
cc = &cmc_state[PCPU_GET(cpuid)][i];
Modified: projects/sbruno_64cpus/sys/x86/x86/mptable.c
==============================================================================
--- projects/sbruno_64cpus/sys/x86/x86/mptable.c Mon Nov 15 17:24:07 2010 (r215340)
+++ projects/sbruno_64cpus/sys/x86/x86/mptable.c Mon Nov 15 17:40:48 2010 (r215341)
@@ -162,7 +162,7 @@ static int lookup_bus_type(char *name);
static void mptable_count_items(void);
static void mptable_count_items_handler(u_char *entry, void *arg);
#ifdef MPTABLE_FORCE_HTT
-static void mptable_hyperthread_fixup(u_int id_mask);
+static void mptable_hyperthread_fixup(cpumask_t id_mask);
#endif
static void mptable_parse_apics_and_busses(void);
static void mptable_parse_apics_and_busses_handler(u_char *entry,
@@ -303,7 +303,7 @@ found:
static int
mptable_probe_cpus(void)
{
- u_int cpu_mask;
+ cpumask_t cpu_mask;
/* Is this a pre-defined config? */
if (mpfps->config_type != 0) {
@@ -423,7 +423,7 @@ static void
mptable_probe_cpus_handler(u_char *entry, void *arg)
{
proc_entry_ptr proc;
- u_int *cpu_mask;
+ cpumask_t *cpu_mask;
switch (*entry) {
case MPCT_ENTRY_PROCESSOR:
@@ -432,8 +432,8 @@ mptable_probe_cpus_handler(u_char *entry
lapic_create(proc->apic_id, proc->cpu_flags &
PROCENTRY_FLAG_BP);
if (proc->apic_id < MAX_LAPIC_ID) {
- cpu_mask = (u_int *)arg;
- *cpu_mask |= (1ul << proc->apic_id);
+ cpu_mask = (cpumask_t *)arg;
+ *cpu_mask |= cputomask(proc->apic_id);
}
}
break;
@@ -883,7 +883,7 @@ mptable_parse_ints(void)
* with the number of logical CPU's in the processor.
*/
static void
-mptable_hyperthread_fixup(u_int id_mask)
+mptable_hyperthread_fixup(cpumask_t id_mask)
{
u_int i, id, logical_cpus;
@@ -901,13 +901,13 @@ mptable_hyperthread_fixup(u_int id_mask)
* already in the table, then kill the fixup.
*/
for (id = 0; id <= MAX_LAPIC_ID; id++) {
- if ((id_mask & 1 << id) == 0)
+ if ((id_mask & cputomask(id)) == 0)
continue;
/* First, make sure we are on a logical_cpus boundary. */
if (id % logical_cpus != 0)
return;
for (i = id + 1; i < id + logical_cpus; i++)
- if ((id_mask & 1 << i) != 0)
+ if ((id_mask & cputomask(i)) != 0)
return;
}
@@ -915,7 +915,7 @@ mptable_hyperthread_fixup(u_int id_mask)
* Ok, the ID's checked out, so perform the fixup by
* adding the logical CPUs.
*/
- while ((id = ffs(id_mask)) != 0) {
+ while ((id = ffsl(id_mask)) != 0) {
id--;
for (i = id + 1; i < id + logical_cpus; i++) {
if (bootverbose)
@@ -924,7 +924,7 @@ mptable_hyperthread_fixup(u_int id_mask)
i, id);
lapic_create(i, 0);
}
- id_mask &= ~(1 << id);
+ id_mask &= ~cputomask(id);
}
}
#endif /* MPTABLE_FORCE_HTT */