diff --git a/sys/x86/include/apicvar.h b/sys/x86/include/apicvar.h
--- a/sys/x86/include/apicvar.h
+++ b/sys/x86/include/apicvar.h
@@ -128,7 +128,9 @@
 #define	IPI_PREEMPT	1
 #define	IPI_HARDCLOCK	2
 #define	IPI_TRACE	3	/* Collect stack trace. */
-#define	IPI_BITMAP_LAST	IPI_TRACE
+#define	IPI_IDLE	4	/* Idle scheduler. */
+#define	IPI_UNIDLE	5	/* Unidle scheduler. */
+#define	IPI_BITMAP_LAST	IPI_UNIDLE
 #define	IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
 
 #define	IPI_STOP	(APIC_IPI_INTS + 6)	/* Stop CPU until restarted. */
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -109,6 +109,8 @@
 u_long *ipi_invlcache_counts[MAXCPU];
 u_long *ipi_rendezvous_counts[MAXCPU];
 static u_long *ipi_hardclock_counts[MAXCPU];
+static u_long *ipi_idle_counts[MAXCPU];
+static u_long *ipi_unidle_counts[MAXCPU];
 #endif
 
 /* Default cpu_ops implementation. */
@@ -1369,6 +1371,18 @@
 #endif
 		hardclockintr();
 	}
+	if (ipi_bitmap & (1 << IPI_IDLE)) {
+#ifdef COUNT_IPIS
+		(*ipi_idle_counts[cpu])++;
+#endif
+		sched_do_idle(td, true);
+	}
+	if (ipi_bitmap & (1 << IPI_UNIDLE)) {
+#ifdef COUNT_IPIS
+		(*ipi_unidle_counts[cpu])++;
+#endif
+		sched_do_idle(td, false);
+	}
 	td->td_intr_frame = oldframe;
 	td->td_intr_nesting_level--;
 	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
@@ -1771,6 +1785,10 @@
 		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
 		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
 		intrcnt_add(buf, &ipi_hardclock_counts[i]);
+		snprintf(buf, sizeof(buf), "cpu%d:idle", i);
+		intrcnt_add(buf, &ipi_idle_counts[i]);
+		snprintf(buf, sizeof(buf), "cpu%d:unidle", i);
+		intrcnt_add(buf, &ipi_unidle_counts[i]);
 	}
 }
 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);