#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>
#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif
#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kperf_timer.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
/* Array of configured timers; NULL until kperf_timer_set_count() allocates it. */
struct kperf_timer *kperf_timerv = NULL;
/* Number of entries in kperf_timerv. */
unsigned int kperf_timerc = 0;
/*
 * ID of the timer that drives PET (profile-every-thread) sampling.
 * 999 is an out-of-range sentinel meaning "no PET timer configured"
 * (see kperf_timer_reset()).
 */
static unsigned int pet_timer_id = 999;
/* Upper bound on the number of timers that can be configured. */
#define TIMER_MAX (16)
#if defined(__x86_64__)
/*
 * Minimum allowed sampling periods, bounding interrupt load.  The "_BG"
 * variants are stricter floors applied while ktrace background mode is
 * active; the "_PET" variants apply to the PET timer.
 */
#define MIN_PERIOD_NS (20 * NSEC_PER_USEC)
#define MIN_PERIOD_BG_NS (10 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_NS (2 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_BG_NS (10 * NSEC_PER_MSEC)
#else
#error "unsupported architecture"
#endif
/* Cached mach-absolute-time conversions of the floors above; computed lazily
 * on first kperf_timer_set_count() call. */
static uint64_t min_period_abstime;
static uint64_t min_period_bg_abstime;
static uint64_t min_period_pet_abstime;
static uint64_t min_period_pet_bg_abstime;
/*
 * Return the minimum permitted timer period in mach absolute time,
 * applying the stricter floor while ktrace background mode is active.
 */
static uint64_t
kperf_timer_min_period_abstime(void)
{
	return ktrace_background_active() ? min_period_bg_abstime
	                                  : min_period_abstime;
}
/*
 * Return the minimum permitted PET timer period in mach absolute time,
 * applying the stricter floor while ktrace background mode is active.
 */
static uint64_t
kperf_timer_min_pet_period_abstime(void)
{
	return ktrace_background_active() ? min_period_pet_bg_abstime
	                                  : min_period_pet_abstime;
}
/*
 * Arm `timer` to fire one period after `now`.  A timer with a zero period
 * is disabled and is never armed.
 */
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* Zero period means the timer is off. */
	if (timer->period == 0) {
		return;
	}

	timer_call_enter(&timer->tcall, now + timer->period,
	    TIMER_CALL_SYS_CRITICAL);
}
/*
 * Per-CPU IPI handler for a timer fire: take a sample of the current thread
 * on this CPU for the timer's action.
 *
 * On x86_64, the broadcaster sets this CPU's bit in timer->pending_cpus
 * before sending the IPI and skips CPUs whose bit is still set on the next
 * fire.  The bit must therefore be cleared on EVERY exit path — the previous
 * code returned early when sampling was off or shutting down without
 * clearing it, which would leave this CPU permanently skipped (until a full
 * timer reset) once sampling was re-enabled.  All exits now funnel through
 * `out` so the bit is always cleared.
 */
void
kperf_ipi_handler(void *param)
{
	struct kperf_context ctx;
	struct kperf_timer *timer = param;

	assert(timer != NULL);

	/* Always cut a tracepoint to show a sample event occurred. */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	int ncpu = cpu_number();

	struct kperf_sample *intbuf = kperf_intr_sample_buffer();

	/* On a timer interrupt, we can see the "real" current thread. */
	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));

	/* Record which timer fired. */
	ctx.trigger_type = TRIGGER_TYPE_TIMER;
	ctx.trigger_id = (unsigned int)(timer - kperf_timerv);

	/* PET needs a snapshot of which thread was on each CPU. */
	if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
		kperf_thread_on_cpus[ncpu] = ctx.cur_thread;
	}

	/* Make sure sampling is on before doing any real work. */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		goto out;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		goto out;
	}

	/* Run the action — kernel sampling only from interrupt, pend user work. */
	int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER);

	/* End tracepoint is informational. */
	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

out:
#if defined(__x86_64__)
	/* Tell the broadcaster this CPU has responded to the IPI. */
	(void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED);
#endif
	return;
}
/*
 * Timer-call callback for one kperf timer: broadcast a sample to the
 * running CPUs and re-arm the timer for its next period (PET timers
 * re-arm themselves via kperf_timer_pet_rearm() instead).
 */
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus  = machine_info.logical_cpu_max;

	/*
	 * Mark the timer busy so kperf_timer_stop() can spin until any
	 * in-flight handler drains before cancelling the timer call.
	 * NOTE(review): `active` is read/written with no atomics or
	 * barriers here or in kperf_timer_stop() — confirm the timer-call
	 * machinery provides the necessary ordering.
	 */
	timer->active = 1;

	/* Bail out (but still clear `active`) if sampling is winding down. */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
	    timer->actionid);

	if (ntimer == pet_timer_id) {
		/* PET pre-fire bookkeeping; reset the per-CPU thread snapshot
		 * that kperf_ipi_handler() fills in. */
		kperf_pet_fire_before();
		bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
	}

	/* Deliver the sample IPI to the relevant CPUs. */
	kperf_mp_broadcast_running(timer);

	if (ntimer == pet_timer_id) {
		/* The PET thread re-arms the timer once its pass finishes. */
		kperf_pet_fire_after();
	} else {
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	timer->active = 0;
}
/*
 * Re-arm the PET timer after a PET pass that took `elapsed_ticks` of the
 * period.  The timer is scheduled for the remainder of its configured
 * period, clamped to the minimum PET period so that a slow pass cannot
 * cause back-to-back firing.
 */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/* Nothing to do if no valid PET timer is configured. */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	/* Don't re-arm if sampling is off or shutting down. */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* A zero period means the timer was disabled in the meantime. */
	if (!timer->period) {
		return;
	}

	/* Fire again for the portion of the period that remains, if any. */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/*
	 * Clamp to the minimum PET period.  Use the helper so the stricter
	 * floor applies while background tracing is active, consistent with
	 * kperf_timer_set_period() and kperf_timer_set_petid().  (This
	 * previously read min_period_pet_abstime directly, ignoring the
	 * background-mode floor.)
	 */
	uint64_t min_period = kperf_timer_min_pet_period_abstime();
	if (period < min_period) {
		period = min_period;
	}

	deadline = mach_absolute_time() + period;
	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	timer_call_enter(&(timer->tcall), deadline, TIMER_CALL_SYS_CRITICAL);
}
void
kperf_timer_go(void)
{
if (pet_timer_id < kperf_timerc) {
kperf_pet_config(kperf_timerv[pet_timer_id].actionid);
}
uint64_t now = mach_absolute_time();
for (unsigned int i = 0; i < kperf_timerc; i++) {
if (kperf_timerv[i].period == 0) {
continue;
}
kperf_timer_schedule(&(kperf_timerv[i]), now);
}
}
/*
 * Stop sampling: cancel every armed timer and disable PET.
 */
void
kperf_timer_stop(void)
{
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		/* Zero-period timers were never armed. */
		if (kperf_timerv[i].period == 0) {
			continue;
		}
		/*
		 * Drain any in-flight handler before cancelling the timer call.
		 * NOTE(review): `active` is not volatile or atomic — confirm
		 * the compiler is forced to re-load it each iteration, or this
		 * spin could be miscompiled into an infinite loop.
		 */
		while (kperf_timerv[i].active);
		timer_call_cancel(&(kperf_timerv[i].tcall));
	}

	/* Action ID 0 disables PET. */
	kperf_pet_config(0);
}
/*
 * Return the ID of the timer currently designated as the PET timer
 * (an out-of-range value means PET is not configured).
 */
unsigned int
kperf_timer_get_petid(void)
{
	unsigned int id = pet_timer_id;

	return id;
}
/*
 * Designate `timerid` as the PET timer.  An out-of-range ID disables PET.
 * The chosen timer's period is raised to the PET minimum if necessary.
 * Always succeeds and returns 0.
 */
int
kperf_timer_set_petid(unsigned int timerid)
{
	if (timerid >= kperf_timerc) {
		/* Bogus timer ID: clear the PET setup. */
		kperf_pet_config(0);
	} else {
		/* Enforce the (possibly background-mode) PET period floor. */
		uint64_t min_period = kperf_timer_min_pet_period_abstime();

		if (kperf_timerv[timerid].period < min_period) {
			kperf_timerv[timerid].period = min_period;
		}
		kperf_pet_config(kperf_timerv[timerid].actionid);
	}

	pet_timer_id = timerid;

	return 0;
}
/*
 * Store timer `timerid`'s period (in mach absolute time) through
 * `period_abstime`.  Returns 0 on success, EINVAL for a bad timer ID.
 */
int
kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime)
{
	if (timerid < kperf_timerc) {
		*period_abstime = kperf_timerv[timerid].period;
		return 0;
	}

	return EINVAL;
}
/*
 * Set timer `timerid`'s period in mach absolute time.  Zero disables the
 * timer; non-zero values are clamped up to the applicable minimum period
 * (PET or regular, background-aware).  Returns 0 on success, EINVAL for a
 * bad timer ID.
 */
int
kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	/* The PET timer has its own, separate floor. */
	uint64_t min_period = (pet_timer_id == timerid)
	    ? kperf_timer_min_pet_period_abstime()
	    : kperf_timer_min_period_abstime();

	/* Zero means "disabled" and is allowed through unclamped. */
	if (period_abstime != 0 && period_abstime < min_period) {
		period_abstime = min_period;
	}

	kperf_timerv[timerid].period = period_abstime;

	return 0;
}
/*
 * Store timer `timerid`'s action ID through `action`.  Returns 0 on
 * success, EINVAL for a bad timer ID.
 */
int
kperf_timer_get_action(unsigned int timerid, uint32_t *action)
{
	if (timerid < kperf_timerc) {
		*action = kperf_timerv[timerid].actionid;
		return 0;
	}

	return EINVAL;
}
/*
 * Set the action ID that timer `timerid` triggers when it fires.
 * Returns 0 on success, EINVAL for a bad timer ID.
 */
int
kperf_timer_set_action(unsigned int timerid, uint32_t action)
{
	if (timerid < kperf_timerc) {
		kperf_timerv[timerid].actionid = action;
		return 0;
	}

	return EINVAL;
}
/* Return the number of timers currently allocated. */
unsigned int
kperf_timer_get_count(void)
{
	unsigned int count = kperf_timerc;

	return count;
}
void
kperf_timer_reset(void)
{
kperf_timer_set_petid(999);
kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
kperf_set_lightweight_pet(0);
for (unsigned int i = 0; i < kperf_timerc; i++) {
kperf_timerv[i].period = 0;
kperf_timerv[i].actionid = 0;
#if defined(__x86_64__)
kperf_timerv[i].pending_cpus = 0;
#endif
}
}
/*
 * Grow the timer array to `count` entries.  Existing timer configuration is
 * preserved; new entries are zeroed.  The count can only grow (shrinking
 * returns EINVAL), and is capped at TIMER_MAX.  Returns 0 on success or an
 * errno-style code on failure.
 */
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	/* Lazily convert the nanosecond period floors to mach absolute time
	 * on the first call. */
	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_BG_NS,
		    &min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	/* Same size: nothing to do. */
	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}
	/* The array can only grow. */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/* First configuration: bring up the rest of kperf. */
	if (kperf_timerc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* Quiesce all timers before swapping the array out from under them. */
	kperf_timer_stop();

	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
	    VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}

	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	/* Carry over existing timer configuration... */
	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
		    kperf_timerc * sizeof(struct kperf_timer));
	}

	/* ...and zero-initialize the new tail entries. */
	bzero(&(new_timerv[kperf_timerc]),
	    (count - old_count) * sizeof(struct kperf_timer));

	/* (Re)initialize each entry's timer call to point at its new slot. */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&(new_timerv[i].tcall), kperf_timer_handler, &(new_timerv[i]));
	}

	/* Publish the new array, then free the old one. */
	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}