#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <mach/mach_types.h>
#include <sys/errno.h>
#include <sys/ktrace.h>
#include <kperf/action.h>
#include <kperf/buffer.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
/* kernel strtouq is used for config parsing but not declared in a visible header */
extern uint64_t strtouq(const char *, char **, int);
/* lock group shared by kperf locks; initialized in kperf_init() */
lck_grp_t kperf_lck_grp;
/* per-CPU thread-ID slots, one per logical CPU; allocated in kperf_init() */
uint64_t *kperf_tid_on_cpus = NULL;
/* per-CPU sample buffers for interrupt context; see kperf_intr_sample_buffer() */
static struct kperf_sample *intr_samplev;
/* number of entries in intr_samplev (logical CPU count at init time) */
static unsigned int intr_samplec = 0;
/* current sampling state: OFF, ON, or SHUTDOWN while stopping */
static unsigned sampling_status = KPERF_SAMPLING_OFF;
/* set once kperf_init() has completed successfully */
static boolean_t kperf_initted = FALSE;
/* whether the on-CPU hook has any work to do; see kperf_on_cpu_update() */
boolean_t kperf_on_cpu_active = FALSE;
/*
 * Return the interrupt-context sample buffer reserved for the current CPU.
 *
 * Interrupts must be disabled so the CPU number cannot change underneath
 * us, and the CPU number must fall within the array kperf_init() sized.
 */
struct kperf_sample *
kperf_intr_sample_buffer(void)
{
	unsigned int cur_cpu = cpu_number();

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(cur_cpu < intr_samplec);

	return intr_samplev + cur_cpu;
}
/*
 * One-time initialization of the kperf subsystem.
 *
 * Sets up the kperf lock group, allocates the per-CPU thread-ID array and
 * per-CPU interrupt sample buffers (sized by logical_cpu_max), and
 * initializes the kdebug trigger machinery.
 *
 * Returns 0 on success (and on repeated calls once initialized), ENOMEM on
 * allocation failure, or whatever kperf_kdebug_init() returns.  On failure,
 * all partially-allocated state is released before returning.
 */
int
kperf_init(void)
{
static lck_grp_attr_t lck_grp_attr;
unsigned ncpus = 0;
int err;
/* already initialized -- nothing to do */
if (kperf_initted) {
return 0;
}
lck_grp_attr_setdefault(&lck_grp_attr);
lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr);
/* size the per-CPU arrays for the maximum possible logical CPU count */
ncpus = machine_info.logical_cpu_max;
kperf_tid_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_tid_on_cpus),
VM_KERN_MEMORY_DIAG);
if (kperf_tid_on_cpus == NULL) {
err = ENOMEM;
goto error;
}
bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));
/* record the count so kperf_intr_sample_buffer()'s bound check is valid */
intr_samplec = ncpus;
intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev),
VM_KERN_MEMORY_DIAG);
if (intr_samplev == NULL) {
err = ENOMEM;
goto error;
}
bzero(intr_samplev, ncpus * sizeof(*intr_samplev));
if ((err = kperf_kdebug_init())) {
goto error;
}
kperf_initted = TRUE;
return 0;
error:
/* unwind any allocations made before the failure */
if (intr_samplev) {
kfree(intr_samplev, ncpus * sizeof(*intr_samplev));
intr_samplev = NULL;
intr_samplec = 0;
}
if (kperf_tid_on_cpus) {
kfree(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));
kperf_tid_on_cpus = NULL;
}
return err;
}
/*
 * Return kperf to its default, inactive configuration: stop sampling, turn
 * off the context-switch kdebug trigger and lightweight PET, then reset the
 * kdebug trigger, timer, and action subsystems.  Return values are
 * deliberately ignored -- this is best-effort teardown.
 */
void
kperf_reset(void)
{
(void)kperf_sampling_disable();
(void)kperf_kdbg_cswitch_set(0);
(void)kperf_set_lightweight_pet(0);
kperf_kdebug_reset();
kperf_timer_reset();
kperf_action_reset();
}
/*
 * Configure kperf from a boot-arg-style string.
 *
 * Shape, as inferred from the parsing below (confirm against the boot-arg
 * documentation): an optional leading 'p' requests lightweight PET on the
 * first timer, followed by one or more "<action-samplers><sep><timer-period>"
 * pairs separated by commas.  Each pair creates action N (actions are
 * 1-indexed) wired to timer N-1 (timers are 0-indexed).  On a parse error
 * the configuration stops where it is; sampling is only enabled when the
 * whole string parses.
 */
void
kperf_kernel_configure(const char *config)
{
int pairs = 0;
char *end;
bool pet = false;
assert(config != NULL);
/* serialize against other ktrace configuration */
ktrace_start_single_threaded();
ktrace_kernel_configure(KTRACE_KPERF);
/* leading 'p' marks the first timer as the PET timer */
if (config[0] == 'p') {
pet = true;
config++;
}
do {
uint32_t action_samplers;
uint64_t timer_period;
pairs += 1;
/* grow the action and timer tables to hold this pair */
kperf_action_set_count(pairs);
kperf_timer_set_count(pairs);
/* parse the sampler set for this action */
action_samplers = (uint32_t)strtouq(config, &end, 0);
if (config == end) {
kprintf("kperf: unable to parse '%s' as action sampler\n", config);
goto out;
}
config = end;
kperf_action_set_samplers(pairs, action_samplers);
if (config[0] == '\0') {
kprintf("kperf: missing timer period in config\n");
goto out;
}
/* skip the single separator character between samplers and period */
config++;
timer_period = strtouq(config, &end, 0);
if (config == end) {
kprintf("kperf: unable to parse '%s' as timer period\n", config);
goto out;
}
config = end;
/* timers are 0-indexed; the action ID they fire is 1-indexed */
kperf_timer_set_period(pairs - 1, timer_period);
kperf_timer_set_action(pairs - 1, pairs);
if (pet) {
/* only the first timer can be the PET timer */
kperf_timer_set_petid(pairs - 1);
kperf_set_lightweight_pet(1);
pet = false;
}
} while (*(config++) == ',');
kperf_sampling_enable();
out:
ktrace_end_single_threaded();
}
/*
 * On-CPU work for kperf: emit a context-switch kdebug event when that
 * trigger is enabled, and run the lightweight PET hook when PET is active.
 */
void
kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	if (kperf_kdebug_cswitch) {
		BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread),
		    task_pid(get_threadtask(thread)));
	}

	if (kperf_lightweight_pet_active) {
		kperf_pet_on_cpu(thread, continuation, starting_fp);
	}
}
void
kperf_on_cpu_update(void)
{
kperf_on_cpu_active = kperf_kdebug_cswitch ||
kperf_lightweight_pet_active;
}
/* Read the kperf-private flags stored on a thread. */
uint32_t
kperf_get_thread_flags(thread_t thread)
{
	uint32_t flags = thread->kperf_flags;

	return flags;
}
/* Overwrite the kperf-private flags stored on a thread. */
void
kperf_set_thread_flags(thread_t thread, uint32_t flags)
{
	thread->kperf_flags = flags;
}
/* Report the current sampling state (KPERF_SAMPLING_{OFF,ON,SHUTDOWN}). */
unsigned int
kperf_sampling_status(void)
{
	unsigned int status = sampling_status;

	return status;
}
/*
 * Enable sampling.  Returns 0 if sampling is already on or was enabled,
 * ECANCELED if kperf is not initialized or no actions are configured.
 * Panics if sampling is in any other state (i.e. mid-shutdown).
 */
int
kperf_sampling_enable(void)
{
if (sampling_status == KPERF_SAMPLING_ON) {
return 0;
}
if (sampling_status != KPERF_SAMPLING_OFF) {
panic("kperf: sampling was %d when asked to enable", sampling_status);
}
/* nothing to sample without at least one configured action */
if (!kperf_initted || (kperf_action_get_count() == 0)) {
return ECANCELED;
}
sampling_status = KPERF_SAMPLING_ON;
kperf_lightweight_pet_active_update();
/* start the configured timers firing */
kperf_timer_go();
return 0;
}
/*
 * Disable sampling.  No-op unless sampling is currently on.  Passes through
 * the transient KPERF_SAMPLING_SHUTDOWN state while the timers are stopped
 * -- presumably so concurrent samplers can observe the shutdown in progress
 * (confirm against the timer code) -- before marking sampling fully off.
 * Always returns 0.
 */
int
kperf_sampling_disable(void)
{
if (sampling_status != KPERF_SAMPLING_ON) {
return 0;
}
sampling_status = KPERF_SAMPLING_SHUTDOWN;
kperf_timer_stop();
sampling_status = KPERF_SAMPLING_OFF;
kperf_lightweight_pet_active_update();
return 0;
}
/*
 * A thread is "dirty" when its context-switch count has moved since the
 * last time its dirty state was recorded (see kperf_thread_set_dirty()).
 */
boolean_t
kperf_thread_get_dirty(thread_t thread)
{
	if (thread->c_switch != thread->kperf_c_switch) {
		return TRUE;
	}

	return FALSE;
}
/*
 * Record a thread's dirty state via its context-switch count: clearing
 * stores the current count, marking stores one less so a later comparison
 * in kperf_thread_get_dirty() reads as changed.
 */
void
kperf_thread_set_dirty(thread_t thread, boolean_t dirty)
{
	thread->kperf_c_switch = dirty ? (thread->c_switch - 1)
	    : thread->c_switch;
}
/*
 * Translate a Mach port name into the PID of the task it names.  Returns
 * -1 when the port name is invalid or does not resolve to a task.
 */
int
kperf_port_to_pid(mach_port_name_t portname)
{
	if (!MACH_PORT_VALID(portname)) {
		return -1;
	}

	task_t task = port_name_to_task(portname);
	if (task == TASK_NULL) {
		return -1;
	}

	int pid = task_pid(task);
	/* drop the reference taken by port_name_to_task() */
	task_deallocate_internal(task);

	return pid;
}