#include <mach/mach_types.h>
#include <mach/task_server.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <mach/thread_policy.h>
#include <sys/errno.h>
#include <sys/resource.h>
#include <machine/limits.h>
#include <kern/ledger.h>
#include <kern/thread_call.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#if IMPORTANCE_DEBUG
#include <mach/machine/sdt.h>
#endif
#include <sys/kdebug.h>
extern void task_hold_locked(task_t task);
extern void task_release_locked(task_t task);
extern void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void proc_set_task_policy_locked(task_t task, thread_t thread, int category, int flavor, int value);
static void task_policy_update_locked(task_t task, thread_t thread);
static void task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_create);
static void task_policy_update_task_locked(task_t task, boolean_t update_throttle, boolean_t update_bg_throttle);
static void task_policy_update_thread_locked(thread_t thread, int update_cpu, boolean_t update_throttle);
static void task_policy_update_complete_unlocked(task_t task, thread_t thread);
static int proc_get_effective_policy(task_t task, thread_t thread, int policy);
static void proc_iopol_to_tier(int iopolicy, int *tier, int *passive);
static int proc_tier_to_iopol(int tier, int passive);
static uintptr_t trequested(task_t task, thread_t thread);
static uintptr_t teffective(task_t task, thread_t thread);
static uintptr_t tpending(task_t task, thread_t thread);
static uint64_t task_requested_bitfield(task_t task, thread_t thread);
static uint64_t task_effective_bitfield(task_t task, thread_t thread);
static uint64_t task_pending_bitfield(task_t task, thread_t thread);
void proc_get_thread_policy(thread_t thread, thread_policy_state_t info);
static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
int task_disable_cpumon(task_t task);
static int task_apply_resource_actions(task_t task, int type);
void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);
void proc_init_cpumon_params(void);
#ifdef MACH_BSD
int proc_pid(void *proc);
extern int proc_selfpid(void);
extern char * proc_name_address(void *p);
extern void rethrottle_thread(void * uthread);
extern void proc_apply_task_networkbg(void * bsd_info, thread_t thread, int bg);
#endif
void task_importance_mark_receiver(task_t task, boolean_t receiving);
#if IMPORTANCE_INHERITANCE
static void task_update_boost_locked(task_t task, boolean_t boost_active);
static int task_importance_hold_assertion_locked(task_t target_task, int external, uint32_t count);
static int task_importance_drop_assertion_locked(task_t target_task, int external, uint32_t count);
#endif
#if IMPORTANCE_DEBUG
#define __impdebug_only
#else
#define __impdebug_only __unused
#endif
#if IMPORTANCE_INHERITANCE
#define __imp_only
#else
#define __imp_only __unused
#endif
/* Lock-state tokens and low-priority-CPU action codes used internally. */
#define TASK_LOCKED 1
#define TASK_UNLOCKED 0
#define DO_LOWPRI_CPU 1
#define UNDO_LOWPRI_CPU 2
/*
 * Tracing helpers: select the task-level or thread-level value depending on
 * whether a thread target was supplied (THREAD_NULL means "the whole task").
 */
#define tpriority(task, thread) ((uintptr_t)(thread == THREAD_NULL ? (task->priority) : (thread->priority)))
#define tisthread(thread) (thread == THREAD_NULL ? TASK_POLICY_TASK : TASK_POLICY_THREAD)
#define targetid(task, thread) ((uintptr_t)(thread == THREAD_NULL ? (audit_token_pid_from_task(task)) : (thread->thread_id)))
/* Throttle tiers / timer QoS fed into the effective-policy derivation below. */
int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_tal_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_graphics_timer_qos = (LATENCY_QOS_TIER_0 & 0xFF);
const int proc_default_bg_iotier = THROTTLE_LEVEL_TIER2;
/*
 * Baseline policy structures used to (re)initialize policy state;
 * bg_iotier is the only requested-policy field with a non-zero default.
 */
const struct task_requested_policy default_task_requested_policy = {
	.bg_iotier = proc_default_bg_iotier
};
const struct task_effective_policy default_task_effective_policy = {};
const struct task_pended_policy default_task_pended_policy = {};
/* CPU usage monitor defaults: 50 percent over a 3-minute interval. */
#define DEFAULT_CPUMON_PERCENTAGE 50
#define DEFAULT_CPUMON_INTERVAL (3 * 60)
uint8_t proc_max_cpumon_percentage;
uint64_t proc_max_cpumon_interval;
/*
 * Validate a user-supplied task_qos_policy structure.
 * Each tier must either be "unspecified" or lie within the valid
 * [TIER_0, TIER_5] range for its QoS family.
 */
static kern_return_t
task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count) {
	if (count < TASK_QOS_POLICY_COUNT)
		return KERN_INVALID_ARGUMENT;

	task_latency_qos_t latency = qosinfo->task_latency_qos_tier;
	task_throughput_qos_t throughput = qosinfo->task_throughput_qos_tier;

	/* Latency: unspecified, or a tier in [TIER_0, TIER_5]. */
	if (latency != LATENCY_QOS_TIER_UNSPECIFIED &&
	    (latency < LATENCY_QOS_TIER_0 || latency > LATENCY_QOS_TIER_5))
		return KERN_INVALID_ARGUMENT;

	/* Throughput: same range rule. */
	if (throughput != THROUGHPUT_QOS_TIER_UNSPECIFIED &&
	    (throughput < THROUGHPUT_QOS_TIER_0 || throughput > THROUGHPUT_QOS_TIER_5))
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}
/* Extract the raw QoS tier number: it lives in the low byte of the value. */
static uint32_t
task_qos_extract(uint32_t qv) {
	uint32_t tier = qv & 0xFFu;
	return tier;
}
/*
 * Package a latency QoS tier for reporting to user space: the unspecified
 * value passes through unchanged, any real tier is tagged with the 0xFF
 * latency-family marker in the upper bits.
 */
static uint32_t
task_qos_latency_package(uint32_t qv) {
	if (qv == LATENCY_QOS_TIER_UNSPECIFIED)
		return LATENCY_QOS_TIER_UNSPECIFIED;
	return (0xFF << 16) | qv;
}
/*
 * Package a throughput QoS tier for reporting to user space: the
 * unspecified value passes through unchanged, any real tier is tagged
 * with the 0xFE throughput-family marker in the upper bits.
 */
static uint32_t
task_qos_throughput_package(uint32_t qv) {
	if (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED)
		return THROUGHPUT_QOS_TIER_UNSPECIFIED;
	return (0xFE << 16) | qv;
}
/*
 * Mach task_policy_set() handler.
 *
 * Supported flavors:
 *   TASK_CATEGORY_POLICY    - set the task's role (foreground, background, ...).
 *   TASK_BASE_QOS_POLICY    - set the base latency/throughput QoS tiers.
 *   TASK_OVERRIDE_QOS_POLICY- set the override latency/throughput QoS tiers.
 *   TASK_SUPPRESSION_POLICY - install the App Nap style suppression settings.
 *
 * Returns KERN_INVALID_ARGUMENT for a null/kernel task, a short count, or
 * an out-of-range value.  The QoS/suppression paths take the task lock,
 * update the requested policy, re-derive the effective policy, and then run
 * the unlocked completion phase.
 */
kern_return_t
task_policy_set(
	task_t task,
	task_policy_flavor_t flavor,
	task_policy_t policy_info,
	mach_msg_type_number_t count)
{
	kern_return_t result = KERN_SUCCESS;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case TASK_CATEGORY_POLICY: {
		task_category_policy_t info = (task_category_policy_t)policy_info;

		if (count < TASK_CATEGORY_POLICY_COUNT)
			return (KERN_INVALID_ARGUMENT);

		switch (info->role) {
		case TASK_FOREGROUND_APPLICATION:
		case TASK_BACKGROUND_APPLICATION:
		case TASK_DEFAULT_APPLICATION:
			proc_set_task_policy(task, THREAD_NULL,
			    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
			    info->role);
			break;

		case TASK_CONTROL_APPLICATION:
		case TASK_GRAPHICS_SERVER:
			/*
			 * Privileged roles: only the current task may claim
			 * them, and only with security token 0.  (These two
			 * cases previously duplicated identical code; they
			 * are merged here via fallthrough.)
			 */
			if (task != current_task() || task->sec_token.val[0] != 0)
				result = KERN_INVALID_ARGUMENT;
			else
				proc_set_task_policy(task, THREAD_NULL,
				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
				    info->role);
			break;

		default:
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case TASK_BASE_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS)
			return kr;

		task_lock(task);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(TASK_POLICY_LATENCY_QOS, (TASK_POLICY_ATTRIBUTE | TASK_POLICY_TASK))) | DBG_FUNC_START,
		    proc_selfpid(), targetid(task, THREAD_NULL), trequested(task, THREAD_NULL), 0, 0);

		/* Record the base tiers and re-derive the effective policy. */
		task->requested_policy.t_base_latency_qos = task_qos_extract(qosinfo->task_latency_qos_tier);
		task->requested_policy.t_base_through_qos = task_qos_extract(qosinfo->task_throughput_qos_tier);

		task_policy_update_locked(task, THREAD_NULL);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, THREAD_NULL);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(TASK_POLICY_LATENCY_QOS, (TASK_POLICY_ATTRIBUTE | TASK_POLICY_TASK))) | DBG_FUNC_END,
		    proc_selfpid(), targetid(task, THREAD_NULL), trequested(task, THREAD_NULL), 0, 0);
	}
	break;

	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS)
			return kr;

		task_lock(task);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(TASK_POLICY_LATENCY_QOS, (TASK_POLICY_ATTRIBUTE | TASK_POLICY_TASK))) | DBG_FUNC_START,
		    proc_selfpid(), targetid(task, THREAD_NULL), trequested(task, THREAD_NULL), 0, 0);

		/* Record the override tiers and re-derive the effective policy. */
		task->requested_policy.t_over_latency_qos = task_qos_extract(qosinfo->task_latency_qos_tier);
		task->requested_policy.t_over_through_qos = task_qos_extract(qosinfo->task_throughput_qos_tier);

		task_policy_update_locked(task, THREAD_NULL);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, THREAD_NULL);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(TASK_POLICY_LATENCY_QOS, (TASK_POLICY_ATTRIBUTE | TASK_POLICY_TASK))) | DBG_FUNC_END,
		    proc_selfpid(), targetid(task, THREAD_NULL), trequested(task, THREAD_NULL), 0, 0);
	}
	break;

	case TASK_SUPPRESSION_POLICY:
	{
		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (count < TASK_SUPPRESSION_POLICY_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* Validate the embedded QoS tiers with the common validator. */
		struct task_qos_policy qosinfo;

		qosinfo.task_latency_qos_tier = info->timer_throttle;
		qosinfo.task_throughput_qos_tier = info->throughput_qos;

		kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);

		if (kr != KERN_SUCCESS)
			return kr;

		task_lock(task);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
		    proc_selfpid(), audit_token_pid_from_task(task), trequested(task, THREAD_NULL),
		    0, 0);

		/* Latch every suppression knob into the requested policy. */
		task->requested_policy.t_sup_active = (info->active) ? 1 : 0;
		task->requested_policy.t_sup_lowpri_cpu = (info->lowpri_cpu) ? 1 : 0;
		task->requested_policy.t_sup_timer = task_qos_extract(info->timer_throttle);
		task->requested_policy.t_sup_disk = (info->disk_throttle) ? 1 : 0;
		task->requested_policy.t_sup_cpu_limit = (info->cpu_limit) ? 1 : 0;
		task->requested_policy.t_sup_suspend = (info->suspend) ? 1 : 0;
		task->requested_policy.t_sup_throughput = task_qos_extract(info->throughput_qos);
		task->requested_policy.t_sup_cpu = (info->suppressed_cpu) ? 1 : 0;

		task_policy_update_locked(task, THREAD_NULL);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, THREAD_NULL);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
		    proc_selfpid(), audit_token_pid_from_task(task), trequested(task, THREAD_NULL),
		    0, 0);

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	return (result);
}
/*
 * Set a task's scheduler importance (an offset applied to its role-derived
 * base priority).  Rejected for null/kernel tasks and for tasks whose
 * effective role is TASK_CONTROL_APPLICATION or above; KERN_TERMINATED
 * for an inactive task.
 */
kern_return_t
task_importance(
	task_t task,
	integer_t importance)
{
	kern_return_t kr = KERN_SUCCESS;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		kr = KERN_TERMINATED;
	} else if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
		/* Privileged roles do not take importance adjustments. */
		kr = KERN_INVALID_ARGUMENT;
	} else {
		task->importance = importance;
		/* Recompute priorities; no throttle or bg updates needed. */
		task_policy_update_task_locked(task, FALSE, FALSE);
	}

	task_unlock(task);

	return (kr);
}
/*
 * Mach task_policy_get() handler: report a task's policy settings for the
 * requested flavor.  When *get_default is set, default values are returned
 * instead of the task's current state.  TASK_POLICY_STATE additionally
 * requires the caller to hold security token 0.
 */
kern_return_t
task_policy_get(
	task_t task,
	task_policy_flavor_t flavor,
	task_policy_t policy_info,
	mach_msg_type_number_t *count,
	boolean_t *get_default)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case TASK_CATEGORY_POLICY:
	{
		task_category_policy_t info = (task_category_policy_t)policy_info;

		if (*count < TASK_CATEGORY_POLICY_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* Role is read from the requested (not effective) policy. */
		if (*get_default)
			info->role = TASK_UNSPECIFIED;
		else
			info->role = proc_get_task_policy(task, THREAD_NULL, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
		break;
	}

	case TASK_BASE_QOS_POLICY:
	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t info = (task_qos_policy_t)policy_info;

		if (*count < TASK_QOS_POLICY_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* Re-package the stored tier numbers into user-visible values. */
		if (*get_default) {
			info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
			info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
		} else if (flavor == TASK_BASE_QOS_POLICY) {
			task_lock(task);
			info->task_latency_qos_tier = task_qos_latency_package(task->requested_policy.t_base_latency_qos);
			info->task_throughput_qos_tier = task_qos_throughput_package(task->requested_policy.t_base_through_qos);
			task_unlock(task);
		} else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
			task_lock(task);
			info->task_latency_qos_tier = task_qos_latency_package(task->requested_policy.t_over_latency_qos);
			info->task_throughput_qos_tier = task_qos_throughput_package(task->requested_policy.t_over_through_qos);
			task_unlock(task);
		}

		break;
	}

	case TASK_POLICY_STATE:
	{
		task_policy_state_t info = (task_policy_state_t)policy_info;

		if (*count < TASK_POLICY_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* Debug-only introspection: restricted to privileged callers. */
		if (current_task()->sec_token.val[0] != 0)
			return KERN_PROTECTION_FAILURE;

		task_lock(task);

		if (*get_default) {
			info->requested = 0;
			info->effective = 0;
			info->pending = 0;
			info->imp_assertcnt = 0;
			info->imp_externcnt = 0;
			info->flags = 0;
		} else {
			/* Flatten the policy structs into trace-style bitfields. */
			info->requested = task_requested_bitfield(task, THREAD_NULL);
			info->effective = task_effective_bitfield(task, THREAD_NULL);
			info->pending = task_pending_bitfield(task, THREAD_NULL);
			info->imp_assertcnt = task->task_imp_assertcnt;
			info->imp_externcnt = task->task_imp_externcnt;

			info->flags = 0;
			info->flags |= (task->imp_receiver ? TASK_IMP_RECEIVER : 0);
			info->flags |= (task->imp_donor ? TASK_IMP_DONOR : 0);
		}

		task_unlock(task);
		break;
	}

	case TASK_SUPPRESSION_POLICY:
	{
		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (*count < TASK_SUPPRESSION_POLICY_COUNT)
			return (KERN_INVALID_ARGUMENT);

		task_lock(task);

		if (*get_default) {
			info->active = 0;
			info->lowpri_cpu = 0;
			info->timer_throttle = LATENCY_QOS_TIER_UNSPECIFIED;
			info->disk_throttle = 0;
			info->cpu_limit = 0;
			info->suspend = 0;
			info->throughput_qos = 0;
			info->suppressed_cpu = 0;
		} else {
			/* Report back the requested suppression knobs as stored. */
			info->active = task->requested_policy.t_sup_active;
			info->lowpri_cpu = task->requested_policy.t_sup_lowpri_cpu;
			info->timer_throttle = task_qos_latency_package(task->requested_policy.t_sup_timer);
			info->disk_throttle = task->requested_policy.t_sup_disk;
			info->cpu_limit = task->requested_policy.t_sup_cpu_limit;
			info->suspend = task->requested_policy.t_sup_suspend;
			info->throughput_qos = task_qos_throughput_package(task->requested_policy.t_sup_throughput);
			info->suppressed_cpu = task->requested_policy.t_sup_cpu;
		}

		task_unlock(task);
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
/*
 * One-time policy initialization for a newly-created task.  An adaptive
 * daemon is pinned at creation time: if the parent was boosted it becomes
 * an interactive daemon and an importance donor, otherwise it becomes a
 * background daemon and is marked as not an importance receiver.  The
 * initial effective policy is then derived with in_create == TRUE so that
 * no delta side effects fire.
 */
void
task_policy_create(task_t task, int parent_boosted)
{
	if (task->requested_policy.t_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
		if (parent_boosted) {
			task->requested_policy.t_apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
			task_importance_mark_donor(task, TRUE);
		} else {
			task->requested_policy.t_apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
			task_importance_mark_receiver(task, FALSE);
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
	    proc_selfpid(), audit_token_pid_from_task(task),
	    teffective(task, THREAD_NULL), tpriority(task, THREAD_NULL), 0);

	task_policy_update_internal_locked(task, THREAD_NULL, TRUE);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
	    proc_selfpid(), audit_token_pid_from_task(task),
	    teffective(task, THREAD_NULL), tpriority(task, THREAD_NULL), 0);
}
/*
 * Re-derive the effective policy for a task or thread, bracketing the work
 * with KDEBUG tracepoints.  Thin wrapper around
 * task_policy_update_internal_locked() with in_create == FALSE.
 * Caller holds the task lock.
 */
static void
task_policy_update_locked(task_t task, thread_t thread)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, tisthread(thread)) | DBG_FUNC_START),
	    proc_selfpid(), targetid(task, thread),
	    teffective(task, thread), tpriority(task, thread), 0);

	task_policy_update_internal_locked(task, thread, FALSE);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, tisthread(thread))) | DBG_FUNC_END,
	    proc_selfpid(), targetid(task, thread),
	    teffective(task, thread), tpriority(task, thread), 0);
}
/*
 * Core policy derivation: compute the effective policy for a task
 * (thread == THREAD_NULL) or a single thread from its requested policy,
 * install it, record follow-up work as pended bits, and run the side
 * effects that must happen with the task lock held.
 *
 * Caller holds the task lock.  When in_create is TRUE only the effective
 * policy is installed; delta processing is skipped since there is no
 * meaningful previous state.
 */
static void
task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_create)
{
	boolean_t on_task = (thread == THREAD_NULL) ? TRUE : FALSE;

	/* Snapshot the requested policy we are deriving from. */
	struct task_requested_policy requested =
	    (on_task) ? task->requested_policy : thread->requested_policy;

	/* Start from an all-defaults effective policy and fill it in. */
	struct task_effective_policy next = {};

	/* Inputs feeding the darwinbg decision. */
	boolean_t wants_darwinbg = FALSE;
	boolean_t wants_all_sockets_bg = FALSE;
	boolean_t wants_watchersbg = FALSE;
	boolean_t wants_tal = FALSE;

	/* An explicit internal or external BG request also backgrounds
	 * existing sockets and (for tasks) watchers. */
	if (requested.int_darwinbg || requested.ext_darwinbg)
		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = TRUE;

	if (on_task) {
		/* A TAL-capable app in the background role with TAL enabled
		 * engages TAL. */
		if (requested.t_apptype == TASK_APPTYPE_APP_TAL &&
		    requested.t_role == TASK_BACKGROUND_APPLICATION &&
		    requested.t_tal_enabled == 1) {
			wants_tal = TRUE;
			next.t_tal_engaged = 1;
		}

		/* Adaptive daemons are background unless currently boosted;
		 * background daemons are always background. */
		if (requested.t_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
		    requested.t_boosted == 0)
			wants_darwinbg = TRUE;

		if (requested.t_apptype == TASK_APPTYPE_DAEMON_BACKGROUND)
			wants_darwinbg = TRUE;
	} else {
		/* Thread-only sources of darwinbg. */
		if (requested.th_pidbind_bg)
			wants_all_sockets_bg = wants_darwinbg = TRUE;

		if (requested.th_workq_bg)
			wants_darwinbg = TRUE;
	}

	/* darwinbg implies backgrounded new sockets and low-priority CPU. */
	if (wants_darwinbg) {
		next.darwinbg = 1;
		next.new_sockets_bg = 1;
		next.lowpri_cpu = 1;
	}

	if (wants_all_sockets_bg)
		next.all_sockets_bg = 1;

	if (on_task && wants_watchersbg)
		next.t_watchers_bg = 1;

	/* Low-priority CPU also follows from TAL or unboosted suppression. */
	boolean_t wants_lowpri_cpu = FALSE;

	if (wants_darwinbg || wants_tal)
		wants_lowpri_cpu = TRUE;

	if (on_task && requested.t_sup_lowpri_cpu && requested.t_boosted == 0)
		wants_lowpri_cpu = TRUE;

	if (wants_lowpri_cpu)
		next.lowpri_cpu = 1;

	/* Effective I/O tier: the most throttled (numeric MAX) of all inputs. */
	next.bg_iotier = requested.bg_iotier;

	int iopol = THROTTLE_LEVEL_TIER0;

	if (wants_darwinbg)
		iopol = MAX(iopol, requested.bg_iotier);

	if (on_task) {
		if (requested.t_apptype == TASK_APPTYPE_DAEMON_STANDARD)
			iopol = MAX(iopol, proc_standard_daemon_tier);

		if (requested.t_sup_disk && requested.t_boosted == 0)
			iopol = MAX(iopol, proc_suppressed_disk_tier);

		if (wants_tal)
			iopol = MAX(iopol, proc_tal_disk_tier);
	}

	iopol = MAX(iopol, requested.int_iotier);
	iopol = MAX(iopol, requested.ext_iotier);

	next.io_tier = iopol;

	/* Passive I/O if either the internal or external knob requests it. */
	if (requested.ext_iopassive || requested.int_iopassive)
		next.io_passive = 1;

	/* Task-only derivations: role, suppression, GPU denial, QoS tiers. */
	if (on_task) {
		next.t_role = requested.t_role;

		/* Suppression effects apply only while the task is unboosted. */
		if (requested.t_sup_active && requested.t_boosted == 0)
			next.t_sup_active = 1;

		if (requested.t_sup_suspend && requested.t_boosted == 0)
			next.t_suspended = 1;

		if (requested.t_int_gpu_deny || requested.t_ext_gpu_deny)
			next.t_gpu_deny = 1;

		/* Latency QoS: base, then suppression, then override, then the
		 * graphics-server special case — each later source wins. */
		int latency_qos = requested.t_base_latency_qos;

		if (requested.t_sup_timer && requested.t_boosted == 0)
			latency_qos = requested.t_sup_timer;

		if (requested.t_over_latency_qos != 0)
			latency_qos = requested.t_over_latency_qos;

		if (requested.t_role == TASK_GRAPHICS_SERVER)
			latency_qos = proc_graphics_timer_qos;

		next.t_latency_qos = latency_qos;

		/* Throughput QoS: base, then suppression, then override. */
		int through_qos = requested.t_base_through_qos;

		if (requested.t_sup_throughput && requested.t_boosted == 0)
			through_qos = requested.t_sup_throughput;

		if (requested.t_over_through_qos != 0)
			through_qos = requested.t_over_through_qos;

		next.t_through_qos = through_qos;

		if (requested.t_sup_cpu && requested.t_boosted == 0)
			next.t_suppressed_cpu = 1;
	}

	/* A terminated task/thread sheds its restrictions so it can exit. */
	if (requested.terminated) {
		next.terminated = 1;
		next.darwinbg = 0;
		next.lowpri_cpu = 0;
		next.io_tier = THROTTLE_LEVEL_TIER0;
		if (on_task) {
			next.t_tal_engaged = 0;
			next.t_role = TASK_UNSPECIFIED;
			next.t_suppressed_cpu = 0;
			next.t_suspended = 0;
		}
	}

	/* Install the new effective policy, keeping the old one for deltas. */
	struct task_effective_policy prev =
	    (on_task) ? task->effective_policy : thread->effective_policy;

	if (task == kernel_task && prev.all_sockets_bg != next.all_sockets_bg)
		panic("unexpected network change for kernel task");

	if (on_task)
		task->effective_policy = next;
	else
		thread->effective_policy = next;

	/* At creation there is no previous state, so no deltas to process. */
	if (in_create)
		return;

	/* Record work that must run without the task lock as pended bits
	 * (consumed by task_policy_update_complete_unlocked()). */
	struct task_pended_policy pended =
	    (on_task) ? task->pended_policy : thread->pended_policy;

	if (prev.all_sockets_bg != next.all_sockets_bg)
		pended.update_sockets = 1;

	if (on_task) {
		/* NOTE(review): pends a timer re-evaluation when the latency
		 * QoS value decreases — presumably a move to a tighter tier;
		 * confirm the tier-ordering convention. */
		if (prev.t_latency_qos > next.t_latency_qos)
			pended.t_update_timers = 1;
	}

	if (on_task)
		task->pended_policy = pended;
	else
		thread->pended_policy = pended;

	/* Locked-phase side effects follow. */
	boolean_t update_throttle = (prev.io_tier != next.io_tier) ? TRUE : FALSE;

	if (on_task) {
		/* Suspension transitions hold/wait or release the task. */
		if (prev.t_suspended == 0 && next.t_suspended == 1 && task->active) {
			task_hold_locked(task);
			task_wait_locked(task, FALSE);
		}
		if (prev.t_suspended == 1 && next.t_suspended == 0 && task->active) {
			task_release_locked(task);
		}

		/* bg_iotier / terminated changes must be pushed to every thread. */
		boolean_t update_threads = FALSE;

		if (prev.bg_iotier != next.bg_iotier)
			update_threads = TRUE;

		if (prev.terminated != next.terminated)
			update_threads = TRUE;

		task_policy_update_task_locked(task, update_throttle, update_threads);
	} else {
		int update_cpu = 0;

		if (prev.lowpri_cpu != next.lowpri_cpu)
			update_cpu = (next.lowpri_cpu ? DO_LOWPRI_CPU : UNDO_LOWPRI_CPU);

		task_policy_update_thread_locked(thread, update_cpu, update_throttle);
	}
}
/*
 * Apply thread-level policy side effects: re-evaluate the thread's I/O
 * throttle and/or transition its scheduler importance for low-priority-CPU
 * changes.  update_cpu is 0 (no change), DO_LOWPRI_CPU, or UNDO_LOWPRI_CPU.
 */
static void
task_policy_update_thread_locked(thread_t thread,
    int update_cpu,
    boolean_t update_throttle)
{
	thread_precedence_policy_data_t policy;

	if (update_throttle) {
		rethrottle_thread(thread->uthread);
	}

	/*
	 * NOTE(review): `policy` is initialized only for DO_LOWPRI_CPU /
	 * UNDO_LOWPRI_CPU; the only caller passes exactly 0 or one of those
	 * two values, so the `if (update_cpu)` guard below never sees it
	 * uninitialized — verify if new callers are added.
	 */
	if (update_cpu == DO_LOWPRI_CPU) {
		/* Stash the current importance, then drop to the floor. */
		thread->saved_importance = thread->importance;
		policy.importance = INT_MIN;
	} else if (update_cpu == UNDO_LOWPRI_CPU) {
		/* Restore the importance saved when lowpri was applied. */
		policy.importance = thread->saved_importance;
		thread->saved_importance = 0;
	}

	if (update_cpu)
		thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
		    (thread_policy_t)&policy,
		    THREAD_PRECEDENCE_POLICY_COUNT);
}
/*
 * Recompute the task's base and maximum scheduler priority from its
 * effective policy and importance, then walk the thread list to propagate
 * priority, throttle, and bg_iotier/terminated changes as requested.
 * Caller holds the task lock.  Must never be called on the kernel task.
 */
static void
task_policy_update_task_locked(task_t task,
    boolean_t update_throttle,
    boolean_t update_threads)
{
	boolean_t update_priority = FALSE;

	if (task == kernel_task)
		panic("Attempting to set task policy on kernel_task");

	int priority = BASEPRI_DEFAULT;
	int max_priority = MAXPRI_USER;

	/* Low-priority or suppressed CPU pins both the base and the ceiling;
	 * otherwise the role picks the base, shifted by importance. */
	if (proc_get_effective_task_policy(task, TASK_POLICY_LOWPRI_CPU)) {
		priority = MAXPRI_THROTTLE;
		max_priority = MAXPRI_THROTTLE;
	} else if (proc_get_effective_task_policy(task, TASK_POLICY_SUPPRESSED_CPU)) {
		priority = MAXPRI_SUPPRESSED;
		max_priority = MAXPRI_SUPPRESSED;
	} else {
		switch (proc_get_effective_task_policy(task, TASK_POLICY_ROLE)) {
		case TASK_FOREGROUND_APPLICATION:
			priority = BASEPRI_FOREGROUND;
			break;
		case TASK_BACKGROUND_APPLICATION:
			priority = BASEPRI_BACKGROUND;
			break;
		case TASK_CONTROL_APPLICATION:
			priority = BASEPRI_CONTROL;
			break;
		case TASK_GRAPHICS_SERVER:
			priority = BASEPRI_GRAPHICS;
			max_priority = MAXPRI_RESERVED;
			break;
		default:
			break;
		}

		/* Importance offset set via task_importance(). */
		priority += task->importance;
	}

	/* Commit only on change, clamping priority into [MINPRI, max_priority]. */
	if (task->priority != priority || task->max_priority != max_priority) {
		update_priority = TRUE;

		task->max_priority = max_priority;

		if (priority > task->max_priority)
			priority = task->max_priority;
		else if (priority < MINPRI)
			priority = MINPRI;

		task->priority = priority;
	}

	/* One pass over the thread list applies all pending per-thread work. */
	if (update_threads || update_throttle || update_priority ) {
		thread_t thread;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (update_priority) {
				thread_mtx_lock(thread);

				if (thread->active)
					thread_task_priority(thread, priority, max_priority);

				thread_mtx_unlock(thread);
			}

			if (update_throttle) {
				rethrottle_thread(thread->uthread);
			}

			if (update_threads) {
				/* Push the task's bg_iotier/terminated state into
				 * the thread's requested policy, then re-derive
				 * that thread's effective policy. */
				thread->requested_policy.bg_iotier = task->effective_policy.bg_iotier;
				thread->requested_policy.terminated = task->effective_policy.terminated;
				task_policy_update_internal_locked(task, thread, FALSE);
			}
		}
	}
}
/*
 * Run the pended policy work that must execute without the task lock held
 * (socket re-tagging, timer re-evaluation).  Updates are serialized via the
 * t_updating_policy flag: wait until no other update is in flight, snapshot
 * and clear the pended bits, claim the flag, do the work unlocked, then
 * release the flag and wake any waiters.
 */
static void
task_policy_update_complete_unlocked(task_t task, thread_t thread)
{
	boolean_t on_task = (thread == THREAD_NULL) ? TRUE : FALSE;

	task_lock(task);

	/* Wait out any in-flight unlocked update on this task. */
	while (task->pended_policy.t_updating_policy != 0) {
		assert_wait((event_t)&task->pended_policy, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
		task_lock(task);
	}

	/* Snapshot the pended work and the effective state it refers to. */
	struct task_pended_policy pended =
	    (on_task) ? task->pended_policy : thread->pended_policy;

	struct task_effective_policy effective =
	    (on_task) ? task->effective_policy : thread->effective_policy;

	/* Clear the pended bits and claim the updating flag before dropping
	 * the lock.  Note the flag always lives on the task, even for
	 * thread-targeted updates. */
	if (on_task)
		task->pended_policy = default_task_pended_policy;
	else
		thread->pended_policy = default_task_pended_policy;

	task->pended_policy.t_updating_policy = 1;

	task_unlock(task);

#ifdef MACH_BSD
	/* Re-tag existing sockets to match the new all_sockets_bg state. */
	if (pended.update_sockets)
		proc_apply_task_networkbg(task->bsd_info, thread, effective.all_sockets_bg);
#endif

	if (on_task) {
		/* Re-evaluate timers after a latency QoS tightening. */
		if (pended.t_update_timers)
			ml_timer_evaluate();
	}

	/* Release the updating flag and wake any waiting updaters. */
	task_lock(task);
	task->pended_policy.t_updating_policy = 0;
	thread_wakeup(&task->pended_policy);
	task_unlock(task);
}
/*
 * Set a single requested-policy field on a task (thread == THREAD_NULL) or
 * on a specific thread, re-derive the effective policy under the task lock,
 * then run the unlocked completion phase.  KDEBUG tracepoints bracket the
 * update.  category is TASK_POLICY_{EXTERNAL,INTERNAL,ATTRIBUTE}; flavor
 * and value are interpreted by proc_set_task_policy_locked().
 */
void
proc_set_task_policy(task_t task,
    thread_t thread,
    int category,
    int flavor,
    int value)
{
	task_lock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | tisthread(thread)))) | DBG_FUNC_START,
	    proc_selfpid(), targetid(task, thread), trequested(task, thread), value, 0);

	proc_set_task_policy_locked(task, thread, category, flavor, value);

	task_policy_update_locked(task, thread);

	task_unlock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | tisthread(thread)))) | DBG_FUNC_END,
	    proc_selfpid(), targetid(task, thread), trequested(task, thread), tpending(task, thread), 0);

	task_policy_update_complete_unlocked(task, thread);
}
/*
 * Variant of proc_set_task_policy() that targets a thread by thread-id
 * within the given task.  TID_NULL or the caller's own tid selects the
 * current thread; a tid that cannot be found is silently ignored.
 * NOTE(review): task_findtid is called with the task lock held — presumably
 * it returns the thread without taking an extra reference; verify.
 */
void
proc_set_task_policy_thread(task_t task,
    uint64_t tid,
    int category,
    int flavor,
    int value)
{
	thread_t thread;
	thread_t self = current_thread();

	task_lock(task);

	/* Resolve the tid to a thread; fast-path the caller's own thread. */
	if (tid == TID_NULL || tid == self->thread_id)
		thread = self;
	else
		thread = task_findtid(task, tid);

	if (thread == THREAD_NULL) {
		task_unlock(task);
		return;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_START,
	    proc_selfpid(), targetid(task, thread), trequested(task, thread), value, 0);

	proc_set_task_policy_locked(task, thread, category, flavor, value);

	task_policy_update_locked(task, thread);

	task_unlock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END,
	    proc_selfpid(), targetid(task, thread), trequested(task, thread), tpending(task, thread), 0);

	task_policy_update_complete_unlocked(task, thread);
}
/*
 * Translate one (category, flavor, value) triple into the corresponding
 * requested-policy field(s) of a task (thread == THREAD_NULL) or thread.
 * Works on a local copy of the requested policy and commits it at the end.
 * Caller holds the task lock; the effective policy is NOT re-derived here.
 */
static void
proc_set_task_policy_locked(task_t task,
    thread_t thread,
    int category,
    int flavor,
    int value)
{
	boolean_t targets_task = (thread == THREAD_NULL);
	int new_tier = 0, new_passive = 0;

	struct task_requested_policy req =
	    targets_task ? task->requested_policy : thread->requested_policy;

	switch (flavor) {

	/* Category: external (per-process) vs. internal (within-process). */

	case TASK_POLICY_DARWIN_BG:
		if (category == TASK_POLICY_EXTERNAL) {
			req.ext_darwinbg = value;
		} else {
			req.int_darwinbg = value;
		}
		break;

	case TASK_POLICY_IOPOL:
		/* Split the IOPOL_* constant into a throttle tier + passive flag. */
		proc_iopol_to_tier(value, &new_tier, &new_passive);
		if (category == TASK_POLICY_EXTERNAL) {
			req.ext_iotier = new_tier;
			req.ext_iopassive = new_passive;
		} else {
			req.int_iotier = new_tier;
			req.int_iopassive = new_passive;
		}
		break;

	case TASK_POLICY_IO:
		if (category == TASK_POLICY_EXTERNAL) {
			req.ext_iotier = value;
		} else {
			req.int_iotier = value;
		}
		break;

	case TASK_POLICY_PASSIVE_IO:
		if (category == TASK_POLICY_EXTERNAL) {
			req.ext_iopassive = value;
		} else {
			req.int_iopassive = value;
		}
		break;

	case TASK_POLICY_GPU_DENY:
		assert(targets_task);
		if (category == TASK_POLICY_EXTERNAL) {
			req.t_ext_gpu_deny = value;
		} else {
			req.t_int_gpu_deny = value;
		}
		break;

	case TASK_POLICY_DARWIN_BG_AND_GPU:
		/* Combined knob: toggles darwinbg and GPU denial together. */
		assert(targets_task);
		if (category == TASK_POLICY_EXTERNAL) {
			req.ext_darwinbg = value;
			req.t_ext_gpu_deny = value;
		} else {
			req.int_darwinbg = value;
			req.t_int_gpu_deny = value;
		}
		break;

	/* Category: internal-only or attribute flavors. */

	case TASK_POLICY_DARWIN_BG_IOPOL:
		assert(targets_task && category == TASK_POLICY_INTERNAL);
		/* Only the tier matters here; the passive bit is discarded. */
		proc_iopol_to_tier(value, &new_tier, &new_passive);
		req.bg_iotier = new_tier;
		break;

	case TASK_POLICY_TAL:
		assert(targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.t_tal_enabled = value;
		break;

	case TASK_POLICY_BOOST:
		assert(targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.t_boosted = value;
		break;

	case TASK_POLICY_ROLE:
		assert(targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.t_role = value;
		break;

	case TASK_POLICY_TERMINATED:
		assert(targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.terminated = value;
		break;

	/* Thread-only attribute flavors. */

	case TASK_POLICY_PIDBIND_BG:
		assert(!targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.th_pidbind_bg = value;
		break;

	case TASK_POLICY_WORKQ_BG:
		assert(!targets_task && category == TASK_POLICY_ATTRIBUTE);
		req.th_workq_bg = value;
		break;

	default:
		panic("unknown task policy: %d %d %d", category, flavor, value);
		break;
	}

	/* Commit the updated copy back to its owner. */
	if (targets_task) {
		task->requested_policy = req;
	} else {
		thread->requested_policy = req;
	}
}
/*
 * Read one requested-policy field from a task (thread == THREAD_NULL) or
 * thread, taking the task lock for a consistent snapshot.  The inverse of
 * proc_set_task_policy_locked() for the readable flavors; I/O-policy
 * flavors are translated back to IOPOL_* constants.
 */
int
proc_get_task_policy(task_t task,
    thread_t thread,
    int category,
    int flavor)
{
	boolean_t on_task = (thread == THREAD_NULL) ? TRUE : FALSE;

	int value = 0;

	task_lock(task);

	struct task_requested_policy requested =
	    (on_task) ? task->requested_policy : thread->requested_policy;

	switch (flavor) {
	case TASK_POLICY_DARWIN_BG:
		if (category == TASK_POLICY_EXTERNAL)
			value = requested.ext_darwinbg;
		else
			value = requested.int_darwinbg;
		break;
	case TASK_POLICY_IOPOL:
		/* Re-combine tier + passive flag into an IOPOL_* constant. */
		if (category == TASK_POLICY_EXTERNAL)
			value = proc_tier_to_iopol(requested.ext_iotier,
			    requested.ext_iopassive);
		else
			value = proc_tier_to_iopol(requested.int_iotier,
			    requested.int_iopassive);
		break;
	case TASK_POLICY_IO:
		if (category == TASK_POLICY_EXTERNAL)
			value = requested.ext_iotier;
		else
			value = requested.int_iotier;
		break;
	case TASK_POLICY_PASSIVE_IO:
		if (category == TASK_POLICY_EXTERNAL)
			value = requested.ext_iopassive;
		else
			value = requested.int_iopassive;
		break;
	case TASK_POLICY_GPU_DENY:
		assert(on_task);
		if (category == TASK_POLICY_EXTERNAL)
			value = requested.t_ext_gpu_deny;
		else
			value = requested.t_int_gpu_deny;
		break;
	case TASK_POLICY_DARWIN_BG_IOPOL:
		assert(on_task && category == TASK_POLICY_ATTRIBUTE);
		/* bg_iotier carries no passive bit. */
		value = proc_tier_to_iopol(requested.bg_iotier, 0);
		break;
	case TASK_POLICY_ROLE:
		assert(on_task && category == TASK_POLICY_ATTRIBUTE);
		value = requested.t_role;
		break;
	default:
		panic("unknown policy_flavor %d", flavor);
		break;
	}

	task_unlock(task);

	return value;
}
/*
 * Read an effective-policy field for a whole task (no thread overlay).
 */
int
proc_get_effective_task_policy(task_t task, int flavor)
{
	return (proc_get_effective_policy(task, THREAD_NULL, flavor));
}
/*
 * Read an effective-policy field for a thread, combined with its owning
 * task's effective policy.
 */
int
proc_get_effective_thread_policy(thread_t thread, int flavor)
{
	return (proc_get_effective_policy(thread->task, thread, flavor));
}
/*
 * Read one effective-policy field.  For a task target (thread ==
 * THREAD_NULL), the task's value is returned directly.  For a thread
 * target, boolean flavors OR the task and thread bits together, and the
 * I/O tier takes the more throttled (MAX) of the two, lowered by any
 * per-thread iotier override.  Flavors asserted as task-only have no
 * thread-level counterpart.  Lockless: reads a snapshot of the
 * effective-policy bitfields.
 */
static int
proc_get_effective_policy(task_t task,
    thread_t thread,
    int flavor)
{
	boolean_t on_task = (thread == THREAD_NULL) ? TRUE : FALSE;
	int value = 0;

	switch (flavor) {
	case TASK_POLICY_DARWIN_BG:
		if (on_task)
			value = task->effective_policy.darwinbg;
		else
			value = (task->effective_policy.darwinbg ||
			    thread->effective_policy.darwinbg) ? 1 : 0;
		break;
	case TASK_POLICY_IO:
		if (on_task)
			value = task->effective_policy.io_tier;
		else {
			/* Most-throttled of task and thread tiers... */
			value = MAX(task->effective_policy.io_tier,
			    thread->effective_policy.io_tier);
			/* ...but an override can only lower (un-throttle) it. */
			if (thread->iotier_override != THROTTLE_LEVEL_NONE)
				value = MIN(value, thread->iotier_override);
		}
		break;
	case TASK_POLICY_PASSIVE_IO:
		if (on_task)
			value = task->effective_policy.io_passive;
		else
			value = (task->effective_policy.io_passive ||
			    thread->effective_policy.io_passive) ? 1 : 0;
		break;
	case TASK_POLICY_NEW_SOCKETS_BG:
		if (on_task)
			value = task->effective_policy.new_sockets_bg;
		else
			value = (task->effective_policy.new_sockets_bg ||
			    thread->effective_policy.new_sockets_bg) ? 1 : 0;
		break;
	case TASK_POLICY_LOWPRI_CPU:
		if (on_task)
			value = task->effective_policy.lowpri_cpu;
		else
			value = (task->effective_policy.lowpri_cpu ||
			    thread->effective_policy.lowpri_cpu) ? 1 : 0;
		break;

	/* Task-only flavors below. */

	case TASK_POLICY_SUPPRESSED_CPU:
		assert(on_task);
		value = task->effective_policy.t_suppressed_cpu;
		break;
	case TASK_POLICY_LATENCY_QOS:
		assert(on_task);
		value = task->effective_policy.t_latency_qos;
		break;
	case TASK_POLICY_THROUGH_QOS:
		assert(on_task);
		value = task->effective_policy.t_through_qos;
		break;
	case TASK_POLICY_GPU_DENY:
		assert(on_task);
		value = task->effective_policy.t_gpu_deny;
		break;
	case TASK_POLICY_ROLE:
		assert(on_task);
		value = task->effective_policy.t_role;
		break;
	case TASK_POLICY_WATCHERS_BG:
		assert(on_task);
		value = task->effective_policy.t_watchers_bg;
		break;
	default:
		panic("unknown policy_flavor %d", flavor);
		break;
	}

	return value;
}
/*
 * Translate a user-visible IOPOL_* constant into the kernel's throttle tier
 * plus a separate "passive I/O" flag.  An unknown policy value is a
 * programming error and panics.
 */
static void
proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
{
	int out_tier = 0;
	int out_passive = 0;

	switch (iopolicy) {
	case IOPOL_PASSIVE:
		/* Same tier as IMPORTANT, but flagged passive. */
		out_tier = THROTTLE_LEVEL_TIER0;
		out_passive = 1;
		break;
	case IOPOL_IMPORTANT:
		out_tier = THROTTLE_LEVEL_TIER0;
		break;
	case IOPOL_STANDARD:
		out_tier = THROTTLE_LEVEL_TIER1;
		break;
	case IOPOL_UTILITY:
		out_tier = THROTTLE_LEVEL_TIER2;
		break;
	case IOPOL_THROTTLE:
		out_tier = THROTTLE_LEVEL_TIER3;
		break;
	default:
		panic("unknown I/O policy %d", iopolicy);
		break;
	}

	*tier = out_tier;
	*passive = out_passive;
}
/*
 * Inverse of proc_iopol_to_tier: map a throttle tier (plus passive flag)
 * back to the corresponding IOPOL_* value. Panics on an unknown tier.
 */
static int
proc_tier_to_iopol(int tier, int passive)
{
	if (passive == 1) {
		/* Only TIER0 has a passive variant. */
		if (tier == THROTTLE_LEVEL_TIER0)
			return IOPOL_PASSIVE;

		panic("unknown passive tier %d", tier);
		return IOPOL_DEFAULT;
	}

	switch (tier) {
	case THROTTLE_LEVEL_NONE:
		return IOPOL_DEFAULT;
	case THROTTLE_LEVEL_TIER0:
		return IOPOL_IMPORTANT;
	case THROTTLE_LEVEL_TIER1:
		return IOPOL_STANDARD;
	case THROTTLE_LEVEL_TIER2:
		return IOPOL_UTILITY;
	case THROTTLE_LEVEL_TIER3:
		return IOPOL_THROTTLE;
	default:
		panic("unknown tier %d", tier);
		return IOPOL_DEFAULT;
	}
}
/*
 * Mark a workqueue thread as background via the WORKQ_BG thread attribute.
 * Returns ESRCH if no thread was supplied.
 */
int
proc_apply_workq_bgthreadpolicy(thread_t thread)
{
	if (thread == THREAD_NULL)
		return ESRCH;

	proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_WORKQ_BG, TASK_POLICY_ENABLE);

	return 0;
}
/*
 * Undo proc_apply_workq_bgthreadpolicy: clear the WORKQ_BG thread attribute.
 * Returns ESRCH if no thread was supplied.
 */
int
proc_restore_workq_bgthreadpolicy(thread_t thread)
{
	if (thread == THREAD_NULL)
		return ESRCH;

	proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_WORKQ_BG, TASK_POLICY_DISABLE);

	return 0;
}
/*
 * Saved-importance is not supported in this configuration; accept and
 * ignore the request. Always returns 0.
 */
int
proc_setthread_saved_importance(__unused thread_t thread, __unused int importance)
{
	return 0;
}
/*
 * Install an I/O tier override on a thread, lock-free.
 *
 * The most aggressive (numerically lowest) override wins: if an override is
 * already present, the new policy is clamped to MIN(current, new). The update
 * is retried via compare-and-swap until it lands against an unchanged
 * current value; on success the thread's in-flight I/O is rethrottled.
 */
void set_thread_iotier_override(thread_t thread, int policy)
{
	int current_override;

	do {
		current_override = thread->iotier_override;

		/* Keep the stronger of the existing and requested overrides. */
		if (current_override != THROTTLE_LEVEL_NONE)
			policy = MIN(current_override, policy);

		if (current_override == policy) {
			/* No effective change; nothing to publish or rethrottle. */
			return;
		}
	} while (!OSCompareAndSwap(current_override, policy, &thread->iotier_override));

	/* Re-evaluate the throttle level of I/O already issued by this thread. */
	rethrottle_thread(thread->uthread);
}
/*
 * Set the task's application type (app vs. the various daemon classes) and
 * configure its importance donor/receiver status to match, then drive a
 * task-wide policy re-evaluation.
 */
void
proc_set_task_apptype(task_t task, int apptype)
{
	task_lock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
	    proc_selfpid(), audit_token_pid_from_task(task), trequested(task, THREAD_NULL),
	    apptype, 0);

	switch (apptype) {
	case TASK_APPTYPE_APP_TAL:
		/* TAL apps additionally latch the TAL-enabled request bit... */
		task->requested_policy.t_tal_enabled = 1;
		/* fallthrough: ...and are otherwise treated like default apps. */
	case TASK_APPTYPE_APP_DEFAULT:
	case TASK_APPTYPE_DAEMON_INTERACTIVE:
		task->requested_policy.t_apptype = apptype;
		/* Apps and interactive daemons both donate and receive importance. */
		task_importance_mark_donor(task, TRUE);
		task_importance_mark_receiver(task, TRUE);
		break;
	case TASK_APPTYPE_DAEMON_STANDARD:
		task->requested_policy.t_apptype = apptype;
		/* Standard daemons donate but do not receive boosts. */
		task_importance_mark_donor(task, TRUE);
		task_importance_mark_receiver(task, FALSE);
		break;
	case TASK_APPTYPE_DAEMON_ADAPTIVE:
		task->requested_policy.t_apptype = apptype;
		/* Adaptive daemons receive boosts but do not donate. */
		task_importance_mark_donor(task, FALSE);
		task_importance_mark_receiver(task, TRUE);
		break;
	case TASK_APPTYPE_DAEMON_BACKGROUND:
		task->requested_policy.t_apptype = apptype;
		/* Background daemons neither donate nor receive importance. */
		task_importance_mark_donor(task, FALSE);
		task_importance_mark_receiver(task, FALSE);
		break;
	default:
		panic("invalid apptype %d", apptype);
		break;
	}

	task_policy_update_locked(task, THREAD_NULL);

	task_unlock(task);

	/* Propagate any side effects that must run without the task lock held. */
	task_policy_update_complete_unlocked(task, THREAD_NULL);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
	    proc_selfpid(), audit_token_pid_from_task(task), trequested(task, THREAD_NULL),
	    task->imp_receiver, 0);
}
/* TRUE iff the task was marked with the TAL application type. */
boolean_t
proc_task_is_tal(task_t task)
{
	if (task->requested_policy.t_apptype == TASK_APPTYPE_APP_TAL)
		return TRUE;

	return FALSE;
}
/* Return the task's effective latency QoS, packaged for external consumers. */
integer_t
task_grab_latency_qos(task_t task)
{
	int latency_qos = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);

	return task_qos_latency_package(latency_qos);
}
/*
 * Summarize the task's darwinbg/importance-related state as PROC_FLAG_* bits,
 * OR-ing them into *flagsp (existing bits in *flagsp are preserved).
 * Always returns 0.
 */
int
proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
{
	uint32_t flags = 0;

	if (task->requested_policy.ext_darwinbg)
		flags |= PROC_FLAG_EXT_DARWINBG;

	if (task->requested_policy.int_darwinbg)
		flags |= PROC_FLAG_DARWINBG;

	if (task->requested_policy.t_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
		flags |= PROC_FLAG_ADAPTIVE;

		/* An adaptive daemon currently holding a boost is "important". */
		if (task->requested_policy.t_boosted == 1)
			flags |= PROC_FLAG_ADAPTIVE_IMPORTANT;
	}

	if (task->imp_donor)
		flags |= PROC_FLAG_IMPORTANCE_DONOR;

	if (task->effective_policy.t_sup_active)
		flags |= PROC_FLAG_SUPPRESSED;

	*flagsp |= flags;

	return 0;
}
/*
 * Snapshot a thread's requested/effective/pending policy words for
 * external inspection. Taken under the task lock so the three words
 * form a consistent snapshot.
 */
void
proc_get_thread_policy(thread_t thread, thread_policy_state_t info)
{
	task_t owning_task = thread->task;

	task_lock(owning_task);

	info->requested = (integer_t)task_requested_bitfield(owning_task, thread);
	info->effective = (integer_t)task_effective_bitfield(owning_task, thread);
	info->pending   = (integer_t)task_pending_bitfield(owning_task, thread);

	task_unlock(owning_task);
}
/*
 * Tracing helper: requested-policy bits narrowed to uintptr_t for use as a
 * kdebug argument (truncates on 32-bit kernels).
 */
static uintptr_t
trequested(task_t task, thread_t thread)
{
	return (uintptr_t) task_requested_bitfield(task, thread);
}
/*
 * Tracing helper: effective-policy bits narrowed to uintptr_t for use as a
 * kdebug argument (truncates on 32-bit kernels).
 */
static uintptr_t
teffective(task_t task, thread_t thread)
{
	return (uintptr_t) task_effective_bitfield(task, thread);
}
/*
 * Tracing helper: pending-policy bits narrowed to uintptr_t for use as a
 * kdebug argument (truncates on 32-bit kernels).
 */
static uintptr_t
tpending(task_t task, thread_t thread)
{
	return (uintptr_t) task_pending_bitfield(task, thread);
}
/*
 * Pack the requested-policy struct of a task (thread == THREAD_NULL) or a
 * thread into a single uint64_t of POLICY_REQ_* bits for tracing/inspection.
 * Multi-bit fields are shifted into place; zero-valued fields contribute
 * no bits either way.
 */
uint64_t
task_requested_bitfield(task_t task, thread_t thread)
{
	uint64_t bits = 0;
	struct task_requested_policy requested =
	    (thread == THREAD_NULL) ? task->requested_policy : thread->requested_policy;

	bits |= (requested.int_darwinbg ? POLICY_REQ_INT_DARWIN_BG : 0);
	bits |= (requested.ext_darwinbg ? POLICY_REQ_EXT_DARWIN_BG : 0);
	bits |= (requested.int_iotier ? (((uint64_t)requested.int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
	bits |= (requested.ext_iotier ? (((uint64_t)requested.ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
	bits |= (requested.int_iopassive ? POLICY_REQ_INT_PASSIVE_IO : 0);
	bits |= (requested.ext_iopassive ? POLICY_REQ_EXT_PASSIVE_IO : 0);
	bits |= (requested.bg_iotier ? (((uint64_t)requested.bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0);
	bits |= (requested.terminated ? POLICY_REQ_TERMINATED : 0);

	/* Thread-only request bits. */
	bits |= (requested.th_pidbind_bg ? POLICY_REQ_PIDBIND_BG : 0);
	bits |= (requested.th_workq_bg ? POLICY_REQ_WORKQ_BG : 0);

	/* Task-only (t_-prefixed) request bits. */
	bits |= (requested.t_boosted ? POLICY_REQ_BOOSTED : 0);
	bits |= (requested.t_tal_enabled ? POLICY_REQ_TAL_ENABLED : 0);
	bits |= (requested.t_int_gpu_deny ? POLICY_REQ_INT_GPU_DENY : 0);
	bits |= (requested.t_ext_gpu_deny ? POLICY_REQ_EXT_GPU_DENY : 0);
	bits |= (requested.t_apptype ? (((uint64_t)requested.t_apptype) << POLICY_REQ_APPTYPE_SHIFT) : 0);
	bits |= (requested.t_role ? (((uint64_t)requested.t_role) << POLICY_REQ_ROLE_SHIFT) : 0);

	/* App suppression (App Nap) request bits. */
	bits |= (requested.t_sup_active ? POLICY_REQ_SUP_ACTIVE : 0);
	bits |= (requested.t_sup_lowpri_cpu ? POLICY_REQ_SUP_LOWPRI_CPU : 0);
	bits |= (requested.t_sup_cpu ? POLICY_REQ_SUP_CPU : 0);
	bits |= (requested.t_sup_timer ? (((uint64_t)requested.t_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
	bits |= (requested.t_sup_throughput ? (((uint64_t)requested.t_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0);
	bits |= (requested.t_sup_disk ? POLICY_REQ_SUP_DISK_THROTTLE : 0);
	bits |= (requested.t_sup_cpu_limit ? POLICY_REQ_SUP_CPU_LIMIT : 0);
	bits |= (requested.t_sup_suspend ? POLICY_REQ_SUP_SUSPEND : 0);

	/* Latency/throughput QoS request values (base and override). */
	bits |= (requested.t_base_latency_qos ? (((uint64_t)requested.t_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
	bits |= (requested.t_over_latency_qos ? (((uint64_t)requested.t_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
	bits |= (requested.t_base_through_qos ? (((uint64_t)requested.t_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
	bits |= (requested.t_over_through_qos ? (((uint64_t)requested.t_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);

	return bits;
}
/*
 * Pack the effective-policy struct of a task (thread == THREAD_NULL) or a
 * thread into a single uint64_t of POLICY_EFF_* bits for tracing/inspection.
 */
uint64_t
task_effective_bitfield(task_t task, thread_t thread)
{
	uint64_t bits = 0;
	struct task_effective_policy effective =
	    (thread == THREAD_NULL) ? task->effective_policy : thread->effective_policy;

	bits |= (effective.io_tier ? (((uint64_t)effective.io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
	bits |= (effective.io_passive ? POLICY_EFF_IO_PASSIVE : 0);
	bits |= (effective.darwinbg ? POLICY_EFF_DARWIN_BG : 0);
	bits |= (effective.lowpri_cpu ? POLICY_EFF_LOWPRI_CPU : 0);
	bits |= (effective.terminated ? POLICY_EFF_TERMINATED : 0);
	bits |= (effective.all_sockets_bg ? POLICY_EFF_ALL_SOCKETS_BG : 0);
	bits |= (effective.new_sockets_bg ? POLICY_EFF_NEW_SOCKETS_BG : 0);
	bits |= (effective.bg_iotier ? (((uint64_t)effective.bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);

	/* Task-only (t_-prefixed) effective bits. */
	bits |= (effective.t_gpu_deny ? POLICY_EFF_GPU_DENY : 0);
	bits |= (effective.t_tal_engaged ? POLICY_EFF_TAL_ENGAGED : 0);
	bits |= (effective.t_suspended ? POLICY_EFF_SUSPENDED : 0);
	bits |= (effective.t_watchers_bg ? POLICY_EFF_WATCHERS_BG : 0);
	bits |= (effective.t_sup_active ? POLICY_EFF_SUP_ACTIVE : 0);
	bits |= (effective.t_suppressed_cpu ? POLICY_EFF_SUP_CPU : 0);
	bits |= (effective.t_role ? (((uint64_t)effective.t_role) << POLICY_EFF_ROLE_SHIFT) : 0);
	bits |= (effective.t_latency_qos ? (((uint64_t)effective.t_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
	bits |= (effective.t_through_qos ? (((uint64_t)effective.t_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);

	return bits;
}
/*
 * Pack the pending-policy struct of a task (thread == THREAD_NULL) or a
 * thread into a uint64_t of POLICY_PEND_* bits for tracing/inspection.
 */
uint64_t
task_pending_bitfield(task_t task, thread_t thread)
{
	struct task_pended_policy pended =
	    (thread == THREAD_NULL) ? task->pended_policy : thread->pended_policy;
	uint64_t bits = 0;

	if (pended.t_updating_policy)
		bits |= POLICY_PEND_UPDATING;
	if (pended.update_sockets)
		bits |= POLICY_PEND_SOCKETS;
	if (pended.t_update_timers)
		bits |= POLICY_PEND_TIMERS;
	if (pended.t_update_watchers)
		bits |= POLICY_PEND_WATCHERS;

	return bits;
}
/*
 * Report the task's configured CPU usage limit.
 *
 * The limit's scope (per-thread, whole-process, or deadline) is mapped to a
 * TASK_POLICY_RESOURCE_ATTRIBUTE_* value in *policyp; percentage/interval/
 * deadline are filled in by task_get_cpuusage(). Always returns the result
 * of task_get_cpuusage() (currently 0).
 *
 * BUG FIX: previously *policyp was left untouched when no limit was
 * configured (scope == 0), so callers read an indeterminate value; it is
 * now set to TASK_POLICY_RESOURCE_ATTRIBUTE_NONE in that case.
 */
int
proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
{
	int error = 0;
	int scope;

	task_lock(task);
	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
	task_unlock(task);

	/* Translate the internal scope flag into the exported policy attribute. */
	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
	} else {
		/* No CPU usage limit configured. */
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
	}

	return(error);
}
void
proc_init_cpumon_params(void)
{
if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
sizeof (proc_max_cpumon_percentage))) {
proc_max_cpumon_percentage = DEFAULT_CPUMON_PERCENTAGE;
}
if (proc_max_cpumon_percentage > 100) {
proc_max_cpumon_percentage = 100;
}
if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
sizeof (proc_max_cpumon_interval))) {
proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
}
proc_max_cpumon_interval *= NSEC_PER_SEC;
}
/*
 * Configure a CPU usage limit on the task.
 *
 * The TASK_POLICY_RESOURCE_ATTRIBUTE_* policy selects the scope:
 *  - NONE/THROTTLE      -> whole-process ledger limit (no deadline allowed)
 *  - SUSPEND/TERMINATE/
 *    NOTIFY_KQ          -> deadline-based action (no percentage allowed)
 *  - NOTIFY_EXC         -> per-thread CPU monitor (no deadline allowed)
 *
 * cpumon_entitled indicates the caller may exceed the system cpumon caps.
 * Returns ENOTSUP/EINVAL on parameter mismatch, else task_set_cpuusage()'s
 * result.
 */
int
proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
	int cpumon_entitled)
{
	int error = 0;
	int scope;

	switch (policy) {
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
		if (deadline != 0)
			return (ENOTSUP);
		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
		break;
	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
		if (percentage != 0)
			return (ENOTSUP);
		scope = TASK_RUSECPU_FLAGS_DEADLINE;
		break;
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
		if (deadline != 0)
			return (ENOTSUP);
		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
#ifdef CONFIG_NOMONITORS
		/* Monitors compiled out: accept the request but do nothing. */
		return (error);
#endif /* CONFIG_NOMONITORS */
		break;
	default:
		return (EINVAL);
	}

	task_lock(task);
	/*
	 * Record where the request came from: policy_ru_cpu_ext for requests on
	 * another task, policy_ru_cpu for self-imposed limits.
	 */
	if (task != current_task()) {
		task->policy_ru_cpu_ext = policy;
	} else {
		task->policy_ru_cpu = policy;
	}
	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
	task_unlock(task);
	return(error);
}
/*
 * Remove any configured CPU usage limit from the task and, if a resource
 * action had been applied, ask the BSD side to restore the process's state.
 * The task lock is dropped before calling out to proc_restore_resource_actions.
 */
int
proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
{
	int error = 0;
	int action;
	void * bsdinfo = NULL;

	task_lock(task);
	/* Clear the recorded request on the side it was recorded on (see set). */
	if (task != current_task()) {
		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
	} else {
		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
	}

	error = task_clear_cpuusage_locked(task, cpumon_entitled);
	if (error != 0)
		goto out;

	/*
	 * NOTE(review): the action is read from applied_ru_cpu but the field
	 * reset below is applied_ru_cpu_ext — asymmetric; confirm against the
	 * intended int/ext bookkeeping before changing.
	 */
	action = task->applied_ru_cpu;
	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
	}
	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
		bsdinfo = task->bsd_info;
		/* Drop the lock before calling into BSD-land. */
		task_unlock(task);
		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
		goto out1;
	}

out:
	task_unlock(task);
out1:
	return(error);
}
/*
 * Apply the task's configured resource action for the given resource type.
 * Only CPU usage is handled here; other known types are accepted as no-ops
 * (return 0) and unknown types are rejected (return 1). Calls out to the
 * BSD layer with the task lock dropped.
 */
static int
task_apply_resource_actions(task_t task, int type)
{
	int action;
	void *bsdinfo = NULL;

	switch (type) {
	case TASK_POLICY_CPU_RESOURCE_USAGE:
		break;
	case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
	case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
	case TASK_POLICY_DISK_RESOURCE_USAGE:
	case TASK_POLICY_NETWORK_RESOURCE_USAGE:
	case TASK_POLICY_POWER_RESOURCE_USAGE:
		/* Recognized but not acted upon here. */
		return 0;
	default:
		return 1;
	}

	task_lock(task);

	/* Latch the requested external CPU policy as applied, if none is yet. */
	if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE)
		task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
	action = task->applied_ru_cpu_ext;

	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
		bsdinfo = task->bsd_info;
		/* Drop the lock before calling into BSD-land. */
		task_unlock(task);
		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
	} else {
		task_unlock(task);
	}

	return 0;
}
/*
 * Read back the task's CPU usage limit configuration. Exactly one scope is
 * reported, checked in priority order: per-thread, then whole-process, then
 * deadline; *scope is 0 when no limit is set. Always returns 0.
 */
int
task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
{
	*percentagep = 0;
	*intervalp = 0;
	*deadlinep = 0;
	*scope = 0;

	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
		*percentagep = task->rusage_cpu_perthr_percentage;
		*intervalp = task->rusage_cpu_perthr_interval;
	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
		*percentagep = task->rusage_cpu_percentage;
		*intervalp = task->rusage_cpu_interval;
	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
		*deadlinep = task->rusage_cpu_deadline;
	}

	return 0;
}
/*
 * Disable the per-thread CPU usage monitor for the task.
 * Caller must hold the task lock. Returns KERN_INVALID_ARGUMENT if the
 * monitor was not active, KERN_SUCCESS otherwise.
 */
int
task_disable_cpumon(task_t task) {
	thread_t thread;

	task_lock_assert_owned(task);

	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
		return (KERN_INVALID_ARGUMENT);
	}

#if CONFIG_TELEMETRY
	/*
	 * Disable the CPU-monitor telemetry warning for the task being
	 * operated on.
	 *
	 * BUG FIX: this previously passed current_task(); the rest of this
	 * function (and the lock we assert) targets 'task', which need not be
	 * the caller's own task.
	 */
	telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0);
#endif

	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);

	/* Poke every thread so its ledger-based limit is re-evaluated. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		set_astledger(thread);
	}

	task->rusage_cpu_perthr_percentage = 0;
	task->rusage_cpu_perthr_interval = 0;

	return (KERN_SUCCESS);
}
/*
 * Install a CPU usage limit on the task: either a per-thread monitor
 * (TASK_RUSECPU_FLAGS_PERTHR_LIMIT), a whole-process ledger limit
 * (TASK_RUSECPU_FLAGS_PROC_LIMIT), or a deadline-triggered action
 * (TASK_RUSECPU_FLAGS_DEADLINE).
 *
 * Caller must hold the task lock; the lock is temporarily dropped when a
 * pending deadline thread-call has to be cancelled. cpumon_entitled lets
 * the caller exceed (or disable) the system cpumon caps. Always returns 0.
 */
int
task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
{
	thread_t thread;
	uint64_t abstime = 0;
	uint64_t limittime = 0;

	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);

	/* An interval of 0 means "over one second". */
	if (interval == 0)
		interval = NSEC_PER_SEC;

	if (percentage != 0) {
		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
			boolean_t warn = FALSE;

			/*
			 * Disabling the monitor requires the entitlement; otherwise
			 * the request is downgraded to the defaults, with a warning.
			 */
			if (percentage == TASK_POLICY_CPUMON_DISABLE) {
				if (cpumon_entitled) {
					task_disable_cpumon(task);
					return (0);
				}
				warn = TRUE;
				percentage = TASK_POLICY_CPUMON_DEFAULTS;
			}

			if (percentage == TASK_POLICY_CPUMON_DEFAULTS) {
				percentage = proc_max_cpumon_percentage;
				interval   = proc_max_cpumon_interval;
			}

			if (percentage > 100) {
				percentage = 100;
			}

			/* interval == -1 means "keep the current interval". */
			if (interval == -1ULL) {
				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
					interval = task->rusage_cpu_perthr_interval;
				} else {
					interval = proc_max_cpumon_interval;
				}
			}

			/* Unentitled callers are clamped to the system caps, with a warning. */
			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
				warn = TRUE;
				percentage = proc_max_cpumon_percentage;
			}

			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
				warn = TRUE;
				interval = proc_max_cpumon_interval;
			}

			if (warn) {
				int 	  pid = 0;
				char 	  *procname = (char *)"unknown";

#ifdef MACH_BSD
				pid = proc_selfpid();
				if (current_task()->bsd_info != NULL) {
					procname = proc_name_address(current_task()->bsd_info);
				}
#endif

				printf("process %s[%d] denied attempt to escape CPU monitor"
					" (missing required entitlement).\n", procname, pid);
			}

			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
			task->rusage_cpu_perthr_percentage = percentage;
			task->rusage_cpu_perthr_interval = interval;

			/* Poke every thread so its ledger-based limit is re-evaluated. */
			queue_iterate(&task->threads, thread, thread_t, task_threads) {
				set_astledger(thread);
			}
		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
			task->rusage_cpu_percentage = percentage;
			task->rusage_cpu_interval = interval;

			/* Convert (interval, percentage) into an absolute-time ledger limit. */
			limittime = (interval * percentage) / 100;
			nanoseconds_to_absolutetime(limittime, &abstime);

			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
		}
	}

	if (deadline != 0) {
		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);

		/*
		 * Cancel any pending deadline callout first; the task lock must be
		 * dropped around the cancel-and-wait.
		 */
		if (task->rusage_cpu_callt != NULL) {
			task_unlock(task);
			thread_call_cancel_wait(task->rusage_cpu_callt);
			task_lock(task);
		}
		if (task->rusage_cpu_callt == NULL) {
			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
		}
		/* Arm the callout to fire 'deadline' nanoseconds from now. */
		if (task->rusage_cpu_callt != 0) {
			uint64_t save_abstime = 0;

			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
			task->rusage_cpu_deadline = deadline;

			nanoseconds_to_absolutetime(deadline, &abstime);
			save_abstime = abstime;
			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
		}
	}

	return(0);
}
/* Locked wrapper around task_clear_cpuusage_locked(). */
int
task_clear_cpuusage(task_t task, int cpumon_entitled)
{
	int retval;

	task_lock(task);
	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
	task_unlock(task);

	return retval;
}
/*
 * Tear down all CPU usage limits on the task: the whole-process ledger
 * limit, the per-thread monitor (only if the caller is entitled), and any
 * armed deadline callout. Caller holds the task lock; it is dropped
 * temporarily to cancel and free the deadline thread-call. Always returns 0.
 */
int
task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
{
	thread_call_t savecallt;

	/* Remove the whole-process ledger limit, if any. */
	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
		task->rusage_cpu_percentage = 0;
		task->rusage_cpu_interval = 0;
	}

	/* Only entitled callers may switch off the per-thread CPU monitor. */
	if (cpumon_entitled) {
		task_disable_cpumon(task);
	}

	/* Disarm and free any pending deadline callout. */
	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
		if (task->rusage_cpu_callt != 0) {
			/* Detach under the lock, then cancel/free with the lock dropped. */
			savecallt = task->rusage_cpu_callt;
			task->rusage_cpu_callt = NULL;
			task->rusage_cpu_deadline = 0;
			task_unlock(task);
			thread_call_cancel_wait(savecallt);
			thread_call_free(savecallt);
			task_lock(task);
		}
	}
	return(0);
}
/* Thread-call callback: fires when a task's CPU usage deadline expires. */
void
task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
{
	(void)task_apply_resource_actions((task_t)param0, TASK_POLICY_CPU_RESOURCE_USAGE);
}
/* Mark or unmark the task as an importance donor (no-op if importance
 * inheritance is compiled out). */
void
task_importance_mark_donor(task_t task, boolean_t donating)
{
#if IMPORTANCE_INHERITANCE
	if (donating)
		task->imp_donor = 1;
	else
		task->imp_donor = 0;
#endif /* IMPORTANCE_INHERITANCE */
}
/*
 * Mark or unmark the task as an importance receiver, resetting its boost
 * assertion counters. Enabling requires no assertions be held; disabling
 * with pending boosts is a fatal invariant violation.
 */
void
task_importance_mark_receiver(task_t task, boolean_t receiving)
{
#if IMPORTANCE_INHERITANCE
	if (receiving) {
		/* A task may only become a receiver with no boosts outstanding. */
		assert(task->task_imp_assertcnt == 0);
		task->imp_receiver      = 1;  /* task can receive importance boost */
		task->task_imp_assertcnt = 0;
		task->task_imp_externcnt = 0;
	} else {
		if (task->task_imp_assertcnt != 0 || task->task_imp_externcnt != 0)
			panic("disabling imp_receiver on task with pending boosts!");

		task->imp_receiver       = 0;
		task->task_imp_assertcnt = 0;
		task->task_imp_externcnt = 0;
	}
#endif /* IMPORTANCE_INHERITANCE */
}
#if IMPORTANCE_INHERITANCE
/*
 * Set or clear the task's boosted state and re-evaluate its policy.
 * Caller holds the task lock.
 */
static void
task_update_boost_locked(task_t task, boolean_t boost_active)
{
#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
	                          proc_selfpid(), audit_token_pid_from_task(task), trequested(task, THREAD_NULL), 0, 0);
#endif

	/* Record the requested boost state, then propagate its effects. */
	proc_set_task_policy_locked(task, THREAD_NULL, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BOOST, boost_active);

	task_policy_update_locked(task, THREAD_NULL);

#if IMPORTANCE_DEBUG
	if (boost_active == TRUE){
		DTRACE_BOOST2(boost, task_t, task, int, audit_token_pid_from_task(task));
	} else {
		DTRACE_BOOST2(unboost, task_t, task, int, audit_token_pid_from_task(task));
	}
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
	                          proc_selfpid(), audit_token_pid_from_task(task),
	                          trequested(task, THREAD_NULL), tpending(task, THREAD_NULL), 0);
#endif
}
/* TRUE if the task is a marked donor or currently holds any boost assertion. */
boolean_t
task_is_importance_donor(task_t task)
{
	if (task->imp_donor == 1 || task->task_imp_assertcnt > 0)
		return TRUE;

	return FALSE;
}
/* TRUE if the task is marked as an importance receiver. */
boolean_t
task_is_importance_receiver(task_t task)
{
	if (task->imp_receiver)
		return TRUE;

	return FALSE;
}
/*
 * Acquire 'count' kernel-internal boost assertions on the target task,
 * then run the unlocked portion of the policy update.
 */
int
task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
{
	int rval;

	task_lock(target_task);
	rval = task_importance_hold_assertion_locked(target_task, TASK_POLICY_INTERNAL, count);
	task_unlock(target_task);

	task_policy_update_complete_unlocked(target_task, THREAD_NULL);

	return rval;
}
/*
 * Acquire 'count' externally-visible boost assertions on the target task,
 * then run the unlocked portion of the policy update.
 */
int
task_importance_hold_external_assertion(task_t target_task, uint32_t count)
{
	int rval;

	task_lock(target_task);
	rval = task_importance_hold_assertion_locked(target_task, TASK_POLICY_EXTERNAL, count);
	task_unlock(target_task);

	task_policy_update_complete_unlocked(target_task, THREAD_NULL);

	return rval;
}
/*
 * Release 'count' kernel-internal boost assertions on the target task,
 * then run the unlocked portion of the policy update.
 */
int
task_importance_drop_internal_assertion(task_t target_task, uint32_t count)
{
	int rval;

	task_lock(target_task);
	rval = task_importance_drop_assertion_locked(target_task, TASK_POLICY_INTERNAL, count);
	task_unlock(target_task);

	task_policy_update_complete_unlocked(target_task, THREAD_NULL);

	return rval;
}
/*
 * Release 'count' externally-visible boost assertions on the target task,
 * then run the unlocked portion of the policy update.
 */
int
task_importance_drop_external_assertion(task_t target_task, uint32_t count)
{
	int rval;

	task_lock(target_task);
	rval = task_importance_drop_assertion_locked(target_task, TASK_POLICY_EXTERNAL, count);
	task_unlock(target_task);

	task_policy_update_complete_unlocked(target_task, THREAD_NULL);

	return rval;
}
/*
 * Acquire 'count' boost assertions on the task (lock held).
 *
 * Internal holds bump task_imp_assertcnt and, on the 0 -> nonzero
 * transition, engage the boost. External holds bump both counters but are
 * only legal while at least one external assertion already exists
 * (otherwise EOVERFLOW is returned and nothing is changed).
 */
static int
task_importance_hold_assertion_locked(task_t target_task, int external, uint32_t count)
{
	boolean_t apply_boost = FALSE;
	int ret = 0;

	/* Only importance receivers may be boosted. */
	assert(target_task->imp_receiver != 0);

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | external))) | DBG_FUNC_START,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
#endif

	if (external == TASK_POLICY_EXTERNAL) {
		if (target_task->task_imp_externcnt == 0) {
			/* Only allowed to take a new boost assertion when holding an external boost */
			printf("BUG in process %s[%d]: it attempted to acquire a new boost assertion without holding an existing external assertion. "
			       "(%d total, %d external)\n",
			       proc_name_address(target_task->bsd_info), audit_token_pid_from_task(target_task),
			       target_task->task_imp_assertcnt, target_task->task_imp_externcnt);
			ret = EOVERFLOW;
			count = 0;
		} else {
			target_task->task_imp_assertcnt += count;
			target_task->task_imp_externcnt += count;
		}
	} else {
		/* First internal assertion engages the boost. */
		if (target_task->task_imp_assertcnt == 0)
			apply_boost = TRUE;
		target_task->task_imp_assertcnt += count;
	}

	if (apply_boost == TRUE)
		task_update_boost_locked(target_task, TRUE);

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | external))) | DBG_FUNC_END,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
	DTRACE_BOOST6(receive_internal_boost, task_t, target_task, int, audit_token_pid_from_task(target_task), task_t, current_task(), int, proc_selfpid(), int, count, int, target_task->task_imp_assertcnt);
	if (external == TASK_POLICY_EXTERNAL){
		DTRACE_BOOST5(receive_boost, task_t, target_task, int, audit_token_pid_from_task(target_task), int, proc_selfpid(), int, count, int, target_task->task_imp_externcnt);
	}
#endif
	return(ret);
}
/*
 * Release 'count' boost assertions on the task (lock held).
 *
 * External drops decrement both counters; over-releasing externals returns
 * EOVERFLOW. Internal over-release is logged but not treated as an error.
 * When the total assertion count reaches zero (and no error occurred),
 * the boost is disengaged.
 */
static int
task_importance_drop_assertion_locked(task_t target_task, int external, uint32_t count)
{
	int ret = 0;

	assert(target_task->imp_receiver != 0);

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | external))) | DBG_FUNC_START,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
#endif

	if (external == TASK_POLICY_EXTERNAL) {
		assert(count == 1);
		if (count <= target_task->task_imp_externcnt) {
			target_task->task_imp_externcnt -= count;
			if (count <= target_task->task_imp_assertcnt)
				target_task->task_imp_assertcnt -= count;
		} else {
			/* Process over-released its boost count */
			printf("BUG in process %s[%d]: over-released external boost assertions (%d total, %d external)\n",
			       proc_name_address(target_task->bsd_info), audit_token_pid_from_task(target_task),
			       target_task->task_imp_assertcnt, target_task->task_imp_externcnt);
			ret = EOVERFLOW;
			count = 0;
		}
	} else {
		if (count <= target_task->task_imp_assertcnt) {
			target_task->task_imp_assertcnt -= count;
		} else {
			/*
			 * NOTE(review): unlike the external path, an internal
			 * over-release only logs and zeroes 'count'; ret stays 0,
			 * so the unboost check below still runs.
			 */
			printf("Over-release of kernel-internal importance assertions for task %p (%s), dropping %d assertion(s) but task only has %d remaining (%d external).\n",
			      target_task,
			      (target_task->bsd_info == NULL) ? "" : proc_name_address(target_task->bsd_info),
			      count,
			      target_task->task_imp_assertcnt,
			      target_task->task_imp_externcnt);
			count = 0;
		}
	}

	/* Disengage the boost once no assertions remain. */
	if (target_task->task_imp_assertcnt == 0 && ret == 0)
		task_update_boost_locked(target_task, FALSE);

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | external))) | DBG_FUNC_END,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
	if (external == TASK_POLICY_EXTERNAL) {
		DTRACE_BOOST4(drop_boost, task_t, target_task, int, audit_token_pid_from_task(target_task), int, count, int, target_task->task_imp_externcnt);
	}
	DTRACE_BOOST4(drop_internal_boost, task_t, target_task, int, audit_token_pid_from_task(target_task), int, count, int, target_task->task_imp_assertcnt);
#endif

	return(ret);
}
/*
 * Convert 'count' already-held boost assertions into externally-visible
 * ones by bumping task_imp_externcnt (task_imp_assertcnt is unchanged —
 * the assertions themselves were taken earlier). Always returns 0.
 */
int
task_importance_externalize_assertion(task_t target_task, uint32_t count, __unused int sender_pid)
{
	assert(target_task != TASK_NULL);
	assert(target_task->imp_receiver != 0);

	task_lock(target_task);

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
#endif

	target_task->task_imp_externcnt += count;

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
	        proc_selfpid(), audit_token_pid_from_task(target_task), target_task->task_imp_assertcnt, target_task->task_imp_externcnt, 0);
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, audit_token_pid_from_task(target_task),
	        int, sender_pid, int, count, int, target_task->task_imp_externcnt);
#endif /* IMPORTANCE_DEBUG */

	task_unlock(target_task);

	return(0);
}
#endif
/*
 * Convenience wrapper: take 'count' internal boost assertions on a task
 * that must already be an importance receiver. No-op when importance
 * inheritance is compiled out.
 */
void
task_hold_multiple_assertion(__imp_only task_t task, __imp_only uint32_t count)
{
#if IMPORTANCE_INHERITANCE
	assert(task->imp_receiver != 0);

	task_importance_hold_internal_assertion(task, count);
#endif /* IMPORTANCE_INHERITANCE */
}
/*
 * Attach an importance-receiving task to a temp-owner watchport.
 *
 * If the port already donated boosts to a previous owner task, that task's
 * boosts are dropped and the task is released; the new task takes over as
 * the port's importance target (with a reference). The number of boosts
 * carried by the port is returned through *boostp (0 if the task is not a
 * receiver or the port is not a temp-owner port).
 */
void
task_add_importance_watchport(__imp_only task_t task, __imp_only __impdebug_only int pid, __imp_only mach_port_t port, int *boostp)
{
	int boost = 0;

	__impdebug_only int released_pid = 0;

#if IMPORTANCE_INHERITANCE
	task_t release_imp_task = TASK_NULL;

	/* Only importance receivers participate. */
	if (task->imp_receiver == 0) {
		*boostp = boost;
		return;
	}

	if (IP_VALID(port) != 0) {
		ip_lock(port);

		/*
		 * The port must be a temp-owner (importance-donating) port for a
		 * task to take it over.
		 */
		if (port->ip_tempowner != 0) {
			assert(port->ip_impdonation != 0);

			boost = port->ip_impcount;
			if (port->ip_taskptr != 0) {
				/* Somebody else was the previous importance target. */
				release_imp_task = port->ip_imp_task;
			}
			port->ip_taskptr = 1;
			port->ip_imp_task = task;
			task_reference(task);
		}
		ip_unlock(port);

		/* Transfer the port's boosts away from the previous owner. */
		if (release_imp_task != TASK_NULL) {
			if (boost > 0)
				task_importance_drop_internal_assertion(release_imp_task, boost);
			released_pid = audit_token_pid_from_task(release_imp_task);
			task_deallocate(release_imp_task);
		}
#if IMPORTANCE_DEBUG
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
		        proc_selfpid(), pid, boost, released_pid, 0);
#endif /* IMPORTANCE_DEBUG */
	}
#endif /* IMPORTANCE_INHERITANCE */

	*boostp = boost;
	return;
}
#define TASK_IMPORTANCE_FOREGROUND 4
#define TASK_IMPORTANCE_NOTDARWINBG 1
/*
 * Has the task already been sent a low-memory notification at the given
 * pressure level? Unknown levels report TRUE (treat as already notified).
 */
boolean_t
task_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL)
		return FALSE;

	switch (pressurelevel) {
	case kVMPressureWarning:
		return task->low_mem_notified_warn ? TRUE : FALSE;
	case kVMPressureCritical:
		return task->low_mem_notified_critical ? TRUE : FALSE;
	default:
		return TRUE;
	}
}
/*
 * Has the task already had its purgeable memory reclaimed at the given
 * pressure level? Unknown levels report TRUE (treat as already purged).
 */
boolean_t
task_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL)
		return FALSE;

	switch (pressurelevel) {
	case kVMPressureWarning:
		return task->purged_memory_warn ? TRUE : FALSE;
	case kVMPressureCritical:
		return task->purged_memory_critical ? TRUE : FALSE;
	default:
		return TRUE;
	}
}
/* Record that a low-memory notification was sent to the task at this level. */
void
task_mark_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL)
		return;

	switch (pressurelevel) {
	case kVMPressureWarning:
		task->low_mem_notified_warn = 1;
		break;
	case kVMPressureCritical:
		task->low_mem_notified_critical = 1;
		break;
	default:
		break;
	}
}
/* Record that the task's purgeable memory was reclaimed at this level. */
void
task_mark_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL)
		return;

	switch (pressurelevel) {
	case kVMPressureWarning:
		task->purged_memory_warn = 1;
		break;
	case kVMPressureCritical:
		task->purged_memory_critical = 1;
		break;
	default:
		break;
	}
}
/* Forget that a low-memory notification was sent at the given level. */
void
task_clear_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL)
		return;

	switch (pressurelevel) {
	case kVMPressureWarning:
		task->low_mem_notified_warn = 0;
		break;
	case kVMPressureCritical:
		task->low_mem_notified_critical = 0;
		break;
	default:
		break;
	}
}
/* Forget that the task's purgeable memory was reclaimed (both levels). */
void
task_clear_used_for_purging(task_t task)
{
	if (task == NULL)
		return;

	task->purged_memory_warn = 0;
	task->purged_memory_critical = 0;
}
/*
 * Rough importance score for memory-pressure victim selection:
 * +TASK_IMPORTANCE_FOREGROUND if the task is the foreground app,
 * +TASK_IMPORTANCE_NOTDARWINBG if it is not darwinbg. NULL tasks score 0.
 */
int
task_importance_estimate(task_t task)
{
	int importance = 0;

	if (task == NULL)
		return 0;

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION)
		importance += TASK_IMPORTANCE_FOREGROUND;

	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0)
		importance += TASK_IMPORTANCE_NOTDARWINBG;

	return importance;
}