#ifndef _KERN_SCHED_H_
#define _KERN_SCHED_H_
#include <mach/policy.h>
#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/timer_call.h>
#include <kern/ast.h>
/*
 * Scheduler priority map.  NRQS run-queue levels cover priorities
 * 0 (IDLEPRI/MINPRI) .. NRQS-1 (MAXPRI); the derived macros below carve
 * that range into real-time, kernel, reserved, and user bands.  Macros
 * may reference others defined later in the list -- legal, since the
 * preprocessor expands at the point of use, not of definition.
 * Resulting values for NRQS == 128 are noted on each line.
 */
#define NRQS 128		/* number of priority levels / run-queue slots */
#define NRQBM (NRQS / 32)	/* ints in a 1-bit-per-level occupancy bitmap */
#define MAXPRI (NRQS-1)		/* 127: highest priority */
#define MINPRI IDLEPRI		/* 0: lowest priority */
#define IDLEPRI 0		/* idle priority */
#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1)		/* 97: threads placed on rt_runq */
#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1)	/* 96: base of the real-time band */
#define MAXPRI_KERNEL (BASEPRI_REALTIME - 1)		/* 95: top of the kernel band */
#define BASEPRI_PREEMPT (MAXPRI_KERNEL - 2)		/* 93 */
#define BASEPRI_KERNEL (MINPRI_KERNEL + 1)		/* 81 */
#define MINPRI_KERNEL (MAXPRI_KERNEL - (NRQS / 8) + 1)	/* 80: bottom of the kernel band */
#define MAXPRI_RESERVED (MINPRI_KERNEL - 1)		/* 79: top of the reserved band */
#define BASEPRI_GRAPHICS (MAXPRI_RESERVED - 3)		/* 76 */
#define MINPRI_RESERVED (MAXPRI_RESERVED - (NRQS / 8) + 1)	/* 64: bottom of the reserved band */
#define MAXPRI_USER (MINPRI_RESERVED - 1)		/* 63: top of the user band */
#define BASEPRI_CONTROL (BASEPRI_DEFAULT + 17)		/* 48 */
#define BASEPRI_FOREGROUND (BASEPRI_DEFAULT + 16)	/* 47 */
#define BASEPRI_BACKGROUND (BASEPRI_DEFAULT + 15)	/* 46 */
#define BASEPRI_DEFAULT (MAXPRI_USER - (NRQS / 4))	/* 31: default user priority */
#define MAXPRI_SUPPRESSED (BASEPRI_DEFAULT - 3)		/* 28 */
#define MAXPRI_THROTTLE (MINPRI + 4)			/* 4 */
#define MINPRI_USER MINPRI				/* 0 */
#define DEPRESSPRI MINPRI				/* 0: priority used while a thread is depressed */
/*
 * Thread scheduling modes.  Only the first enumerator is given an
 * explicit value; the rest follow sequentially (REALTIME=1, FIXED=2,
 * TIMESHARE=3, FAIRSHARE=4) -- do not reorder.
 */
typedef enum {
	TH_MODE_NONE = 0,	/* mode not set / invalid */
	TH_MODE_REALTIME,	/* real-time thread (queued on rt_runq) */
	TH_MODE_FIXED,		/* fixed priority -- presumably exempt from timeshare decay; confirm in sched policy code */
	TH_MODE_TIMESHARE,	/* timeshare thread (see sched_*_shift decay parameters below) */
	TH_MODE_FAIRSHARE	/* fair-share band (see struct fairshare_queue) */
} sched_mode_t;
/* TRUE when (pri) lies outside [MINPRI, MAXPRI]; argument fully parenthesized. */
#define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI)
/*
 * Per-run-queue statistics, embedded in every run-queue variant below.
 */
struct runq_stats {
	uint64_t count_sum;		/* accumulated queue-depth total -- presumably time-weighted; confirm against the updater */
	uint64_t last_change_timestamp;	/* timestamp of the most recent count change */
};
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
/*
 * Multilevel run queue: one FIFO per priority level, with a bitmap of
 * occupied levels.  Built only for the scheduler policies that use
 * per-priority queues.
 */
struct run_queue {
	int highq;			/* highest occupied priority level (dispatch hint) */
	int bitmap[NRQBM];		/* one bit per level; 32 levels per int */
	int count;			/* total threads queued across all levels */
	int urgency;			/* count of urgent threads -- TODO confirm exact criterion */
	queue_head_t queues[NRQS];	/* per-priority thread queues */
	struct runq_stats runq_stats;	/* enqueue/dequeue statistics */
};
#endif
/*
 * Run queue for real-time threads: a single queue rather than
 * per-priority slots (cf. BASEPRI_RTQUEUES above).
 */
struct rt_queue {
	int count;			/* threads currently queued */
	queue_head_t queue;		/* the queue itself */
	struct runq_stats runq_stats;	/* enqueue/dequeue statistics */
};
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
/*
 * Single queue for TH_MODE_FAIRSHARE threads; same layout as rt_queue.
 */
struct fairshare_queue {
	int count;			/* threads currently queued */
	queue_head_t queue;		/* the queue itself */
	struct runq_stats runq_stats;	/* enqueue/dequeue statistics */
};
#endif
#if defined(CONFIG_SCHED_GRRR_CORE)
/*
 * GRRR scheduler core state (group-based round-robin family -- see the
 * CONFIG_SCHED_GRRR policy for the algorithm).  Threads map to a
 * proportional priority (0..255), and proportional priorities map to
 * one of NUM_GRRR_GROUPS groups selected in weighted order.
 *
 * NOTE(review): two stale `#if 0` fragments (an old NUM_GRRR_GROUPS
 * value of 8 and a disabled deferred_removal_weight field) were dead
 * code and have been removed; struct layout is unchanged.
 */
typedef uint8_t grrr_proportional_priority_t;
typedef uint8_t grrr_group_index_t;

#define NUM_GRRR_PROPORTIONAL_PRIORITIES 256
#define MAX_GRRR_PROPORTIONAL_PRIORITY ((grrr_proportional_priority_t)255)
#define NUM_GRRR_GROUPS 64

struct grrr_group {
	queue_chain_t priority_order;		/* linkage on grrr_run_queue.sorted_group_list */
	grrr_proportional_priority_t minpriority; /* lowest proportional priority mapped here */
	grrr_group_index_t index;		/* this group's slot in grrr_run_queue.groups[] */
	queue_head_t clients;			/* threads belonging to this group */
	int count;				/* number of queued clients */
	uint32_t weight;			/* group's share weight */
	uint32_t work;				/* work performed -- TODO confirm units (quanta?) */
	thread_t current_client;		/* client most recently selected from this group */
};

struct grrr_run_queue {
	int count;				/* total threads across all groups */
	uint32_t last_rescale_tick;		/* tick at which weights were last rescaled */
	struct grrr_group groups[NUM_GRRR_GROUPS];
	queue_head_t sorted_group_list;		/* groups in priority order (via priority_order) */
	uint32_t weight;			/* sum of group weights */
	grrr_group_t current_group;		/* group currently being serviced */
	struct runq_stats runq_stats;		/* enqueue/dequeue statistics */
};
#endif
/* TRUE while the processor still has quantum remaining ((processor)->timeslice > 0). */
#define first_timeslice(processor) ((processor)->timeslice > 0)
/* Global run queue for real-time threads. */
extern struct rt_queue rt_runq;

/* Timer-call handler fired when a thread's quantum expires. */
extern void thread_quantum_expire(
	timer_call_param_t processor,
	timer_call_param_t thread);

/* Returns the AST (if any) needed for a context switch on this processor. */
extern ast_t csw_check(processor_t processor);

#if defined(CONFIG_SCHED_TRADITIONAL)
/* Standard timeshare quantum, in absolute time and in microseconds. */
extern uint32_t std_quantum, min_std_quantum;
extern uint32_t std_quantum_us;
#endif

extern uint32_t thread_depress_time;		/* duration of a priority depression -- TODO confirm units */
extern uint32_t default_timeshare_computation;	/* default computation bound for timeshare threads */
extern uint32_t default_timeshare_constraint;	/* default constraint for timeshare threads */

/* Bounds on real-time quanta. */
extern uint32_t max_rt_quantum, min_rt_quantum;

extern int default_preemption_rate;	/* preemption rate tunable */
extern int default_bg_preemption_rate;	/* preemption rate tunable for background threads */

#if defined(CONFIG_SCHED_TRADITIONAL)
/* Scheduler tick parameters; tick rate is presumably (1 << SCHED_TICK_SHIFT) Hz -- confirm in sched init code. */
#define SCHED_TICK_SHIFT 3
#define SCHED_TICK_MAX_DELTA (8)
extern unsigned sched_tick;		/* monotonically increasing scheduler tick */
extern uint32_t sched_tick_interval;	/* tick period in absolute-time units */
#endif

extern uint64_t sched_one_second_interval;	/* one second in absolute-time units */

/* Periodic bookkeeping hooks: load averages, stack targets, memory and GC throttles. */
extern void compute_averages(uint64_t);
extern void compute_averunnable(
	void *nrun);
extern void compute_stack_target(
	void *arg);
extern void compute_memory_pressure(
	void *arg);
extern void compute_zone_gc_throttle(
	void *arg);
extern void compute_pageout_gc_throttle(
	void *arg);
extern void compute_pmap_gc_throttle(
	void *arg);

#if defined(CONFIG_SCHED_TRADITIONAL)
/* Priority-decay parameters for timeshare scheduling. */
extern uint32_t sched_pri_shift;		/* usage->priority decay shift */
extern uint32_t sched_background_pri_shift;	/* decay shift for background threads */
extern uint32_t sched_combined_fgbg_pri_shift;	/* combined fg/bg decay shift (see sched_use_combined_fgbg_decay) */
extern uint32_t sched_fixed_shift;		/* fixed-point conversion shift */
extern int8_t sched_load_shifts[NRQS];		/* per-load shift table, one entry per run-queue level */
extern uint32_t sched_decay_usage_age_factor;	/* aging factor applied to usage decay */
extern uint32_t sched_use_combined_fgbg_decay;	/* boolean tunable selecting combined fg/bg decay */
void sched_traditional_consider_maintenance(uint64_t);
#endif

extern int32_t sched_poll_yield_shift;	/* tunable for poll/yield depression */
extern uint64_t sched_safe_duration;	/* threshold for "unsafe" computation -- TODO confirm relation to max_unsafe_computation */

/* Global scheduler load counters (updated via the macros below). */
extern uint32_t sched_run_count, sched_share_count, sched_background_count;
extern uint32_t sched_load_average, sched_mach_factor;

/* Load averages and Mach factor over three sampling windows. */
extern uint32_t avenrun[3], mach_factor[3];

extern uint64_t max_unsafe_computation;	/* cap on unsafe (unpreempted) computation */
extern uint64_t max_poll_computation;	/* cap on computation between polls */
/*
 * Atomic increment/decrement of the global scheduler load counters.
 * Each macro expands to a single hw_atomic_add/hw_atomic_sub call whose
 * return value is explicitly discarded with (void).  The run-count pair
 * originally omitted the (void) cast that every sibling macro carries;
 * it is added here for consistency (and to silence unused-result
 * diagnostics on toolchains that warn).
 */
#define sched_run_incr()				\
MACRO_BEGIN						\
	(void)hw_atomic_add(&sched_run_count, 1);	\
MACRO_END

#define sched_run_decr()				\
MACRO_BEGIN						\
	(void)hw_atomic_sub(&sched_run_count, 1);	\
MACRO_END

#define sched_share_incr()				\
MACRO_BEGIN						\
	(void)hw_atomic_add(&sched_share_count, 1);	\
MACRO_END

#define sched_share_decr()				\
MACRO_BEGIN						\
	(void)hw_atomic_sub(&sched_share_count, 1);	\
MACRO_END

#define sched_background_incr()				\
MACRO_BEGIN						\
	(void)hw_atomic_add(&sched_background_count, 1);	\
MACRO_END

#define sched_background_decr()				\
MACRO_BEGIN						\
	(void)hw_atomic_sub(&sched_background_count, 1);	\
MACRO_END
/*
 * thread_timer_delta(thread, delta): store into (delta) the combined
 * growth of the thread's system and user timers since the previous
 * snapshot (the *_save fields).  timer_delta presumably advances the
 * save location it is handed -- confirm in the timer header.
 * Evaluates (thread) and (delta) multiple times; pass side-effect-free
 * arguments.  The typeof casts keep the arithmetic in the caller's
 * chosen type for (delta).
 */
#define thread_timer_delta(thread, delta) \
MACRO_BEGIN \
	(delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \
		&(thread)->system_timer_save); \
	(delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \
		&(thread)->user_timer_save); \
MACRO_END
#endif