#include <stdint.h>
#include <machine/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>
#include <string.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>
#include <sys/pthread_shims.h>
#include <sys/kdebug.h>
#include <sys/reason.h>
#include <os/reason_private.h>
#include <pexpert/pexpert.h>
#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/kcdata.h>
#include <pthread/priority_private.h>
#include <pthread/workqueue_syscalls.h>
#include <pthread/workqueue_internal.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include "net/net_str_id.h"
#include <mach/task.h>
#include <libkern/section_keywords.h>
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name);
#define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
#define KQ_EVENT NO_EVENT64
static int kqueue_read(struct fileproc *fp, struct uio *uio,
int flags, vfs_context_t ctx);
static int kqueue_write(struct fileproc *fp, struct uio *uio,
int flags, vfs_context_t ctx);
static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
vfs_context_t ctx);
static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
struct kevent_internal_s *kev, vfs_context_t ctx);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
static const struct fileops kqueueops = {
.fo_type = DTYPE_KQUEUE,
.fo_read = kqueue_read,
.fo_write = kqueue_write,
.fo_ioctl = kqueue_ioctl,
.fo_select = kqueue_select,
.fo_close = kqueue_close,
.fo_kqfilter = kqueue_kqfilter,
.fo_drain = kqueue_drain,
};
static void kevent_put_kq(struct proc *p, kqueue_id_t id, struct fileproc *fp, struct kqueue *kq);
static int kevent_internal(struct proc *p,
kqueue_id_t id, kqueue_id_t *id_out,
user_addr_t changelist, int nchanges,
user_addr_t eventlist, int nevents,
user_addr_t data_out, uint64_t data_available,
unsigned int flags, user_addr_t utimeout,
kqueue_continue_t continuation,
int32_t *retval);
static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp,
struct proc *p, unsigned int flags);
static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp,
struct proc *p, unsigned int flags);
char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n);
static int kevent_register_wait_prepare(struct knote *kn, struct kevent_internal_s *kev);
static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
struct knote_lock_ctx *knlc, thread_continue_t cont,
struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
static void kevent_register_wait_cleanup(struct knote *kn);
static inline void kqueue_release_last(struct proc *p, kqueue_t kqu);
static void kqueue_interrupt(struct kqueue *kq);
static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp,
void *data);
static void kevent_continue(struct kqueue *kq, void *data, int error);
static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data,
struct filt_process_s *process_data, int *countp);
static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index);
static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
static void kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos, int flags);
static void kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn, kq_index_t qos);
static void kqworkq_unbind(proc_t p, struct kqrequest *kqr);
static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, struct kqrequest *kqr, thread_t thread);
static struct kqrequest *kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
static void kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t override_index);
static void kqworkloop_unbind(proc_t p, struct kqworkloop *kwql);
static thread_qos_t kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread);
static kq_index_t kqworkloop_owner_override(struct kqworkloop *kqwl);
enum {
KQWL_UTQ_NONE,
KQWL_UTQ_UPDATE_WAKEUP_QOS,
KQWL_UTQ_UPDATE_STAYACTIVE_QOS,
KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
KQWL_UTQ_UNBINDING,
KQWL_UTQ_PARKING,
KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
KQWL_UTQ_SET_QOS_INDEX,
KQWL_UTQ_REDRIVE_EVENTS,
};
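/*
 * Operations for kqworkloop_update_threads_qos(): each KQWL_UTQ_* value
 * selects how the workloop's wakeup/override QoS bookkeeping is updated.
 */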
static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
static void kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_index);
static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data,
struct filt_process_s *process_data);
static int kq_add_knote(struct kqueue *kq, struct knote *kn,
struct knote_lock_ctx *knlc, struct proc *p);
static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, bool is_fd, struct proc *p);
static void knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
static void knote_activate(struct knote *kn);
static void knote_deactivate(struct knote *kn);
static void knote_enable(struct knote *kn);
static void knote_disable(struct knote *kn);
static int knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static void knote_suppress(struct knote *kn);
static void knote_unsuppress(struct knote *kn);
static void knote_wakeup(struct knote *kn);
static bool knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn,
int result, thread_qos_t *qos_out);
static void knote_apply_qos_override(struct knote *kn, kq_index_t qos_index);
static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
static void knote_reset_priority(struct knote *kn, pthread_priority_t pp);
static kq_index_t knote_get_qos_override_index(struct knote *kn);
static void knote_set_qos_overcommit(struct knote *kn);
static zone_t knote_zone;
static zone_t kqfile_zone;
static zone_t kqworkq_zone;
static zone_t kqworkloop_zone;
#if DEVELOPMENT || DEBUG
#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0)
#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1)
#define KEVENT_PANIC_BOOT_ARG_INITIALIZED (1U << 31)
#define KEVENT_PANIC_DEFAULT_VALUE (0)
static uint32_t
kevent_debug_flags(void)
{
static uint32_t flags = KEVENT_PANIC_DEFAULT_VALUE;
if ((flags & KEVENT_PANIC_BOOT_ARG_INITIALIZED) == 0) {
uint32_t value = 0;
if (!PE_parse_boot_argn("kevent_debug", &value, sizeof(value))) {
value = KEVENT_PANIC_DEFAULT_VALUE;
}
value |= KEVENT_PANIC_BOOT_ARG_INITIALIZED;
os_atomic_store(&flags, value, relaxed);
}
return flags;
}
#endif
#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
static int filt_badattach(struct knote *kn, struct kevent_internal_s *kev);
static int filt_badevent(struct knote *kn, long hint);
SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
.f_attach = filt_badattach,
};
#if CONFIG_MEMORYSTATUS
extern const struct filterops memorystatus_filtops;
#endif
extern const struct filterops fs_filtops;
extern const struct filterops sig_filtops;
extern const struct filterops machport_filtops;
extern const struct filterops pipe_rfiltops;
extern const struct filterops pipe_wfiltops;
extern const struct filterops ptsd_kqops;
extern const struct filterops ptmx_kqops;
extern const struct filterops soread_filtops;
extern const struct filterops sowrite_filtops;
extern const struct filterops sock_filtops;
extern const struct filterops soexcept_filtops;
extern const struct filterops spec_filtops;
extern const struct filterops bpfread_filtops;
extern const struct filterops necp_fd_rfiltops;
extern const struct filterops fsevent_filtops;
extern const struct filterops vnode_filtops;
extern const struct filterops tty_filtops;
const static struct filterops file_filtops;
const static struct filterops kqread_filtops;
const static struct filterops proc_filtops;
const static struct filterops timer_filtops;
const static struct filterops user_filtops;
const static struct filterops workloop_filtops;
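/*
 * Master filter table.  Public EVFILT_* identifiers are negative and map
 * through bitwise-not (~EVFILT_*) into the low slots; kernel-internal
 * EVFILTID_* filters occupy the remaining slots up to EVFILTID_MAX.
 */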
SECURITY_READ_ONLY_EARLY(static struct filterops *) sysfilt_ops[EVFILTID_MAX] = {
[~EVFILT_READ] = &file_filtops,
[~EVFILT_WRITE] = &file_filtops,
[~EVFILT_AIO] = &bad_filtops,
[~EVFILT_VNODE] = &file_filtops,
[~EVFILT_PROC] = &proc_filtops,
[~EVFILT_SIGNAL] = &sig_filtops,
[~EVFILT_TIMER] = &timer_filtops,
[~EVFILT_MACHPORT] = &machport_filtops,
[~EVFILT_FS] = &fs_filtops,
[~EVFILT_USER] = &user_filtops,
[~EVFILT_UNUSED_11] = &bad_filtops,
[~EVFILT_VM] = &bad_filtops,
[~EVFILT_SOCK] = &file_filtops,
#if CONFIG_MEMORYSTATUS
[~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
#else
[~EVFILT_MEMORYSTATUS] = &bad_filtops,
#endif
[~EVFILT_EXCEPT] = &file_filtops,
[~EVFILT_WORKLOOP] = &workloop_filtops,
[EVFILTID_KQREAD] = &kqread_filtops,
[EVFILTID_PIPE_R] = &pipe_rfiltops,
[EVFILTID_PIPE_W] = &pipe_wfiltops,
[EVFILTID_PTSD] = &ptsd_kqops,
[EVFILTID_SOREAD] = &soread_filtops,
[EVFILTID_SOWRITE] = &sowrite_filtops,
[EVFILTID_SCK] = &sock_filtops,
[EVFILTID_SOEXCEPT] = &soexcept_filtops,
[EVFILTID_SPEC] = &spec_filtops,
[EVFILTID_BPFREAD] = &bpfread_filtops,
[EVFILTID_NECP_FD] = &necp_fd_rfiltops,
[EVFILTID_FSEVENT] = &fsevent_filtops,
[EVFILTID_VN] = &vnode_filtops,
[EVFILTID_TTY] = &tty_filtops,
[EVFILTID_PTMX] = &ptmx_kqops,
};
void waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos);
static inline struct kqworkloop *
kqr_kqworkloop(struct kqrequest *kqr)
{
if (kqr->kqr_state & KQR_WORKLOOP) {
return __container_of(kqr, struct kqworkloop, kqwl_request);
}
return NULL;
}
static inline kqueue_t
kqr_kqueue(proc_t p, struct kqrequest *kqr)
{
kqueue_t kqu;
if (kqr->kqr_state & KQR_WORKLOOP) {
kqu.kqwl = kqr_kqworkloop(kqr);
} else {
kqu.kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
assert(kqr >= kqu.kqwq->kqwq_request &&
kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
}
return kqu;
}
static inline boolean_t
is_workqueue_thread(thread_t thread)
{
return (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE);
}
static lck_grp_attr_t *kq_lck_grp_attr;
static lck_grp_t *kq_lck_grp;
static lck_attr_t *kq_lck_attr;
static inline void
kqlock(kqueue_t kqu)
{
lck_spin_lock(&kqu.kq->kq_lock);
}
static inline void
kqlock_held(__assert_only kqueue_t kqu)
{
LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
}
static inline void
kqunlock(kqueue_t kqu)
{
lck_spin_unlock(&kqu.kq->kq_lock);
}
static inline void
kq_req_lock(kqueue_t kqu)
{
assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ));
lck_spin_lock(&kqu.kq->kq_reqlock);
}
static inline void
kq_req_unlock(kqueue_t kqu)
{
assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ));
lck_spin_unlock(&kqu.kq->kq_reqlock);
}
static inline void
kq_req_held(__assert_only kqueue_t kqu)
{
assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ));
LCK_SPIN_ASSERT(&kqu.kq->kq_reqlock, LCK_ASSERT_OWNED);
}
static inline void
knhash_lock(proc_t p)
{
lck_mtx_lock(&p->p_fd->fd_knhashlock);
}
static inline void
knhash_unlock(proc_t p)
{
lck_mtx_unlock(&p->p_fd->fd_knhashlock);
}
#pragma mark knote locks
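/*
 * Knotes are locked by publishing a per-acquisition knote_lock_ctx rather
 * than with a dedicated mutex: the owner links its context on kq_knlocks and
 * sets KN_LOCKED, and contenders queue behind the owner's context and block.
 * The KNOTE_KQ_* constants tell knote_lock()/knote_unlock() what to do with
 * the kqueue spinlock on each outcome.
 */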
#define KNOTE_KQ_LOCK_ALWAYS 0x0
#define KNOTE_KQ_LOCK_ON_SUCCESS 0x1
#define KNOTE_KQ_LOCK_ON_FAILURE 0x2
#define KNOTE_KQ_UNLOCK 0x3
#if DEBUG || DEVELOPMENT
__attribute__((noinline, not_tail_called, disable_tail_calls))
void knote_lock_ctx_chk(struct knote_lock_ctx *knlc)
{
assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
}
#endif
static struct knote_lock_ctx *
knote_lock_ctx_find(struct kqueue *kq, struct knote *kn)
{
struct knote_lock_ctx *ctx;
LIST_FOREACH(ctx, &kq->kq_knlocks, knlc_le) {
if (ctx->knlc_knote == kn) return ctx;
}
panic("knote lock context not found: %p", kn);
__builtin_trap();
}
__attribute__((noinline))
static bool __result_use_check
knote_lock_slow(struct kqueue *kq, struct knote *kn,
struct knote_lock_ctx *knlc, int kqlocking)
{
kqlock_held(kq);
struct knote_lock_ctx *owner_lc = knote_lock_ctx_find(kq, kn);
thread_t owner_thread = owner_lc->knlc_thread;
#if DEBUG || DEVELOPMENT
knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
#endif
thread_reference(owner_thread);
TAILQ_INSERT_TAIL(&owner_lc->knlc_head, knlc, knlc_tqe);
assert_wait(&kn->kn_status, THREAD_UNINT | THREAD_WAIT_NOREPORT);
kqunlock(kq);
if (thread_handoff_deallocate(owner_thread) == THREAD_RESTART) {
if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
kqlock(kq);
}
#if DEBUG || DEVELOPMENT
assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
return false;
}
#if DEBUG || DEVELOPMENT
assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif
if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
kqlock(kq);
}
return true;
}
static bool __result_use_check
knote_lock(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
int kqlocking)
{
kqlock_held(kq);
#if DEBUG || DEVELOPMENT
assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
#endif
knlc->knlc_knote = kn;
knlc->knlc_thread = current_thread();
TAILQ_INIT(&knlc->knlc_head);
if (__improbable(kn->kn_status & KN_LOCKED)) {
return knote_lock_slow(kq, kn, knlc, kqlocking);
}
assert((kn->kn_status & KN_DROPPING) == 0);
LIST_INSERT_HEAD(&kq->kq_knlocks, knlc, knlc_le);
kn->kn_status |= KN_LOCKED;
#if DEBUG || DEVELOPMENT
knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif
if (kqlocking == KNOTE_KQ_UNLOCK ||
kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
kqunlock(kq);
}
return true;
}
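/*
 * knote_unlock() hands the lock directly to the first waiter, if any: the
 * next context is moved onto kq_knlocks, inherits the remaining wait queue,
 * and its thread is woken.  Otherwise KN_LOCKED is simply cleared.
 */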
static void
knote_unlock(struct kqueue *kq, struct knote *kn,
struct knote_lock_ctx *knlc, int flags)
{
kqlock_held(kq);
assert(knlc->knlc_knote == kn);
assert(kn->kn_status & KN_LOCKED);
#if DEBUG || DEVELOPMENT
assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
#endif
struct knote_lock_ctx *next_owner_lc = TAILQ_FIRST(&knlc->knlc_head);
LIST_REMOVE(knlc, knlc_le);
if (next_owner_lc) {
assert(next_owner_lc->knlc_knote == kn);
TAILQ_REMOVE(&knlc->knlc_head, next_owner_lc, knlc_tqe);
assert(TAILQ_EMPTY(&next_owner_lc->knlc_head));
TAILQ_CONCAT(&next_owner_lc->knlc_head, &knlc->knlc_head, knlc_tqe);
LIST_INSERT_HEAD(&kq->kq_knlocks, next_owner_lc, knlc_le);
#if DEBUG || DEVELOPMENT
next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
#endif
} else {
kn->kn_status &= ~KN_LOCKED;
}
if (kn->kn_inuse == 0) {
kn->kn_status &= ~KN_MERGE_QOS;
}
if (flags & KNOTE_KQ_UNLOCK) {
kqunlock(kq);
}
if (next_owner_lc) {
thread_wakeup_thread(&kn->kn_status, next_owner_lc->knlc_thread);
}
#if DEBUG || DEVELOPMENT
knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}
static void
knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
struct knote_lock_ctx *knlc, int kqlocking)
{
kqlock_held(kq);
assert(knlc->knlc_knote == kn);
assert(kn->kn_status & KN_LOCKED);
assert(kn->kn_status & KN_DROPPING);
LIST_REMOVE(knlc, knlc_le);
kn->kn_status &= ~KN_LOCKED;
if (kqlocking == KNOTE_KQ_UNLOCK ||
kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
kqunlock(kq);
}
if (!TAILQ_EMPTY(&knlc->knlc_head)) {
thread_wakeup_with_result(&kn->kn_status, THREAD_RESTART);
}
#if DEBUG || DEVELOPMENT
knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
#endif
}
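/*
 * Call the filter's f_event with the kqueue lock dropped, using kn_inuse to
 * keep the knote alive across the call.  A concurrent dropper (KN_DROPPING)
 * parks in knote_wait_for_filter_events() until the in-use count drains and
 * is woken from here.
 */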
static void
knote_call_filter_event(struct kqueue *kq, struct knote *kn, long hint)
{
int result, dropping = 0;
kqlock_held(kq);
if (kn->kn_status & (KN_DROPPING | KN_VANISHED))
return;
kn->kn_inuse++;
kqunlock(kq);
result = filter_call(knote_fops(kn), f_event(kn, hint));
kqlock(kq);
dropping = (kn->kn_status & KN_DROPPING);
if (!dropping && (result & FILTER_ACTIVE)) {
if (result & FILTER_ADJUST_EVENT_QOS_BIT)
knote_adjust_qos(kq, kn, result);
knote_activate(kn);
}
if (--kn->kn_inuse == 0) {
if ((kn->kn_status & KN_LOCKED) == 0) {
kn->kn_status &= ~KN_MERGE_QOS;
}
if (dropping) {
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_inuse),
THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
}
}
}
static void
knote_wait_for_filter_events(struct kqueue *kq, struct knote *kn)
{
wait_result_t wr = THREAD_NOT_WAITING;
kqlock_held(kq);
assert(kn->kn_status & KN_DROPPING);
if (kn->kn_inuse) {
wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_inuse),
THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
}
kqunlock(kq);
if (wr == THREAD_WAITING) {
thread_block(THREAD_CONTINUE_NULL);
}
}
#pragma mark file_filtops
static int
filt_fileattach(struct knote *kn, struct kevent_internal_s *kev)
{
return fo_kqfilter(kn->kn_fp, kn, kev, vfs_context_current());
}
SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
.f_isfd = 1,
.f_attach = filt_fileattach,
};
#pragma mark kqread_filtops
#define f_flag f_fglob->fg_flag
#define f_ops f_fglob->fg_ops
#define f_data f_fglob->fg_data
static void
filt_kqdetach(struct knote *kn)
{
struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
struct kqueue *kq = &kqf->kqf_kqueue;
kqlock(kq);
KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
kqunlock(kq);
}
static int
filt_kqueue(struct knote *kn, __unused long hint)
{
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
return (kq->kq_count > 0);
}
static int
filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev)
{
#pragma unused(kev)
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
int res;
kqlock(kq);
kn->kn_data = kq->kq_count;
res = (kn->kn_data > 0);
kqunlock(kq);
return res;
}
static int
filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
int res;
kqlock(kq);
kn->kn_data = kq->kq_count;
res = (kn->kn_data > 0);
if (res) {
*kev = kn->kn_kevent;
if (kn->kn_flags & EV_CLEAR)
kn->kn_data = 0;
}
kqunlock(kq);
return res;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
.f_isfd = 1,
.f_detach = filt_kqdetach,
.f_event = filt_kqueue,
.f_touch = filt_kqtouch,
.f_process = filt_kqprocess,
};
#pragma mark proc_filtops
static int
filt_procattach(struct knote *kn, __unused struct kevent_internal_s *kev)
{
struct proc *p;
assert(PID_MAX < NOTE_PDATAMASK);
if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
knote_set_error(kn, ENOTSUP);
return 0;
}
p = proc_find(kn->kn_id);
if (p == NULL) {
knote_set_error(kn, ESRCH);
return 0;
}
const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
do {
pid_t selfpid = proc_selfpid();
if (p->p_ppid == selfpid)
break;
if ((p->p_lflag & P_LTRACED) != 0 &&
(p->p_oppid == selfpid))
break;
proc_rele(p);
knote_set_error(kn, EACCES);
return 0;
} while (0);
proc_klist_lock();
kn->kn_ptr.p_proc = p;
KNOTE_ATTACH(&p->p_klist, kn);
proc_klist_unlock();
proc_rele(p);
return (0);
}
static void
filt_procdetach(struct knote *kn)
{
struct proc *p;
proc_klist_lock();
p = kn->kn_ptr.p_proc;
if (p != PROC_NULL) {
kn->kn_ptr.p_proc = PROC_NULL;
KNOTE_DETACH(&p->p_klist, kn);
}
proc_klist_unlock();
}
static int
filt_proc(struct knote *kn, long hint)
{
u_int event;
event = (u_int)hint & NOTE_PCTRLMASK;
if (event & NOTE_EXIT) {
if ((kn->kn_ptr.p_proc->p_oppid != 0)
&& (knote_get_kq(kn)->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
return 0;
}
}
if (kn->kn_sfflags & event)
kn->kn_fflags |= event;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
kn->kn_flags |= (EV_EOF | EV_ONESHOT);
}
#pragma clang diagnostic pop
if (event == NOTE_EXIT) {
kn->kn_data = 0;
if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
kn->kn_fflags |= NOTE_EXITSTATUS;
kn->kn_data |= (hint & NOTE_PDATAMASK);
}
if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
kn->kn_fflags |= NOTE_EXIT_DETAIL;
if ((kn->kn_ptr.p_proc->p_lflag &
P_LTERM_DECRYPTFAIL) != 0) {
kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
}
if ((kn->kn_ptr.p_proc->p_lflag &
P_LTERM_JETSAM) != 0) {
kn->kn_data |= NOTE_EXIT_MEMORY;
switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) {
case P_JETSAM_VMPAGESHORTAGE:
kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
break;
case P_JETSAM_VMTHRASHING:
kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
break;
case P_JETSAM_FCTHRASHING:
kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
break;
case P_JETSAM_VNODE:
kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
break;
case P_JETSAM_HIWAT:
kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
break;
case P_JETSAM_PID:
kn->kn_data |= NOTE_EXIT_MEMORY_PID;
break;
case P_JETSAM_IDLEEXIT:
kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
break;
}
}
if ((kn->kn_ptr.p_proc->p_csflags &
CS_KILLED) != 0) {
kn->kn_data |= NOTE_EXIT_CSERROR;
}
}
}
return (kn->kn_fflags != 0);
}
static int
filt_proctouch(struct knote *kn, struct kevent_internal_s *kev)
{
int res;
proc_klist_lock();
kn->kn_sfflags = kev->fflags;
res = (kn->kn_fflags != 0);
proc_klist_unlock();
return res;
}
static int
filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
int res;
proc_klist_lock();
res = (kn->kn_fflags != 0);
if (res) {
*kev = kn->kn_kevent;
kn->kn_flags |= EV_CLEAR;
kn->kn_fflags = 0;
kn->kn_data = 0;
}
proc_klist_unlock();
return res;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
.f_attach = filt_procattach,
.f_detach = filt_procdetach,
.f_event = filt_proc,
.f_touch = filt_proctouch,
.f_process = filt_procprocess,
};
#pragma mark timer_filtops
struct filt_timer_params {
uint64_t deadline;
uint64_t leeway;
uint64_t interval;
};
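/*
 * Timer filter state machine, kept in kn_hookid and manipulated with atomics
 * so that filt_timerexpire() (running in thread-call context) races safely
 * with touch/cancel.  TIMER_IMMEDIATE marks a timer whose deadline had
 * already passed when it was armed, so it fires without a thread call.
 */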
#define TIMER_IDLE 0x0
#define TIMER_ARMED 0x1
#define TIMER_FIRED 0x2
#define TIMER_IMMEDIATE 0x3
static void
filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
{
kn->kn_ext[0] = params->deadline;
kn->kn_ext[1] = params->leeway;
kn->kn_sdata = params->interval;
}
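/*
 * Convert a user-supplied timer specification into absolute-time deadline,
 * leeway and interval values.  With no unit flag set, values are taken in
 * milliseconds.  Returns EINVAL for a bad unit combination and ERANGE when
 * the nanosecond conversion overflows.
 */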
static int
filt_timervalidate(const struct kevent_internal_s *kev,
struct filt_timer_params *params)
{
uint64_t multiplier;
boolean_t use_abstime = FALSE;
switch (kev->fflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS|NOTE_MACHTIME)) {
case NOTE_SECONDS:
multiplier = NSEC_PER_SEC;
break;
case NOTE_USECONDS:
multiplier = NSEC_PER_USEC;
break;
case NOTE_NSECONDS:
multiplier = 1;
break;
case NOTE_MACHTIME:
multiplier = 0;
use_abstime = TRUE;
break;
case 0:
multiplier = NSEC_PER_SEC / 1000;
break;
default:
return (EINVAL);
}
if (kev->fflags & NOTE_LEEWAY) {
uint64_t leeway_abs;
if (use_abstime) {
leeway_abs = (uint64_t)kev->ext[1];
} else {
uint64_t leeway_ns;
if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns))
return (ERANGE);
nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
}
params->leeway = leeway_abs;
} else {
params->leeway = 0;
}
if (kev->fflags & NOTE_ABSOLUTE) {
uint64_t deadline_abs;
if (use_abstime) {
deadline_abs = (uint64_t)kev->data;
} else {
uint64_t calendar_deadline_ns;
if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns))
return (ERANGE);
clock_sec_t seconds;
clock_nsec_t nanoseconds;
clock_get_calendar_nanotime(&seconds, &nanoseconds);
uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;
if (calendar_now_ns < calendar_deadline_ns) {
uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
uint64_t interval_abs;
nanoseconds_to_absolutetime(interval_ns, &interval_abs);
if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME)
clock_continuoustime_interval_to_deadline(interval_abs,
&deadline_abs);
else
clock_absolutetime_interval_to_deadline(interval_abs,
&deadline_abs);
} else {
deadline_abs = 0;
}
}
params->deadline = deadline_abs;
params->interval = 0;
} else if (kev->data < 0) {
params->deadline = 0;
params->interval = 0;
} else {
uint64_t interval_abs = 0;
if (use_abstime) {
interval_abs = (uint64_t)kev->data;
} else {
uint64_t interval_ns;
if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns))
return (ERANGE);
nanoseconds_to_absolutetime(interval_ns, &interval_abs);
}
uint64_t deadline = 0;
if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME)
clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
else
clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
params->deadline = deadline;
params->interval = interval_abs;
}
return (0);
}
static void
filt_timerexpire(void *knx, __unused void *spare)
{
struct knote *kn = knx;
int v;
if (os_atomic_cmpxchgv(&kn->kn_hookid, TIMER_ARMED, TIMER_FIRED,
&v, relaxed)) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
knote_activate(kn);
kqunlock(kq);
} else {
assert(v == TIMER_IDLE);
}
}
static void
filt_timercancel(struct knote *kn)
{
if (os_atomic_xchg(&kn->kn_hookid, TIMER_IDLE, relaxed) == TIMER_ARMED) {
thread_call_cancel_wait((thread_call_t)kn->kn_hook);
}
}
static bool
filt_timer_is_ready(struct knote *kn)
{
uint64_t now, deadline = kn->kn_ext[0];
if (deadline == 0) {
return true;
}
if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
now = mach_continuous_time();
} else {
now = mach_absolute_time();
}
return deadline <= now;
}
static void
filt_timerarm(struct knote *kn)
{
uint64_t deadline = kn->kn_ext[0];
uint64_t leeway = kn->kn_ext[1];
int filter_flags = kn->kn_sfflags;
unsigned int timer_flags = 0;
assert(os_atomic_load(&kn->kn_hookid, relaxed) == TIMER_IDLE);
if (filter_flags & NOTE_CRITICAL)
timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
else if (filter_flags & NOTE_BACKGROUND)
timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
else
timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
if (filter_flags & NOTE_LEEWAY)
timer_flags |= THREAD_CALL_DELAY_LEEWAY;
if (filter_flags & NOTE_MACH_CONTINUOUS_TIME)
timer_flags |= THREAD_CALL_CONTINUOUS;
os_atomic_store(&kn->kn_hookid, TIMER_ARMED, relaxed);
thread_call_enter_delayed_with_leeway((thread_call_t)kn->kn_hook, NULL,
deadline, leeway, timer_flags);
}
static int
filt_timerattach(struct knote *kn, struct kevent_internal_s *kev)
{
thread_call_t callout;
struct filt_timer_params params;
int error;
if ((error = filt_timervalidate(kev, &params)) != 0) {
knote_set_error(kn, error);
return 0;
}
callout = thread_call_allocate_with_options(filt_timerexpire,
(thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
THREAD_CALL_OPTIONS_ONCE);
if (NULL == callout) {
knote_set_error(kn, ENOMEM);
return 0;
}
filt_timer_set_params(kn, &params);
kn->kn_hook = callout;
kn->kn_flags |= EV_CLEAR;
os_atomic_store(&kn->kn_hookid, TIMER_IDLE, relaxed);
if (kn->kn_sfflags & NOTE_ABSOLUTE)
kn->kn_flags |= EV_ONESHOT;
if (filt_timer_is_ready(kn)) {
os_atomic_store(&kn->kn_hookid, TIMER_IMMEDIATE, relaxed);
return FILTER_ACTIVE;
} else {
filt_timerarm(kn);
return 0;
}
}
static void
filt_timerdetach(struct knote *kn)
{
__assert_only boolean_t freed;
thread_call_cancel_wait((thread_call_t)kn->kn_hook);
freed = thread_call_free((thread_call_t)kn->kn_hook);
assert(freed);
}
static int
filt_timertouch(struct knote *kn, struct kevent_internal_s *kev)
{
struct filt_timer_params params;
uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
int error;
if (changed_flags & NOTE_ABSOLUTE) {
kev->flags |= EV_ERROR;
kev->data = EINVAL;
return 0;
}
if ((error = filt_timervalidate(kev, &params)) != 0) {
kev->flags |= EV_ERROR;
kev->data = error;
return 0;
}
filt_timercancel(kn);
filt_timer_set_params(kn, &params);
kn->kn_sfflags = kev->fflags;
if (filt_timer_is_ready(kn)) {
os_atomic_store(&kn->kn_hookid, TIMER_IMMEDIATE, relaxed);
return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
} else {
filt_timerarm(kn);
return FILTER_UPDATE_REQ_QOS;
}
}
static int
filt_timerprocess(
struct knote *kn,
__unused struct filt_process_s *data,
struct kevent_internal_s *kev)
{
switch (os_atomic_load(&kn->kn_hookid, relaxed)) {
case TIMER_IDLE:
case TIMER_ARMED:
return 0;
}
os_atomic_store(&kn->kn_hookid, TIMER_IDLE, relaxed);
*kev = kn->kn_kevent;
kev->ext[0] = 0;
if (kn->kn_sdata == 0) {
kev->data = 1;
} else {
uint64_t now;
if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
now = mach_continuous_time();
else
now = mach_absolute_time();
uint64_t first_deadline = kn->kn_ext[0];
uint64_t interval_abs = kn->kn_sdata;
uint64_t orig_arm_time = first_deadline - interval_abs;
assert(now > orig_arm_time);
assert(now > first_deadline);
uint64_t elapsed = now - orig_arm_time;
uint64_t num_fired = elapsed / interval_abs;
assert(num_fired > 0);
kev->data = (int64_t)num_fired;
if ((kn->kn_flags & EV_ONESHOT) == 0) {
uint64_t new_deadline = first_deadline + num_fired * interval_abs;
assert(new_deadline > now);
kn->kn_ext[0] = new_deadline;
filt_timerarm(kn);
}
}
return FILTER_ACTIVE;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
.f_extended_codes = true,
.f_attach = filt_timerattach,
.f_detach = filt_timerdetach,
.f_event = filt_badevent,
.f_touch = filt_timertouch,
.f_process = filt_timerprocess,
};
#pragma mark user_filtops
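/*
 * EVFILT_USER is entirely software-driven: NOTE_TRIGGER (at attach or touch
 * time) latches FILTER_ACTIVE into kn_hookid, and processing with EV_CLEAR
 * resets it.  The NOTE_FF* control bits select how incoming fflags are
 * merged with the saved ones.
 */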
static int
filt_userattach(struct knote *kn, __unused struct kevent_internal_s *kev)
{
if (kn->kn_sfflags & NOTE_TRIGGER) {
kn->kn_hookid = FILTER_ACTIVE;
} else {
kn->kn_hookid = 0;
}
return (kn->kn_hookid);
}
static void
filt_userdetach(__unused struct knote *kn)
{
}
static int
filt_usertouch(struct knote *kn, struct kevent_internal_s *kev)
{
uint32_t ffctrl;
int fflags;
ffctrl = kev->fflags & NOTE_FFCTRLMASK;
fflags = kev->fflags & NOTE_FFLAGSMASK;
switch (ffctrl) {
case NOTE_FFNOP:
break;
case NOTE_FFAND:
kn->kn_sfflags &= fflags;
break;
case NOTE_FFOR:
kn->kn_sfflags |= fflags;
break;
case NOTE_FFCOPY:
kn->kn_sfflags = fflags;
break;
}
kn->kn_sdata = kev->data;
if (kev->fflags & NOTE_TRIGGER) {
kn->kn_hookid = FILTER_ACTIVE;
}
return (int)kn->kn_hookid;
}
static int
filt_userprocess(
struct knote *kn,
__unused struct filt_process_s *data,
struct kevent_internal_s *kev)
{
int result = (int)kn->kn_hookid;
if (result) {
*kev = kn->kn_kevent;
kev->fflags = kn->kn_sfflags;
kev->data = kn->kn_sdata;
if (kn->kn_flags & EV_CLEAR) {
kn->kn_hookid = 0;
kn->kn_data = 0;
kn->kn_fflags = 0;
}
}
return result;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
.f_extended_codes = true,
.f_attach = filt_userattach,
.f_detach = filt_userdetach,
.f_event = filt_badevent,
.f_touch = filt_usertouch,
.f_process = filt_userprocess,
};
#pragma mark workloop_filtops
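/*
 * EVFILT_WORKLOOP backs the pthread/libdispatch workloop machinery.  A
 * NOTE_WL_THREAD_REQUEST knote requests a servicer thread at a given QoS,
 * while NOTE_WL_SYNC_WAIT/NOTE_WL_SYNC_WAKE implement synchronous waiters
 * that park on the workloop's turnstile and push on its owner.
 */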
static inline void
filt_wllock(struct kqworkloop *kqwl)
{
lck_mtx_lock(&kqwl->kqwl_statelock);
}
static inline void
filt_wlunlock(struct kqworkloop *kqwl)
{
lck_mtx_unlock(&kqwl->kqwl_statelock);
}
static inline bool
filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
return (kqr->kqr_state & KQR_THREQUESTED) &&
(kqr->kqr_thread == THREAD_NULL);
}
static void
filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
turnstile_update_flags_t flags)
{
turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
struct kqrequest *kqr = &kqwl->kqwl_request;
assert(!filt_wlturnstile_interlock_is_workq(kqwl));
if ((inheritor = kqwl->kqwl_owner)) {
flags |= TURNSTILE_INHERITOR_THREAD;
} else if ((inheritor = kqr->kqr_thread)) {
flags |= TURNSTILE_INHERITOR_THREAD;
}
turnstile_update_inheritor(ts, inheritor, flags);
}
#define FILT_WLATTACH 0
#define FILT_WLTOUCH 1
#define FILT_WLDROP 2
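/*
 * Common update path for attach/touch/drop.  When ext[EV_EXTIDX_WL_ADDR] is
 * set, the userspace word is re-read and debounced against
 * ext[EV_EXTIDX_WL_VALUE] under ext[EV_EXTIDX_WL_MASK]; a mismatch fails
 * with ESTALE.  With NOTE_WL_DISCOVER_OWNER, the word (minus its low two
 * bits) is resolved as a thread port to establish workloop ownership.
 */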
__result_use_check
static int
filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
struct kevent_internal_s *kev, kq_index_t qos_index, int op)
{
user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
struct kqrequest *kqr = &kqwl->kqwl_request;
thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
kq_index_t cur_owner_override = THREAD_QOS_UNSPECIFIED;
int action = KQWL_UTQ_NONE, error = 0;
bool needs_wake = false, needs_wllock = false;
uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
uint64_t udata = 0;
if (kev->fflags & (NOTE_WL_END_OWNERSHIP | NOTE_WL_DISCOVER_OWNER)) {
needs_wllock = true;
} else if (kqr->kqr_thread == current_thread()) {
needs_wllock = true;
}
if (needs_wllock) {
filt_wllock(kqwl);
new_owner = cur_owner = kqwl->kqwl_owner;
} else {
new_owner = cur_owner = THREAD_NULL;
}
if (uaddr) {
error = copyin_word(uaddr, &udata, sizeof(udata));
if (error) {
goto out;
}
kev->ext[EV_EXTIDX_WL_VALUE] = udata;
if ((udata & mask) != (kdata & mask)) {
error = ESTALE;
} else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
if (name != MACH_PORT_NULL) {
name = ipc_entry_name_mask(name);
extra_thread_ref = port_name_to_thread(name);
if (extra_thread_ref == THREAD_NULL) {
error = EOWNERDEAD;
goto out;
}
new_owner = extra_thread_ref;
}
}
}
if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
new_owner = THREAD_NULL;
}
if (error == 0) {
if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
action = KQWL_UTQ_SET_QOS_INDEX;
} else if (qos_index && kqr->kqr_qos_index != qos_index) {
action = KQWL_UTQ_SET_QOS_INDEX;
}
if (op == FILT_WLTOUCH) {
kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
kn->kn_sfflags |= kev->fflags;
kn->kn_sdata = kev->data;
if (kev->fflags & NOTE_WL_SYNC_WAKE) {
needs_wake = (kn->kn_hook != THREAD_NULL);
}
} else if (op == FILT_WLDROP) {
if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
NOTE_WL_SYNC_WAIT) {
kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
needs_wake = (kn->kn_hook != THREAD_NULL);
}
}
}
if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
goto out;
}
kq_req_lock(kqwl);
if (new_owner == kqr->kqr_thread) {
new_owner = THREAD_NULL;
}
if (cur_owner != new_owner) {
kqwl->kqwl_owner = new_owner;
if (new_owner == extra_thread_ref) {
extra_thread_ref = THREAD_NULL;
}
cur_owner_override = kqworkloop_owner_override(kqwl);
if (cur_owner) {
thread_ends_owning_workloop(cur_owner);
}
if (new_owner) {
if (cur_owner_override != THREAD_QOS_UNSPECIFIED) {
thread_add_ipc_override(new_owner, cur_owner_override);
}
thread_starts_owning_workloop(new_owner);
if ((kqr->kqr_state & KQR_THREQUESTED) && !kqr->kqr_thread) {
if (action == KQWL_UTQ_NONE) {
action = KQWL_UTQ_REDRIVE_EVENTS;
}
}
} else {
if ((kqr->kqr_state & (KQR_THREQUESTED | KQR_WAKEUP)) == KQR_WAKEUP) {
if (action == KQWL_UTQ_NONE) {
action = KQWL_UTQ_REDRIVE_EVENTS;
}
}
}
}
struct turnstile *ts = kqwl->kqwl_turnstile;
bool wl_inheritor_updated = false;
if (action != KQWL_UTQ_NONE) {
kqworkloop_update_threads_qos(kqwl, action, qos_index);
}
if (cur_owner != new_owner && ts) {
if (action == KQWL_UTQ_REDRIVE_EVENTS) {
assert(filt_wlturnstile_interlock_is_workq(kqwl));
} else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
workq_kern_threadreq_lock(kqwl->kqwl_p);
workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
ts, TURNSTILE_IMMEDIATE_UPDATE);
workq_kern_threadreq_unlock(kqwl->kqwl_p);
if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
}
wl_inheritor_updated = true;
} else {
filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
wl_inheritor_updated = true;
}
if (wl_inheritor_updated) {
turnstile_reference(ts);
}
}
if (needs_wake && ts) {
waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T((event_t)kn),
(thread_t)kn->kn_hook, THREAD_AWAKENED);
}
kq_req_unlock(kqwl);
if (wl_inheritor_updated) {
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
turnstile_deallocate(ts);
}
out:
if (needs_wllock) {
filt_wlunlock(kqwl);
}
#if CONFIG_WORKLOOP_DEBUG
KQWL_HISTORY_WRITE_ENTRY(kqwl, {
.updater = current_thread(),
.servicer = kqr->kqr_thread,
.old_owner = cur_owner,
.new_owner = new_owner,
.kev_ident = kev->ident,
.error = (int16_t)error,
.kev_flags = kev->flags,
.kev_fflags = kev->fflags,
.kev_mask = mask,
.kev_value = kdata,
.in_value = udata,
});
#endif // CONFIG_WORKLOOP_DEBUG
if (cur_owner && new_owner != cur_owner) {
if (cur_owner_override != THREAD_QOS_UNSPECIFIED) {
thread_drop_ipc_override(cur_owner);
}
thread_deallocate(cur_owner);
}
if (extra_thread_ref) {
thread_deallocate(extra_thread_ref);
}
return error;
}
static inline void
filt_wlremember_last_update(struct knote *kn, struct kevent_internal_s *kev,
int error)
{
kn->kn_fflags = kev->fflags;
kn->kn_data = error;
memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
}
static int
filt_wlattach(struct knote *kn, struct kevent_internal_s *kev)
{
struct kqueue *kq = knote_get_kq(kn);
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
int error = 0;
kq_index_t qos_index = 0;
if ((kq->kq_state & KQ_WORKLOOP) == 0) {
error = ENOTSUP;
goto out;
}
#if DEVELOPMENT || DEBUG
if (kev->ident == 0 && kev->udata == 0 && kev->fflags == 0) {
struct kqrequest *kqr = &kqwl->kqwl_request;
kq_req_lock(kqwl);
kev->fflags = 0;
if (kqr->kqr_dsync_waiters) {
kev->fflags |= NOTE_WL_SYNC_WAIT;
}
if (kqr->kqr_qos_index) {
kev->fflags |= NOTE_WL_THREAD_REQUEST;
}
kev->ext[0] = thread_tid(kqwl->kqwl_owner);
kev->ext[1] = thread_tid(kqwl->kqwl_request.kqr_thread);
kev->ext[2] = thread_owned_workloops_count(current_thread());
kev->ext[3] = kn->kn_kevent.ext[3];
kq_req_unlock(kqwl);
error = EBUSY;
goto out;
}
#endif
int command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
switch (command) {
case NOTE_WL_THREAD_REQUEST:
if (kn->kn_id != kqwl->kqwl_dynamicid) {
error = EINVAL;
goto out;
}
qos_index = _pthread_priority_thread_qos(kn->kn_qos);
if (qos_index == THREAD_QOS_UNSPECIFIED) {
error = ERANGE;
goto out;
}
if (kqwl->kqwl_request.kqr_qos_index) {
error = EALREADY;
goto out;
}
break;
case NOTE_WL_SYNC_WAIT:
case NOTE_WL_SYNC_WAKE:
if (kn->kn_id == kqwl->kqwl_dynamicid) {
error = EINVAL;
goto out;
}
if ((kn->kn_flags & EV_DISABLE) == 0) {
error = EINVAL;
goto out;
}
if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
error = EINVAL;
goto out;
}
break;
default:
error = EINVAL;
goto out;
}
error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
out:
if (error) {
if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
error = 0;
}
knote_set_error(kn, error);
return 0;
}
if (command == NOTE_WL_SYNC_WAIT) {
return kevent_register_wait_prepare(kn, kev);
}
if (command == NOTE_WL_THREAD_REQUEST) {
kn->kn_flags |= EV_CLEAR;
return FILTER_ACTIVE;
}
return 0;
}
static void __dead2
filt_wlwait_continue(void *parameter, wait_result_t wr)
{
struct _kevent_register *cont_args = parameter;
struct kqworkloop *kqwl = (struct kqworkloop *)cont_args->kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
kq_req_lock(kqwl);
kqr->kqr_dsync_waiters--;
if (filt_wlturnstile_interlock_is_workq(kqwl)) {
workq_kern_threadreq_lock(kqwl->kqwl_p);
turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL);
workq_kern_threadreq_unlock(kqwl->kqwl_p);
} else {
turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL);
}
kq_req_unlock(kqwl);
turnstile_cleanup();
if (wr == THREAD_INTERRUPTED) {
cont_args->kev.flags |= EV_ERROR;
cont_args->kev.data = EINTR;
} else if (wr != THREAD_AWAKENED) {
panic("Unexpected wait result: %d", wr);
}
kevent_register_wait_return(cont_args);
}
static void __dead2
filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc,
struct _kevent_register *cont_args)
{
struct kqworkloop *kqwl = (struct kqworkloop *)cont_args->kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
struct turnstile *ts;
bool workq_locked = false;
kq_req_lock(kqwl);
kqr->kqr_dsync_waiters++;
if (filt_wlturnstile_interlock_is_workq(kqwl)) {
workq_kern_threadreq_lock(kqwl->kqwl_p);
workq_locked = true;
}
ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
if (workq_locked) {
workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
&kqwl->kqwl_request, kqwl->kqwl_owner, ts,
TURNSTILE_DELAYED_UPDATE);
if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
workq_kern_threadreq_unlock(kqwl->kqwl_p);
workq_locked = false;
}
}
if (!workq_locked) {
filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
}
thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait);
waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(cont_args->knote),
THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
if (workq_locked) {
workq_kern_threadreq_unlock(kqwl->kqwl_p);
}
thread_t thread = kqwl->kqwl_owner ?: kqr->kqr_thread;
if (thread) {
thread_reference(thread);
}
kq_req_unlock(kqwl);
kevent_register_wait_block(ts, thread, knlc, filt_wlwait_continue, cont_args);
}
void
kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
event64_t event, thread_waitinfo_t *waitinfo)
{
struct knote *kn = (struct knote *)event;
assert(kdp_is_in_zone(kn, "knote zone"));
assert(kn->kn_hook == thread);
struct kqueue *kq = knote_get_kq(kn);
assert(kdp_is_in_zone(kq, "kqueue workloop zone"));
assert(kq->kq_state & KQ_WORKLOOP);
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
thread_t kqwl_owner = kqwl->kqwl_owner;
thread_t servicer = kqr->kqr_thread;
if (kqwl_owner != THREAD_NULL) {
assert(kdp_is_in_zone(kqwl_owner, "threads"));
waitinfo->owner = thread_tid(kqwl->kqwl_owner);
} else if (servicer != THREAD_NULL) {
assert(kdp_is_in_zone(servicer, "threads"));
waitinfo->owner = thread_tid(servicer);
} else if (kqr->kqr_state & KQR_THREQUESTED) {
waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
} else {
waitinfo->owner = 0;
}
waitinfo->context = kqwl->kqwl_dynamicid;
}
static void
filt_wldetach(__assert_only struct knote *kn)
{
assert(knote_get_kq(kn)->kq_state & KQ_WORKLOOP);
if (kn->kn_hook) {
kevent_register_wait_cleanup(kn);
}
}
static int
filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev,
thread_qos_t *qos_index)
{
int new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
int sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
return EINVAL;
}
if (kev->fflags & NOTE_WL_UPDATE_QOS) {
if (kev->flags & EV_DELETE) {
return EINVAL;
}
if (sav_commands != NOTE_WL_THREAD_REQUEST) {
return EINVAL;
}
if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
return ERANGE;
}
}
switch (new_commands) {
case NOTE_WL_THREAD_REQUEST:
if (sav_commands != NOTE_WL_THREAD_REQUEST)
return EINVAL;
break;
case NOTE_WL_SYNC_WAIT:
if (kev->fflags & NOTE_WL_END_OWNERSHIP)
return EINVAL;
goto sync_checks;
case NOTE_WL_SYNC_WAKE:
sync_checks:
if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)))
return EINVAL;
if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE)
return EINVAL;
break;
default:
return EINVAL;
}
return 0;
}
static int
filt_wltouch(struct knote *kn, struct kevent_internal_s *kev)
{
struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
if (error) {
goto out;
}
error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
filt_wlremember_last_update(kn, kev, error);
if (error) {
goto out;
}
out:
if (error) {
if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
return 0;
}
kev->flags |= EV_ERROR;
kev->data = error;
return 0;
}
int command = kev->fflags & NOTE_WL_COMMANDS_MASK;
if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
return kevent_register_wait_prepare(kn, kev);
}
if (command == NOTE_WL_THREAD_REQUEST) {
if (kev->fflags & NOTE_WL_UPDATE_QOS) {
return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
}
return FILTER_ACTIVE;
}
return 0;
}
static bool
filt_wlallow_drop(struct knote *kn, struct kevent_internal_s *kev)
{
struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
if (error) {
goto out;
}
error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
filt_wlremember_last_update(kn, kev, error);
if (error) {
goto out;
}
out:
if (error) {
if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
return false;
}
kev->flags |= EV_ERROR;
kev->data = error;
return false;
}
return true;
}
static int
filt_wlprocess(
struct knote *kn,
__unused struct filt_process_s *data,
struct kevent_internal_s *kev)
{
struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
int rc = 0;
assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
filt_wllock(kqwl);
if (kqwl->kqwl_owner) {
kqlock(kqwl);
knote_activate(kn);
kqunlock(kqwl);
} else {
#if DEBUG || DEVELOPMENT
if (kevent_debug_flags() & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
#define DISPATCH_QUEUE_ENQUEUED 0x1ull
user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
task_t t = current_task();
uint64_t val;
if (addr && task_is_active(t) && !task_is_halting(t) &&
copyin_word(addr, &val, sizeof(val)) == 0 &&
val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
(val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
panic("kevent: workloop %#016llx is not enqueued "
"(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
}
}
#endif
*kev = kn->kn_kevent;
kev->fflags = kn->kn_sfflags;
kev->data = kn->kn_sdata;
kev->qos = kn->kn_qos;
rc |= FILTER_ACTIVE;
}
filt_wlunlock(kqwl);
if (rc & FILTER_ACTIVE) {
workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
}
return rc;
}
SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
.f_extended_codes = true,
.f_attach = filt_wlattach,
.f_detach = filt_wldetach,
.f_event = filt_badevent,
.f_touch = filt_wltouch,
.f_process = filt_wlprocess,
.f_allow_drop = filt_wlallow_drop,
.f_post_register_wait = filt_wlpost_register_wait,
};
#pragma mark kevent / knotes
static int
filt_badevent(struct knote *kn, long hint)
{
panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
return 0;
}
static int
filt_badattach(__unused struct knote *kn, __unused struct kevent_internal_s *kev)
{
knote_set_error(kn, ENOTSUP);
return 0;
}
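/*
 * Allocate one of the three kqueue flavors: the per-process workq kqueue
 * (bucketed per QoS), a dynamic workloop kqueue, or a plain file-backed
 * kqueue.  All flavors share the embedded struct kqueue, its waitq set and
 * its spinlocks.
 */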
struct kqueue *
kqueue_alloc(struct proc *p, unsigned int flags)
{
struct filedesc *fdp = p->p_fd;
struct kqueue *kq = NULL;
int policy;
void *hook = NULL;
if (flags & KEVENT_FLAG_WORKQ) {
struct kqworkq *kqwq;
int i;
kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
if (kqwq == NULL)
return NULL;
kq = &kqwq->kqwq_kqueue;
bzero(kqwq, sizeof (struct kqworkq));
kqwq->kqwq_state = KQ_WORKQ;
for (i = 0; i < KQWQ_NBUCKETS; i++) {
TAILQ_INIT(&kqwq->kqwq_queue[i]);
}
for (i = 0; i < KQWQ_NBUCKETS; i++) {
if (i != KQWQ_QOS_MANAGER) {
kqwq->kqwq_request[i].kqr_state |= KQR_THOVERCOMMIT;
}
kqwq->kqwq_request[i].kqr_qos_index = i;
TAILQ_INIT(&kqwq->kqwq_request[i].kqr_suppressed);
}
policy = SYNC_POLICY_FIFO;
hook = (void *)kqwq;
} else if (flags & KEVENT_FLAG_WORKLOOP) {
struct kqworkloop *kqwl;
int i;
kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone);
if (kqwl == NULL)
return NULL;
bzero(kqwl, sizeof (struct kqworkloop));
kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC;
kqwl->kqwl_retains = 1;
kqwl->kqwl_request.kqr_state = KQR_WORKLOOP;
kq = &kqwl->kqwl_kqueue;
for (i = 0; i < KQWL_NBUCKETS; i++) {
TAILQ_INIT(&kqwl->kqwl_queue[i]);
}
TAILQ_INIT(&kqwl->kqwl_request.kqr_suppressed);
lck_mtx_init(&kqwl->kqwl_statelock, kq_lck_grp, kq_lck_attr);
policy = SYNC_POLICY_FIFO;
hook = (void *)kqwl;
} else {
struct kqfile *kqf;
kqf = (struct kqfile *)zalloc(kqfile_zone);
if (kqf == NULL)
return NULL;
kq = &kqf->kqf_kqueue;
bzero(kqf, sizeof (struct kqfile));
TAILQ_INIT(&kqf->kqf_queue);
TAILQ_INIT(&kqf->kqf_suppressed);
policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
}
waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
lck_spin_init(&kq->kq_reqlock, kq_lck_grp, kq_lck_attr);
kq->kq_p = p;
if (fdp->fd_knlistsize < 0) {
proc_fdlock(p);
if (fdp->fd_knlistsize < 0)
fdp->fd_knlistsize = 0;
proc_fdunlock(p);
}
return (kq);
}
void
knotes_dealloc(proc_t p)
{
struct filedesc *fdp = p->p_fd;
struct kqueue *kq;
struct knote *kn;
struct klist *kn_hash = NULL;
int i;
if (fdp->fd_knlistsize > 0) {
for (i = 0; i < fdp->fd_knlistsize; i++) {
while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
kq = knote_get_kq(kn);
kqlock(kq);
proc_fdunlock(p);
knote_drop(kq, kn, NULL);
proc_fdlock(p);
}
}
FREE(fdp->fd_knlist, M_KQUEUE);
fdp->fd_knlist = NULL;
}
fdp->fd_knlistsize = -1;
knhash_lock(p);
proc_fdunlock(p);
if (fdp->fd_knhashmask != 0) {
for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
kq = knote_get_kq(kn);
kqlock(kq);
knhash_unlock(p);
knote_drop(kq, kn, NULL);
knhash_lock(p);
}
}
kn_hash = fdp->fd_knhash;
fdp->fd_knhashmask = 0;
fdp->fd_knhash = NULL;
}
knhash_unlock(p);
if (kn_hash)
FREE(kn_hash, M_KQUEUE);
proc_fdlock(p);
}
static thread_t
kqworkloop_invalidate(struct kqworkloop *kqwl)
{
thread_t cur_owner = kqwl->kqwl_owner;
assert(TAILQ_EMPTY(&kqwl->kqwl_request.kqr_suppressed));
if (cur_owner) {
if (kqworkloop_owner_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
thread_drop_ipc_override(cur_owner);
}
thread_ends_owning_workloop(cur_owner);
kqwl->kqwl_owner = THREAD_NULL;
}
return cur_owner;
}
void
kqueue_dealloc(struct kqueue *kq)
{
struct proc *p;
struct filedesc *fdp;
struct knote *kn;
int i;
if (kq == NULL)
return;
p = kq->kq_p;
fdp = p->p_fd;
if ((kq->kq_state & KQ_WORKLOOP) == 0) {
KNOTE_LOCK_CTX(knlc);
proc_fdlock(p);
for (i = 0; i < fdp->fd_knlistsize; i++) {
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
while (kn != NULL) {
if (kq == knote_get_kq(kn)) {
kqlock(kq);
proc_fdunlock(p);
if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
knote_drop(kq, kn, &knlc);
}
proc_fdlock(p);
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
continue;
}
kn = SLIST_NEXT(kn, kn_link);
}
}
knhash_lock(p);
proc_fdunlock(p);
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
while (kn != NULL) {
if (kq == knote_get_kq(kn)) {
kqlock(kq);
knhash_unlock(p);
if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
knote_drop(kq, kn, &knlc);
}
knhash_lock(p);
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
continue;
}
kn = SLIST_NEXT(kn, kn_link);
}
}
}
knhash_unlock(p);
}
if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
thread_t cur_owner = kqworkloop_invalidate(kqwl);
if (cur_owner) thread_deallocate(cur_owner);
if (kqwl->kqwl_request.kqr_state & KQR_ALLOCATED_TURNSTILE) {
struct turnstile *ts;
turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, &ts);
turnstile_cleanup();
turnstile_deallocate(ts);
} else {
assert(kqwl->kqwl_turnstile == NULL);
}
}
waitq_set_deinit(&kq->kq_wqs);
lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
lck_spin_destroy(&kq->kq_reqlock, kq_lck_grp);
if (kq->kq_state & KQ_WORKQ) {
zfree(kqworkq_zone, (struct kqworkq *)kq);
} else if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
assert(kqwl->kqwl_retains == 0);
lck_mtx_destroy(&kqwl->kqwl_statelock, kq_lck_grp);
zfree(kqworkloop_zone, kqwl);
} else {
zfree(kqfile_zone, (struct kqfile *)kq);
}
}
static inline void
kqueue_retain(struct kqueue *kq)
{
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
uint32_t previous;
if ((kq->kq_state & KQ_DYNAMIC) == 0)
return;
previous = OSIncrementAtomic(&kqwl->kqwl_retains);
if (previous == KQ_WORKLOOP_RETAINS_MAX)
panic("kq(%p) retain overflow", kq);
if (previous == 0)
panic("kq(%p) resurrection", kq);
}
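/*
 * Only dynamic (workloop) kqueues are reference counted.  kqueue_release()
 * returns non-zero when the dropped reference was the last one; callers that
 * cannot legitimately drop the last reference pass KQUEUE_CANT_BE_LAST_REF,
 * which the assertion cross-checks.
 */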
#define KQUEUE_CANT_BE_LAST_REF 0
#define KQUEUE_MIGHT_BE_LAST_REF 1
static inline int
kqueue_release(kqueue_t kqu, __assert_only int possibly_last)
{
if ((kqu.kq->kq_state & KQ_DYNAMIC) == 0) {
return 0;
}
assert(kqu.kq->kq_state & KQ_WORKLOOP);
uint32_t refs = OSDecrementAtomic(&kqu.kqwl->kqwl_retains);
if (__improbable(refs == 0)) {
panic("kq(%p) over-release", kqu.kq);
}
if (refs == 1) {
assert(possibly_last);
}
return refs == 1;
}
int
kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
struct kqueue *kq;
struct fileproc *fp;
int fd, error;
error = falloc_withalloc(p,
&fp, &fd, vfs_context_current(), fp_zalloc, cra);
if (error) {
return (error);
}
kq = kqueue_alloc(p, 0);
if (kq == NULL) {
fp_free(p, fd, fp);
return (ENOMEM);
}
fp->f_flag = FREAD | FWRITE;
fp->f_ops = &kqueueops;
fp->f_data = kq;
proc_fdlock(p);
*fdflags(p, fd) |= UF_EXCLOSE;
procfdtbl_releasefd(p, fd, NULL);
fp_drop(p, fd, fp, 1);
proc_fdunlock(p);
*retval = fd;
return (error);
}
int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
}
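/*
 * Copy in one kevent in whichever layout the syscall used: the legacy
 * 32-bit struct kevent (whose shape differs between 32- and 64-bit
 * processes), struct kevent64_s, or the full struct kevent_qos_s.  On
 * success *addrp is advanced past the consumed entry.
 */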
static int
kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p,
unsigned int flags)
{
int advance;
int error;
if (flags & KEVENT_FLAG_LEGACY32) {
bzero(kevp, sizeof (*kevp));
if (IS_64BIT_PROCESS(p)) {
struct user64_kevent kev64;
advance = sizeof (kev64);
error = copyin(*addrp, (caddr_t)&kev64, advance);
if (error)
return (error);
kevp->ident = kev64.ident;
kevp->filter = kev64.filter;
kevp->flags = kev64.flags;
kevp->udata = kev64.udata;
kevp->fflags = kev64.fflags;
kevp->data = kev64.data;
} else {
struct user32_kevent kev32;
advance = sizeof (kev32);
error = copyin(*addrp, (caddr_t)&kev32, advance);
if (error)
return (error);
kevp->ident = (uintptr_t)kev32.ident;
kevp->filter = kev32.filter;
kevp->flags = kev32.flags;
kevp->udata = CAST_USER_ADDR_T(kev32.udata);
kevp->fflags = kev32.fflags;
kevp->data = (intptr_t)kev32.data;
}
} else if (flags & KEVENT_FLAG_LEGACY64) {
struct kevent64_s kev64;
bzero(kevp, sizeof (*kevp));
advance = sizeof (struct kevent64_s);
error = copyin(*addrp, (caddr_t)&kev64, advance);
if (error)
return(error);
kevp->ident = kev64.ident;
kevp->filter = kev64.filter;
kevp->flags = kev64.flags;
kevp->udata = kev64.udata;
kevp->fflags = kev64.fflags;
kevp->data = kev64.data;
kevp->ext[0] = kev64.ext[0];
kevp->ext[1] = kev64.ext[1];
} else {
struct kevent_qos_s kevqos;
bzero(kevp, sizeof (*kevp));
advance = sizeof (struct kevent_qos_s);
error = copyin(*addrp, (caddr_t)&kevqos, advance);
if (error)
return error;
kevp->ident = kevqos.ident;
kevp->filter = kevqos.filter;
kevp->flags = kevqos.flags;
kevp->qos = kevqos.qos;
kevp->udata = kevqos.udata;
kevp->fflags = kevqos.fflags;
kevp->data = kevqos.data;
kevp->ext[0] = kevqos.ext[0];
kevp->ext[1] = kevqos.ext[1];
kevp->ext[2] = kevqos.ext[2];
kevp->ext[3] = kevqos.ext[3];
}
if (!error)
*addrp += advance;
return (error);
}
static int
kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p,
unsigned int flags)
{
user_addr_t addr = *addrp;
int advance;
int error;
if (flags & KEVENT_FLAG_LEGACY32) {
assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
if (IS_64BIT_PROCESS(p)) {
struct user64_kevent kev64;
advance = sizeof (kev64);
bzero(&kev64, advance);
kev64.ident = (kevp->ident == (uintptr_t)-1) ?
(uint64_t)-1LL : (uint64_t)kevp->ident;
kev64.filter = kevp->filter;
kev64.flags = kevp->flags;
kev64.fflags = kevp->fflags;
kev64.data = (int64_t) kevp->data;
kev64.udata = kevp->udata;
error = copyout((caddr_t)&kev64, addr, advance);
} else {
struct user32_kevent kev32;
advance = sizeof (kev32);
bzero(&kev32, advance);
kev32.ident = (uint32_t)kevp->ident;
kev32.filter = kevp->filter;
kev32.flags = kevp->flags;
kev32.fflags = kevp->fflags;
kev32.data = (int32_t)kevp->data;
kev32.udata = kevp->udata;
error = copyout((caddr_t)&kev32, addr, advance);
}
} else if (flags & KEVENT_FLAG_LEGACY64) {
struct kevent64_s kev64;
advance = sizeof (struct kevent64_s);
if (flags & KEVENT_FLAG_STACK_EVENTS) {
addr -= advance;
}
bzero(&kev64, advance);
kev64.ident = kevp->ident;
kev64.filter = kevp->filter;
kev64.flags = kevp->flags;
kev64.fflags = kevp->fflags;
kev64.data = (int64_t) kevp->data;
kev64.udata = kevp->udata;
kev64.ext[0] = kevp->ext[0];
kev64.ext[1] = kevp->ext[1];
error = copyout((caddr_t)&kev64, addr, advance);
} else {
struct kevent_qos_s kevqos;
advance = sizeof (struct kevent_qos_s);
if (flags & KEVENT_FLAG_STACK_EVENTS) {
addr -= advance;
}
bzero(&kevqos, advance);
kevqos.ident = kevp->ident;
kevqos.filter = kevp->filter;
kevqos.flags = kevp->flags;
kevqos.qos = kevp->qos;
kevqos.udata = kevp->udata;
kevqos.fflags = kevp->fflags;
kevqos.xflags = 0;
kevqos.data = (int64_t) kevp->data;
kevqos.ext[0] = kevp->ext[0];
kevqos.ext[1] = kevp->ext[1];
kevqos.ext[2] = kevp->ext[2];
kevqos.ext[3] = kevp->ext[3];
error = copyout((caddr_t)&kevqos, addr, advance);
}
if (!error) {
if (flags & KEVENT_FLAG_STACK_EVENTS)
*addrp = addr;
else
*addrp = addr + advance;
}
return (error);
}
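/*
 * data_available names a user_size_t (in userspace, or in the kernel when
 * KEVENT_FLAG_KERNEL is set) holding the remaining space in the out-of-band
 * data buffer; these helpers shuttle that value in and out around a scan.
 */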
static int
kevent_get_data_size(
struct proc *p,
uint64_t data_available,
unsigned int flags,
user_size_t *residp)
{
user_size_t resid;
int error = 0;
if (data_available != USER_ADDR_NULL) {
if (flags & KEVENT_FLAG_KERNEL) {
resid = *(user_size_t *)(uintptr_t)data_available;
} else if (IS_64BIT_PROCESS(p)) {
user64_size_t usize;
error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
resid = (user_size_t)usize;
} else {
user32_size_t usize;
error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
resid = (user_size_t)usize;
}
if (error)
return(error);
} else {
resid = 0;
}
*residp = resid;
return 0;
}
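/*
 * kevent_put_data_size - write the residual size of the extra data
 * buffer back to the caller; the mirror image of kevent_get_data_size.
 */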
static int
kevent_put_data_size(
struct proc *p,
uint64_t data_available,
unsigned int flags,
user_size_t resid)
{
int error = 0;
if (data_available) {
if (flags & KEVENT_FLAG_KERNEL) {
*(user_size_t *)(uintptr_t)data_available = resid;
} else if (IS_64BIT_PROCESS(p)) {
user64_size_t usize = (user64_size_t)resid;
error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
} else {
user32_size_t usize = (user32_size_t)resid;
error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
}
}
return error;
}
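/*
 * kevent_continue - continuation used when a kevent scan blocks
 *
 * Drops the kqueue/file references, reports the residual data size if
 * it changed, maps ERESTART to EINTR and EWOULDBLOCK (timeout) to
 * success, and returns to user space with the output event count.
 */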
__attribute__((noreturn))
static void
kevent_continue(__unused struct kqueue *kq, void *data, int error)
{
struct _kevent *cont_args;
struct fileproc *fp;
uint64_t data_available;
user_size_t data_size;
user_size_t data_resid;
unsigned int flags;
int32_t *retval;
int noutputs;
int fd;
struct proc *p = current_proc();
cont_args = (struct _kevent *)data;
data_available = cont_args->data_available;
flags = cont_args->process_data.fp_flags;
data_size = cont_args->process_data.fp_data_size;
data_resid = cont_args->process_data.fp_data_resid;
noutputs = cont_args->eventout;
retval = cont_args->retval;
fd = cont_args->fd;
fp = cont_args->fp;
kevent_put_kq(p, fd, fp, kq);
if (error == 0 && data_available && data_resid != data_size) {
(void)kevent_put_data_size(p, data_available, flags, data_resid);
}
if (error == ERESTART)
error = EINTR;
else if (error == EWOULDBLOCK)
error = 0;
if (error == 0)
*retval = noutputs;
unix_syscall_return(error);
}
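/*
 * kevent, kevent64, kevent_qos and kevent_id are the system call entry
 * points; each normalizes its flags to select a kevent structure
 * flavor and calls kevent_internal().  The *_internal variants are
 * in-kernel callers and add KEVENT_FLAG_KERNEL.
 */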
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
unsigned int flags = KEVENT_FLAG_LEGACY32;
return kevent_internal(p,
(kqueue_id_t)uap->fd, NULL,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
0ULL, 0ULL,
flags,
uap->timeout,
kevent_continue,
retval);
}
int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
unsigned int flags;
flags = uap->flags & KEVENT_FLAG_USER;
flags |= KEVENT_FLAG_LEGACY64;
return kevent_internal(p,
(kqueue_id_t)uap->fd, NULL,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
0ULL, 0ULL,
flags,
uap->timeout,
kevent_continue,
retval);
}
int
kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
{
uap->flags &= KEVENT_FLAG_USER;
return kevent_internal(p,
(kqueue_id_t)uap->fd, NULL,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
uap->data_out, (uint64_t)uap->data_available,
uap->flags,
0ULL,
kevent_continue,
retval);
}
int
kevent_qos_internal(struct proc *p, int fd,
user_addr_t changelist, int nchanges,
user_addr_t eventlist, int nevents,
user_addr_t data_out, user_size_t *data_available,
unsigned int flags,
int32_t *retval)
{
return kevent_internal(p,
(kqueue_id_t)fd, NULL,
changelist, nchanges,
eventlist, nevents,
data_out, (uint64_t)data_available,
(flags | KEVENT_FLAG_KERNEL),
0ULL,
NULL,
retval);
}
int
kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
{
uap->flags &= KEVENT_FLAG_USER;
return kevent_internal(p,
(kqueue_id_t)uap->id, NULL,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
uap->data_out, (uint64_t)uap->data_available,
(uap->flags | KEVENT_FLAG_DYNAMIC_KQUEUE),
0ULL,
kevent_continue,
retval);
}
int
kevent_id_internal(struct proc *p, kqueue_id_t *id,
user_addr_t changelist, int nchanges,
user_addr_t eventlist, int nevents,
user_addr_t data_out, user_size_t *data_available,
unsigned int flags,
int32_t *retval)
{
return kevent_internal(p,
*id, id,
changelist, nchanges,
eventlist, nevents,
data_out, (uint64_t)data_available,
(flags | KEVENT_FLAG_KERNEL | KEVENT_FLAG_DYNAMIC_KQUEUE),
0ULL,
NULL,
retval);
}
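/*
 * kevent_get_timeout - convert the caller's timeout into an absolute
 * timeval.  KEVENT_FLAG_IMMEDIATE polls (the deadline is now), a NULL
 * timeout means wait forever (zero timeval), and kernel callers pass
 * a struct timespec pointer rather than a user address.
 */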
static int
kevent_get_timeout(struct proc *p,
user_addr_t utimeout,
unsigned int flags,
struct timeval *atvp)
{
struct timeval atv;
int error = 0;
if (flags & KEVENT_FLAG_IMMEDIATE) {
getmicrouptime(&atv);
} else if (utimeout != USER_ADDR_NULL) {
struct timeval rtv;
if (flags & KEVENT_FLAG_KERNEL) {
struct timespec *tsp = (struct timespec *)utimeout;
TIMESPEC_TO_TIMEVAL(&rtv, tsp);
} else if (IS_64BIT_PROCESS(p)) {
struct user64_timespec ts;
error = copyin(utimeout, &ts, sizeof(ts));
if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
error = EINVAL;
else
TIMESPEC_TO_TIMEVAL(&rtv, &ts);
} else {
struct user32_timespec ts;
error = copyin(utimeout, &ts, sizeof(ts));
TIMESPEC_TO_TIMEVAL(&rtv, &ts);
}
if (error)
return (error);
if (itimerfix(&rtv))
return (EINVAL);
getmicrouptime(&atv);
timevaladd(&atv, &rtv);
} else {
atv.tv_sec = 0;
atv.tv_usec = 0;
}
*atvp = atv;
return 0;
}
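/*
 * kevent_set_kq_mode - pin the kqueue to one kevent structure flavor.
 * The first call sets KQ_KEV32/KQ_KEV64/KQ_KEV_QOS; later calls fail
 * with EINVAL only if they would mix the legacy 32-bit flavor with an
 * extended one.
 */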
static int
kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
{
kqlock(kq);
if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
if (flags & KEVENT_FLAG_LEGACY32) {
if ((kq->kq_state & KQ_KEV32) == 0) {
kqunlock(kq);
return EINVAL;
}
} else if (kq->kq_state & KQ_KEV32) {
kqunlock(kq);
return EINVAL;
}
} else if (flags & KEVENT_FLAG_LEGACY32) {
kq->kq_state |= KQ_KEV32;
} else if (flags & KEVENT_FLAG_LEGACY64) {
kq->kq_state |= KQ_KEV64;
} else {
kq->kq_state |= KQ_KEV_QOS;
}
kqunlock(kq);
return 0;
}
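/*
 * Dynamic kqueue hash: maps the userspace-provided 64-bit workloop
 * identifier to its kqworkloop.  The table hangs off the process
 * filedesc and is protected by fd_kqhashlock.
 */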
#define KQ_HASH(val, mask) (((val) ^ ((val) >> 8)) & (mask))
#define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
static inline void
kqhash_lock(proc_t p)
{
lck_mtx_lock_spin_always(&p->p_fd->fd_kqhashlock);
}
static inline void
kqhash_lock_held(__assert_only proc_t p)
{
LCK_MTX_ASSERT(&p->p_fd->fd_kqhashlock, LCK_MTX_ASSERT_OWNED);
}
static inline void
kqhash_unlock(proc_t p)
{
lck_mtx_unlock(&p->p_fd->fd_kqhashlock);
}
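/*
 * kqueue_hash_init_if_needed - lazily allocate the hash table.  The
 * lock is dropped around hashinit(), so a racing thread may install
 * the table first, in which case the fresh allocation is freed.
 */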
static void
kqueue_hash_init_if_needed(proc_t p)
{
struct filedesc *fdp = p->p_fd;
kqhash_lock_held(p);
if (__improbable(fdp->fd_kqhash == NULL)) {
struct kqlist *alloc_hash;
u_long alloc_mask;
kqhash_unlock(p);
alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
kqhash_lock(p);
if (fdp->fd_kqhashmask == 0) {
fdp->fd_kqhash = alloc_hash;
fdp->fd_kqhashmask = alloc_mask;
} else {
kqhash_unlock(p);
FREE(alloc_hash, M_KQUEUE);
kqhash_lock(p);
}
}
}
static void
kqueue_hash_insert(
struct proc *p,
kqueue_id_t id,
struct kqueue *kq)
{
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct filedesc *fdp = p->p_fd;
struct kqlist *list;
kqhash_lock_held(p);
if ((kq->kq_state & KQ_DYNAMIC) == 0) {
assert(kq->kq_state & KQ_DYNAMIC);
return;
}
assert(kq->kq_state & KQ_WORKLOOP);
assert(fdp->fd_kqhash);
kqwl->kqwl_dynamicid = id;
list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
SLIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
}
static void
kqueue_hash_remove(
struct proc *p,
struct kqueue *kq)
{
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct filedesc *fdp = p->p_fd;
struct kqlist *list;
kqhash_lock_held(p);
if ((kq->kq_state & KQ_DYNAMIC) == 0) {
assert(kq->kq_state & KQ_DYNAMIC);
return;
}
assert(kq->kq_state & KQ_WORKLOOP);
list = &fdp->fd_kqhash[KQ_HASH(kqwl->kqwl_dynamicid, fdp->fd_kqhashmask)];
SLIST_REMOVE(list, kqwl, kqworkloop, kqwl_hashlink);
}
static struct kqueue *
kqueue_hash_lookup(struct proc *p, kqueue_id_t id)
{
struct filedesc *fdp = p->p_fd;
struct kqlist *list;
struct kqworkloop *kqwl;
kqhash_lock_held(p);
if (fdp->fd_kqhashmask == 0) return NULL;
list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
SLIST_FOREACH(kqwl, list, kqwl_hashlink) {
if (kqwl->kqwl_dynamicid == id) {
struct kqueue *kq = (struct kqueue *)kqwl;
assert(kq->kq_state & KQ_DYNAMIC);
assert(kq->kq_state & KQ_WORKLOOP);
return kq;
}
}
return NULL;
}
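/*
 * kqueue_release_last - drop a reference on a dynamic kqueue; if it
 * was the last one, invalidate the workloop, unhash it and free it.
 */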
static inline void
kqueue_release_last(struct proc *p, kqueue_t kqu)
{
struct kqueue *kq = kqu.kq;
if (kq->kq_state & KQ_DYNAMIC) {
kqhash_lock(p);
if (kqueue_release(kq, KQUEUE_MIGHT_BE_LAST_REF)) {
thread_t cur_owner = kqworkloop_invalidate(kqu.kqwl);
kqueue_hash_remove(p, kq);
kqhash_unlock(p);
if (cur_owner) thread_deallocate(cur_owner);
kqueue_dealloc(kq);
} else {
kqhash_unlock(p);
}
}
}
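/*
 * kqworkloops_dealloc - process teardown: unhash every remaining
 * parameterized workloop and release the reference the hash held.
 */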
void
kqworkloops_dealloc(proc_t p)
{
struct filedesc *fdp = p->p_fd;
struct kqlist *list;
struct kqworkloop *kqwl, *kqwln;
struct kqlist tofree;
int i;
if (!(fdp->fd_flags & FD_WORKLOOP)) {
return;
}
SLIST_INIT(&tofree);
kqhash_lock(p);
assert(fdp->fd_kqhashmask != 0);
for (i = 0; i <= (int)fdp->fd_kqhashmask; i++) {
list = &fdp->fd_kqhash[i];
SLIST_FOREACH_SAFE(kqwl, list, kqwl_hashlink, kqwln) {
assert(kqwl->kqwl_params);
SLIST_REMOVE(list, kqwl, kqworkloop, kqwl_hashlink);
SLIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
}
}
kqhash_unlock(p);
SLIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
struct kqueue *kq = (struct kqueue *)kqwl;
__assert_only bool released;
released = kqueue_release(kq, KQUEUE_MIGHT_BE_LAST_REF);
assert(released);
kqueue_dealloc(kq);
}
}
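/*
 * kevent_get_bound_kqworkloop - the workloop the calling servicer
 * thread is currently bound to, if any.
 */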
static struct kqueue *
kevent_get_bound_kqworkloop(thread_t thread)
{
struct uthread *ut = get_bsdthread_info(thread);
struct kqrequest *kqr = ut->uu_kqr_bound;
return kqr ? (struct kqueue *)kqr_kqworkloop(kqr) : NULL;
}
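/*
 * kevent_get_kq - resolve the kqueue a kevent call targets
 *
 * Dynamic workloops are looked up (or created) by identifier in the
 * process hash, honoring KEVENT_FLAG_DYNAMIC_KQ_MUST_[NOT_]EXIST; the
 * per-process workq kqueue is lazily created; otherwise the id is a
 * plain kqueue file descriptor.  On success either a kqueue reference
 * or a fileproc reference is held for the caller.
 */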
static int
kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp,
unsigned int flags, struct fileproc **fpp, int *fdp,
struct kqueue **kqp)
{
struct filedesc *descp = p->p_fd;
struct fileproc *fp = NULL;
struct kqueue *kq = NULL;
int fd = 0;
int error = 0;
thread_t th = current_thread();
assert(!trp || (flags & KEVENT_FLAG_WORKLOOP));
if (flags & KEVENT_FLAG_DYNAMIC_KQUEUE) {
assert(flags & KEVENT_FLAG_WORKLOOP);
assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
kq = kevent_get_bound_kqworkloop(th);
if (id == (kqueue_id_t)-1 &&
(flags & KEVENT_FLAG_KERNEL) &&
(flags & KEVENT_FLAG_WORKLOOP)) {
if (!is_workqueue_thread(th) || !kq) {
return EINVAL;
}
kqueue_retain(kq);
goto out;
}
if (id == 0 || id == (kqueue_id_t)-1) {
return EINVAL;
}
if (kq != NULL && ((struct kqworkloop *)kq)->kqwl_dynamicid == id) {
if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
return EEXIST;
}
assert(kq->kq_state & KQ_DYNAMIC);
kqueue_retain(kq);
goto out;
}
kqhash_lock(p);
kq = kqueue_hash_lookup(p, id);
if (kq == NULL) {
kqhash_unlock(p);
if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST) {
return ENOENT;
}
struct kqueue *alloc_kq;
alloc_kq = kqueue_alloc(p, flags);
if (!alloc_kq) {
return ENOMEM;
}
kqhash_lock(p);
kqueue_hash_init_if_needed(p);
kq = kqueue_hash_lookup(p, id);
if (kq == NULL) {
kq = alloc_kq;
if (trp) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
kqwl->kqwl_params = trp->trp_value;
}
kqueue_hash_insert(p, id, kq);
kqhash_unlock(p);
} else if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
kqhash_unlock(p);
kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF);
kqueue_dealloc(alloc_kq);
return EEXIST;
} else {
kqueue_retain(kq);
kqhash_unlock(p);
kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF);
kqueue_dealloc(alloc_kq);
}
} else {
if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) {
kqhash_unlock(p);
return EEXIST;
}
assert(kq->kq_state & KQ_DYNAMIC);
kqueue_retain(kq);
kqhash_unlock(p);
}
} else if (flags & KEVENT_FLAG_WORKQ) {
if (flags & KEVENT_FLAG_KERNEL) {
assert(descp->fd_wqkqueue != NULL);
}
kq = descp->fd_wqkqueue;
if (kq == NULL) {
struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
if (alloc_kq == NULL) {
return ENOMEM;
}
knhash_lock(p);
if (descp->fd_wqkqueue == NULL) {
kq = descp->fd_wqkqueue = alloc_kq;
knhash_unlock(p);
} else {
knhash_unlock(p);
kq = descp->fd_wqkqueue;
kqueue_dealloc(alloc_kq);
}
}
} else {
fd = (int)id;
if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
return (error);
}
if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
if (fp != NULL)
fp_drop(p, fd, fp, 0);
return error;
}
out:
*fpp = fp;
*fdp = fd;
*kqp = kq;
return error;
}
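/*
 * kevent_put_kq - undo kevent_get_kq: drop the last dynamic reference
 * and/or the file descriptor reference, whichever was taken.
 */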
static void
kevent_put_kq(
struct proc *p,
kqueue_id_t id,
struct fileproc *fp,
struct kqueue *kq)
{
kqueue_release_last(p, kq);
if (fp != NULL) {
assert((kq->kq_state & KQ_WORKQ) == 0);
fp_drop(p, (int)id, fp, 0);
}
}
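/*
 * kevent_workloop_serial_no_copyin - best-effort copyin of the
 * dispatch queue serial number behind a workloop identifier, used in
 * exit reasons; returns 0 on any failure rather than an error.
 */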
static uint64_t
kevent_workloop_serial_no_copyin(proc_t p, uint64_t workloop_id)
{
uint64_t serial_no = 0;
user_addr_t addr;
int rc;
if (workloop_id == 0 || p->p_dispatchqueue_serialno_offset == 0) {
return 0;
}
addr = (user_addr_t)(workloop_id + p->p_dispatchqueue_serialno_offset);
if (proc_is64bit(p)) {
rc = copyin(addr, (caddr_t)&serial_no, sizeof(serial_no));
} else {
uint32_t serial_no32 = 0;
rc = copyin(addr, (caddr_t)&serial_no32, sizeof(serial_no32));
serial_no = serial_no32;
}
return rc == 0 ? serial_no : 0;
}
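/*
 * kevent_exit_on_workloop_ownership_leak - a thread is going away
 * while still owning a workloop.  Locate the leaked workloop, record
 * its id and dispatch queue serial number in an exit reason, then
 * abort or kill the process depending on the build.
 */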
int
kevent_exit_on_workloop_ownership_leak(thread_t thread)
{
proc_t p = current_proc();
struct filedesc *fdp = p->p_fd;
kqueue_id_t workloop_id = 0;
os_reason_t reason = OS_REASON_NULL;
mach_vm_address_t addr;
uint32_t reason_size;
kqhash_lock(p);
if (fdp->fd_kqhashmask > 0) {
for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
struct kqworkloop *kqwl;
SLIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
struct kqueue *kq = &kqwl->kqwl_kqueue;
if ((kq->kq_state & KQ_DYNAMIC) && kqwl->kqwl_owner == thread) {
workloop_id = kqwl->kqwl_dynamicid;
break;
}
}
}
}
kqhash_unlock(p);
reason = os_reason_create(OS_REASON_LIBSYSTEM,
OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK);
if (reason == OS_REASON_NULL) {
goto out;
}
reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
reason_size = 2 * sizeof(uint64_t);
reason_size = kcdata_estimate_required_buffer_size(2, reason_size);
if (os_reason_alloc_buffer(reason, reason_size) != 0) {
goto out;
}
if (workloop_id) {
struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor;
if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID,
sizeof(workloop_id), &addr) == KERN_SUCCESS) {
kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id));
}
uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id);
if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO,
sizeof(serial_no), &addr) == KERN_SUCCESS) {
kcdata_memcpy(kcd, addr, &serial_no, sizeof(serial_no));
}
}
out:
#if DEVELOPMENT || DEBUG
if (kevent_debug_flags() & KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK) {
panic("thread %p in task %p is leaked workloop 0x%016llx ownership",
thread, p->task, workloop_id);
}
psignal_try_thread_with_reason(p, thread, SIGABRT, reason);
return 0;
#else
return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL,
FALSE, FALSE, 0, reason);
#endif
}
static inline boolean_t
kevent_args_requesting_events(unsigned int flags, int nevents)
{
return (!(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0);
}
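/*
 * kevent_internal - common implementation of the kevent family
 *
 * Validates the flag combinations, resolves the target kqueue, then
 * copies in and registers each change, reporting per-event errors
 * inline when KEVENT_FLAG_ERROR_EVENTS is set.  If the caller wants
 * events, scans the kqueue (blocking with a continuation if allowed)
 * and copies the results out.
 */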
static int
kevent_internal(struct proc *p,
kqueue_id_t id, kqueue_id_t *id_out,
user_addr_t changelist, int nchanges,
user_addr_t ueventlist, int nevents,
user_addr_t data_out, uint64_t data_available,
unsigned int flags,
user_addr_t utimeout,
kqueue_continue_t continuation,
int32_t *retval)
{
uthread_t ut;
struct kqueue *kq;
struct fileproc *fp = NULL;
int fd = 0;
struct kevent_internal_s kev;
int error, noutputs, register_rc;
bool needs_end_processing = false;
struct timeval atv;
user_size_t data_size;
user_size_t data_resid;
thread_t thread = current_thread();
KNOTE_LOCK_CTX(knlc);
if (((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ) &&
kevent_args_requesting_events(flags, nevents))
return EINVAL;
if (flags & KEVENT_FLAG_PARKING) {
if (!kevent_args_requesting_events(flags, nevents) || id != (kqueue_id_t)-1)
return EINVAL;
}
if ((flags & (KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP)) == KEVENT_FLAG_DYNAMIC_KQUEUE)
return EINVAL;
if ((flags & (KEVENT_FLAG_WORKLOOP)) && (flags & (KEVENT_FLAG_WORKQ)))
return EINVAL;
if (flags & (KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
if (!(flags & KEVENT_FLAG_WORKLOOP) || (flags & KEVENT_FLAG_KERNEL) || !(flags & KEVENT_FLAG_DYNAMIC_KQUEUE))
return EINVAL;
}
if (flags & KEVENT_FLAG_STACK_EVENTS) {
int scale = ((flags & KEVENT_FLAG_LEGACY32) ?
(IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
sizeof(struct user32_kevent)) :
((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
sizeof(struct kevent_qos_s)));
ueventlist += nevents * scale;
}
error = kevent_get_timeout(p, utimeout, flags, &atv);
if (error)
return error;
error = kevent_get_data_size(p, data_available, flags, &data_size);
if (error)
return error;
error = kevent_get_kq(p, id, NULL, flags, &fp, &fd, &kq);
#if CONFIG_WORKLOOP_DEBUG
ut = (uthread_t)get_bsdthread_info(thread);
UU_KEVENT_HISTORY_WRITE_ENTRY(ut, {
.uu_kqid = id,
.uu_kq = error ? NULL : kq,
.uu_error = error,
.uu_nchanges = nchanges,
.uu_nevents = nevents,
.uu_flags = flags,
});
#endif // CONFIG_WORKLOOP_DEBUG
if (error)
return error;
if (flags & KEVENT_FLAG_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
assert(kq->kq_state & KQ_WORKLOOP);
if (kevent_args_requesting_events(flags, nevents)) {
if (kq != kevent_get_bound_kqworkloop(thread)) {
error = EXDEV;
goto out;
}
kq_req_lock(kqwl);
kqr->kqr_state &= ~KQR_R2K_NOTIF_ARMED;
needs_end_processing = true;
kq_req_unlock(kq);
}
if (id_out) {
*id_out = kqwl->kqwl_dynamicid;
}
}
noutputs = 0;
while (nchanges > 0 && error == 0) {
error = kevent_copyin(&changelist, &kev, p, flags);
if (error)
break;
kev.flags &= ~EV_SYSFLAGS;
register_rc = kevent_register(kq, &kev, &knlc);
if (register_rc & FILTER_REGISTER_WAIT) {
kqlock_held(kq);
if (nchanges == 1 && nevents >= 1 && (flags & KEVENT_FLAG_ERROR_EVENTS)) {
struct _kevent_register *cont_args;
ut = (uthread_t)get_bsdthread_info(thread);
cont_args = &ut->uu_save.uus_kevent_register;
cont_args->kev = kev;
cont_args->kq = kq;
cont_args->fp = fp;
cont_args->fd = fd;
cont_args->ueventlist = ueventlist;
cont_args->flags = flags;
cont_args->retval = retval;
cont_args->eventcount = nevents;
cont_args->eventout = noutputs;
knote_fops(cont_args->knote)->f_post_register_wait(ut, &knlc, cont_args);
panic("f_post_register_wait returned (kev: %p)", &kev);
}
kev.flags |= EV_ERROR;
kev.data = ENOTSUP;
knote_unlock(kq, knlc.knlc_knote, &knlc, KNOTE_KQ_UNLOCK);
}
if (nevents > 0 && (kev.flags & (EV_ERROR|EV_RECEIPT))) {
if ((kev.flags & EV_ERROR) == 0) {
kev.flags |= EV_ERROR;
kev.data = 0;
}
error = kevent_copyout(&kev, &ueventlist, p, flags);
if (error == 0) {
nevents--;
noutputs++;
}
} else if (kev.flags & EV_ERROR) {
error = kev.data;
}
nchanges--;
}
if (flags & KEVENT_FLAG_ERROR_EVENTS)
nevents = 0;
if (nevents > 0 && noutputs == 0 && error == 0) {
struct _kevent *cont_args;
ut = (uthread_t)get_bsdthread_info(thread);
cont_args = &ut->uu_save.uus_kevent;
cont_args->fp = fp;
cont_args->fd = fd;
cont_args->retval = retval;
cont_args->eventlist = ueventlist;
cont_args->eventcount = nevents;
cont_args->eventout = noutputs;
cont_args->data_available = data_available;
cont_args->process_data.fp_fd = (int)id;
cont_args->process_data.fp_flags = flags;
cont_args->process_data.fp_data_out = data_out;
cont_args->process_data.fp_data_size = data_size;
cont_args->process_data.fp_data_resid = data_size;
needs_end_processing = false;
error = kqueue_scan(kq, kevent_callback,
continuation, cont_args,
&cont_args->process_data,
&atv, p);
noutputs = cont_args->eventout;
data_resid = cont_args->process_data.fp_data_resid;
if (error == 0 && data_available && data_resid != data_size) {
(void)kevent_put_data_size(p, data_available, flags, data_resid);
}
}
out:
if (__improbable(needs_end_processing)) {
kqlock(kq);
kqworkloop_end_processing((struct kqworkloop *)kq, 0, 0);
kqunlock(kq);
}
kevent_put_kq(p, id, fp, kq);
if (error == ERESTART)
error = EINTR;
else if (error == EWOULDBLOCK)
error = 0;
if (error == 0)
*retval = noutputs;
return (error);
}
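/*
 * kevent_callback - copy one triggered event to the output list;
 * returns EWOULDBLOCK once the list is full to end the scan.
 */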
static int
kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
void *data)
{
struct _kevent *cont_args;
int error;
cont_args = (struct _kevent *)data;
assert(cont_args->eventout < cont_args->eventcount);
error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
cont_args->process_data.fp_flags);
if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
error = EWOULDBLOCK;
return (error);
}
char *
kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
{
snprintf(s, n,
"kevent="
"{.ident=%#llx, .filter=%d, .flags=%#x, .udata=%#llx, .fflags=%#x, .data=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
kevp->ident,
kevp->filter,
kevp->flags,
kevp->udata,
kevp->fflags,
kevp->data,
kevp->ext[0],
kevp->ext[1] );
return (s);
}
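/*
 * kevent_register_validate_priority - knotes attached to workloops
 * must carry a valid pthread QoS unless the request merely disables
 * or deletes them; returns ERANGE otherwise.
 */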
static int
kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
struct kevent_internal_s *kev)
{
if (kev->flags & (EV_DISABLE | EV_DELETE)) {
return 0;
}
if (kq->kq_state & KQ_WORKLOOP) {
if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
return ERANGE;
}
}
return 0;
}
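/*
 * kevent_register_wait_* - support for filters whose registration must
 * block (FILTER_REGISTER_WAIT): the calling thread parks on the knote,
 * hands off to the thread it waits on, and completes the syscall from
 * the continuation when woken.
 */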
static int
kevent_register_wait_prepare(struct knote *kn, struct kevent_internal_s *kev)
{
thread_t thread = current_thread();
struct uthread *uth = get_bsdthread_info(thread);
assert(knote_fops(kn)->f_extended_codes);
if (kn->kn_hook == NULL) {
thread_reference(thread);
kn->kn_hook = thread;
} else if (kn->kn_hook != thread) {
kev->flags |= EV_ERROR;
kev->data = EXDEV;
return 0;
}
uth->uu_save.uus_kevent_register.knote = kn;
return FILTER_REGISTER_WAIT;
}
static void
kevent_register_wait_cleanup(struct knote *kn)
{
thread_t thread = kn->kn_hook;
kn->kn_hook = NULL;
thread_deallocate(thread);
}
static void
kevent_register_wait_block(struct turnstile *ts, thread_t thread,
struct knote_lock_ctx *knlc, thread_continue_t cont,
struct _kevent_register *cont_args)
{
knote_unlock(cont_args->kq, cont_args->knote, knlc, KNOTE_KQ_UNLOCK);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
cont_args->handoff_thread = thread;
thread_handoff_parameter(thread, cont, cont_args);
}
static void
kevent_register_wait_return(struct _kevent_register *cont_args)
{
struct kqueue *kq = cont_args->kq;
proc_t p = kq->kq_p;
struct kevent_internal_s *kev = &cont_args->kev;
int error = 0;
if (cont_args->handoff_thread) {
thread_deallocate(cont_args->handoff_thread);
}
if (kev->flags & (EV_ERROR|EV_RECEIPT)) {
if ((kev->flags & EV_ERROR) == 0) {
kev->flags |= EV_ERROR;
kev->data = 0;
}
error = kevent_copyout(kev, &cont_args->ueventlist, p, cont_args->flags);
if (error == 0) cont_args->eventout++;
}
kevent_put_kq(p, cont_args->fd, cont_args->fp, kq);
if (error == 0) {
*cont_args->retval = cont_args->eventout;
}
unix_syscall_return(error);
}
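/*
 * kevent_register - apply one change to a kqueue
 *
 * Looks up the matching knote.  EV_ADD allocates, attaches and may
 * activate a new one; EV_DELETE drops it (or defer-deletes a disabled
 * EV_DISPATCH2 knote); otherwise the filter's f_touch updates it in
 * place.  Errors are reported back in kev with EV_ERROR set.
 */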
int
kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
struct knote_lock_ctx *knlc)
{
struct proc *p = kq->kq_p;
const struct filterops *fops;
struct knote *kn = NULL;
int result = 0, error = 0;
unsigned short kev_flags = kev->flags;
if (kev->filter < 0) {
if (kev->filter + EVFILT_SYSCOUNT < 0) {
error = EINVAL;
goto out;
}
fops = sysfilt_ops[~kev->filter];
} else {
error = EINVAL;
goto out;
}
if ((kev->flags & EV_VANISHED) &&
(kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
error = EINVAL;
goto out;
}
if (kev->flags & EV_DELETE)
kev->flags &= ~EV_ADD;
if (kev->flags & EV_DISABLE)
kev->flags &= ~EV_ENABLE;
if (kq->kq_state & KQ_WORKLOOP) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
((struct kqworkloop *)kq)->kqwl_dynamicid,
kev->udata, kev->flags, kev->filter);
} else if (kq->kq_state & KQ_WORKQ) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
0, kev->udata, kev->flags, kev->filter);
} else {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
VM_KERNEL_UNSLIDE_OR_PERM(kq),
kev->udata, kev->flags, kev->filter);
}
restart:
kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
error = kevent_register_validate_priority(kq, kn, kev);
result = 0;
if (error) {
goto out;
}
if (kn == NULL && (kev->flags & EV_ADD) == 0) {
if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
(kq->kq_state & KQ_WORKLOOP)) {
} else {
error = ENOENT;
}
goto out;
} else if (kn == NULL) {
struct fileproc *knote_fp = NULL;
if (fops->f_isfd) {
if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) {
goto out;
}
}
kn = knote_alloc();
if (kn == NULL) {
error = ENOMEM;
if (knote_fp != NULL)
fp_drop(p, kev->ident, knote_fp, 0);
goto out;
}
kn->kn_fp = knote_fp;
kn->kn_kq_packed = (intptr_t)(struct kqueue *)kq;
kqueue_retain(kq);
kn->kn_filtid = ~kev->filter;
kn->kn_status = KN_ATTACHING | KN_ATTACHED;
if (kev->flags & EV_VANISHED) {
kev->flags &= ~EV_VANISHED;
kn->kn_status |= KN_REQVANISH;
}
if (kev->flags & EV_DISPATCH)
kn->kn_status |= KN_DISPATCH;
if (kev->flags & EV_UDATA_SPECIFIC)
kn->kn_status |= KN_UDATA_SPECIFIC;
if (kev->flags & EV_DISABLE)
kn->kn_status |= KN_DISABLED;
kn->kn_kevent = *kev;
kn->kn_sfflags = kev->fflags;
kn->kn_sdata = kev->data;
kn->kn_fflags = 0;
kn->kn_data = 0;
knote_reset_priority(kn, kev->qos);
error = kq_add_knote(kq, kn, knlc, p);
if (error) {
(void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
knote_free(kn);
if (knote_fp != NULL)
fp_drop(p, kev->ident, knote_fp, 0);
if (error == ERESTART) {
goto restart;
}
goto out;
}
result = fops->f_attach(kn, kev);
if (result && !knote_fops(kn)->f_extended_codes) {
result = FILTER_ACTIVE;
}
kqlock(kq);
if (kn->kn_flags & EV_ERROR) {
kn->kn_status &= ~(KN_ATTACHED | KN_ATTACHING);
error = kn->kn_data;
knote_drop(kq, kn, knlc);
result = 0;
goto out;
}
kn->kn_status &= ~KN_ATTACHING;
knote_set_qos_overcommit(kn);
if (result & FILTER_ACTIVE) {
if (result & FILTER_ADJUST_EVENT_QOS_BIT)
knote_adjust_qos(kq, kn, result);
knote_activate(kn);
}
} else if (!knote_lock(kq, kn, knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
goto restart;
} else if (kev->flags & EV_DELETE) {
if (knote_fops(kn)->f_allow_drop) {
bool drop;
kqunlock(kq);
drop = knote_fops(kn)->f_allow_drop(kn, kev);
kqlock(kq);
if (!drop) goto out_unlock;
}
if ((kev->flags & EV_ENABLE) == 0 &&
(kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
(KN_DISPATCH2 | KN_DISABLED)) {
kn->kn_status |= KN_DEFERDELETE;
error = EINPROGRESS;
goto out_unlock;
}
knote_drop(kq, kn, knlc);
goto out;
} else {
if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
if (kev->flags & EV_ENABLE) {
result = FILTER_ACTIVE;
}
} else {
kqunlock(kq);
result = filter_call(knote_fops(kn), f_touch(kn, kev));
kqlock(kq);
}
if (kev->flags & EV_ERROR) {
result = 0;
} else {
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
if (kev->flags & EV_DISABLE)
knote_disable(kn);
if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT))
knote_dequeue(kn);
if ((result & FILTER_UPDATE_REQ_QOS) &&
kev->qos && kev->qos != kn->kn_qos) {
knote_reset_priority(kn, kev->qos);
}
if (result & FILTER_ACTIVE) {
thread_qos_t qos;
if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
if (knote_should_apply_qos_override(kq, kn, result, &qos)) {
knote_apply_qos_override(kn, qos);
}
}
knote_activate(kn);
}
if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT)) {
if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) {
knote_wakeup(kn);
}
}
if (kev->flags & EV_ENABLE)
knote_enable(kn);
}
}
out_unlock:
if ((result & FILTER_REGISTER_WAIT) == 0) {
knote_unlock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
}
out:
if (error) {
kev->flags |= EV_ERROR;
kev->data = error;
}
return result;
}
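/*
 * knote_process - deliver one queued knote
 *
 * Suppresses the knote and calls the filter's f_process (or
 * synthesizes an EV_DELETE/EV_VANISHED event for defer-deleted or
 * vanished knotes), then re-activates, disables or drops it according
 * to the EV_ONESHOT/EV_DISPATCH/EV_CLEAR semantics before handing the
 * event to the callback.
 */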
static int
knote_process(struct knote *kn,
kevent_callback_t callback,
void *callback_data,
struct filt_process_s *process_data)
{
struct kevent_internal_s kev;
struct kqueue *kq = knote_get_kq(kn);
KNOTE_LOCK_CTX(knlc);
int result = FILTER_ACTIVE;
int error = 0;
bool drop = false;
bzero(&kev, sizeof(kev));
assert(kn->kn_status & KN_QUEUED);
assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
if (kq->kq_state & KQ_WORKLOOP) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
((struct kqworkloop *)kq)->kqwl_dynamicid,
kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
kn->kn_filtid);
} else if (kq->kq_state & KQ_WORKQ) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
kn->kn_filtid);
} else {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
}
if ((kn->kn_status & KN_DROPPING) ||
!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
return EJUSTRETURN;
}
knote_suppress(kn);
if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
kev.filter = kn->kn_filter;
kev.ident = kn->kn_id;
kev.flags = (kn->kn_status & KN_DEFERDELETE) ? EV_DELETE : EV_VANISHED;
kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
kev.udata = kn->kn_udata;
} else {
knote_deactivate(kn);
kqunlock(kq);
result = filter_call(knote_fops(kn), f_process(kn, process_data, &kev));
kqlock(kq);
}
if ((result & FILTER_ACTIVE) == 0) {
if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) {
knote_unsuppress(kn);
}
knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
return EJUSTRETURN;
}
if (result & FILTER_ADJUST_EVENT_QOS_BIT)
knote_adjust_qos(kq, kn, result);
kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
if (kev.flags & EV_ONESHOT) {
if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
kn->kn_status |= KN_DEFERDELETE;
knote_disable(kn);
} else {
drop = true;
}
} else if (kn->kn_status & KN_DISPATCH) {
knote_disable(kn);
} else if ((kev.flags & EV_CLEAR) == 0) {
knote_activate(kn);
}
if (drop) {
knote_drop(kq, kn, &knlc);
} else {
knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
}
if (kev.flags & EV_VANISHED) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
kn->kn_filtid);
}
error = (callback)(kq, &kev, callback_data);
kqlock(kq);
return error;
}
#define KQWQAE_BEGIN_PROCESSING 1
#define KQWQAE_END_PROCESSING 2
#define KQWQAE_UNBIND 3
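/*
 * kqworkq_acknowledge_events - unsuppress everything the servicer held
 * suppressed and decide whether it should unbind; returns -1 when it
 * did.  A wakeup that raced with the unbind posts a new thread
 * request.
 */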
static int
kqworkq_acknowledge_events(struct kqworkq *kqwq, struct kqrequest *kqr,
int kevent_flags, int kqwqae_op)
{
thread_qos_t old_override = THREAD_QOS_UNSPECIFIED;
thread_t thread = kqr->kqr_thread;
struct knote *kn;
int rc = 0;
bool seen_stayactive = false, unbind;
kqlock_held(&kqwq->kqwq_kqueue);
if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) {
while ((kn = TAILQ_FIRST(&kqr->kqr_suppressed)) != NULL) {
assert(kn->kn_status & KN_SUPPRESSED);
knote_unsuppress(kn);
if (kn->kn_status & KN_STAYACTIVE) {
seen_stayactive = true;
}
}
}
kq_req_lock(kqwq);
#if DEBUG || DEVELOPMENT
thread_t self = current_thread();
struct uthread *ut = get_bsdthread_info(self);
assert(kqr->kqr_state & KQR_THREQUESTED);
assert(kqr->kqr_thread == self);
assert(ut->uu_kqr_bound == kqr);
#endif // DEBUG || DEVELOPMENT
if (kqwqae_op == KQWQAE_UNBIND) {
unbind = true;
} else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
unbind = false;
} else if (kqwqae_op == KQWQAE_BEGIN_PROCESSING && seen_stayactive) {
unbind = false;
} else {
unbind = ((kqr->kqr_state & KQR_WAKEUP) == 0);
}
if (unbind) {
old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
rc = -1;
if (kqr->kqr_state & KQR_WAKEUP) {
kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
kqr->kqr_qos_index, 0);
}
}
if (rc == 0) {
kqr->kqr_state &= ~KQR_WAKEUP;
}
kq_req_unlock(kqwq);
if (old_override) {
thread_drop_ipc_override(thread);
}
return rc;
}
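/*
 * kqworkq_begin_processing - start processing on behalf of a workq
 * servicer; returns -1 if the thread unbound instead because nothing
 * was left to do.
 */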
static int
kqworkq_begin_processing(struct kqworkq *kqwq, struct kqrequest *kqr,
int kevent_flags)
{
int rc = 0;
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
0, kqr->kqr_qos_index);
rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
KQWQAE_BEGIN_PROCESSING);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
thread_tid(kqr->kqr_thread), kqr->kqr_state);
return rc;
}
static inline bool
kqworkloop_is_processing_on_current_thread(struct kqworkloop *kqwl)
{
struct kqueue *kq = &kqwl->kqwl_kqueue;
kqlock_held(kq);
if (kq->kq_state & KQ_PROCESSING) {
return kqwl->kqwl_request.kqr_thread == current_thread();
}
return false;
}
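/*
 * kqworkloop_acknowledge_events - unsuppress the suppressed knotes,
 * keeping disabled EV_DISPATCH ones suppressed; returns the highest
 * override QoS among the knotes retained.
 */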
static thread_qos_t
kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
kq_index_t qos = THREAD_QOS_UNSPECIFIED;
struct knote *kn, *tmp;
kqlock_held(&kqwl->kqwl_kqueue);
TAILQ_FOREACH_SAFE(kn, &kqr->kqr_suppressed, kn_tqe, tmp) {
if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) &&
(kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 &&
(kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
qos = MAX(qos, knote_get_qos_override_index(kn));
continue;
}
knote_unsuppress(kn);
}
return qos;
}
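/*
 * kqworkloop_begin_processing - mark the workloop KQ_PROCESSING; when
 * parking, acknowledge suppressed knotes and possibly unbind the
 * servicer.  Returns -1 when the thread unbound and should not
 * process.
 */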
static int
kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
struct kqueue *kq = &kqwl->kqwl_kqueue;
thread_qos_t old_override = THREAD_QOS_UNSPECIFIED, qos_override;
thread_t thread = kqr->kqr_thread;
int rc = 0, op = KQWL_UTQ_NONE;
kqlock_held(kq);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
kqwl->kqwl_dynamicid, 0, 0);
assert((kq->kq_state & KQ_PROCESSING) == 0);
kq->kq_state |= KQ_PROCESSING;
if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) {
op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
}
if (kevent_flags & KEVENT_FLAG_PARKING) {
if (kqr->kqr_state & KQR_THOVERCOMMIT) {
op = KQWL_UTQ_PARKING;
} else {
op = KQWL_UTQ_UNBINDING;
}
}
if (op == KQWL_UTQ_NONE) {
goto done;
}
qos_override = kqworkloop_acknowledge_events(kqwl);
kq_req_lock(kqwl);
if (op == KQWL_UTQ_UNBINDING) {
old_override = kqworkloop_unbind_locked(kqwl, thread);
(void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF);
}
kqworkloop_update_threads_qos(kqwl, op, qos_override);
if (op == KQWL_UTQ_PARKING) {
if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
} else if ((kqr->kqr_state & KQR_WAKEUP) == 0 || kqwl->kqwl_owner) {
old_override = kqworkloop_unbind_locked(kqwl, thread);
(void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF);
rc = -1;
}
} else if (op == KQWL_UTQ_UNBINDING) {
if (kqr->kqr_thread == thread) {
} else {
rc = -1;
}
}
if (rc == 0) {
kqr->kqr_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
} else {
kq->kq_state &= ~KQ_PROCESSING;
}
kq_req_unlock(kqwl);
if (old_override) {
thread_drop_ipc_override(thread);
}
done:
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
kqwl->kqwl_dynamicid, 0, 0);
return rc;
}
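/*
 * kqfile_begin_processing - serialize processing on a regular kqueue:
 * wait out any concurrent processor, and return -1 if the kqueue is
 * draining or has nothing queued.
 */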
static int
kqfile_begin_processing(struct kqueue *kq)
{
struct kqtailq *suppressq;
kqlock_held(kq);
assert((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
for (;;) {
if (kq->kq_state & KQ_DRAIN) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
return -1;
}
if ((kq->kq_state & KQ_PROCESSING) == 0)
break;
kq->kq_state |= KQ_PROCWAIT;
suppressq = kqueue_get_suppressed_queue(kq, NULL);
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT,
TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
kqlock(kq);
}
waitq_set_clear_preposts(&kq->kq_wqs);
kq->kq_state &= ~KQ_WAKEUP;
if (kqueue_queue_empty(kq, QOS_INDEX_KQFILE)) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
return -1;
}
kq->kq_state |= KQ_PROCESSING;
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
VM_KERNEL_UNSLIDE_OR_PERM(kq));
return 0;
}
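/*
 * kqworkq_end_processing - note whether events remain queued; when
 * parking, returns -1 if the servicer stayed bound and must resume
 * processing.
 */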
static int
kqworkq_end_processing(struct kqworkq *kqwq, struct kqrequest *kqr,
int kevent_flags)
{
if (!kqueue_queue_empty(&kqwq->kqwq_kqueue, kqr->kqr_qos_index)) {
kq_req_lock(kqwq);
kqr->kqr_state |= KQR_WAKEUP;
kq_req_unlock(kqwq);
}
if (kevent_flags & KEVENT_FLAG_PARKING) {
int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
KQWQAE_END_PROCESSING);
if (rc == 0) {
return -1;
}
}
return 0;
}
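/*
 * kqworkloop_end_processing - leave the processing state.  When
 * parking, returns -1 if new wakeups mean the servicer must keep
 * going, else unbinds it; otherwise re-arms the return-to-kernel
 * notification and recomputes the wakeup QoS.
 */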
static int
kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
{
struct kqueue *kq = &kqwl->kqwl_kqueue;
struct kqrequest *kqr = &kqwl->kqwl_request;
thread_qos_t old_override = THREAD_QOS_UNSPECIFIED, qos_override;
thread_t thread = kqr->kqr_thread;
int rc = 0;
kqlock_held(kq);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
kqwl->kqwl_dynamicid, 0, 0);
if (flags & KQ_PROCESSING) {
assert(kq->kq_state & KQ_PROCESSING);
if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) {
kq_req_lock(kqwl);
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS,
KQWL_BUCKET_STAYACTIVE);
kq_req_unlock(kqwl);
}
if (kevent_flags & KEVENT_FLAG_PARKING) {
qos_override = kqworkloop_acknowledge_events(kqwl);
}
}
kq_req_lock(kqwl);
if (kevent_flags & KEVENT_FLAG_PARKING) {
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
if ((kqr->kqr_state & KQR_WAKEUP) && !kqwl->kqwl_owner) {
kqr->kqr_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT;
rc = -1;
} else {
old_override = kqworkloop_unbind_locked(kqwl, thread);
(void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF);
kq->kq_state &= ~flags;
}
} else {
kq->kq_state &= ~flags;
kqr->kqr_state |= KQR_R2K_NOTIF_ARMED;
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
}
kq_req_unlock(kqwl);
if (old_override) {
thread_drop_ipc_override(thread);
}
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
kqwl->kqwl_dynamicid, 0, 0);
return rc;
}
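/*
 * kqfile_end_processing - unsuppress any leftovers and wake threads
 * waiting in kqfile_begin_processing.
 */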
static void
kqfile_end_processing(struct kqueue *kq)
{
struct knote *kn;
struct kqtailq *suppressq;
int procwait;
kqlock_held(kq);
assert((kq->kq_state & (KQ_WORKQ|KQ_WORKLOOP)) == 0);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
suppressq = kqueue_get_suppressed_queue(kq, NULL);
while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
assert(kn->kn_status & KN_SUPPRESSED);
knote_unsuppress(kn);
}
procwait = (kq->kq_state & KQ_PROCWAIT);
kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
if (procwait) {
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq),
THREAD_AWAKENED,
WAITQ_ALL_PRIORITIES);
}
}
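/*
 * kqueue_workloop_ctl_internal - KQ_WORKLOOP_CREATE pre-creates a
 * dynamic workloop carrying scheduling parameters (priority, policy,
 * CPU percent); KQ_WORKLOOP_DESTROY drops the creation reference.
 */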
static int
kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
struct kqueue_workloop_params *params, int *retval)
{
int error = 0;
int fd;
struct fileproc *fp;
struct kqueue *kq;
struct kqworkloop *kqwl;
struct filedesc *fdp = p->p_fd;
workq_threadreq_param_t trp = { };
switch (cmd) {
case KQ_WORKLOOP_CREATE:
if (!params->kqwlp_flags) {
error = EINVAL;
break;
}
if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
(params->kqwlp_sched_pri < 1 ||
params->kqwlp_sched_pri > 63 )) {
error = EINVAL;
break;
}
if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
invalid_policy(params->kqwlp_sched_pol)) {
error = EINVAL;
break;
}
if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
(params->kqwlp_cpu_percent <= 0 ||
params->kqwlp_cpu_percent > 100 ||
params->kqwlp_cpu_refillms <= 0 ||
params->kqwlp_cpu_refillms > 0x00ffffff)) {
error = EINVAL;
break;
}
if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
trp.trp_flags |= TRP_PRIORITY;
trp.trp_pri = params->kqwlp_sched_pri;
}
if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
trp.trp_flags |= TRP_POLICY;
trp.trp_pol = params->kqwlp_sched_pol;
}
if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
trp.trp_flags |= TRP_CPUPERCENT;
trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
trp.trp_refillms = params->kqwlp_cpu_refillms;
}
error = kevent_get_kq(p, params->kqwlp_id, &trp,
KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST , &fp, &fd, &kq);
if (error) {
break;
}
if (!(fdp->fd_flags & FD_WORKLOOP)) {
proc_fdlock(p);
fdp->fd_flags |= FD_WORKLOOP;
proc_fdunlock(p);
}
break;
case KQ_WORKLOOP_DESTROY:
error = kevent_get_kq(p, params->kqwlp_id, NULL,
KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST , &fp, &fd, &kq);
if (error) {
break;
}
kqlock(kq);
kqwl = (struct kqworkloop *)kq;
trp.trp_value = kqwl->kqwl_params;
if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
trp.trp_flags |= TRP_RELEASED;
kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
} else {
error = EINVAL;
}
kqunlock(kq);
kqueue_release_last(p, kq);
break;
}
*retval = 0;
return error;
}
int
kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
{
struct kqueue_workloop_params params = {
.kqwlp_id = 0,
};
if (uap->sz < sizeof(params.kqwlp_version)) {
return EINVAL;
}
size_t copyin_sz = MIN(sizeof(params), uap->sz);
int rv = copyin(uap->addr, ¶ms, copyin_sz);
if (rv) {
return rv;
}
if (params.kqwlp_version != (int)uap->sz) {
return EINVAL;
}
return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
retval);
}
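/*
 * kqueue_process - drain the queued knotes, highest QoS bucket first,
 * calling knote_process() on each until the queues are empty or the
 * output list fills up.  Retries whenever *_end_processing reports
 * that more work arrived during a parking attempt.
 */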
static int
kqueue_process(struct kqueue *kq,
kevent_callback_t callback,
void *callback_data,
struct filt_process_s *process_data,
int *countp)
{
struct uthread *ut = get_bsdthread_info(current_thread());
struct kqrequest *kqr = ut->uu_kqr_bound;
struct knote *kn;
unsigned int flags = process_data ? process_data->fp_flags : 0;
int nevents = 0, error = 0, rc = 0;
struct kqtailq *base_queue, *queue;
kqueue_t kqu = { .kq = kq };
#if DEBUG || DEVELOPMENT
int retries = 64;
#endif
if (kq->kq_state & KQ_WORKQ) {
if (kqr == NULL || (kqr->kqr_state & KQR_WORKLOOP)) {
return EJUSTRETURN;
}
rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
} else if (kq->kq_state & KQ_WORKLOOP) {
if (ut->uu_kqr_bound != &kqu.kqwl->kqwl_request) {
return EJUSTRETURN;
}
rc = kqworkloop_begin_processing(kqu.kqwl, flags);
} else {
rc = kqfile_begin_processing(kq);
}
if (rc == -1) {
*countp = 0;
return 0;
}
process_again:
if (kq->kq_state & KQ_WORKQ) {
base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->kqr_qos_index];
} else if (kq->kq_state & KQ_WORKLOOP) {
base_queue = &kqu.kqwl->kqwl_queue[0];
queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
} else {
base_queue = queue = &kq->kq_queue[QOS_INDEX_KQFILE];
}
do {
while (error == 0 && (kn = TAILQ_FIRST(queue)) != NULL) {
error = knote_process(kn, callback, callback_data, process_data);
if (error == EJUSTRETURN) {
error = 0;
} else {
nevents++;
}
}
if (error == EWOULDBLOCK) {
error = 0;
break;
}
} while (queue-- > base_queue);
*countp = nevents;
if (error || nevents) flags &= ~KEVENT_FLAG_PARKING;
if (kq->kq_state & KQ_WORKQ) {
rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
} else if (kq->kq_state & KQ_WORKLOOP) {
rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
} else {
kqfile_end_processing(kq);
rc = 0;
}
if (rc == -1) {
assert(flags & KEVENT_FLAG_PARKING);
#if DEBUG || DEVELOPMENT
if (retries-- == 0) {
panic("kevent: way too many knote_process retries, kq: %p (0x%02x)",
kq, kq->kq_state);
}
#endif
goto process_again;
}
return error;
}
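/*
 * kqueue_scan_continue - resumed after the scan blocked: rescan and
 * either block again or pass the outcome to the saved continuation.
 */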
static void
kqueue_scan_continue(void *data, wait_result_t wait_result)
{
thread_t self = current_thread();
uthread_t ut = (uthread_t)get_bsdthread_info(self);
struct _kqueue_scan * cont_args = &ut->uu_save.uus_kqueue_scan;
struct kqueue *kq = (struct kqueue *)data;
struct filt_process_s *process_data = cont_args->process_data;
int error;
int count;
switch (wait_result) {
case THREAD_AWAKENED: {
kqlock(kq);
retry:
error = kqueue_process(kq, cont_args->call, cont_args->data,
process_data, &count);
if (error == 0 && count == 0) {
if (kq->kq_state & KQ_DRAIN) {
kqunlock(kq);
goto drain;
}
if (kq->kq_state & KQ_WAKEUP)
goto retry;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
KQ_EVENT, THREAD_ABORTSAFE,
cont_args->deadline);
kq->kq_state |= KQ_SLEEP;
kqunlock(kq);
thread_block_parameter(kqueue_scan_continue, kq);
}
kqunlock(kq);
} break;
case THREAD_TIMED_OUT:
error = EWOULDBLOCK;
break;
case THREAD_INTERRUPTED:
error = EINTR;
break;
case THREAD_RESTART:
drain:
error = EBADF;
break;
default:
panic("%s: - invalid wait_result (%d)", __func__,
wait_result);
error = 0;
}
assert(cont_args->cont != NULL);
(cont_args->cont)(kq, cont_args->data, error);
}
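/*
 * kqueue_scan - scan a kqueue for triggered events, blocking until the
 * deadline when none are pending.  Given a continuation, the thread
 * blocks without holding a kernel stack and resumes in
 * kqueue_scan_continue().
 */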
int
kqueue_scan(struct kqueue *kq,
kevent_callback_t callback,
kqueue_continue_t continuation,
void *callback_data,
struct filt_process_s *process_data,
struct timeval *atvp,
__unused struct proc *p)
{
thread_continue_t cont = THREAD_CONTINUE_NULL;
unsigned int flags;
uint64_t deadline;
int error;
int first;
int fd;
assert(callback != NULL);
flags = (process_data) ? process_data->fp_flags : 0;
fd = (process_data) ? process_data->fp_fd : -1;
first = 1;
for (;;) {
wait_result_t wait_result;
int count;
kqlock(kq);
error = kqueue_process(kq, callback, callback_data,
process_data, &count);
if (error || count)
break;
if (first) {
first = 0;
if (atvp->tv_sec || atvp->tv_usec) {
uint64_t now;
clock_get_uptime(&now);
nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
atvp->tv_usec * (long)NSEC_PER_USEC,
&deadline);
if (now >= deadline) {
error = EWOULDBLOCK;
break;
}
deadline -= now;
clock_absolutetime_interval_to_deadline(deadline, &deadline);
} else {
deadline = 0;
}
if (continuation) {
uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
struct _kqueue_scan *cont_args = &ut->uu_save.uus_kqueue_scan;
cont_args->call = callback;
cont_args->cont = continuation;
cont_args->deadline = deadline;
cont_args->data = callback_data;
cont_args->process_data = process_data;
cont = kqueue_scan_continue;
}
}
if (kq->kq_state & KQ_DRAIN) {
kqunlock(kq);
return EBADF;
}
if (kq->kq_state & KQ_WAKEUP) {
kqunlock(kq);
continue;
}
waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
KQ_EVENT, THREAD_ABORTSAFE,
TIMEOUT_URGENCY_USER_NORMAL,
deadline, TIMEOUT_NO_LEEWAY);
kq->kq_state |= KQ_SLEEP;
kqunlock(kq);
wait_result = thread_block_parameter(cont, kq);
switch (wait_result) {
case THREAD_AWAKENED:
continue;
case THREAD_TIMED_OUT:
return EWOULDBLOCK;
case THREAD_INTERRUPTED:
return EINTR;
case THREAD_RESTART:
return EBADF;
default:
panic("%s: - bad wait_result (%d)", __func__,
wait_result);
error = 0;
}
}
kqunlock(kq);
return (error);
}
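/*
 * A kqueue cannot be read or written directly; the remaining fileops
 * implement select, close, drain and nesting via kqfilter.
 */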
static int
kqueue_read(__unused struct fileproc *fp,
__unused struct uio *uio,
__unused int flags,
__unused vfs_context_t ctx)
{
return (ENXIO);
}
static int
kqueue_write(__unused struct fileproc *fp,
__unused struct uio *uio,
__unused int flags,
__unused vfs_context_t ctx)
{
return (ENXIO);
}
static int
kqueue_ioctl(__unused struct fileproc *fp,
__unused u_long com,
__unused caddr_t data,
__unused vfs_context_t ctx)
{
return (ENOTTY);
}
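/*
 * kqueue_select - select/poll support: link the select set to the
 * kqueue's waitq, then report readable if any queued knote is active
 * (confirmed through the filter's f_peek for stay-active knotes).
 */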
static int
kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
__unused vfs_context_t ctx)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
struct kqtailq *queue;
struct kqtailq *suppressq;
struct knote *kn;
int retnum = 0;
if (which != FREAD)
return (0);
kqlock(kq);
assert((kq->kq_state & KQ_WORKQ) == 0);
if (wq_link_id != NULL) {
thread_t cur_act = current_thread();
struct uthread * ut = get_bsdthread_info(cur_act);
kq->kq_state |= KQ_SEL;
waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
waitq_link_release(*(uint64_t *)wq_link_id);
*(uint64_t *)wq_link_id = 0;
void *wqptr = &kq->kq_wqs;
memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
}
if (kqfile_begin_processing(kq) == -1) {
kqunlock(kq);
return (0);
}
queue = &kq->kq_queue[QOS_INDEX_KQFILE];
if (!TAILQ_EMPTY(queue)) {
while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
if (kn->kn_status & KN_ACTIVE) {
retnum = 1;
goto out;
}
assert(kn->kn_status & KN_STAYACTIVE);
knote_suppress(kn);
}
suppressq = kqueue_get_suppressed_queue(kq, NULL);
while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
KNOTE_LOCK_CTX(knlc);
int result = 0;
if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc,
KNOTE_KQ_LOCK_ON_FAILURE)) {
continue;
}
result = filter_call(knote_fops(kn), f_peek(kn));
kqlock(kq);
knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
knote_unsuppress(kn);
if (result & FILTER_ACTIVE) {
retnum = 1;
goto out;
}
}
}
out:
kqfile_end_processing(kq);
kqunlock(kq);
return (retnum);
}
static int
kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
struct kqfile *kqf = (struct kqfile *)fg->fg_data;
assert((kqf->kqf_state & KQ_WORKQ) == 0);
kqueue_dealloc(&kqf->kqf_kqueue);
fg->fg_data = NULL;
return (0);
}
#define MAX_NESTED_KQ 1000
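/*
 * kqueue_kqfilter - EVFILT_READ on a kqueue itself.  kq_level tracks
 * the nesting depth so that kqueues nest at most MAX_NESTED_KQ deep
 * and cannot form cycles.
 */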
static int
kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn,
__unused struct kevent_internal_s *kev, __unused vfs_context_t ctx)
{
struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
struct kqueue *kq = &kqf->kqf_kqueue;
struct kqueue *parentkq = knote_get_kq(kn);
uint16_t plevel = 0;
assert((kqf->kqf_state & KQ_WORKQ) == 0);
if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
knote_set_error(kn, EINVAL);
return 0;
}
kqlock(parentkq);
if (parentkq->kq_level > 0 &&
parentkq->kq_level < kq->kq_level)
{
kqunlock(parentkq);
knote_set_error(kn, EINVAL);
return 0;
} else {
plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
if (plevel < kq->kq_level + 1) {
if (kq->kq_level + 1 > MAX_NESTED_KQ) {
kqunlock(parentkq);
knote_set_error(kn, EINVAL);
return 0;
}
plevel = kq->kq_level + 1;
}
parentkq->kq_level = plevel;
kqunlock(parentkq);
kn->kn_filtid = EVFILTID_KQREAD;
kqlock(kq);
KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
if (kq->kq_level == 0)
kq->kq_level = 1;
int count = kq->kq_count;
kqunlock(kq);
return (count > 0);
}
}
static int
kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
assert((kq->kq_state & KQ_WORKQ) == 0);
kqlock(kq);
kq->kq_state |= KQ_DRAIN;
kqueue_interrupt(kq);
kqunlock(kq);
return (0);
}
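/*
 * kqueue_stat - fstat(2) on a kqueue: st_size is the number of queued
 * events and st_blksize the size of the kevent flavor in use.
 */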
int
kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
{
assert((kq->kq_state & KQ_WORKQ) == 0);
kqlock(kq);
if (isstat64 != 0) {
struct stat64 *sb64 = (struct stat64 *)ub;
bzero((void *)sb64, sizeof(*sb64));
sb64->st_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
sb64->st_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
sb64->st_blksize = sizeof(struct kevent64_s);
else if (IS_64BIT_PROCESS(p))
sb64->st_blksize = sizeof(struct user64_kevent);
else
sb64->st_blksize = sizeof(struct user32_kevent);
sb64->st_mode = S_IFIFO;
} else {
struct stat *sb = (struct stat *)ub;
bzero((void *)sb, sizeof(*sb));
sb->st_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
sb->st_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
sb->st_blksize = sizeof(struct kevent64_s);
else if (IS_64BIT_PROCESS(p))
sb->st_blksize = sizeof(struct user64_kevent);
else
sb->st_blksize = sizeof(struct user32_kevent);
sb->st_mode = S_IFIFO;
}
kqunlock(kq);
return (0);
}
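/*
 * kqueue_threadreq_initiate - ask the workq subsystem for a servicer
 * thread at the given QoS, passing the workloop turnstile along.
 * Takes a kqueue reference that is given back if the request cannot
 * be posted.
 */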
static void
kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr,
kq_index_t qos, int flags)
{
assert(kqr->kqr_state & KQR_WAKEUP);
assert(kqr->kqr_thread == THREAD_NULL);
assert((kqr->kqr_state & KQR_THREQUESTED) == 0);
struct turnstile *ts = TURNSTILE_NULL;
if (workq_is_exiting(kq->kq_p)) {
return;
}
kqueue_retain(kq);
kq_req_held(kq);
if (kq->kq_state & KQ_WORKLOOP) {
__assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq;
assert(kqwl->kqwl_owner == THREAD_NULL);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
kqwl->kqwl_dynamicid, 0, qos, kqr->kqr_state);
ts = kqwl->kqwl_turnstile;
} else {
assert(kq->kq_state & KQ_WORKQ);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST),
-1, 0, qos, kqr->kqr_state);
}
kqr->kqr_state |= KQR_THREQUESTED;
if ((kq->kq_state & KQ_WORKLOOP) && current_proc() == kq->kq_p) {
flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
}
if (qos == KQWQ_QOS_MANAGER) {
qos = WORKQ_THREAD_QOS_MANAGER;
}
if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) {
kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF);
}
}
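/*
 * kqueue_threadreq_bind_prepost/_commit/_bind - two-phase binding of a
 * workq thread to a thread request: prepost marks the request as
 * binding and points the workloop turnstile at the thread; bind (or
 * bind_commit) then records the servicer and applies any pending QoS
 * override to it.
 */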
void
kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t req,
thread_t thread)
{
struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req);
struct uthread *ut = get_bsdthread_info(thread);
req->tr_binding_thread = thread;
ut->uu_kqr_bound = kqr;
req->tr_state = TR_STATE_BINDING;
struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
if (kqwl && kqwl->kqwl_turnstile) {
struct turnstile *ts = kqwl->kqwl_turnstile;
turnstile_update_inheritor(ts, thread, TURNSTILE_IMMEDIATE_UPDATE |
TURNSTILE_INHERITOR_THREAD);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
}
}
void
kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
{
struct uthread *ut = get_bsdthread_info(thread);
struct kqrequest *kqr = ut->uu_kqr_bound;
kqueue_t kqu = kqr_kqueue(p, kqr);
kq_req_lock(kqu);
if (kqr->kqr_req.tr_state == TR_STATE_BINDING) {
kqueue_threadreq_bind(p, &kqr->kqr_req, thread, 0);
}
kq_req_unlock(kqu);
}
static void
kqueue_threadreq_modify(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos)
{
assert(kqr->kqr_state & KQR_THREQUESTED);
assert(kqr->kqr_thread == THREAD_NULL);
kq_req_held(kq);
int flags = 0;
if ((kq->kq_state & KQ_WORKLOOP) && kq->kq_p == current_proc()) {
flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
}
workq_kern_threadreq_modify(kq->kq_p, kqr, qos, flags);
}
void
kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, thread_t thread,
unsigned int flags)
{
struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req);
kqueue_t kqu = kqr_kqueue(p, kqr);
struct uthread *ut = get_bsdthread_info(thread);
kq_req_held(kqu);
assert(kqr->kqr_state & KQR_THREQUESTED);
assert(kqr->kqr_thread == THREAD_NULL);
assert(ut->uu_kqueue_override == 0);
if (kqr->kqr_req.tr_state == TR_STATE_BINDING) {
assert(ut->uu_kqr_bound == kqr);
assert(kqr->kqr_req.tr_binding_thread == thread);
kqr->kqr_req.tr_state = TR_STATE_IDLE;
kqr->kqr_req.tr_binding_thread = NULL;
} else {
assert(ut->uu_kqr_bound == NULL);
}
ut->uu_kqr_bound = kqr;
kqr->kqr_thread = thread;
if (kqu.kq->kq_state & KQ_WORKLOOP) {
struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
kqu.kqwl->kqwl_owner = THREAD_NULL;
if (kqworkloop_owner_override(kqu.kqwl)) {
thread_drop_ipc_override(thread);
}
thread_ends_owning_workloop(thread);
}
if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
}
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
thread_tid(thread), kqr->kqr_qos_index,
(kqr->kqr_override_index << 16) | kqr->kqr_state);
ut->uu_kqueue_override = kqr->kqr_override_index;
if (kqr->kqr_override_index) {
thread_add_ipc_override(thread, kqr->kqr_override_index);
}
} else {
assert(kqr->kqr_override_index == 0);
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
thread_tid(thread), kqr->kqr_qos_index,
(kqr->kqr_override_index << 16) | kqr->kqr_state);
}
}
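/*
 * kqueue_threadreq_cancel - the workq subsystem gave up on the
 * request: clear the request state and drop the reference it held.
 */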
void
kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t req)
{
struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req);
kqueue_t kqu = kqr_kqueue(p, kqr);
kq_req_lock(kqu);
assert(kqr->kqr_thread == THREAD_NULL);
assert(kqr->kqr_state & KQR_THREQUESTED);
kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
kq_req_unlock(kqu);
kqueue_release_last(p, kqu);
}
workq_threadreq_param_t
kqueue_threadreq_workloop_param(workq_threadreq_t req)
{
struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req);
struct kqworkloop *kqwl;
workq_threadreq_param_t trp;
assert(kqr->kqr_state & KQR_WORKLOOP);
kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
trp.trp_value = kqwl->kqwl_params;
return trp;
}
void
kqueue_threadreq_unbind(struct proc *p, struct kqrequest *kqr)
{
if (kqr->kqr_state & KQR_WORKLOOP) {
kqworkloop_unbind(p, kqr_kqworkloop(kqr));
} else {
kqworkq_unbind(p, kqr);
}
}
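/*
 * kqworkq_request_help - a knote was queued on a workq kqueue: record
 * the wakeup for its QoS bucket and request a servicer thread if one
 * is not already requested.
 */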
static void
kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index)
{
struct kqrequest *kqr;
assert(qos_index < KQWQ_NBUCKETS);
kq_req_lock(kqwq);
kqr = kqworkq_get_request(kqwq, qos_index);
if ((kqr->kqr_state & KQR_WAKEUP) == 0) {
kqr->kqr_state |= KQR_WAKEUP;
if ((kqr->kqr_state & KQR_THREQUESTED) == 0) {
kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
}
}
kq_req_unlock(kqwq);
}
static kq_index_t
kqworkloop_owner_override(struct kqworkloop *kqwl)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
return MAX(kqr->kqr_qos_index, kqr->kqr_override_index);
}
static inline void
kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
kq_req_held(kqwl);
if (kqr->kqr_state & KQR_R2K_NOTIF_ARMED) {
assert(kqr->kqr_thread);
kqr->kqr_state &= ~KQR_R2K_NOTIF_ARMED;
act_set_astkevent(kqr->kqr_thread, AST_KEVENT_RETURN_TO_KERNEL);
}
}
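/*
 * kqworkloop_update_threads_qos - central QoS state machine for a
 * workloop: update the wakeup-index bits and override for the given
 * operation, then push the resulting override onto the owner or the
 * servicer thread, or initiate/modify the thread request as needed.
 */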
static void
kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
{
struct kqrequest *kqr = &kqwl->kqwl_request;
struct kqueue *kq = &kqwl->kqwl_kqueue;
kq_index_t old_owner_override = kqworkloop_owner_override(kqwl);
kq_index_t i;
kq_req_held(kqwl);
switch (op) {
case KQWL_UTQ_UPDATE_WAKEUP_QOS:
if (qos == KQWL_BUCKET_STAYACTIVE) {
kqr->kqr_wakeup_indexes |= KQWL_STAYACTIVE_FIRED_BIT;
qos = kqr->kqr_stayactive_qos;
assert(qos);
}
if (kqr->kqr_wakeup_indexes & (1 << qos)) {
assert(kqr->kqr_state & KQR_WAKEUP);
break;
}
kqr->kqr_wakeup_indexes |= (1 << qos);
kqr->kqr_state |= KQR_WAKEUP;
kqworkloop_request_fire_r2k_notification(kqwl);
goto recompute;
case KQWL_UTQ_UPDATE_STAYACTIVE_QOS:
assert(qos);
if (kqr->kqr_stayactive_qos < qos) {
kqr->kqr_stayactive_qos = qos;
if (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT) {
assert(kqr->kqr_state & KQR_WAKEUP);
kqr->kqr_wakeup_indexes |= (1 << qos);
goto recompute;
}
}
break;
case KQWL_UTQ_PARKING:
case KQWL_UTQ_UNBINDING:
kqr->kqr_override_index = qos;
/* FALLTHROUGH */
case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
assert(qos == THREAD_QOS_UNSPECIFIED);
}
kqlock_held(kqwl); /* to look at kq_queues */
i = KQWL_BUCKET_STAYACTIVE;
if (TAILQ_EMPTY(&kqr->kqr_suppressed)) {
kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
}
if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) &&
(kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) {
kqr->kqr_wakeup_indexes &= KQWL_STAYACTIVE_FIRED_BIT;
kqr->kqr_wakeup_indexes |= (1 << kqr->kqr_stayactive_qos);
} else {
kqr->kqr_wakeup_indexes = 0;
}
for (i = THREAD_QOS_UNSPECIFIED + 1; i < KQWL_BUCKET_STAYACTIVE; i++) {
if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) {
kqr->kqr_wakeup_indexes |= (1 << i);
}
}
if (kqr->kqr_wakeup_indexes) {
kqr->kqr_state |= KQR_WAKEUP;
kqworkloop_request_fire_r2k_notification(kqwl);
} else {
kqr->kqr_state &= ~KQR_WAKEUP;
}
goto recompute;
case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
kqr->kqr_override_index = qos;
goto recompute;
case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
recompute:
if (kqr->kqr_wakeup_indexes > (1 << qos)) {
qos = fls(kqr->kqr_wakeup_indexes) - 1;
}
if (kqr->kqr_override_index < qos) {
kqr->kqr_override_index = qos;
}
break;
case KQWL_UTQ_REDRIVE_EVENTS:
break;
case KQWL_UTQ_SET_QOS_INDEX:
kqr->kqr_qos_index = qos;
break;
default:
panic("unknown kqwl thread qos update operation: %d", op);
}
thread_t kqwl_owner = kqwl->kqwl_owner;
thread_t servicer = kqr->kqr_thread;
boolean_t qos_changed = FALSE;
kq_index_t new_owner_override = kqworkloop_owner_override(kqwl);
if (kqwl_owner) {
#if 0
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->kqr_qos_index,
(kqr->kqr_override_index << 16) | kqr->kqr_state);
#endif
if (new_owner_override == old_owner_override) {
/* nothing to do: the owner boost is unchanged */
} else if (old_owner_override == THREAD_QOS_UNSPECIFIED) {
thread_add_ipc_override(kqwl_owner, new_owner_override);
} else if (new_owner_override == THREAD_QOS_UNSPECIFIED) {
thread_drop_ipc_override(kqwl_owner);
} else {
thread_update_ipc_override(kqwl_owner, new_owner_override);
}
}
if ((kqr->kqr_state & KQR_THREQUESTED) == 0) {
if (kqwl_owner == NULL && (kqr->kqr_state & KQR_WAKEUP)) {
int initiate_flags = 0;
if (op == KQWL_UTQ_UNBINDING) {
initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
}
kqueue_threadreq_initiate(kq, kqr, new_owner_override,
initiate_flags);
}
} else if (servicer) {
struct uthread *ut = get_bsdthread_info(servicer);
if (ut->uu_kqueue_override != kqr->kqr_override_index) {
if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
thread_add_ipc_override(servicer, kqr->kqr_override_index);
} else if (kqr->kqr_override_index == THREAD_QOS_UNSPECIFIED) {
thread_drop_ipc_override(servicer);
} else {
thread_update_ipc_override(servicer, kqr->kqr_override_index);
}
ut->uu_kqueue_override = kqr->kqr_override_index;
qos_changed = TRUE;
}
} else if (new_owner_override == THREAD_QOS_UNSPECIFIED) {
/* no QoS to propagate to the pending thread request */
} else if (old_owner_override != new_owner_override) {
kqueue_threadreq_modify(kq, kqr, new_owner_override);
qos_changed = TRUE;
}
if (qos_changed) {
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
thread_tid(kqr->kqr_thread), kqr->kqr_qos_index,
(kqr->kqr_override_index << 16) | kqr->kqr_state);
}
}
static void
kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_index)
{
assert(qos_index < KQWL_NBUCKETS);
kq_req_lock(kqwl);
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos_index);
kq_req_unlock(kqwl);
}
static struct kqtailq *
kqueue_get_queue(struct kqueue *kq, kq_index_t qos_index)
{
if (kq->kq_state & KQ_WORKQ) {
assert(qos_index < KQWQ_NBUCKETS);
} else if (kq->kq_state & KQ_WORKLOOP) {
assert(qos_index < KQWL_NBUCKETS);
} else {
assert(qos_index == QOS_INDEX_KQFILE);
}
static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue),
"struct kqueue::kq_queue must be exactly at the end");
return &kq->kq_queue[qos_index];
}
static int
kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
{
return TAILQ_EMPTY(kqueue_get_queue(kq, qos_index));
}
static struct kqtailq *
kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
{
if (kq.kq->kq_state & KQ_WORKQ) {
return &kqworkq_get_request(kq.kqwq, kn->kn_qos_index)->kqr_suppressed;
} else if (kq.kq->kq_state & KQ_WORKLOOP) {
return &kq.kqwl->kqwl_request.kqr_suppressed;
} else {
return &kq.kqf->kqf_suppressed;
}
}
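/*
 * kqueue_get_turnstile - return a workloop's turnstile, optionally
 * allocating it on first use.
 *
 * KQR_ALLOCATED_TURNSTILE is published with release ordering so that
 * lock-free readers can load kqwl_turnstile with a dependency on the
 * flag; a loser of the allocation race frees its spare turnstile.
 */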
static struct turnstile *
kqueue_get_turnstile(kqueue_t kqu, bool can_alloc)
{
uint8_t kqr_state;
if ((kqu.kq->kq_state & KQ_WORKLOOP) == 0) {
return TURNSTILE_NULL;
}
kqr_state = os_atomic_load(&kqu.kqwl->kqwl_request.kqr_state, relaxed);
if (kqr_state & KQR_ALLOCATED_TURNSTILE) {
return os_atomic_load_with_dependency_on(&kqu.kqwl->kqwl_turnstile,
kqr_state);
}
if (!can_alloc) {
return TURNSTILE_NULL;
}
struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
kq_req_lock(kqu);
if (filt_wlturnstile_interlock_is_workq(kqu.kqwl)) {
workq_kern_threadreq_lock(kqu.kqwl->kqwl_p);
}
if (kqu.kqwl->kqwl_request.kqr_state & KQR_ALLOCATED_TURNSTILE) {
free_ts = ts;
ts = kqu.kqwl->kqwl_turnstile;
} else {
ts = turnstile_prepare((uintptr_t)kqu.kqwl, &kqu.kqwl->kqwl_turnstile,
ts, TURNSTILE_WORKLOOPS);
os_atomic_or(&kqu.kqwl->kqwl_request.kqr_state,
KQR_ALLOCATED_TURNSTILE, release);
}
if (filt_wlturnstile_interlock_is_workq(kqu.kqwl)) {
workq_kern_threadreq_unlock(kqu.kqwl->kqwl_p);
}
kq_req_unlock(kqu.kqwl);
if (free_ts) {
turnstile_deallocate(free_ts);
}
return ts;
}
struct turnstile *
kqueue_turnstile(struct kqueue *kq)
{
return kqueue_get_turnstile(kq, false);
}
struct turnstile *
kqueue_alloc_turnstile(struct kqueue *kq)
{
return kqueue_get_turnstile(kq, true);
}
static struct kqtailq *
knote_get_queue(struct knote *kn)
{
return kqueue_get_queue(knote_get_kq(kn), kn->kn_qos_index);
}
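/*
 * knote_reset_priority - derive the knote's QoS from the pthread
 * priority supplied at registration, normalized per kqueue flavor:
 * workq knotes without a QoS fall back to the manager bucket, and
 * kqfile knotes carry no QoS at all.
 */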
static void
knote_reset_priority(struct knote *kn, pthread_priority_t pp)
{
struct kqueue *kq = knote_get_kq(kn);
kq_index_t qos = _pthread_priority_thread_qos(pp);
assert((kn->kn_status & KN_QUEUED) == 0);
if (kq->kq_state & KQ_WORKQ) {
if (qos == THREAD_QOS_UNSPECIFIED) {
qos = KQWQ_QOS_MANAGER;
pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
} else {
pp = _pthread_priority_normalize(pp);
}
} else if (kq->kq_state & KQ_WORKLOOP) {
assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
pp = _pthread_priority_normalize(pp);
} else {
pp = _pthread_unspecified_priority();
qos = THREAD_QOS_UNSPECIFIED;
}
kn->kn_qos = pp;
kn->kn_req_index = qos;
if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
kn->kn_qos_override = qos;
}
if ((kn->kn_status & KN_SUPPRESSED) == 0) {
kn->kn_qos_index = qos;
} else if (kq->kq_state & KQ_WORKQ) {
kqworkq_update_override((struct kqworkq *)kq, kn, qos);
} else if (kq->kq_state & KQ_WORKLOOP) {
kqworkloop_update_override((struct kqworkloop *)kq, qos);
}
}
static void
knote_set_qos_overcommit(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
(kq->kq_state & KQ_WORKLOOP)) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
if (kqr->kqr_state & KQR_THOVERCOMMIT) {
return;
}
kq_req_lock(kqwl);
kqr->kqr_state |= KQR_THOVERCOMMIT;
if (!kqr->kqr_thread && (kqr->kqr_state & KQR_THREQUESTED)) {
kqueue_threadreq_modify(kq, kqr, kqr->kqr_req.tr_qos);
}
kq_req_unlock(kqwl);
}
}
static kq_index_t
knote_get_qos_override_index(struct knote *kn)
{
return kn->kn_qos_override;
}
static void
kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
kq_index_t override_index)
{
struct kqrequest *kqr;
kq_index_t old_override_index;
kq_index_t queue_index = kn->kn_qos_index;
if (override_index <= queue_index) {
return;
}
kqr = kqworkq_get_request(kqwq, queue_index);
kq_req_lock(kqwq);
old_override_index = kqr->kqr_override_index;
if (override_index > MAX(kqr->kqr_qos_index, old_override_index)) {
kqr->kqr_override_index = override_index;
if (kqr->kqr_thread) {
if (old_override_index)
thread_update_ipc_override(kqr->kqr_thread, override_index);
else
thread_add_ipc_override(kqr->kqr_thread, override_index);
}
}
kq_req_unlock(kqwq);
}
static void
kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t override_index)
{
kq_req_lock(kqwl);
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
override_index);
kq_req_unlock(kqwl);
}
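/*
 * kqworkloop_unbind_locked - detach the servicer from the workloop
 * with the request lock held. Returns the IPC override the thread was
 * boosted with so the caller can drop it after unlocking.
 */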
static thread_qos_t
kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread)
{
struct uthread *ut = get_bsdthread_info(thread);
struct kqrequest *kqr = &kqwl->kqwl_request;
kq_index_t ipc_override = ut->uu_kqueue_override;
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
thread_tid(thread), 0, 0);
kq_req_held(kqwl);
assert(ut->uu_kqr_bound == kqr);
ut->uu_kqr_bound = NULL;
ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
turnstile_update_inheritor(kqwl->kqwl_turnstile,
TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
TURNSTILE_INTERLOCK_HELD);
}
kqr->kqr_thread = NULL;
kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
return ipc_override;
}
static void
kqworkloop_unbind(proc_t p, struct kqworkloop *kqwl)
{
struct kqueue *kq = &kqwl->kqwl_kqueue;
struct kqrequest *kqr = &kqwl->kqwl_request;
thread_t thread = kqr->kqr_thread;
int op = KQWL_UTQ_PARKING;
kq_index_t ipc_override, qos_override = THREAD_QOS_UNSPECIFIED;
assert(thread == current_thread());
kqlock(kqwl);
assert((kq->kq_state & KQ_PROCESSING) == 0);
if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) {
kq->kq_state |= KQ_PROCESSING;
qos_override = kqworkloop_acknowledge_events(kqwl);
kq->kq_state &= ~KQ_PROCESSING;
}
kq_req_lock(kqwl);
ipc_override = kqworkloop_unbind_locked(kqwl, thread);
kqworkloop_update_threads_qos(kqwl, op, qos_override);
kq_req_unlock(kqwl);
kqunlock(kqwl);
if (ipc_override) {
thread_drop_ipc_override(thread);
}
kqueue_release_last(p, kqwl);
}
static thread_qos_t
kqworkq_unbind_locked(__assert_only struct kqworkq *kqwq,
struct kqrequest *kqr, thread_t thread)
{
struct uthread *ut = get_bsdthread_info(thread);
kq_index_t old_override = kqr->kqr_override_index;
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, 0);
kq_req_held(kqwq);
assert(ut->uu_kqr_bound == kqr);
ut->uu_kqr_bound = NULL;
kqr->kqr_thread = NULL;
kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED);
kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED;
return old_override;
}
static void
kqworkq_unbind(proc_t p, struct kqrequest *kqr)
{
struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue;
__assert_only int rc;
kqlock(kqwq);
rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
assert(rc == -1);
kqunlock(kqwq);
}
struct kqrequest *
kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
{
assert(qos_index < KQWQ_NBUCKETS);
return &kqwq->kqwq_request[qos_index];
}
static void
knote_apply_qos_override(struct knote *kn, kq_index_t qos_index)
{
assert((kn->kn_status & KN_QUEUED) == 0);
kn->kn_qos_override = qos_index;
if (kn->kn_status & KN_SUPPRESSED) {
struct kqueue *kq = knote_get_kq(kn);
if (kq->kq_state & KQ_WORKQ) {
kqworkq_update_override((struct kqworkq *)kq, kn, qos_index);
} else {
kqworkloop_update_override((struct kqworkloop *)kq, qos_index);
}
} else {
kn->kn_qos_index = qos_index;
}
}
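/*
 * Decide whether the QoS change a filter requested (encoded in the
 * FILTER_ADJUST_EVENT_QOS bits of its result) should be applied, and
 * compute the value to apply. Manager and stay-active knotes never
 * change QoS; while a knote is locked and in use, the change is
 * deferred by setting KN_MERGE_QOS.
 */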
static bool
knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, int result,
thread_qos_t *qos_out)
{
thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
kqlock_held(kq);
assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
assert(qos_index < THREAD_QOS_LAST);
if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
panic("filter %d cannot change QoS", kn->kn_filtid);
} else if (kq->kq_state & KQ_WORKLOOP) {
if (kn->kn_req_index == KQWL_BUCKET_STAYACTIVE) {
return false;
}
} else if (kq->kq_state & KQ_WORKQ) {
if (kn->kn_req_index == KQWQ_QOS_MANAGER) {
return false;
}
} else {
return false;
}
if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
if (qos_index == THREAD_QOS_UNSPECIFIED)
qos_index = kn->kn_req_index;
} else {
if (qos_index < kn->kn_req_index)
qos_index = kn->kn_req_index;
}
if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
return false;
}
if ((kn->kn_status & KN_LOCKED) && kn->kn_inuse) {
kn->kn_status |= KN_MERGE_QOS;
}
if (kn->kn_qos_override == qos_index) {
return false;
}
*qos_out = qos_index;
return true;
}
static void
knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
{
thread_qos_t qos;
if (knote_should_apply_qos_override(kq, kn, result, &qos)) {
knote_dequeue(kn);
knote_apply_qos_override(kn, qos);
if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) {
knote_wakeup(kn);
}
}
}
static void
knote_wakeup(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
kqlock_held(kq);
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
kqworkq_request_help(kqwq, kn->kn_qos_index);
} else if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
if (!kqworkloop_is_processing_on_current_thread(kqwl)) {
kqworkloop_request_help(kqwl, kn->kn_qos_index);
}
} else {
struct kqfile *kqf = (struct kqfile *)kq;
if (kq->kq_state & KQ_PROCESSING)
kq->kq_state |= KQ_WAKEUP;
if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT,
THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
}
KNOTE(&kqf->kqf_sel.si_note, 0);
}
}
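/*
 * kqueue_interrupt - wake every thread sleeping or selecting on a
 * kqfile, and any thread waiting on its suppressed queue, with
 * THREAD_RESTART so they back out (used when draining the kqueue).
 */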
static void
kqueue_interrupt(struct kqueue *kq)
{
assert((kq->kq_state & KQ_WORKQ) == 0);
if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
(void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
KQ_EVENT,
THREAD_RESTART,
WAITQ_ALL_PRIORITIES);
}
if (kq->kq_state & KQ_PROCWAIT) {
struct kqtailq *suppressq;
assert(kq->kq_state & KQ_PROCESSING);
kq->kq_state &= ~KQ_PROCWAIT;
suppressq = kqueue_get_suppressed_queue(kq, NULL);
(void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq),
THREAD_RESTART,
WAITQ_ALL_PRIORITIES);
}
}
void
waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
{
#pragma unused(knote_hook, qos)
struct kqueue *kq = (struct kqueue *)kq_hook;
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER);
} else if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
kqworkloop_request_help(kqwl, KQWL_BUCKET_STAYACTIVE);
}
}
void
klist_init(struct klist *list)
{
SLIST_INIT(list);
}
void
knote(struct klist *list, long hint)
{
struct knote *kn;
SLIST_FOREACH(kn, list, kn_selnext) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
knote_call_filter_event(kq, kn, hint);
kqunlock(kq);
}
}
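/*
 * knote_attach/knote_detach - maintain a source's klist. Attach
 * returns true if the list was empty beforehand; detach returns true
 * once the list becomes empty, so sources can lazily manage their
 * event registration state.
 */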
int
knote_attach(struct klist *list, struct knote *kn)
{
int ret = SLIST_EMPTY(list);
SLIST_INSERT_HEAD(list, kn, kn_selnext);
return (ret);
}
int
knote_detach(struct klist *list, struct knote *kn)
{
SLIST_REMOVE(list, kn, knote, kn_selnext);
return (SLIST_EMPTY(list));
}
void
knote_vanish(struct klist *list)
{
struct knote *kn;
struct knote *kn_next;
SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
if (kn->kn_status & KN_REQVANISH) {
kn->kn_status |= KN_VANISHED;
knote_activate(kn);
} else {
knote_call_filter_event(kq, kn, NOTE_REVOKE);
}
kqunlock(kq);
}
}
void
knote_link_waitqset_lazy_alloc(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
waitq_set_lazy_init_link(&kq->kq_wqs);
}
boolean_t
knote_link_waitqset_should_lazy_alloc(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
return waitq_set_should_lazy_init_link(&kq->kq_wqs);
}
int
knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
{
struct kqueue *kq = knote_get_kq(kn);
kern_return_t kr;
kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
if (kr == KERN_SUCCESS) {
knote_markstayactive(kn);
return (0);
} else {
return (EINVAL);
}
}
int
knote_unlink_waitq(struct knote *kn, struct waitq *wq)
{
struct kqueue *kq = knote_get_kq(kn);
kern_return_t kr;
kr = waitq_unlink(wq, &kq->kq_wqs);
knote_clearstayactive(kn);
return ((kr != KERN_SUCCESS) ? EINVAL : 0);
}
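/*
 * knote_fdclose - called when a file descriptor closes: every knote
 * attached to the fd is either marked vanished (KN_REQVANISH) or
 * dropped. The fd lock is released around filter detach calls, so the
 * list scan restarts from the head after each knote is processed.
 */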
void
knote_fdclose(struct proc *p, int fd)
{
struct klist *list;
struct knote *kn;
KNOTE_LOCK_CTX(knlc);
restart:
list = &p->p_fd->fd_knlist[fd];
SLIST_FOREACH(kn, list, kn_link) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
if (kq->kq_p != p)
panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
__func__, kq->kq_p, p);
if (kn->kn_status & KN_VANISHED) {
kqunlock(kq);
continue;
}
proc_fdunlock(p);
if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
/* the knote was dropped while we waited for its lock: nothing to do */
} else if (kn->kn_status & KN_REQVANISH) {
kn->kn_status |= KN_VANISHED;
kn->kn_status &= ~KN_ATTACHED;
kqunlock(kq);
knote_fops(kn)->f_detach(kn);
if (knote_fops(kn)->f_isfd)
fp_drop(p, kn->kn_id, kn->kn_fp, 0);
kqlock(kq);
knote_activate(kn);
knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
} else {
knote_drop(kq, kn, &knlc);
}
proc_fdlock(p);
goto restart;
}
}
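/*
 * knote_fdfind - find an existing knote matching (kqueue, ident,
 * filter) in the fd knlist or the knhash, honoring EV_UDATA_SPECIFIC
 * matching. Called with the proc fd lock or knhash lock held.
 */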
static struct knote *
knote_fdfind(struct kqueue *kq,
struct kevent_internal_s *kev,
bool is_fd,
struct proc *p)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
struct knote *kn = NULL;
if (is_fd) {
if (kev->ident < (u_int)fdp->fd_knlistsize) {
list = &fdp->fd_knlist[kev->ident];
}
} else if (fdp->fd_knhashmask != 0) {
list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
}
if (list != NULL) {
SLIST_FOREACH(kn, list, kn_link) {
if (kq == knote_get_kq(kn) &&
kev->ident == kn->kn_id &&
kev->filter == kn->kn_filter) {
if (kev->flags & EV_UDATA_SPECIFIC) {
if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
kev->udata == kn->kn_udata) {
break;
}
} else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
break;
}
}
}
}
return kn;
}
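/*
 * kq_add_knote - insert a knote into the process's fd-indexed knlist
 * or the knhash, growing either structure on demand. Returns ERESTART
 * when an equivalent knote already exists, and leaves the new knote
 * locked (kq unlocked) on success.
 */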
static int
kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
struct proc *p)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
int ret = 0;
bool is_fd = knote_fops(kn)->f_isfd;
if (is_fd)
proc_fdlock(p);
else
knhash_lock(p);
if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
ret = ERESTART;
goto out_locked;
}
if (!is_fd) {
if (fdp->fd_knhashmask == 0) {
u_long size = 0;
list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
if (list == NULL) {
ret = ENOMEM;
goto out_locked;
}
fdp->fd_knhash = list;
fdp->fd_knhashmask = size;
}
list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
SLIST_INSERT_HEAD(list, kn, kn_link);
ret = 0;
goto out_locked;
} else {
if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
u_int size = 0;
if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
|| kn->kn_id >= (uint64_t)maxfiles) {
ret = EINVAL;
goto out_locked;
}
size = fdp->fd_knlistsize;
while (size <= kn->kn_id)
size += KQEXTENT;
if (size >= (UINT_MAX/sizeof(struct klist *))) {
ret = EINVAL;
goto out_locked;
}
MALLOC(list, struct klist *,
size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
if (list == NULL) {
ret = ENOMEM;
goto out_locked;
}
bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
fdp->fd_knlistsize * sizeof(struct klist *));
bzero((caddr_t)list +
fdp->fd_knlistsize * sizeof(struct klist *),
(size - fdp->fd_knlistsize) * sizeof(struct klist *));
FREE(fdp->fd_knlist, M_KQUEUE);
fdp->fd_knlist = list;
fdp->fd_knlistsize = size;
}
list = &fdp->fd_knlist[kn->kn_id];
SLIST_INSERT_HEAD(list, kn, kn_link);
ret = 0;
goto out_locked;
}
out_locked:
if (ret == 0) {
kqlock(kq);
assert((kn->kn_status & KN_LOCKED) == 0);
(void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
}
if (is_fd)
proc_fdunlock(p);
else
knhash_unlock(p);
return ret;
}
static void
kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
struct knote_lock_ctx *knlc)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
uint16_t kq_state;
bool is_fd;
is_fd = knote_fops(kn)->f_isfd;
if (is_fd)
proc_fdlock(p);
else
knhash_lock(p);
if (is_fd) {
assert((u_int)fdp->fd_knlistsize > kn->kn_id);
list = &fdp->fd_knlist[kn->kn_id];
} else {
list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
}
SLIST_REMOVE(list, kn, knote, kn_link);
kqlock(kq);
kq_state = kq->kq_state;
if (knlc) {
knote_unlock_cancel(kq, kn, knlc, KNOTE_KQ_UNLOCK);
} else {
kqunlock(kq);
}
if (is_fd)
proc_fdunlock(p);
else
knhash_unlock(p);
if (kq_state & KQ_DYNAMIC)
kqueue_release_last(p, kq);
}
static struct knote *
kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev,
bool is_fd, struct proc *p)
{
struct knote * ret;
if (is_fd)
proc_fdlock(p);
else
knhash_lock(p);
ret = knote_fdfind(kq, kev, is_fd, p);
if (ret) {
kqlock(kq);
}
if (is_fd)
proc_fdunlock(p);
else
knhash_unlock(p);
return ret;
}
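/*
 * knote_drop - detach and free a knote: mark it dropping, wait for
 * concurrent filter calls to drain, detach it from its source, unlink
 * it from the fd table or knhash, release the fd reference for
 * fd-based filters, and free it.
 */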
static void
knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
{
struct proc *p = kq->kq_p;
kqlock_held(kq);
assert((kn->kn_status & KN_DROPPING) == 0);
if (knlc == NULL) {
assert((kn->kn_status & KN_LOCKED) == 0);
}
kn->kn_status |= KN_DROPPING;
knote_unsuppress(kn);
knote_dequeue(kn);
knote_wait_for_filter_events(kq, kn);
if (kn->kn_status & KN_ATTACHED) {
knote_fops(kn)->f_detach(kn);
}
kq_remove_knote(kq, kn, p, knlc);
if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0))
fp_drop(p, kn->kn_id, kn->kn_fp, 0);
knote_free(kn);
}
static void
knote_activate(struct knote *kn)
{
if (kn->kn_status & KN_ACTIVE)
return;
KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
kn->kn_filtid);
kn->kn_status |= KN_ACTIVE;
if (knote_enqueue(kn))
knote_wakeup(kn);
}
static void
knote_deactivate(struct knote *kn)
{
kn->kn_status &= ~KN_ACTIVE;
if ((kn->kn_status & KN_STAYACTIVE) == 0)
knote_dequeue(kn);
}
static void
knote_enable(struct knote *kn)
{
if ((kn->kn_status & KN_DISABLED) == 0)
return;
kn->kn_status &= ~KN_DISABLED;
if (kn->kn_status & KN_SUPPRESSED) {
struct kqueue *kq = knote_get_kq(kn);
if ((kq->kq_state & KQ_PROCESSING) == 0) {
knote_unsuppress(kn);
}
} else if (knote_enqueue(kn)) {
knote_wakeup(kn);
}
}
static void
knote_disable(struct knote *kn)
{
if (kn->kn_status & KN_DISABLED)
return;
kn->kn_status |= KN_DISABLED;
knote_dequeue(kn);
}
static void
knote_suppress(struct knote *kn)
{
struct kqtailq *suppressq;
struct kqueue *kq = knote_get_kq(kn);
kqlock_held(kq);
if (kn->kn_status & KN_SUPPRESSED)
return;
knote_dequeue(kn);
kn->kn_status |= KN_SUPPRESSED;
suppressq = kqueue_get_suppressed_queue(kq, kn);
TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
}
static void
knote_unsuppress(struct knote *kn)
{
struct kqtailq *suppressq;
struct kqueue *kq = knote_get_kq(kn);
kqlock_held(kq);
if ((kn->kn_status & KN_SUPPRESSED) == 0)
return;
kn->kn_status &= ~KN_SUPPRESSED;
suppressq = kqueue_get_suppressed_queue(kq, kn);
TAILQ_REMOVE(suppressq, kn, kn_tqe);
if ((kn->kn_status & KN_ACTIVE) == 0) {
kn->kn_qos_override = kn->kn_req_index;
}
kn->kn_qos_index = kn->kn_qos_override;
if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) {
knote_wakeup(kn);
}
if ((kq->kq_state & KQ_WORKLOOP) && TAILQ_EMPTY(suppressq)) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
if (kqworkloop_is_processing_on_current_thread(kqwl)) {
/* the servicer recomputes QoS when it ends processing */
} else {
kq_req_lock(kqwl);
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RESET_WAKEUP_OVERRIDE, 0);
kq_req_unlock(kqwl);
}
}
}
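/*
 * knote_enqueue - queue an active, enabled knote on its QoS bucket.
 * Returns nonzero when a wakeup may be warranted: the knote was newly
 * queued, or it is stay-active and thus always considered pending.
 */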
static int
knote_enqueue(struct knote *kn)
{
if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 ||
(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)))
return 0;
if ((kn->kn_status & KN_QUEUED) == 0) {
struct kqtailq *queue = knote_get_queue(kn);
struct kqueue *kq = knote_get_kq(kn);
kqlock_held(kq);
TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
kn->kn_status |= KN_QUEUED;
kq->kq_count++;
return 1;
}
return ((kn->kn_status & KN_STAYACTIVE) != 0);
}
static void
knote_dequeue(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
struct kqtailq *queue;
kqlock_held(kq);
if ((kn->kn_status & KN_QUEUED) == 0)
return;
queue = knote_get_queue(kn);
TAILQ_REMOVE(queue, kn, kn_tqe);
kn->kn_status &= ~KN_QUEUED;
kq->kq_count--;
}
void
knote_init(void)
{
knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
8192, "knote zone");
kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile),
8192, "kqueue file zone");
kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq),
8192, "kqueue workq zone");
kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192*sizeof(struct kqworkloop),
8192, "kqueue workloop zone");
kq_lck_grp_attr = lck_grp_attr_alloc_init();
kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
kq_lck_attr = lck_attr_alloc_init();
#if CONFIG_MEMORYSTATUS
memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
#endif
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
const struct filterops *
knote_fops(struct knote *kn)
{
return sysfilt_ops[kn->kn_filtid];
}
static struct knote *
knote_alloc(void)
{
struct knote *kn = ((struct knote *)zalloc(knote_zone));
bzero(kn, sizeof(struct knote));
return kn;
}
static void
knote_free(struct knote *kn)
{
assert(kn->kn_inuse == 0);
assert((kn->kn_status & KN_LOCKED) == 0);
zfree(knote_zone, kn);
}
#if SOCKETS
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
static lck_grp_attr_t *kev_lck_grp_attr;
static lck_attr_t *kev_lck_attr;
static lck_grp_t *kev_lck_grp;
static decl_lck_rw_data(,kev_lck_data);
static lck_rw_t *kev_rwlock = &kev_lck_data;
static int kev_attach(struct socket *so, int proto, struct proc *p);
static int kev_detach(struct socket *so);
static int kev_control(struct socket *so, u_long cmd, caddr_t data,
struct ifnet *ifp, struct proc *p);
static lck_mtx_t * event_getlock(struct socket *, int);
static int event_lock(struct socket *, int, void *);
static int event_unlock(struct socket *, int, void *);
static int event_sofreelastref(struct socket *);
static void kev_delete(struct kern_event_pcb *);
static struct pr_usrreqs event_usrreqs = {
.pru_attach = kev_attach,
.pru_control = kev_control,
.pru_detach = kev_detach,
.pru_soreceive = soreceive,
};
static struct protosw eventsw[] = {
{
.pr_type = SOCK_RAW,
.pr_protocol = SYSPROTO_EVENT,
.pr_flags = PR_ATOMIC,
.pr_usrreqs = &event_usrreqs,
.pr_lock = event_lock,
.pr_unlock = event_unlock,
.pr_getlock = event_getlock,
}
};
__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
struct kevtstat kevtstat;
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
kevt_getstat, "S,kevtstat", "");
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
kevt_pcblist, "S,xkevtpcb", "");
static lck_mtx_t *
event_getlock(struct socket *so, int flags)
{
#pragma unused(flags)
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
if (so->so_pcb != NULL) {
if (so->so_usecount < 0)
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
} else {
panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
so, solockhistory_nr(so));
}
return (&ev_pcb->evp_mtx);
}
static int
event_lock(struct socket *so, int refcount, void *lr)
{
void *lr_saved;
if (lr == NULL)
lr_saved = __builtin_return_address(0);
else
lr_saved = lr;
if (so->so_pcb != NULL) {
lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
} else {
panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
so, lr_saved, solockhistory_nr(so));
}
if (so->so_usecount < 0) {
panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
so, so->so_pcb, lr_saved, so->so_usecount,
solockhistory_nr(so));
}
if (refcount)
so->so_usecount++;
so->lock_lr[so->next_lock_lr] = lr_saved;
so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
}
static int
event_unlock(struct socket *so, int refcount, void *lr)
{
void *lr_saved;
lck_mtx_t *mutex_held;
if (lr == NULL)
lr_saved = __builtin_return_address(0);
else
lr_saved = lr;
if (refcount) {
so->so_usecount--;
}
if (so->so_usecount < 0) {
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
}
if (so->so_pcb == NULL) {
panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
so, so->so_usecount, (void *)lr_saved,
solockhistory_nr(so));
}
mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
if (so->so_usecount == 0) {
VERIFY(so->so_flags & SOF_PCBCLEARING);
event_sofreelastref(so);
} else {
lck_mtx_unlock(mutex_held);
}
return (0);
}
static int
event_sofreelastref(struct socket *so)
{
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
so->so_pcb = NULL;
so->so_rcv.sb_flags &= ~SB_UPCALL;
so->so_snd.sb_flags &= ~SB_UPCALL;
so->so_event = sonullevent;
lck_mtx_unlock(&(ev_pcb->evp_mtx));
LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
lck_rw_lock_exclusive(kev_rwlock);
LIST_REMOVE(ev_pcb, evp_link);
kevtstat.kes_pcbcount--;
kevtstat.kes_gencnt++;
lck_rw_done(kev_rwlock);
kev_delete(ev_pcb);
sofreelastref(so, 1);
return (0);
}
static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
static struct kern_event_head kern_event_head;
static u_int32_t static_event_id = 0;
#define EVPCB_ZONE_MAX 65536
#define EVPCB_ZONE_NAME "kerneventpcb"
static struct zone *ev_pcb_zone;
void
kern_event_init(struct domain *dp)
{
struct protosw *pr;
int i;
VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
VERIFY(dp == systemdomain);
kev_lck_grp_attr = lck_grp_attr_alloc_init();
if (kev_lck_grp_attr == NULL) {
panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
}
kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
kev_lck_grp_attr);
if (kev_lck_grp == NULL) {
panic("%s: lck_grp_alloc_init failed\n", __func__);
}
kev_lck_attr = lck_attr_alloc_init();
if (kev_lck_attr == NULL) {
panic("%s: lck_attr_alloc_init failed\n", __func__);
}
lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
if (kev_rwlock == NULL) {
panic("%s: lck_mtx_alloc_init failed\n", __func__);
}
for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
net_add_proto(pr, dp, 1);
ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
if (ev_pcb_zone == NULL) {
panic("%s: failed allocating ev_pcb_zone", __func__);
}
zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
}
static int
kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
int error = 0;
struct kern_event_pcb *ev_pcb;
error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
if (error != 0)
return (error);
if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
return (ENOBUFS);
}
bzero(ev_pcb, sizeof(struct kern_event_pcb));
lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
ev_pcb->evp_socket = so;
ev_pcb->evp_vendor_code_filter = 0xffffffff;
so->so_pcb = (caddr_t) ev_pcb;
lck_rw_lock_exclusive(kev_rwlock);
LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
kevtstat.kes_pcbcount++;
kevtstat.kes_gencnt++;
lck_rw_done(kev_rwlock);
return (error);
}
static void
kev_delete(struct kern_event_pcb *ev_pcb)
{
VERIFY(ev_pcb != NULL);
lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
zfree(ev_pcb_zone, ev_pcb);
}
static int
kev_detach(struct socket *so)
{
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
if (ev_pcb != NULL) {
soisdisconnected(so);
so->so_flags |= SOF_PCBCLEARING;
}
return (0);
}
errno_t kev_vendor_code_find(
const char *string,
u_int32_t *out_vendor_code)
{
if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
return (EINVAL);
}
return (net_str_id_find_internal(string, out_vendor_code,
NSI_VENDOR_CODE, 1));
}
errno_t
kev_msg_post(struct kev_msg *event_msg)
{
mbuf_tag_id_t min_vendor, max_vendor;
net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
if (event_msg == NULL)
return (EINVAL);
if (event_msg->vendor_code < min_vendor ||
event_msg->vendor_code > max_vendor) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor);
return (EINVAL);
}
return (kev_post_msg(event_msg));
}
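/*
 * kev_post_msg - broadcast a kernel event to every kernel event
 * socket whose vendor/class/subclass filters match, appending a copy
 * of the message to each matching socket's receive buffer.
 */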
int
kev_post_msg(struct kev_msg *event_msg)
{
struct mbuf *m, *m2;
struct kern_event_pcb *ev_pcb;
struct kern_event_msg *ev;
char *tmp;
u_int32_t total_size;
int i;
total_size = KEV_MSG_HEADER_SIZE;
for (i = 0; i < 5; i++) {
if (event_msg->dv[i].data_length == 0)
break;
total_size += event_msg->dv[i].data_length;
}
if (total_size > MLEN) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig);
return (EMSGSIZE);
}
m = m_get(M_WAIT, MT_DATA);
if (m == 0) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
return (ENOMEM);
}
ev = mtod(m, struct kern_event_msg *);
total_size = KEV_MSG_HEADER_SIZE;
tmp = (char *) &ev->event_data[0];
for (i = 0; i < 5; i++) {
if (event_msg->dv[i].data_length == 0)
break;
total_size += event_msg->dv[i].data_length;
bcopy(event_msg->dv[i].data_ptr, tmp,
event_msg->dv[i].data_length);
tmp += event_msg->dv[i].data_length;
}
ev->id = ++static_event_id;
ev->total_size = total_size;
ev->vendor_code = event_msg->vendor_code;
ev->kev_class = event_msg->kev_class;
ev->kev_subclass = event_msg->kev_subclass;
ev->event_code = event_msg->event_code;
m->m_len = total_size;
lck_rw_lock_shared(kev_rwlock);
for (ev_pcb = LIST_FIRST(&kern_event_head);
ev_pcb;
ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
lck_mtx_lock(&ev_pcb->evp_mtx);
if (ev_pcb->evp_socket->so_pcb == NULL) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
if (ev_pcb->evp_class_filter != ev->kev_class) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if ((ev_pcb->evp_subclass_filter !=
KEV_ANY_SUBCLASS) &&
(ev_pcb->evp_subclass_filter !=
ev->kev_subclass)) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
}
}
m2 = m_copym(m, 0, m->m_len, M_WAIT);
if (m2 == 0) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
m_free(m);
lck_mtx_unlock(&ev_pcb->evp_mtx);
lck_rw_done(kev_rwlock);
return (ENOMEM);
}
if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
so_inc_recv_data_stat(ev_pcb->evp_socket,
1, m->m_len, MBUF_TC_BE);
sorwakeup(ev_pcb->evp_socket);
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
} else {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_fullsock);
}
lck_mtx_unlock(&ev_pcb->evp_mtx);
}
m_free(m);
lck_rw_done(kev_rwlock);
return (0);
}
static int
kev_control(struct socket *so,
u_long cmd,
caddr_t data,
__unused struct ifnet *ifp,
__unused struct proc *p)
{
struct kev_request *kev_req = (struct kev_request *) data;
struct kern_event_pcb *ev_pcb;
struct kev_vendor_code *kev_vendor;
u_int32_t *id_value = (u_int32_t *) data;
switch (cmd) {
case SIOCGKEVID:
*id_value = static_event_id;
break;
case SIOCSKEVFILT:
ev_pcb = (struct kern_event_pcb *) so->so_pcb;
ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
ev_pcb->evp_class_filter = kev_req->kev_class;
ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
break;
case SIOCGKEVFILT:
ev_pcb = (struct kern_event_pcb *) so->so_pcb;
kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
kev_req->kev_class = ev_pcb->evp_class_filter;
kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
break;
case SIOCGKEVVENDOR:
kev_vendor = (struct kev_vendor_code *)data;
kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
return (net_str_id_find_internal(kev_vendor->vendor_string,
&kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
default:
return (ENOTSUP);
}
return (0);
}
int
kevt_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
lck_rw_lock_shared(kev_rwlock);
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
goto done;
}
if (req->oldptr == USER_ADDR_NULL) {
req->oldidx = sizeof(struct kevtstat);
goto done;
}
error = SYSCTL_OUT(req, &kevtstat,
MIN(sizeof(struct kevtstat), req->oldlen));
done:
lck_rw_done(kev_rwlock);
return (error);
}
__private_extern__ int
kevt_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
int n, i;
struct xsystmgen xsg;
void *buf = NULL;
size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) +
ROUNDUP64(sizeof (struct xsocket_n)) +
2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
ROUNDUP64(sizeof (struct xsockstat_n));
struct kern_event_pcb *ev_pcb;
buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
if (buf == NULL)
return (ENOMEM);
lck_rw_lock_shared(kev_rwlock);
n = kevtstat.kes_pcbcount;
if (req->oldptr == USER_ADDR_NULL) {
req->oldidx = (n + n/8) * item_size;
goto done;
}
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
goto done;
}
bzero(&xsg, sizeof (xsg));
xsg.xg_len = sizeof (xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
if (error) {
goto done;
}
if (n == 0) {
goto done;
}
for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
i < n && ev_pcb != NULL;
i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
struct xkevtpcb *xk = (struct xkevtpcb *)buf;
struct xsocket_n *xso = (struct xsocket_n *)
ADVANCE64(xk, sizeof (*xk));
struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
ADVANCE64(xso, sizeof (*xso));
struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
ADVANCE64(xsbrcv, sizeof (*xsbrcv));
struct xsockstat_n *xsostats = (struct xsockstat_n *)
ADVANCE64(xsbsnd, sizeof (*xsbsnd));
bzero(buf, item_size);
lck_mtx_lock(&ev_pcb->evp_mtx);
xk->kep_len = sizeof(struct xkevtpcb);
xk->kep_kind = XSO_EVT;
xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
xk->kep_class_filter = ev_pcb->evp_class_filter;
xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
sotoxsocket_n(ev_pcb->evp_socket, xso);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
&ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
&ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
lck_mtx_unlock(&ev_pcb->evp_mtx);
error = SYSCTL_OUT(req, buf, item_size);
}
if (error == 0) {
bzero(&xsg, sizeof (xsg));
xsg.xg_len = sizeof (xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
if (error) {
goto done;
}
}
done:
lck_rw_done(kev_rwlock);
return (error);
}
#endif
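/*
 * fill_kqueueinfo - export kqueue state for proc_info(2): the event
 * count in vst_size, the kevent record size in vst_blksize, and the
 * dynamic kqueue ID (for workloops) in vst_ino.
 */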
int
fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
{
struct vinfo_stat * st;
st = &kinfo->kq_stat;
st->vst_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
st->vst_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
st->vst_blksize = sizeof(struct kevent64_s);
else
st->vst_blksize = sizeof(struct kevent);
st->vst_mode = S_IFIFO;
st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ?
((struct kqworkloop *)kq)->kqwl_dynamicid : 0;
#define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
return (0);
}
static int
fill_kqueue_dyninfo(struct kqueue *kq, struct kqueue_dyninfo *kqdi)
{
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
struct kqrequest *kqr = &kqwl->kqwl_request;
workq_threadreq_param_t trp = {};
int err;
if ((kq->kq_state & KQ_WORKLOOP) == 0) {
return EINVAL;
}
if ((err = fill_kqueueinfo(kq, &kqdi->kqdi_info))) {
return err;
}
kq_req_lock(kqwl);
kqdi->kqdi_servicer = thread_tid(kqr->kqr_thread);
kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
kqdi->kqdi_request_state = kqr->kqr_state;
kqdi->kqdi_async_qos = kqr->kqr_qos_index;
kqdi->kqdi_events_qos = kqr->kqr_override_index;
kqdi->kqdi_sync_waiters = kqr->kqr_dsync_waiters;
kqdi->kqdi_sync_waiter_qos = 0;
trp.trp_value = kqwl->kqwl_params;
if (trp.trp_flags & TRP_PRIORITY)
kqdi->kqdi_pri = trp.trp_pri;
else
kqdi->kqdi_pri = 0;
if (trp.trp_flags & TRP_POLICY)
kqdi->kqdi_pol = trp.trp_pol;
else
kqdi->kqdi_pol = 0;
if (trp.trp_flags & TRP_CPUPERCENT)
kqdi->kqdi_cpupercent = trp.trp_cpupercent;
else
kqdi->kqdi_cpupercent = 0;
kq_req_unlock(kqwl);
return 0;
}
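/*
 * knote_markstayactive - make a knote permanently active, as used by
 * stay-active filters. On workloops this also records the knote's QoS
 * as the stay-active QoS and routes the knote to the stay-active
 * bucket.
 */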
void
knote_markstayactive(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
kq_index_t qos;
kqlock(kq);
kn->kn_status |= KN_STAYACTIVE;
assert(kn->kn_status & KN_ATTACHING);
assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0);
if (kq->kq_state & KQ_WORKQ) {
qos = KQWQ_QOS_MANAGER;
} else if (kq->kq_state & KQ_WORKLOOP) {
struct kqworkloop *kqwl = (struct kqworkloop *)kq;
qos = _pthread_priority_thread_qos(kn->kn_qos);
assert(qos && qos < THREAD_QOS_LAST);
kq_req_lock(kq);
kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos);
kq_req_unlock(kq);
qos = KQWL_BUCKET_STAYACTIVE;
} else {
qos = THREAD_QOS_UNSPECIFIED;
}
kn->kn_req_index = qos;
kn->kn_qos_override = qos;
kn->kn_qos_index = qos;
knote_activate(kn);
kqunlock(kq);
}
void
knote_clearstayactive(struct knote *kn)
{
kqlock(knote_get_kq(kn));
kn->kn_status &= ~KN_STAYACTIVE;
knote_deactivate(kn);
kqunlock(knote_get_kq(kn));
}
static unsigned long
kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
unsigned long buflen, unsigned long nknotes)
{
for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
if (kq == knote_get_kq(kn)) {
if (nknotes < buflen) {
struct kevent_extinfo *info = &buf[nknotes];
struct kevent_internal_s *kevp = &kn->kn_kevent;
kqlock(kq);
info->kqext_kev = (struct kevent_qos_s){
.ident = kevp->ident,
.filter = kevp->filter,
.flags = kevp->flags,
.fflags = kevp->fflags,
.data = (int64_t)kevp->data,
.udata = kevp->udata,
.ext[0] = kevp->ext[0],
.ext[1] = kevp->ext[1],
.ext[2] = kevp->ext[2],
.ext[3] = kevp->ext[3],
.qos = kn->kn_req_index,
};
info->kqext_sdata = kn->kn_sdata;
info->kqext_status = kn->kn_status;
info->kqext_sfflags = kn->kn_sfflags;
kqunlock(kq);
}
nknotes++;
}
}
return nknotes;
}
int
kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
int32_t *nkqueues_out)
{
proc_t p = (proc_t)proc;
struct filedesc *fdp = p->p_fd;
unsigned int nkqueues = 0;
unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
size_t buflen, bufsize;
kqueue_id_t *kq_ids = NULL;
int err = 0;
assert(p != NULL);
if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
err = EINVAL;
goto out;
}
buflen = min(ubuflen, PROC_PIDDYNKQUEUES_MAX);
if (ubuflen != 0) {
if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
err = ERANGE;
goto out;
}
kq_ids = kalloc(bufsize);
assert(kq_ids != NULL);
}
kqhash_lock(p);
if (fdp->fd_kqhashmask > 0) {
for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
struct kqworkloop *kqwl;
SLIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
if (nkqueues < buflen) {
kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
}
nkqueues++;
}
}
}
kqhash_unlock(p);
if (kq_ids) {
size_t copysize;
if (os_mul_overflow(sizeof(kqueue_id_t), min(ubuflen, nkqueues), &copysize)) {
err = ERANGE;
goto out;
}
assert(ubufsize >= copysize);
err = copyout(kq_ids, ubuf, copysize);
}
out:
if (kq_ids) {
kfree(kq_ids, bufsize);
}
if (!err) {
*nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
}
return err;
}
int
kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
uint32_t ubufsize, int32_t *size_out)
{
proc_t p = (proc_t)proc;
struct kqueue *kq;
int err = 0;
struct kqueue_dyninfo kqdi = { };
assert(p != NULL);
if (ubufsize < sizeof(struct kqueue_info)) {
return ENOBUFS;
}
kqhash_lock(p);
kq = kqueue_hash_lookup(p, kq_id);
if (!kq) {
kqhash_unlock(p);
return ESRCH;
}
kqueue_retain(kq);
kqhash_unlock(p);
if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
ubufsize = sizeof(struct kqueue_dyninfo);
err = fill_kqueue_dyninfo(kq, &kqdi);
} else {
ubufsize = sizeof(struct kqueue_info);
err = fill_kqueueinfo(kq, &kqdi.kqdi_info);
}
if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
*size_out = ubufsize;
}
kqueue_release_last(p, kq);
return err;
}
int
kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
uint32_t ubufsize, int32_t *nknotes_out)
{
proc_t p = (proc_t)proc;
struct kqueue *kq;
int err;
assert(p != NULL);
kqhash_lock(p);
kq = kqueue_hash_lookup(p, kq_id);
if (!kq) {
kqhash_unlock(p);
return ESRCH;
}
kqueue_retain(kq);
kqhash_unlock(p);
err = pid_kqueue_extinfo(p, kq, ubuf, ubufsize, nknotes_out);
kqueue_release_last(p, kq);
return err;
}
int
pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
uint32_t bufsize, int32_t *retval)
{
struct knote *kn;
int i;
int err = 0;
struct filedesc *fdp = p->p_fd;
unsigned long nknotes = 0;
unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
struct kevent_extinfo *kqext = NULL;
buflen = min(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
kqext = kalloc(buflen * sizeof(struct kevent_extinfo));
if (kqext == NULL) {
err = ENOMEM;
goto out;
}
bzero(kqext, buflen * sizeof(struct kevent_extinfo));
proc_fdlock(p);
for (i = 0; i < fdp->fd_knlistsize; i++) {
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
}
proc_fdunlock(p);
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
kqhash_lock(p);
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
kqhash_unlock(p);
}
}
assert(bufsize >= sizeof(struct kevent_extinfo) * min(buflen, nknotes));
err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * min(buflen, nknotes));
out:
if (kqext) {
kfree(kqext, buflen * sizeof(struct kevent_extinfo));
kqext = NULL;
}
if (!err) {
*retval = min(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
}
return err;
}
static unsigned int
klist_copy_udata(struct klist *list, uint64_t *buf,
unsigned int buflen, unsigned int nknotes)
{
struct kevent_internal_s *kev;
struct knote *kn;
SLIST_FOREACH(kn, list, kn_link) {
if (nknotes < buflen) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
kev = &(kn->kn_kevent);
buf[nknotes] = kev->udata;
kqunlock(kq);
}
nknotes++;
}
return nknotes;
}
static unsigned int
kqlist_copy_dynamicids(__assert_only proc_t p, struct kqlist *list,
uint64_t *buf, unsigned int buflen, unsigned int nids)
{
kqhash_lock_held(p);
struct kqworkloop *kqwl;
SLIST_FOREACH(kqwl, list, kqwl_hashlink) {
if (nids < buflen) {
buf[nids] = kqwl->kqwl_dynamicid;
}
nids++;
}
return nids;
}
int
kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize)
{
proc_t p = (proc_t)proc;
struct filedesc *fdp = p->p_fd;
unsigned int nuptrs = 0;
unsigned long buflen = bufsize / sizeof(uint64_t);
if (buflen > 0) {
assert(buf != NULL);
}
proc_fdlock(p);
for (int i = 0; i < fdp->fd_knlistsize; i++) {
nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
}
knhash_lock(p);
proc_fdunlock(p);
if (fdp->fd_knhashmask != 0) {
for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
}
}
knhash_unlock(p);
kqhash_lock(p);
if (fdp->fd_kqhashmask != 0) {
for (int i = 0; i < (int)fdp->fd_kqhashmask + 1; i++) {
nuptrs = kqlist_copy_dynamicids(p, &fdp->fd_kqhash[i], buf, buflen,
nuptrs);
}
}
kqhash_unlock(p);
return (int)nuptrs;
}
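/*
 * Publish the return-to-kernel AST flags at the thread's rettokern
 * TSD address so userspace can observe pending workloop events;
 * 32-bit processes get the 32-bit flag layout.
 */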
static void
kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
{
uint64_t ast_addr;
bool proc_is_64bit = !!(p->p_flag & P_LP64);
size_t user_addr_size = proc_is_64bit ? 8 : 4;
uint32_t ast_flags32 = 0;
uint64_t ast_flags64 = 0;
struct uthread *ut = get_bsdthread_info(thread);
if (ut->uu_kqr_bound != NULL) {
ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
}
if (ast_flags64 == 0) {
return;
}
if (!(p->p_flag & P_LP64)) {
ast_flags32 = (uint32_t)ast_flags64;
assert(ast_flags64 < 0x100000000ull);
}
ast_addr = thread_rettokern_addr(thread);
if (ast_addr == 0) {
return;
}
if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
(user_addr_t)ast_addr,
user_addr_size) != 0) {
printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
"ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr);
}
}
void
kevent_ast(thread_t thread, uint16_t bits)
{
proc_t p = current_proc();
if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
}
if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
kevent_set_return_to_kernel_user_tsd(p, thread);
}
}
#if DEVELOPMENT || DEBUG
#define KEVENT_SYSCTL_BOUND_ID 1
static int
kevent_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
uintptr_t type = (uintptr_t)arg1;
uint64_t bound_id = 0;
if (type != KEVENT_SYSCTL_BOUND_ID) {
return EINVAL;
}
if (req->newptr) {
return EINVAL;
}
struct uthread *ut = get_bsdthread_info(current_thread());
if (!ut) {
return EFAULT;
}
struct kqrequest *kqr = ut->uu_kqr_bound;
if (kqr) {
if (kqr->kqr_state & KQR_WORKLOOP) {
bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
} else {
bound_id = -1;
}
}
return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
}
SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
"kevent information");
SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
(void *)KEVENT_SYSCTL_BOUND_ID,
sizeof(kqueue_id_t), kevent_sysctl, "Q",
"get the ID of the bound kqueue");
#endif